/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/vm.h>
#include <vm/seg_vn.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/kpm.h>
#include <vm/seg_kpm.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/filio.h>
#include <sys/sid.h>
#include "fs/fs_subr.h"
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/dnlc.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
#include <sys/cred.h>
#include <sys/attr.h>

/*
 * Programming rules.
 *
 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory.  The example below illustrates the following Big Rules:
 *
 * (1) A check must be made in each zfs thread for a mounted file system.
 *	This is done, avoiding races with unmount, using ZFS_ENTER(zfsvfs).
 *	A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
 *	can return EIO from the calling function.
 *
 * (2) VN_RELE() should always be the last thing except for zil_commit()
 *	(if necessary) and ZFS_EXIT().  This is for 3 reasons:
 *	First, if it's the last reference, the vnode/znode
 *	can be freed, so the zp may point to freed memory.  Second, the last
 *	reference will call zfs_zinactive(), which may induce a lot of work --
 *	pushing cached pages (which acquires range locks) and syncing out
 *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
 *	which could deadlock the system if you were already holding one.
 *	If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
 *
 * (3) All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 * (4) Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
 *	This is critical because we don't want to block while holding locks.
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing to
 *	use a non-blocking assign can deadlock the system.  The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again.
 *
 * (5) If the operation succeeded, generate the intent log entry for it
 *	before dropping locks.  This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 * (6) At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 * (7) After dropping all locks, invoke zil_commit(zilog, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zfsvfs);		// exit if unmounted
 * top:
 *	zfs_dirent_lock(&dl, ...)
// lock directory entry (may VN_HOLD()) 144 * rw_enter(...); // grab any other locks you need 145 * tx = dmu_tx_create(...); // get DMU tx 146 * dmu_tx_hold_*(); // hold each object you might modify 147 * error = dmu_tx_assign(tx, TXG_NOWAIT); // try to assign 148 * if (error) { 149 * rw_exit(...); // drop locks 150 * zfs_dirent_unlock(dl); // unlock directory entry 151 * VN_RELE(...); // release held vnodes 152 * if (error == ERESTART) { 153 * dmu_tx_wait(tx); 154 * dmu_tx_abort(tx); 155 * goto top; 156 * } 157 * dmu_tx_abort(tx); // abort DMU tx 158 * ZFS_EXIT(zfsvfs); // finished in zfs 159 * return (error); // really out of space 160 * } 161 * error = do_real_work(); // do whatever this VOP does 162 * if (error == 0) 163 * zfs_log_*(...); // on success, make ZIL entry 164 * dmu_tx_commit(tx); // commit DMU tx -- error or not 165 * rw_exit(...); // drop locks 166 * zfs_dirent_unlock(dl); // unlock directory entry 167 * VN_RELE(...); // release held vnodes 168 * zil_commit(zilog, foid); // synchronous when necessary 169 * ZFS_EXIT(zfsvfs); // finished in zfs 170 * return (error); // done, report error 171 */ 172 173 /* ARGSUSED */ 174 static int 175 zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct) 176 { 177 znode_t *zp = VTOZ(*vpp); 178 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 179 180 ZFS_ENTER(zfsvfs); 181 ZFS_VERIFY_ZP(zp); 182 183 if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) && 184 ((flag & FAPPEND) == 0)) { 185 ZFS_EXIT(zfsvfs); 186 return (EPERM); 187 } 188 189 if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan && 190 ZTOV(zp)->v_type == VREG && 191 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) { 192 if (fs_vscan(*vpp, cr, 0) != 0) { 193 ZFS_EXIT(zfsvfs); 194 return (EACCES); 195 } 196 } 197 198 /* Keep a count of the synchronous opens in the znode */ 199 if (flag & (FSYNC | FDSYNC)) 200 atomic_inc_32(&zp->z_sync_cnt); 201 202 ZFS_EXIT(zfsvfs); 203 return (0); 204 } 205 206 /* ARGSUSED */ 207 static int 208 zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr, 209 caller_context_t *ct) 210 { 211 znode_t *zp = VTOZ(vp); 212 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 213 214 /* 215 * Clean up any locks held by this process on the vp. 216 */ 217 cleanlocks(vp, ddi_get_pid(), 0); 218 cleanshares(vp, ddi_get_pid()); 219 220 ZFS_ENTER(zfsvfs); 221 ZFS_VERIFY_ZP(zp); 222 223 /* Decrement the synchronous opens in the znode */ 224 if ((flag & (FSYNC | FDSYNC)) && (count == 1)) 225 atomic_dec_32(&zp->z_sync_cnt); 226 227 if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan && 228 ZTOV(zp)->v_type == VREG && 229 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) 230 VERIFY(fs_vscan(vp, cr, 1) == 0); 231 232 ZFS_EXIT(zfsvfs); 233 return (0); 234 } 235 236 /* 237 * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and 238 * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter. 239 */ 240 static int 241 zfs_holey(vnode_t *vp, int cmd, offset_t *off) 242 { 243 znode_t *zp = VTOZ(vp); 244 uint64_t noff = (uint64_t)*off; /* new offset */ 245 uint64_t file_sz; 246 int error; 247 boolean_t hole; 248 249 file_sz = zp->z_size; 250 if (noff >= file_sz) { 251 return (ENXIO); 252 } 253 254 if (cmd == _FIO_SEEK_HOLE) 255 hole = B_TRUE; 256 else 257 hole = B_FALSE; 258 259 error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff); 260 261 /* end of file? */ 262 if ((error == ESRCH) || (noff > file_sz)) { 263 /* 264 * Handle the virtual hole at the end of file. 
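 * Every file has an implicit hole at its end, so a SEEK_HOLE request
 * that finds no on-disk hole is satisfied with the file size, while a
 * SEEK_DATA request past the last byte of data has nothing left to
 * find and fails with ENXIO.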
265 */ 266 if (hole) { 267 *off = file_sz; 268 return (0); 269 } 270 return (ENXIO); 271 } 272 273 if (noff < *off) 274 return (error); 275 *off = noff; 276 return (error); 277 } 278 279 /* ARGSUSED */ 280 static int 281 zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred, 282 int *rvalp, caller_context_t *ct) 283 { 284 offset_t off; 285 int error; 286 zfsvfs_t *zfsvfs; 287 znode_t *zp; 288 289 switch (com) { 290 case _FIOFFS: 291 return (zfs_sync(vp->v_vfsp, 0, cred)); 292 293 /* 294 * The following two ioctls are used by bfu. Faking out, 295 * necessary to avoid bfu errors. 296 */ 297 case _FIOGDIO: 298 case _FIOSDIO: 299 return (0); 300 301 case _FIO_SEEK_DATA: 302 case _FIO_SEEK_HOLE: 303 if (ddi_copyin((void *)data, &off, sizeof (off), flag)) 304 return (EFAULT); 305 306 zp = VTOZ(vp); 307 zfsvfs = zp->z_zfsvfs; 308 ZFS_ENTER(zfsvfs); 309 ZFS_VERIFY_ZP(zp); 310 311 /* offset parameter is in/out */ 312 error = zfs_holey(vp, com, &off); 313 ZFS_EXIT(zfsvfs); 314 if (error) 315 return (error); 316 if (ddi_copyout(&off, (void *)data, sizeof (off), flag)) 317 return (EFAULT); 318 return (0); 319 } 320 return (ENOTTY); 321 } 322 323 /* 324 * Utility functions to map and unmap a single physical page. These 325 * are used to manage the mappable copies of ZFS file data, and therefore 326 * do not update ref/mod bits. 327 */ 328 caddr_t 329 zfs_map_page(page_t *pp, enum seg_rw rw) 330 { 331 if (kpm_enable) 332 return (hat_kpm_mapin(pp, 0)); 333 ASSERT(rw == S_READ || rw == S_WRITE); 334 return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0), 335 (caddr_t)-1)); 336 } 337 338 void 339 zfs_unmap_page(page_t *pp, caddr_t addr) 340 { 341 if (kpm_enable) { 342 hat_kpm_mapout(pp, 0, addr); 343 } else { 344 ppmapout(addr); 345 } 346 } 347 348 /* 349 * When a file is memory mapped, we must keep the IO data synchronized 350 * between the DMU cache and the memory mapped pages. What this means: 351 * 352 * On Write: If we find a memory mapped page, we write to *both* 353 * the page and the dmu buffer. 354 */ 355 static void 356 update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid) 357 { 358 int64_t off; 359 360 off = start & PAGEOFFSET; 361 for (start &= PAGEMASK; len > 0; start += PAGESIZE) { 362 page_t *pp; 363 uint64_t nbytes = MIN(PAGESIZE - off, len); 364 365 if (pp = page_lookup(vp, start, SE_SHARED)) { 366 caddr_t va; 367 368 va = zfs_map_page(pp, S_WRITE); 369 (void) dmu_read(os, oid, start+off, nbytes, va+off, 370 DMU_READ_PREFETCH); 371 zfs_unmap_page(pp, va); 372 page_unlock(pp); 373 } 374 len -= nbytes; 375 off = 0; 376 } 377 } 378 379 /* 380 * When a file is memory mapped, we must keep the IO data synchronized 381 * between the DMU cache and the memory mapped pages. What this means: 382 * 383 * On Read: We "read" preferentially from memory mapped pages, 384 * else we default from the dmu buffer. 385 * 386 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when 387 * the file is memory mapped. 
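 * Pages that are not currently resident in the page cache are read
 * directly from the DMU via dmu_read_uio(), so a single request may be
 * satisfied partly from mapped pages and partly from the ARC.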
388 */ 389 static int 390 mappedread(vnode_t *vp, int nbytes, uio_t *uio) 391 { 392 znode_t *zp = VTOZ(vp); 393 objset_t *os = zp->z_zfsvfs->z_os; 394 int64_t start, off; 395 int len = nbytes; 396 int error = 0; 397 398 start = uio->uio_loffset; 399 off = start & PAGEOFFSET; 400 for (start &= PAGEMASK; len > 0; start += PAGESIZE) { 401 page_t *pp; 402 uint64_t bytes = MIN(PAGESIZE - off, len); 403 404 if (pp = page_lookup(vp, start, SE_SHARED)) { 405 caddr_t va; 406 407 va = zfs_map_page(pp, S_READ); 408 error = uiomove(va + off, bytes, UIO_READ, uio); 409 zfs_unmap_page(pp, va); 410 page_unlock(pp); 411 } else { 412 error = dmu_read_uio(os, zp->z_id, uio, bytes); 413 } 414 len -= bytes; 415 off = 0; 416 if (error) 417 break; 418 } 419 return (error); 420 } 421 422 offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */ 423 424 /* 425 * Read bytes from specified file into supplied buffer. 426 * 427 * IN: vp - vnode of file to be read from. 428 * uio - structure supplying read location, range info, 429 * and return buffer. 430 * ioflag - SYNC flags; used to provide FRSYNC semantics. 431 * cr - credentials of caller. 432 * ct - caller context 433 * 434 * OUT: uio - updated offset and range, buffer filled. 435 * 436 * RETURN: 0 if success 437 * error code if failure 438 * 439 * Side Effects: 440 * vp - atime updated if byte count > 0 441 */ 442 /* ARGSUSED */ 443 static int 444 zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) 445 { 446 znode_t *zp = VTOZ(vp); 447 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 448 objset_t *os; 449 ssize_t n, nbytes; 450 int error; 451 rl_t *rl; 452 xuio_t *xuio = NULL; 453 454 ZFS_ENTER(zfsvfs); 455 ZFS_VERIFY_ZP(zp); 456 os = zfsvfs->z_os; 457 458 if (zp->z_pflags & ZFS_AV_QUARANTINED) { 459 ZFS_EXIT(zfsvfs); 460 return (EACCES); 461 } 462 463 /* 464 * Validate file offset 465 */ 466 if (uio->uio_loffset < (offset_t)0) { 467 ZFS_EXIT(zfsvfs); 468 return (EINVAL); 469 } 470 471 /* 472 * Fasttrack empty reads 473 */ 474 if (uio->uio_resid == 0) { 475 ZFS_EXIT(zfsvfs); 476 return (0); 477 } 478 479 /* 480 * Check for mandatory locks 481 */ 482 if (MANDMODE(zp->z_mode)) { 483 if (error = chklock(vp, FREAD, 484 uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) { 485 ZFS_EXIT(zfsvfs); 486 return (error); 487 } 488 } 489 490 /* 491 * If we're in FRSYNC mode, sync out this znode before reading it. 492 */ 493 if (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 494 zil_commit(zfsvfs->z_log, zp->z_id); 495 496 /* 497 * Lock the range against changes. 498 */ 499 rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER); 500 501 /* 502 * If we are reading past end-of-file we can skip 503 * to the end; but we might still need to set atime. 504 */ 505 if (uio->uio_loffset >= zp->z_size) { 506 error = 0; 507 goto out; 508 } 509 510 ASSERT(uio->uio_loffset < zp->z_size); 511 n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset); 512 513 if ((uio->uio_extflg == UIO_XUIO) && 514 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) { 515 int nblk; 516 int blksz = zp->z_blksz; 517 uint64_t offset = uio->uio_loffset; 518 519 xuio = (xuio_t *)uio; 520 if ((ISP2(blksz))) { 521 nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset, 522 blksz)) / blksz; 523 } else { 524 ASSERT(offset + n <= blksz); 525 nblk = 1; 526 } 527 (void) dmu_xuio_init(xuio, nblk); 528 529 if (vn_has_cached_data(vp)) { 530 /* 531 * For simplicity, we always allocate a full buffer 532 * even if we only expect to read a portion of a block. 
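 * Each borrowed buffer is attached to the xuio with dmu_xuio_add() so
 * that the caller of this zero-copy read can take ownership of the
 * data without an additional copy once the read loop below completes.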
533 */ 534 while (--nblk >= 0) { 535 (void) dmu_xuio_add(xuio, 536 dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), 537 blksz), 0, blksz); 538 } 539 } 540 } 541 542 while (n > 0) { 543 nbytes = MIN(n, zfs_read_chunk_size - 544 P2PHASE(uio->uio_loffset, zfs_read_chunk_size)); 545 546 if (vn_has_cached_data(vp)) 547 error = mappedread(vp, nbytes, uio); 548 else 549 error = dmu_read_uio(os, zp->z_id, uio, nbytes); 550 if (error) { 551 /* convert checksum errors into IO errors */ 552 if (error == ECKSUM) 553 error = EIO; 554 break; 555 } 556 557 n -= nbytes; 558 } 559 out: 560 zfs_range_unlock(rl); 561 562 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 563 ZFS_EXIT(zfsvfs); 564 return (error); 565 } 566 567 /* 568 * Write the bytes to a file. 569 * 570 * IN: vp - vnode of file to be written to. 571 * uio - structure supplying write location, range info, 572 * and data buffer. 573 * ioflag - FAPPEND flag set if in append mode. 574 * cr - credentials of caller. 575 * ct - caller context (NFS/CIFS fem monitor only) 576 * 577 * OUT: uio - updated offset and range. 578 * 579 * RETURN: 0 if success 580 * error code if failure 581 * 582 * Timestamps: 583 * vp - ctime|mtime updated if byte count > 0 584 */ 585 586 /* ARGSUSED */ 587 static int 588 zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) 589 { 590 znode_t *zp = VTOZ(vp); 591 rlim64_t limit = uio->uio_llimit; 592 ssize_t start_resid = uio->uio_resid; 593 ssize_t tx_bytes; 594 uint64_t end_size; 595 dmu_tx_t *tx; 596 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 597 zilog_t *zilog; 598 offset_t woff; 599 ssize_t n, nbytes; 600 rl_t *rl; 601 int max_blksz = zfsvfs->z_max_blksz; 602 int error; 603 arc_buf_t *abuf; 604 iovec_t *aiov; 605 xuio_t *xuio = NULL; 606 int i_iov = 0; 607 int iovcnt = uio->uio_iovcnt; 608 iovec_t *iovp = uio->uio_iov; 609 int write_eof; 610 int count = 0; 611 sa_bulk_attr_t bulk[4]; 612 uint64_t mtime[2], ctime[2]; 613 614 /* 615 * Fasttrack empty write 616 */ 617 n = start_resid; 618 if (n == 0) 619 return (0); 620 621 if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T) 622 limit = MAXOFFSET_T; 623 624 ZFS_ENTER(zfsvfs); 625 ZFS_VERIFY_ZP(zp); 626 627 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); 628 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); 629 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL, 630 &zp->z_size, 8); 631 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, 632 &zp->z_pflags, 8); 633 634 /* 635 * If immutable or not appending then return EPERM 636 */ 637 if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) || 638 ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) && 639 (uio->uio_loffset < zp->z_size))) { 640 ZFS_EXIT(zfsvfs); 641 return (EPERM); 642 } 643 644 zilog = zfsvfs->z_log; 645 646 /* 647 * Validate file offset 648 */ 649 woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset; 650 if (woff < 0) { 651 ZFS_EXIT(zfsvfs); 652 return (EINVAL); 653 } 654 655 /* 656 * Check for mandatory locks before calling zfs_range_lock() 657 * in order to prevent a deadlock with locks set via fcntl(). 658 */ 659 if (MANDMODE((mode_t)zp->z_mode) && 660 (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) { 661 ZFS_EXIT(zfsvfs); 662 return (error); 663 } 664 665 /* 666 * Pre-fault the pages to ensure slow (eg NFS) pages 667 * don't hold up txg. 668 * Skip this if uio contains loaned arc_buf. 
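 * Touching the user pages here forces any page faults (for example
 * against a slow NFS-backed mapping) to be taken before a transaction
 * is assigned, so a stalled fault cannot hold up the open txg.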
669 */ 670 if ((uio->uio_extflg == UIO_XUIO) && 671 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) 672 xuio = (xuio_t *)uio; 673 else 674 uio_prefaultpages(MIN(n, max_blksz), uio); 675 676 /* 677 * If in append mode, set the io offset pointer to eof. 678 */ 679 if (ioflag & FAPPEND) { 680 /* 681 * Obtain an appending range lock to guarantee file append 682 * semantics. We reset the write offset once we have the lock. 683 */ 684 rl = zfs_range_lock(zp, 0, n, RL_APPEND); 685 woff = rl->r_off; 686 if (rl->r_len == UINT64_MAX) { 687 /* 688 * We overlocked the file because this write will cause 689 * the file block size to increase. 690 * Note that zp_size cannot change with this lock held. 691 */ 692 woff = zp->z_size; 693 } 694 uio->uio_loffset = woff; 695 } else { 696 /* 697 * Note that if the file block size will change as a result of 698 * this write, then this range lock will lock the entire file 699 * so that we can re-write the block safely. 700 */ 701 rl = zfs_range_lock(zp, woff, n, RL_WRITER); 702 } 703 704 if (woff >= limit) { 705 zfs_range_unlock(rl); 706 ZFS_EXIT(zfsvfs); 707 return (EFBIG); 708 } 709 710 if ((woff + n) > limit || woff > (limit - n)) 711 n = limit - woff; 712 713 /* Will this write extend the file length? */ 714 write_eof = (woff + n > zp->z_size); 715 716 end_size = MAX(zp->z_size, woff + n); 717 718 /* 719 * Write the file in reasonable size chunks. Each chunk is written 720 * in a separate transaction; this keeps the intent log records small 721 * and allows us to do more fine-grained space accounting. 722 */ 723 while (n > 0) { 724 abuf = NULL; 725 woff = uio->uio_loffset; 726 again: 727 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) || 728 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) { 729 if (abuf != NULL) 730 dmu_return_arcbuf(abuf); 731 error = EDQUOT; 732 break; 733 } 734 735 if (xuio && abuf == NULL) { 736 ASSERT(i_iov < iovcnt); 737 aiov = &iovp[i_iov]; 738 abuf = dmu_xuio_arcbuf(xuio, i_iov); 739 dmu_xuio_clear(xuio, i_iov); 740 DTRACE_PROBE3(zfs_cp_write, int, i_iov, 741 iovec_t *, aiov, arc_buf_t *, abuf); 742 ASSERT((aiov->iov_base == abuf->b_data) || 743 ((char *)aiov->iov_base - (char *)abuf->b_data + 744 aiov->iov_len == arc_buf_size(abuf))); 745 i_iov++; 746 } else if (abuf == NULL && n >= max_blksz && 747 woff >= zp->z_size && 748 P2PHASE(woff, max_blksz) == 0 && 749 zp->z_blksz == max_blksz) { 750 /* 751 * This write covers a full block. "Borrow" a buffer 752 * from the dmu so that we can fill it before we enter 753 * a transaction. This avoids the possibility of 754 * holding up the transaction if the data copy hangs 755 * up on a pagefault (e.g., from an NFS server mapping). 756 */ 757 size_t cbytes; 758 759 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), 760 max_blksz); 761 ASSERT(abuf != NULL); 762 ASSERT(arc_buf_size(abuf) == max_blksz); 763 if (error = uiocopy(abuf->b_data, max_blksz, 764 UIO_WRITE, uio, &cbytes)) { 765 dmu_return_arcbuf(abuf); 766 break; 767 } 768 ASSERT(cbytes == max_blksz); 769 } 770 771 /* 772 * Start a transaction. 
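	 * Hold the znode's SA attributes and the byte range being
	 * written, then assign with TXG_NOWAIT per the rules at the top
	 * of this file; on ERESTART, call dmu_tx_wait() and retry from
	 * the "again" label.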
		 */
		tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_NOWAIT);
		if (error) {
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto again;
			}
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * If zfs_range_lock() over-locked we grow the blocksize
		 * and then reduce the lock range.  This will only happen
		 * on the first iteration since zfs_range_reduce() will
		 * shrink down r_len to the appropriate size.
		 */
		if (rl->r_len == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_range_reduce(rl, woff, n);
		}

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		if (abuf == NULL) {
			tx_bytes = uio->uio_resid;
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			tx_bytes -= uio->uio_resid;
		} else {
			tx_bytes = nbytes;
			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf().  Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes < max_blksz && (!write_eof ||
			    aiov->iov_base != abuf->b_data)) {
				ASSERT(xuio);
				dmu_write(zfsvfs->z_os, zp->z_id, woff,
				    aiov->iov_len, aiov->iov_base, tx);
				dmu_return_arcbuf(abuf);
				xuio_stat_wbuf_copied();
			} else {
				ASSERT(xuio || tx_bytes == max_blksz);
				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
				    woff, abuf, tx);
			}
			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);
		}
		if (tx_bytes && vn_has_cached_data(vp)) {
			update_pages(vp, woff,
			    tx_bytes, zfsvfs->z_os, zp->z_id);
		}

		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
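		 * The mode update shares the transaction with the data
		 * write, so the on-disk setid bits and file contents
		 * always change together.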
872 */ 873 mutex_enter(&zp->z_acl_lock); 874 if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | 875 (S_IXUSR >> 6))) != 0 && 876 (zp->z_mode & (S_ISUID | S_ISGID)) != 0 && 877 secpolicy_vnode_setid_retain(cr, 878 (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) { 879 uint64_t newmode; 880 zp->z_mode &= ~(S_ISUID | S_ISGID); 881 newmode = zp->z_mode; 882 (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), 883 (void *)&newmode, sizeof (uint64_t), tx); 884 } 885 mutex_exit(&zp->z_acl_lock); 886 887 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, 888 B_TRUE); 889 890 /* 891 * Update the file size (zp_size) if it has changed; 892 * account for possible concurrent updates. 893 */ 894 while ((end_size = zp->z_size) < uio->uio_loffset) { 895 (void) atomic_cas_64(&zp->z_size, end_size, 896 uio->uio_loffset); 897 ASSERT(error == 0); 898 } 899 /* 900 * If we are replaying and eof is non zero then force 901 * the file size to the specified eof. Note, there's no 902 * concurrency during replay. 903 */ 904 if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0) 905 zp->z_size = zfsvfs->z_replay_eof; 906 907 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx); 908 909 zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag); 910 dmu_tx_commit(tx); 911 912 if (error != 0) 913 break; 914 ASSERT(tx_bytes == nbytes); 915 n -= nbytes; 916 917 if (!xuio && n > 0) 918 uio_prefaultpages(MIN(n, max_blksz), uio); 919 } 920 921 zfs_range_unlock(rl); 922 923 /* 924 * If we're in replay mode, or we made no progress, return error. 925 * Otherwise, it's at least a partial write, so it's successful. 926 */ 927 if (zfsvfs->z_replay || uio->uio_resid == start_resid) { 928 ZFS_EXIT(zfsvfs); 929 return (error); 930 } 931 932 if (ioflag & (FSYNC | FDSYNC) || 933 zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 934 zil_commit(zilog, zp->z_id); 935 936 ZFS_EXIT(zfsvfs); 937 return (0); 938 } 939 940 void 941 zfs_get_done(zgd_t *zgd, int error) 942 { 943 znode_t *zp = zgd->zgd_private; 944 objset_t *os = zp->z_zfsvfs->z_os; 945 946 if (zgd->zgd_db) 947 dmu_buf_rele(zgd->zgd_db, zgd); 948 949 zfs_range_unlock(zgd->zgd_rl); 950 951 /* 952 * Release the vnode asynchronously as we currently have the 953 * txg stopped from syncing. 954 */ 955 VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os))); 956 957 if (error == 0 && zgd->zgd_bp) 958 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp); 959 960 kmem_free(zgd, sizeof (zgd_t)); 961 } 962 963 #ifdef DEBUG 964 static int zil_fault_io = 0; 965 #endif 966 967 /* 968 * Get data to generate a TX_WRITE intent log record. 969 */ 970 int 971 zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) 972 { 973 zfsvfs_t *zfsvfs = arg; 974 objset_t *os = zfsvfs->z_os; 975 znode_t *zp; 976 uint64_t object = lr->lr_foid; 977 uint64_t offset = lr->lr_offset; 978 uint64_t size = lr->lr_length; 979 blkptr_t *bp = &lr->lr_blkptr; 980 dmu_buf_t *db; 981 zgd_t *zgd; 982 int error = 0; 983 984 ASSERT(zio != NULL); 985 ASSERT(size != 0); 986 987 /* 988 * Nothing to do if the file has been removed 989 */ 990 if (zfs_zget(zfsvfs, object, &zp) != 0) 991 return (ENOENT); 992 if (zp->z_unlinked) { 993 /* 994 * Release the vnode asynchronously as we currently have the 995 * txg stopped from syncing. 
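 * A synchronous VN_RELE() of the last reference would call
 * zfs_zinactive(), which may need to assign a new transaction and
 * could then deadlock against the txg we are holding up here.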
996 */ 997 VN_RELE_ASYNC(ZTOV(zp), 998 dsl_pool_vnrele_taskq(dmu_objset_pool(os))); 999 return (ENOENT); 1000 } 1001 1002 zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP); 1003 zgd->zgd_zilog = zfsvfs->z_log; 1004 zgd->zgd_private = zp; 1005 1006 /* 1007 * Write records come in two flavors: immediate and indirect. 1008 * For small writes it's cheaper to store the data with the 1009 * log record (immediate); for large writes it's cheaper to 1010 * sync the data and get a pointer to it (indirect) so that 1011 * we don't have to write the data twice. 1012 */ 1013 if (buf != NULL) { /* immediate write */ 1014 zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER); 1015 /* test for truncation needs to be done while range locked */ 1016 if (offset >= zp->z_size) { 1017 error = ENOENT; 1018 } else { 1019 error = dmu_read(os, object, offset, size, buf, 1020 DMU_READ_NO_PREFETCH); 1021 } 1022 ASSERT(error == 0 || error == ENOENT); 1023 } else { /* indirect write */ 1024 /* 1025 * Have to lock the whole block to ensure when it's 1026 * written out and it's checksum is being calculated 1027 * that no one can change the data. We need to re-check 1028 * blocksize after we get the lock in case it's changed! 1029 */ 1030 for (;;) { 1031 uint64_t blkoff; 1032 size = zp->z_blksz; 1033 blkoff = ISP2(size) ? P2PHASE(offset, size) : offset; 1034 offset -= blkoff; 1035 zgd->zgd_rl = zfs_range_lock(zp, offset, size, 1036 RL_READER); 1037 if (zp->z_blksz == size) 1038 break; 1039 offset += blkoff; 1040 zfs_range_unlock(zgd->zgd_rl); 1041 } 1042 /* test for truncation needs to be done while range locked */ 1043 if (lr->lr_offset >= zp->z_size) 1044 error = ENOENT; 1045 #ifdef DEBUG 1046 if (zil_fault_io) { 1047 error = EIO; 1048 zil_fault_io = 0; 1049 } 1050 #endif 1051 if (error == 0) 1052 error = dmu_buf_hold(os, object, offset, zgd, &db, 1053 DMU_READ_NO_PREFETCH); 1054 1055 if (error == 0) { 1056 blkptr_t *obp = dmu_buf_get_blkptr(db); 1057 if (obp) { 1058 ASSERT(BP_IS_HOLE(bp)); 1059 *bp = *obp; 1060 } 1061 1062 zgd->zgd_db = db; 1063 zgd->zgd_bp = bp; 1064 1065 ASSERT(db->db_offset == offset); 1066 ASSERT(db->db_size == size); 1067 1068 error = dmu_sync(zio, lr->lr_common.lrc_txg, 1069 zfs_get_done, zgd); 1070 ASSERT(error || lr->lr_length <= zp->z_blksz); 1071 1072 /* 1073 * On success, we need to wait for the write I/O 1074 * initiated by dmu_sync() to complete before we can 1075 * release this dbuf. We will finish everything up 1076 * in the zfs_get_done() callback. 1077 */ 1078 if (error == 0) 1079 return (0); 1080 1081 if (error == EALREADY) { 1082 lr->lr_common.lrc_txtype = TX_WRITE2; 1083 error = 0; 1084 } 1085 } 1086 } 1087 1088 zfs_get_done(zgd, error); 1089 1090 return (error); 1091 } 1092 1093 /*ARGSUSED*/ 1094 static int 1095 zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr, 1096 caller_context_t *ct) 1097 { 1098 znode_t *zp = VTOZ(vp); 1099 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1100 int error; 1101 1102 ZFS_ENTER(zfsvfs); 1103 ZFS_VERIFY_ZP(zp); 1104 1105 if (flag & V_ACE_MASK) 1106 error = zfs_zaccess(zp, mode, flag, B_FALSE, cr); 1107 else 1108 error = zfs_zaccess_rwx(zp, mode, flag, cr); 1109 1110 ZFS_EXIT(zfsvfs); 1111 return (error); 1112 } 1113 1114 /* 1115 * If vnode is for a device return a specfs vnode instead. 
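 * Device special files must be accessed through specfs so that opens
 * and I/O are routed to the device driver; the ZFS vnode only stores
 * the node's attributes and device number.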
1116 */ 1117 static int 1118 specvp_check(vnode_t **vpp, cred_t *cr) 1119 { 1120 int error = 0; 1121 1122 if (IS_DEVVP(*vpp)) { 1123 struct vnode *svp; 1124 1125 svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr); 1126 VN_RELE(*vpp); 1127 if (svp == NULL) 1128 error = ENOSYS; 1129 *vpp = svp; 1130 } 1131 return (error); 1132 } 1133 1134 1135 /* 1136 * Lookup an entry in a directory, or an extended attribute directory. 1137 * If it exists, return a held vnode reference for it. 1138 * 1139 * IN: dvp - vnode of directory to search. 1140 * nm - name of entry to lookup. 1141 * pnp - full pathname to lookup [UNUSED]. 1142 * flags - LOOKUP_XATTR set if looking for an attribute. 1143 * rdir - root directory vnode [UNUSED]. 1144 * cr - credentials of caller. 1145 * ct - caller context 1146 * direntflags - directory lookup flags 1147 * realpnp - returned pathname. 1148 * 1149 * OUT: vpp - vnode of located entry, NULL if not found. 1150 * 1151 * RETURN: 0 if success 1152 * error code if failure 1153 * 1154 * Timestamps: 1155 * NA 1156 */ 1157 /* ARGSUSED */ 1158 static int 1159 zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp, 1160 int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct, 1161 int *direntflags, pathname_t *realpnp) 1162 { 1163 znode_t *zdp = VTOZ(dvp); 1164 zfsvfs_t *zfsvfs = zdp->z_zfsvfs; 1165 int error = 0; 1166 1167 /* fast path */ 1168 if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) { 1169 1170 if (dvp->v_type != VDIR) { 1171 return (ENOTDIR); 1172 } else if (zdp->z_sa_hdl == NULL) { 1173 return (EIO); 1174 } 1175 1176 if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) { 1177 error = zfs_fastaccesschk_execute(zdp, cr); 1178 if (!error) { 1179 *vpp = dvp; 1180 VN_HOLD(*vpp); 1181 return (0); 1182 } 1183 return (error); 1184 } else { 1185 vnode_t *tvp = dnlc_lookup(dvp, nm); 1186 1187 if (tvp) { 1188 error = zfs_fastaccesschk_execute(zdp, cr); 1189 if (error) { 1190 VN_RELE(tvp); 1191 return (error); 1192 } 1193 if (tvp == DNLC_NO_VNODE) { 1194 VN_RELE(tvp); 1195 return (ENOENT); 1196 } else { 1197 *vpp = tvp; 1198 return (specvp_check(vpp, cr)); 1199 } 1200 } 1201 } 1202 } 1203 1204 DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp, char *, nm); 1205 1206 ZFS_ENTER(zfsvfs); 1207 ZFS_VERIFY_ZP(zdp); 1208 1209 *vpp = NULL; 1210 1211 if (flags & LOOKUP_XATTR) { 1212 /* 1213 * If the xattr property is off, refuse the lookup request. 1214 */ 1215 if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) { 1216 ZFS_EXIT(zfsvfs); 1217 return (EINVAL); 1218 } 1219 1220 /* 1221 * We don't allow recursive attributes.. 1222 * Maybe someday we will. 1223 */ 1224 if (zdp->z_pflags & ZFS_XATTR) { 1225 ZFS_EXIT(zfsvfs); 1226 return (EINVAL); 1227 } 1228 1229 if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) { 1230 ZFS_EXIT(zfsvfs); 1231 return (error); 1232 } 1233 1234 /* 1235 * Do we have permission to get into attribute directory? 1236 */ 1237 1238 if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0, 1239 B_FALSE, cr)) { 1240 VN_RELE(*vpp); 1241 *vpp = NULL; 1242 } 1243 1244 ZFS_EXIT(zfsvfs); 1245 return (error); 1246 } 1247 1248 if (dvp->v_type != VDIR) { 1249 ZFS_EXIT(zfsvfs); 1250 return (ENOTDIR); 1251 } 1252 1253 /* 1254 * Check accessibility of directory. 
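 * The caller needs execute (search) permission on the directory before
 * we will translate any name within it; without it the lookup fails
 * regardless of the permissions on the entry itself.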
1255 */ 1256 1257 if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) { 1258 ZFS_EXIT(zfsvfs); 1259 return (error); 1260 } 1261 1262 if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm), 1263 NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 1264 ZFS_EXIT(zfsvfs); 1265 return (EILSEQ); 1266 } 1267 1268 error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp); 1269 if (error == 0) 1270 error = specvp_check(vpp, cr); 1271 1272 ZFS_EXIT(zfsvfs); 1273 return (error); 1274 } 1275 1276 /* 1277 * Attempt to create a new entry in a directory. If the entry 1278 * already exists, truncate the file if permissible, else return 1279 * an error. Return the vp of the created or trunc'd file. 1280 * 1281 * IN: dvp - vnode of directory to put new file entry in. 1282 * name - name of new file entry. 1283 * vap - attributes of new file. 1284 * excl - flag indicating exclusive or non-exclusive mode. 1285 * mode - mode to open file with. 1286 * cr - credentials of caller. 1287 * flag - large file flag [UNUSED]. 1288 * ct - caller context 1289 * vsecp - ACL to be set 1290 * 1291 * OUT: vpp - vnode of created or trunc'd entry. 1292 * 1293 * RETURN: 0 if success 1294 * error code if failure 1295 * 1296 * Timestamps: 1297 * dvp - ctime|mtime updated if new entry created 1298 * vp - ctime|mtime always, atime if new 1299 */ 1300 1301 /* ARGSUSED */ 1302 static int 1303 zfs_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl, 1304 int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct, 1305 vsecattr_t *vsecp) 1306 { 1307 znode_t *zp, *dzp = VTOZ(dvp); 1308 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1309 zilog_t *zilog; 1310 objset_t *os; 1311 zfs_dirlock_t *dl; 1312 dmu_tx_t *tx; 1313 int error; 1314 ksid_t *ksid; 1315 uid_t uid; 1316 gid_t gid = crgetgid(cr); 1317 zfs_acl_ids_t acl_ids; 1318 boolean_t fuid_dirtied; 1319 boolean_t have_acl = B_FALSE; 1320 1321 /* 1322 * If we have an ephemeral id, ACL, or XVATTR then 1323 * make sure file system is at proper version 1324 */ 1325 1326 ksid = crgetsid(cr, KSID_OWNER); 1327 if (ksid) 1328 uid = ksid_getid(ksid); 1329 else 1330 uid = crgetuid(cr); 1331 1332 if (zfsvfs->z_use_fuids == B_FALSE && 1333 (vsecp || (vap->va_mask & AT_XVATTR) || 1334 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) 1335 return (EINVAL); 1336 1337 ZFS_ENTER(zfsvfs); 1338 ZFS_VERIFY_ZP(dzp); 1339 os = zfsvfs->z_os; 1340 zilog = zfsvfs->z_log; 1341 1342 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), 1343 NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 1344 ZFS_EXIT(zfsvfs); 1345 return (EILSEQ); 1346 } 1347 1348 if (vap->va_mask & AT_XVATTR) { 1349 if ((error = secpolicy_xvattr((xvattr_t *)vap, 1350 crgetuid(cr), cr, vap->va_type)) != 0) { 1351 ZFS_EXIT(zfsvfs); 1352 return (error); 1353 } 1354 } 1355 top: 1356 *vpp = NULL; 1357 1358 if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr)) 1359 vap->va_mode &= ~VSVTX; 1360 1361 if (*name == '\0') { 1362 /* 1363 * Null component name refers to the directory itself. 
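 * In that case the directory is returned with an extra hold and no
 * dirent lock is taken, so the existing-entry logic below handles it
 * just like an ordinary name collision.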
1364 */ 1365 VN_HOLD(dvp); 1366 zp = dzp; 1367 dl = NULL; 1368 error = 0; 1369 } else { 1370 /* possible VN_HOLD(zp) */ 1371 int zflg = 0; 1372 1373 if (flag & FIGNORECASE) 1374 zflg |= ZCILOOK; 1375 1376 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, 1377 NULL, NULL); 1378 if (error) { 1379 if (have_acl) 1380 zfs_acl_ids_free(&acl_ids); 1381 if (strcmp(name, "..") == 0) 1382 error = EISDIR; 1383 ZFS_EXIT(zfsvfs); 1384 return (error); 1385 } 1386 } 1387 1388 if (zp == NULL) { 1389 uint64_t txtype; 1390 1391 /* 1392 * Create a new file object and update the directory 1393 * to reference it. 1394 */ 1395 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { 1396 if (have_acl) 1397 zfs_acl_ids_free(&acl_ids); 1398 goto out; 1399 } 1400 1401 /* 1402 * We only support the creation of regular files in 1403 * extended attribute directories. 1404 */ 1405 1406 if ((dzp->z_pflags & ZFS_XATTR) && 1407 (vap->va_type != VREG)) { 1408 if (have_acl) 1409 zfs_acl_ids_free(&acl_ids); 1410 error = EINVAL; 1411 goto out; 1412 } 1413 1414 if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap, 1415 cr, vsecp, &acl_ids)) != 0) 1416 goto out; 1417 have_acl = B_TRUE; 1418 1419 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { 1420 zfs_acl_ids_free(&acl_ids); 1421 error = EDQUOT; 1422 goto out; 1423 } 1424 1425 tx = dmu_tx_create(os); 1426 1427 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + 1428 ZFS_SA_BASE_ATTR_SIZE); 1429 1430 fuid_dirtied = zfsvfs->z_fuid_dirty; 1431 if (fuid_dirtied) 1432 zfs_fuid_txhold(zfsvfs, tx); 1433 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); 1434 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE); 1435 if (!zfsvfs->z_use_sa && 1436 acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { 1437 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 1438 0, acl_ids.z_aclp->z_acl_bytes); 1439 } 1440 error = dmu_tx_assign(tx, TXG_NOWAIT); 1441 if (error) { 1442 zfs_dirent_unlock(dl); 1443 if (error == ERESTART) { 1444 dmu_tx_wait(tx); 1445 dmu_tx_abort(tx); 1446 goto top; 1447 } 1448 zfs_acl_ids_free(&acl_ids); 1449 dmu_tx_abort(tx); 1450 ZFS_EXIT(zfsvfs); 1451 return (error); 1452 } 1453 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); 1454 1455 if (fuid_dirtied) 1456 zfs_fuid_sync(zfsvfs, tx); 1457 1458 (void) zfs_link_create(dl, zp, tx, ZNEW); 1459 txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap); 1460 if (flag & FIGNORECASE) 1461 txtype |= TX_CI; 1462 zfs_log_create(zilog, tx, txtype, dzp, zp, name, 1463 vsecp, acl_ids.z_fuidp, vap); 1464 zfs_acl_ids_free(&acl_ids); 1465 dmu_tx_commit(tx); 1466 } else { 1467 int aflags = (flag & FAPPEND) ? V_APPEND : 0; 1468 1469 if (have_acl) 1470 zfs_acl_ids_free(&acl_ids); 1471 have_acl = B_FALSE; 1472 1473 /* 1474 * A directory entry already exists for this name. 1475 */ 1476 /* 1477 * Can't truncate an existing file if in exclusive mode. 1478 */ 1479 if (excl == EXCL) { 1480 error = EEXIST; 1481 goto out; 1482 } 1483 /* 1484 * Can't open a directory for writing. 1485 */ 1486 if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) { 1487 error = EISDIR; 1488 goto out; 1489 } 1490 /* 1491 * Verify requested access to file. 1492 */ 1493 if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) { 1494 goto out; 1495 } 1496 1497 mutex_enter(&dzp->z_lock); 1498 dzp->z_seq++; 1499 mutex_exit(&dzp->z_lock); 1500 1501 /* 1502 * Truncate regular files if requested. 
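 * zfs_freesp() creates and assigns its own transaction, so the dirent
 * lock is dropped first to respect the lock-ordering rules described
 * at the top of this file.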
1503 */ 1504 if ((ZTOV(zp)->v_type == VREG) && 1505 (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) { 1506 /* we can't hold any locks when calling zfs_freesp() */ 1507 zfs_dirent_unlock(dl); 1508 dl = NULL; 1509 error = zfs_freesp(zp, 0, 0, mode, TRUE); 1510 if (error == 0) { 1511 vnevent_create(ZTOV(zp), ct); 1512 } 1513 } 1514 } 1515 out: 1516 1517 if (dl) 1518 zfs_dirent_unlock(dl); 1519 1520 if (error) { 1521 if (zp) 1522 VN_RELE(ZTOV(zp)); 1523 } else { 1524 *vpp = ZTOV(zp); 1525 error = specvp_check(vpp, cr); 1526 } 1527 1528 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 1529 zil_commit(zilog, 0); 1530 1531 ZFS_EXIT(zfsvfs); 1532 return (error); 1533 } 1534 1535 /* 1536 * Remove an entry from a directory. 1537 * 1538 * IN: dvp - vnode of directory to remove entry from. 1539 * name - name of entry to remove. 1540 * cr - credentials of caller. 1541 * ct - caller context 1542 * flags - case flags 1543 * 1544 * RETURN: 0 if success 1545 * error code if failure 1546 * 1547 * Timestamps: 1548 * dvp - ctime|mtime 1549 * vp - ctime (if nlink > 0) 1550 */ 1551 1552 uint64_t null_xattr = 0; 1553 1554 /*ARGSUSED*/ 1555 static int 1556 zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct, 1557 int flags) 1558 { 1559 znode_t *zp, *dzp = VTOZ(dvp); 1560 znode_t *xzp; 1561 vnode_t *vp; 1562 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1563 zilog_t *zilog; 1564 uint64_t acl_obj, xattr_obj; 1565 uint64_t xattr_obj_unlinked = 0; 1566 uint64_t obj = 0; 1567 zfs_dirlock_t *dl; 1568 dmu_tx_t *tx; 1569 boolean_t may_delete_now, delete_now = FALSE; 1570 boolean_t unlinked, toobig = FALSE; 1571 uint64_t txtype; 1572 pathname_t *realnmp = NULL; 1573 pathname_t realnm; 1574 int error; 1575 int zflg = ZEXISTS; 1576 1577 ZFS_ENTER(zfsvfs); 1578 ZFS_VERIFY_ZP(dzp); 1579 zilog = zfsvfs->z_log; 1580 1581 if (flags & FIGNORECASE) { 1582 zflg |= ZCILOOK; 1583 pn_alloc(&realnm); 1584 realnmp = &realnm; 1585 } 1586 1587 top: 1588 xattr_obj = 0; 1589 xzp = NULL; 1590 /* 1591 * Attempt to lock directory; fail if entry doesn't exist. 1592 */ 1593 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, 1594 NULL, realnmp)) { 1595 if (realnmp) 1596 pn_free(realnmp); 1597 ZFS_EXIT(zfsvfs); 1598 return (error); 1599 } 1600 1601 vp = ZTOV(zp); 1602 1603 if (error = zfs_zaccess_delete(dzp, zp, cr)) { 1604 goto out; 1605 } 1606 1607 /* 1608 * Need to use rmdir for removing directories. 1609 */ 1610 if (vp->v_type == VDIR) { 1611 error = EPERM; 1612 goto out; 1613 } 1614 1615 vnevent_remove(vp, dvp, name, ct); 1616 1617 if (realnmp) 1618 dnlc_remove(dvp, realnmp->pn_buf); 1619 else 1620 dnlc_remove(dvp, name); 1621 1622 mutex_enter(&vp->v_lock); 1623 may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp); 1624 mutex_exit(&vp->v_lock); 1625 1626 /* 1627 * We may delete the znode now, or we may put it in the unlinked set; 1628 * it depends on whether we're the last link, and on whether there are 1629 * other holds on the vnode. So we dmu_tx_hold() the right things to 1630 * allow for either case. 1631 */ 1632 obj = zp->z_id; 1633 tx = dmu_tx_create(zfsvfs->z_os); 1634 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); 1635 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 1636 zfs_sa_upgrade_txholds(tx, zp); 1637 zfs_sa_upgrade_txholds(tx, dzp); 1638 if (may_delete_now) { 1639 toobig = 1640 zp->z_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT; 1641 /* if the file is too big, only hold_free a token amount */ 1642 dmu_tx_hold_free(tx, zp->z_id, 0, 1643 (toobig ? 
DMU_MAX_ACCESS : DMU_OBJECT_END)); 1644 } 1645 1646 /* are there any extended attributes? */ 1647 error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), 1648 &xattr_obj, sizeof (xattr_obj)); 1649 if (error == 0 && xattr_obj) { 1650 error = zfs_zget(zfsvfs, xattr_obj, &xzp); 1651 ASSERT0(error); 1652 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); 1653 dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE); 1654 } 1655 1656 mutex_enter(&zp->z_lock); 1657 if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now) 1658 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); 1659 mutex_exit(&zp->z_lock); 1660 1661 /* charge as an update -- would be nice not to charge at all */ 1662 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); 1663 1664 error = dmu_tx_assign(tx, TXG_NOWAIT); 1665 if (error) { 1666 zfs_dirent_unlock(dl); 1667 VN_RELE(vp); 1668 if (xzp) 1669 VN_RELE(ZTOV(xzp)); 1670 if (error == ERESTART) { 1671 dmu_tx_wait(tx); 1672 dmu_tx_abort(tx); 1673 goto top; 1674 } 1675 if (realnmp) 1676 pn_free(realnmp); 1677 dmu_tx_abort(tx); 1678 ZFS_EXIT(zfsvfs); 1679 return (error); 1680 } 1681 1682 /* 1683 * Remove the directory entry. 1684 */ 1685 error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked); 1686 1687 if (error) { 1688 dmu_tx_commit(tx); 1689 goto out; 1690 } 1691 1692 if (unlinked) { 1693 1694 /* 1695 * Hold z_lock so that we can make sure that the ACL obj 1696 * hasn't changed. Could have been deleted due to 1697 * zfs_sa_upgrade(). 1698 */ 1699 mutex_enter(&zp->z_lock); 1700 mutex_enter(&vp->v_lock); 1701 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), 1702 &xattr_obj_unlinked, sizeof (xattr_obj_unlinked)); 1703 delete_now = may_delete_now && !toobig && 1704 vp->v_count == 1 && !vn_has_cached_data(vp) && 1705 xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) == 1706 acl_obj; 1707 mutex_exit(&vp->v_lock); 1708 } 1709 1710 if (delete_now) { 1711 if (xattr_obj_unlinked) { 1712 ASSERT3U(xzp->z_links, ==, 2); 1713 mutex_enter(&xzp->z_lock); 1714 xzp->z_unlinked = 1; 1715 xzp->z_links = 0; 1716 error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs), 1717 &xzp->z_links, sizeof (xzp->z_links), tx); 1718 ASSERT3U(error, ==, 0); 1719 mutex_exit(&xzp->z_lock); 1720 zfs_unlinked_add(xzp, tx); 1721 1722 if (zp->z_is_sa) 1723 error = sa_remove(zp->z_sa_hdl, 1724 SA_ZPL_XATTR(zfsvfs), tx); 1725 else 1726 error = sa_update(zp->z_sa_hdl, 1727 SA_ZPL_XATTR(zfsvfs), &null_xattr, 1728 sizeof (uint64_t), tx); 1729 ASSERT0(error); 1730 } 1731 mutex_enter(&vp->v_lock); 1732 vp->v_count--; 1733 ASSERT0(vp->v_count); 1734 mutex_exit(&vp->v_lock); 1735 mutex_exit(&zp->z_lock); 1736 zfs_znode_delete(zp, tx); 1737 } else if (unlinked) { 1738 mutex_exit(&zp->z_lock); 1739 zfs_unlinked_add(zp, tx); 1740 } 1741 1742 txtype = TX_REMOVE; 1743 if (flags & FIGNORECASE) 1744 txtype |= TX_CI; 1745 zfs_log_remove(zilog, tx, txtype, dzp, name, obj); 1746 1747 dmu_tx_commit(tx); 1748 out: 1749 if (realnmp) 1750 pn_free(realnmp); 1751 1752 zfs_dirent_unlock(dl); 1753 1754 if (!delete_now) 1755 VN_RELE(vp); 1756 if (xzp) 1757 VN_RELE(ZTOV(xzp)); 1758 1759 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 1760 zil_commit(zilog, 0); 1761 1762 ZFS_EXIT(zfsvfs); 1763 return (error); 1764 } 1765 1766 /* 1767 * Create a new directory and insert it into dvp using the name 1768 * provided. Return a pointer to the inserted directory. 1769 * 1770 * IN: dvp - vnode of directory to add subdir to. 1771 * dirname - name of new directory. 1772 * vap - attributes of new directory. 1773 * cr - credentials of caller. 
1774 * ct - caller context 1775 * vsecp - ACL to be set 1776 * 1777 * OUT: vpp - vnode of created directory. 1778 * 1779 * RETURN: 0 if success 1780 * error code if failure 1781 * 1782 * Timestamps: 1783 * dvp - ctime|mtime updated 1784 * vp - ctime|mtime|atime updated 1785 */ 1786 /*ARGSUSED*/ 1787 static int 1788 zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr, 1789 caller_context_t *ct, int flags, vsecattr_t *vsecp) 1790 { 1791 znode_t *zp, *dzp = VTOZ(dvp); 1792 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1793 zilog_t *zilog; 1794 zfs_dirlock_t *dl; 1795 uint64_t txtype; 1796 dmu_tx_t *tx; 1797 int error; 1798 int zf = ZNEW; 1799 ksid_t *ksid; 1800 uid_t uid; 1801 gid_t gid = crgetgid(cr); 1802 zfs_acl_ids_t acl_ids; 1803 boolean_t fuid_dirtied; 1804 1805 ASSERT(vap->va_type == VDIR); 1806 1807 /* 1808 * If we have an ephemeral id, ACL, or XVATTR then 1809 * make sure file system is at proper version 1810 */ 1811 1812 ksid = crgetsid(cr, KSID_OWNER); 1813 if (ksid) 1814 uid = ksid_getid(ksid); 1815 else 1816 uid = crgetuid(cr); 1817 if (zfsvfs->z_use_fuids == B_FALSE && 1818 (vsecp || (vap->va_mask & AT_XVATTR) || 1819 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) 1820 return (EINVAL); 1821 1822 ZFS_ENTER(zfsvfs); 1823 ZFS_VERIFY_ZP(dzp); 1824 zilog = zfsvfs->z_log; 1825 1826 if (dzp->z_pflags & ZFS_XATTR) { 1827 ZFS_EXIT(zfsvfs); 1828 return (EINVAL); 1829 } 1830 1831 if (zfsvfs->z_utf8 && u8_validate(dirname, 1832 strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 1833 ZFS_EXIT(zfsvfs); 1834 return (EILSEQ); 1835 } 1836 if (flags & FIGNORECASE) 1837 zf |= ZCILOOK; 1838 1839 if (vap->va_mask & AT_XVATTR) { 1840 if ((error = secpolicy_xvattr((xvattr_t *)vap, 1841 crgetuid(cr), cr, vap->va_type)) != 0) { 1842 ZFS_EXIT(zfsvfs); 1843 return (error); 1844 } 1845 } 1846 1847 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr, 1848 vsecp, &acl_ids)) != 0) { 1849 ZFS_EXIT(zfsvfs); 1850 return (error); 1851 } 1852 /* 1853 * First make sure the new directory doesn't exist. 1854 * 1855 * Existence is checked first to make sure we don't return 1856 * EACCES instead of EEXIST which can cause some applications 1857 * to fail. 1858 */ 1859 top: 1860 *vpp = NULL; 1861 1862 if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf, 1863 NULL, NULL)) { 1864 zfs_acl_ids_free(&acl_ids); 1865 ZFS_EXIT(zfsvfs); 1866 return (error); 1867 } 1868 1869 if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) { 1870 zfs_acl_ids_free(&acl_ids); 1871 zfs_dirent_unlock(dl); 1872 ZFS_EXIT(zfsvfs); 1873 return (error); 1874 } 1875 1876 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { 1877 zfs_acl_ids_free(&acl_ids); 1878 zfs_dirent_unlock(dl); 1879 ZFS_EXIT(zfsvfs); 1880 return (EDQUOT); 1881 } 1882 1883 /* 1884 * Add a new entry to the directory. 
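 * The transaction holds cover the parent directory's ZAP (for the new
 * name), a new ZAP object for the directory itself, the FUID tables if
 * they are dirty, the SA create for the new znode, and a separate
 * write when the ACL is too large to be stored in the SA.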
1885 */ 1886 tx = dmu_tx_create(zfsvfs->z_os); 1887 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname); 1888 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); 1889 fuid_dirtied = zfsvfs->z_fuid_dirty; 1890 if (fuid_dirtied) 1891 zfs_fuid_txhold(zfsvfs, tx); 1892 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { 1893 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 1894 acl_ids.z_aclp->z_acl_bytes); 1895 } 1896 1897 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + 1898 ZFS_SA_BASE_ATTR_SIZE); 1899 1900 error = dmu_tx_assign(tx, TXG_NOWAIT); 1901 if (error) { 1902 zfs_dirent_unlock(dl); 1903 if (error == ERESTART) { 1904 dmu_tx_wait(tx); 1905 dmu_tx_abort(tx); 1906 goto top; 1907 } 1908 zfs_acl_ids_free(&acl_ids); 1909 dmu_tx_abort(tx); 1910 ZFS_EXIT(zfsvfs); 1911 return (error); 1912 } 1913 1914 /* 1915 * Create new node. 1916 */ 1917 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); 1918 1919 if (fuid_dirtied) 1920 zfs_fuid_sync(zfsvfs, tx); 1921 1922 /* 1923 * Now put new name in parent dir. 1924 */ 1925 (void) zfs_link_create(dl, zp, tx, ZNEW); 1926 1927 *vpp = ZTOV(zp); 1928 1929 txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap); 1930 if (flags & FIGNORECASE) 1931 txtype |= TX_CI; 1932 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp, 1933 acl_ids.z_fuidp, vap); 1934 1935 zfs_acl_ids_free(&acl_ids); 1936 1937 dmu_tx_commit(tx); 1938 1939 zfs_dirent_unlock(dl); 1940 1941 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 1942 zil_commit(zilog, 0); 1943 1944 ZFS_EXIT(zfsvfs); 1945 return (0); 1946 } 1947 1948 /* 1949 * Remove a directory subdir entry. If the current working 1950 * directory is the same as the subdir to be removed, the 1951 * remove will fail. 1952 * 1953 * IN: dvp - vnode of directory to remove from. 1954 * name - name of directory to be removed. 1955 * cwd - vnode of current working directory. 1956 * cr - credentials of caller. 1957 * ct - caller context 1958 * flags - case flags 1959 * 1960 * RETURN: 0 if success 1961 * error code if failure 1962 * 1963 * Timestamps: 1964 * dvp - ctime|mtime updated 1965 */ 1966 /*ARGSUSED*/ 1967 static int 1968 zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr, 1969 caller_context_t *ct, int flags) 1970 { 1971 znode_t *dzp = VTOZ(dvp); 1972 znode_t *zp; 1973 vnode_t *vp; 1974 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1975 zilog_t *zilog; 1976 zfs_dirlock_t *dl; 1977 dmu_tx_t *tx; 1978 int error; 1979 int zflg = ZEXISTS; 1980 1981 ZFS_ENTER(zfsvfs); 1982 ZFS_VERIFY_ZP(dzp); 1983 zilog = zfsvfs->z_log; 1984 1985 if (flags & FIGNORECASE) 1986 zflg |= ZCILOOK; 1987 top: 1988 zp = NULL; 1989 1990 /* 1991 * Attempt to lock directory; fail if entry doesn't exist. 1992 */ 1993 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, 1994 NULL, NULL)) { 1995 ZFS_EXIT(zfsvfs); 1996 return (error); 1997 } 1998 1999 vp = ZTOV(zp); 2000 2001 if (error = zfs_zaccess_delete(dzp, zp, cr)) { 2002 goto out; 2003 } 2004 2005 if (vp->v_type != VDIR) { 2006 error = ENOTDIR; 2007 goto out; 2008 } 2009 2010 if (vp == cwd) { 2011 error = EINVAL; 2012 goto out; 2013 } 2014 2015 vnevent_rmdir(vp, dvp, name, ct); 2016 2017 /* 2018 * Grab a lock on the directory to make sure that noone is 2019 * trying to add (or lookup) entries while we are removing it. 2020 */ 2021 rw_enter(&zp->z_name_lock, RW_WRITER); 2022 2023 /* 2024 * Grab a lock on the parent pointer to make sure we play well 2025 * with the treewalk and directory rename code. 
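 * Both z_name_lock and z_parent_lock are taken before the transaction
 * is assigned and are not dropped until after it commits, so concurrent
 * rename or treewalk traffic is excluded for the whole removal.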
2026 */ 2027 rw_enter(&zp->z_parent_lock, RW_WRITER); 2028 2029 tx = dmu_tx_create(zfsvfs->z_os); 2030 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); 2031 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 2032 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); 2033 zfs_sa_upgrade_txholds(tx, zp); 2034 zfs_sa_upgrade_txholds(tx, dzp); 2035 error = dmu_tx_assign(tx, TXG_NOWAIT); 2036 if (error) { 2037 rw_exit(&zp->z_parent_lock); 2038 rw_exit(&zp->z_name_lock); 2039 zfs_dirent_unlock(dl); 2040 VN_RELE(vp); 2041 if (error == ERESTART) { 2042 dmu_tx_wait(tx); 2043 dmu_tx_abort(tx); 2044 goto top; 2045 } 2046 dmu_tx_abort(tx); 2047 ZFS_EXIT(zfsvfs); 2048 return (error); 2049 } 2050 2051 error = zfs_link_destroy(dl, zp, tx, zflg, NULL); 2052 2053 if (error == 0) { 2054 uint64_t txtype = TX_RMDIR; 2055 if (flags & FIGNORECASE) 2056 txtype |= TX_CI; 2057 zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT); 2058 } 2059 2060 dmu_tx_commit(tx); 2061 2062 rw_exit(&zp->z_parent_lock); 2063 rw_exit(&zp->z_name_lock); 2064 out: 2065 zfs_dirent_unlock(dl); 2066 2067 VN_RELE(vp); 2068 2069 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 2070 zil_commit(zilog, 0); 2071 2072 ZFS_EXIT(zfsvfs); 2073 return (error); 2074 } 2075 2076 /* 2077 * Read as many directory entries as will fit into the provided 2078 * buffer from the given directory cursor position (specified in 2079 * the uio structure. 2080 * 2081 * IN: vp - vnode of directory to read. 2082 * uio - structure supplying read location, range info, 2083 * and return buffer. 2084 * cr - credentials of caller. 2085 * ct - caller context 2086 * flags - case flags 2087 * 2088 * OUT: uio - updated offset and range, buffer filled. 2089 * eofp - set to true if end-of-file detected. 2090 * 2091 * RETURN: 0 if success 2092 * error code if failure 2093 * 2094 * Timestamps: 2095 * vp - atime updated 2096 * 2097 * Note that the low 4 bits of the cookie returned by zap is always zero. 2098 * This allows us to use the low range for "special" directory entries: 2099 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem, 2100 * we use the offset 2 for the '.zfs' directory. 2101 */ 2102 /* ARGSUSED */ 2103 static int 2104 zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, 2105 caller_context_t *ct, int flags) 2106 { 2107 znode_t *zp = VTOZ(vp); 2108 iovec_t *iovp; 2109 edirent_t *eodp; 2110 dirent64_t *odp; 2111 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2112 objset_t *os; 2113 caddr_t outbuf; 2114 size_t bufsize; 2115 zap_cursor_t zc; 2116 zap_attribute_t zap; 2117 uint_t bytes_wanted; 2118 uint64_t offset; /* must be unsigned; checks for < 1 */ 2119 uint64_t parent; 2120 int local_eof; 2121 int outcount; 2122 int error; 2123 uint8_t prefetch; 2124 boolean_t check_sysattrs; 2125 2126 ZFS_ENTER(zfsvfs); 2127 ZFS_VERIFY_ZP(zp); 2128 2129 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs), 2130 &parent, sizeof (parent))) != 0) { 2131 ZFS_EXIT(zfsvfs); 2132 return (error); 2133 } 2134 2135 /* 2136 * If we are not given an eof variable, 2137 * use a local one. 2138 */ 2139 if (eofp == NULL) 2140 eofp = &local_eof; 2141 2142 /* 2143 * Check for valid iov_len. 
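 * A non-positive iov_len cannot hold even a single directory entry, so
 * the request is rejected with EINVAL before any cursor work is done.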
2144 */ 2145 if (uio->uio_iov->iov_len <= 0) { 2146 ZFS_EXIT(zfsvfs); 2147 return (EINVAL); 2148 } 2149 2150 /* 2151 * Quit if directory has been removed (posix) 2152 */ 2153 if ((*eofp = zp->z_unlinked) != 0) { 2154 ZFS_EXIT(zfsvfs); 2155 return (0); 2156 } 2157 2158 error = 0; 2159 os = zfsvfs->z_os; 2160 offset = uio->uio_loffset; 2161 prefetch = zp->z_zn_prefetch; 2162 2163 /* 2164 * Initialize the iterator cursor. 2165 */ 2166 if (offset <= 3) { 2167 /* 2168 * Start iteration from the beginning of the directory. 2169 */ 2170 zap_cursor_init(&zc, os, zp->z_id); 2171 } else { 2172 /* 2173 * The offset is a serialized cursor. 2174 */ 2175 zap_cursor_init_serialized(&zc, os, zp->z_id, offset); 2176 } 2177 2178 /* 2179 * Get space to change directory entries into fs independent format. 2180 */ 2181 iovp = uio->uio_iov; 2182 bytes_wanted = iovp->iov_len; 2183 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) { 2184 bufsize = bytes_wanted; 2185 outbuf = kmem_alloc(bufsize, KM_SLEEP); 2186 odp = (struct dirent64 *)outbuf; 2187 } else { 2188 bufsize = bytes_wanted; 2189 odp = (struct dirent64 *)iovp->iov_base; 2190 } 2191 eodp = (struct edirent *)odp; 2192 2193 /* 2194 * If this VFS supports the system attribute view interface; and 2195 * we're looking at an extended attribute directory; and we care 2196 * about normalization conflicts on this vfs; then we must check 2197 * for normalization conflicts with the sysattr name space. 2198 */ 2199 check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) && 2200 (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm && 2201 (flags & V_RDDIR_ENTFLAGS); 2202 2203 /* 2204 * Transform to file-system independent format 2205 */ 2206 outcount = 0; 2207 while (outcount < bytes_wanted) { 2208 ino64_t objnum; 2209 ushort_t reclen; 2210 off64_t *next = NULL; 2211 2212 /* 2213 * Special case `.', `..', and `.zfs'. 2214 */ 2215 if (offset == 0) { 2216 (void) strcpy(zap.za_name, "."); 2217 zap.za_normalization_conflict = 0; 2218 objnum = zp->z_id; 2219 } else if (offset == 1) { 2220 (void) strcpy(zap.za_name, ".."); 2221 zap.za_normalization_conflict = 0; 2222 objnum = parent; 2223 } else if (offset == 2 && zfs_show_ctldir(zp)) { 2224 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME); 2225 zap.za_normalization_conflict = 0; 2226 objnum = ZFSCTL_INO_ROOT; 2227 } else { 2228 /* 2229 * Grab next entry. 
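			 * ENOENT from zap_cursor_retrieve() means the cursor
			 * has passed the last entry; it is treated as
			 * end-of-directory below, not as an error.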
2230 */ 2231 if (error = zap_cursor_retrieve(&zc, &zap)) { 2232 if ((*eofp = (error == ENOENT)) != 0) 2233 break; 2234 else 2235 goto update; 2236 } 2237 2238 if (zap.za_integer_length != 8 || 2239 zap.za_num_integers != 1) { 2240 cmn_err(CE_WARN, "zap_readdir: bad directory " 2241 "entry, obj = %lld, offset = %lld\n", 2242 (u_longlong_t)zp->z_id, 2243 (u_longlong_t)offset); 2244 error = ENXIO; 2245 goto update; 2246 } 2247 2248 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer); 2249 /* 2250 * MacOS X can extract the object type here such as: 2251 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer); 2252 */ 2253 2254 if (check_sysattrs && !zap.za_normalization_conflict) { 2255 zap.za_normalization_conflict = 2256 xattr_sysattr_casechk(zap.za_name); 2257 } 2258 } 2259 2260 if (flags & V_RDDIR_ACCFILTER) { 2261 /* 2262 * If we have no access at all, don't include 2263 * this entry in the returned information 2264 */ 2265 znode_t *ezp; 2266 if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0) 2267 goto skip_entry; 2268 if (!zfs_has_access(ezp, cr)) { 2269 VN_RELE(ZTOV(ezp)); 2270 goto skip_entry; 2271 } 2272 VN_RELE(ZTOV(ezp)); 2273 } 2274 2275 if (flags & V_RDDIR_ENTFLAGS) 2276 reclen = EDIRENT_RECLEN(strlen(zap.za_name)); 2277 else 2278 reclen = DIRENT64_RECLEN(strlen(zap.za_name)); 2279 2280 /* 2281 * Will this entry fit in the buffer? 2282 */ 2283 if (outcount + reclen > bufsize) { 2284 /* 2285 * Did we manage to fit anything in the buffer? 2286 */ 2287 if (!outcount) { 2288 error = EINVAL; 2289 goto update; 2290 } 2291 break; 2292 } 2293 if (flags & V_RDDIR_ENTFLAGS) { 2294 /* 2295 * Add extended flag entry: 2296 */ 2297 eodp->ed_ino = objnum; 2298 eodp->ed_reclen = reclen; 2299 /* NOTE: ed_off is the offset for the *next* entry */ 2300 next = &(eodp->ed_off); 2301 eodp->ed_eflags = zap.za_normalization_conflict ? 2302 ED_CASE_CONFLICT : 0; 2303 (void) strncpy(eodp->ed_name, zap.za_name, 2304 EDIRENT_NAMELEN(reclen)); 2305 eodp = (edirent_t *)((intptr_t)eodp + reclen); 2306 } else { 2307 /* 2308 * Add normal entry: 2309 */ 2310 odp->d_ino = objnum; 2311 odp->d_reclen = reclen; 2312 /* NOTE: d_off is the offset for the *next* entry */ 2313 next = &(odp->d_off); 2314 (void) strncpy(odp->d_name, zap.za_name, 2315 DIRENT64_NAMELEN(reclen)); 2316 odp = (dirent64_t *)((intptr_t)odp + reclen); 2317 } 2318 outcount += reclen; 2319 2320 ASSERT(outcount <= bufsize); 2321 2322 /* Prefetch znode */ 2323 if (prefetch) 2324 dmu_prefetch(os, objnum, 0, 0); 2325 2326 skip_entry: 2327 /* 2328 * Move to the next entry, fill in the previous offset. 2329 */ 2330 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) { 2331 zap_cursor_advance(&zc); 2332 offset = zap_cursor_serialize(&zc); 2333 } else { 2334 offset += 1; 2335 } 2336 if (next) 2337 *next = offset; 2338 } 2339 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */ 2340 2341 if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) { 2342 iovp->iov_base += outcount; 2343 iovp->iov_len -= outcount; 2344 uio->uio_resid -= outcount; 2345 } else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) { 2346 /* 2347 * Reset the pointer. 
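		 * uiomove() into the caller's buffer failed part way, so
		 * roll the cursor offset back to where this call started;
		 * no entries are consumed when the copy-out fails.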
2348 */ 2349 offset = uio->uio_loffset; 2350 } 2351 2352 update: 2353 zap_cursor_fini(&zc); 2354 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) 2355 kmem_free(outbuf, bufsize); 2356 2357 if (error == ENOENT) 2358 error = 0; 2359 2360 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 2361 2362 uio->uio_loffset = offset; 2363 ZFS_EXIT(zfsvfs); 2364 return (error); 2365 } 2366 2367 ulong_t zfs_fsync_sync_cnt = 4; 2368 2369 static int 2370 zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct) 2371 { 2372 znode_t *zp = VTOZ(vp); 2373 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2374 2375 /* 2376 * Regardless of whether this is required for standards conformance, 2377 * this is the logical behavior when fsync() is called on a file with 2378 * dirty pages. We use B_ASYNC since the ZIL transactions are already 2379 * going to be pushed out as part of the zil_commit(). 2380 */ 2381 if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) && 2382 (vp->v_type == VREG) && !(IS_SWAPVP(vp))) 2383 (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr, ct); 2384 2385 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt); 2386 2387 if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) { 2388 ZFS_ENTER(zfsvfs); 2389 ZFS_VERIFY_ZP(zp); 2390 zil_commit(zfsvfs->z_log, zp->z_id); 2391 ZFS_EXIT(zfsvfs); 2392 } 2393 return (0); 2394 } 2395 2396 2397 /* 2398 * Get the requested file attributes and place them in the provided 2399 * vattr structure. 2400 * 2401 * IN: vp - vnode of file. 2402 * vap - va_mask identifies requested attributes. 2403 * If AT_XVATTR set, then optional attrs are requested 2404 * flags - ATTR_NOACLCHECK (CIFS server context) 2405 * cr - credentials of caller. 2406 * ct - caller context 2407 * 2408 * OUT: vap - attribute values. 2409 * 2410 * RETURN: 0 (always succeeds) 2411 */ 2412 /* ARGSUSED */ 2413 static int 2414 zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, 2415 caller_context_t *ct) 2416 { 2417 znode_t *zp = VTOZ(vp); 2418 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2419 int error = 0; 2420 uint64_t links; 2421 uint64_t mtime[2], ctime[2]; 2422 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ 2423 xoptattr_t *xoap = NULL; 2424 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; 2425 sa_bulk_attr_t bulk[2]; 2426 int count = 0; 2427 2428 ZFS_ENTER(zfsvfs); 2429 ZFS_VERIFY_ZP(zp); 2430 2431 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid); 2432 2433 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); 2434 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); 2435 2436 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) { 2437 ZFS_EXIT(zfsvfs); 2438 return (error); 2439 } 2440 2441 /* 2442 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES. 2443 * Also, if we are the owner don't bother, since owner should 2444 * always be allowed to read basic attributes of file. 2445 */ 2446 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) && 2447 (vap->va_uid != crgetuid(cr))) { 2448 if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0, 2449 skipaclchk, cr)) { 2450 ZFS_EXIT(zfsvfs); 2451 return (error); 2452 } 2453 } 2454 2455 /* 2456 * Return all attributes. It's cheaper to provide the answer 2457 * than to determine whether we were asked the question. 
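	 * z_lock is held while the vattr is filled in so the returned
	 * attributes form a consistent snapshot of the znode.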
2458 */ 2459 2460 mutex_enter(&zp->z_lock); 2461 vap->va_type = vp->v_type; 2462 vap->va_mode = zp->z_mode & MODEMASK; 2463 vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev; 2464 vap->va_nodeid = zp->z_id; 2465 if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp)) 2466 links = zp->z_links + 1; 2467 else 2468 links = zp->z_links; 2469 vap->va_nlink = MIN(links, UINT32_MAX); /* nlink_t limit! */ 2470 vap->va_size = zp->z_size; 2471 vap->va_rdev = vp->v_rdev; 2472 vap->va_seq = zp->z_seq; 2473 2474 /* 2475 * Add in any requested optional attributes and the create time. 2476 * Also set the corresponding bits in the returned attribute bitmap. 2477 */ 2478 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) { 2479 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) { 2480 xoap->xoa_archive = 2481 ((zp->z_pflags & ZFS_ARCHIVE) != 0); 2482 XVA_SET_RTN(xvap, XAT_ARCHIVE); 2483 } 2484 2485 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) { 2486 xoap->xoa_readonly = 2487 ((zp->z_pflags & ZFS_READONLY) != 0); 2488 XVA_SET_RTN(xvap, XAT_READONLY); 2489 } 2490 2491 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) { 2492 xoap->xoa_system = 2493 ((zp->z_pflags & ZFS_SYSTEM) != 0); 2494 XVA_SET_RTN(xvap, XAT_SYSTEM); 2495 } 2496 2497 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) { 2498 xoap->xoa_hidden = 2499 ((zp->z_pflags & ZFS_HIDDEN) != 0); 2500 XVA_SET_RTN(xvap, XAT_HIDDEN); 2501 } 2502 2503 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { 2504 xoap->xoa_nounlink = 2505 ((zp->z_pflags & ZFS_NOUNLINK) != 0); 2506 XVA_SET_RTN(xvap, XAT_NOUNLINK); 2507 } 2508 2509 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { 2510 xoap->xoa_immutable = 2511 ((zp->z_pflags & ZFS_IMMUTABLE) != 0); 2512 XVA_SET_RTN(xvap, XAT_IMMUTABLE); 2513 } 2514 2515 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { 2516 xoap->xoa_appendonly = 2517 ((zp->z_pflags & ZFS_APPENDONLY) != 0); 2518 XVA_SET_RTN(xvap, XAT_APPENDONLY); 2519 } 2520 2521 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { 2522 xoap->xoa_nodump = 2523 ((zp->z_pflags & ZFS_NODUMP) != 0); 2524 XVA_SET_RTN(xvap, XAT_NODUMP); 2525 } 2526 2527 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) { 2528 xoap->xoa_opaque = 2529 ((zp->z_pflags & ZFS_OPAQUE) != 0); 2530 XVA_SET_RTN(xvap, XAT_OPAQUE); 2531 } 2532 2533 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { 2534 xoap->xoa_av_quarantined = 2535 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0); 2536 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED); 2537 } 2538 2539 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { 2540 xoap->xoa_av_modified = 2541 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0); 2542 XVA_SET_RTN(xvap, XAT_AV_MODIFIED); 2543 } 2544 2545 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) && 2546 vp->v_type == VREG) { 2547 zfs_sa_get_scanstamp(zp, xvap); 2548 } 2549 2550 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) { 2551 uint64_t times[2]; 2552 2553 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs), 2554 times, sizeof (times)); 2555 ZFS_TIME_DECODE(&xoap->xoa_createtime, times); 2556 XVA_SET_RTN(xvap, XAT_CREATETIME); 2557 } 2558 2559 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) { 2560 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0); 2561 XVA_SET_RTN(xvap, XAT_REPARSE); 2562 } 2563 if (XVA_ISSET_REQ(xvap, XAT_GEN)) { 2564 xoap->xoa_generation = zp->z_gen; 2565 XVA_SET_RTN(xvap, XAT_GEN); 2566 } 2567 2568 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) { 2569 xoap->xoa_offline = 2570 ((zp->z_pflags & ZFS_OFFLINE) != 0); 2571 XVA_SET_RTN(xvap, XAT_OFFLINE); 2572 } 2573 2574 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) { 2575 xoap->xoa_sparse = 2576 ((zp->z_pflags & ZFS_SPARSE) != 0); 2577 XVA_SET_RTN(xvap, XAT_SPARSE); 2578 } 2579 } 2580 2581 
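	/*
	 * Decode the timestamps: atime is cached in the znode, while
	 * mtime and ctime were fetched by the SA bulk lookup above.
	 */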
ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime); 2582 ZFS_TIME_DECODE(&vap->va_mtime, mtime); 2583 ZFS_TIME_DECODE(&vap->va_ctime, ctime); 2584 2585 mutex_exit(&zp->z_lock); 2586 2587 sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks); 2588 2589 if (zp->z_blksz == 0) { 2590 /* 2591 * Block size hasn't been set; suggest maximal I/O transfers. 2592 */ 2593 vap->va_blksize = zfsvfs->z_max_blksz; 2594 } 2595 2596 ZFS_EXIT(zfsvfs); 2597 return (0); 2598 } 2599 2600 /* 2601 * Set the file attributes to the values contained in the 2602 * vattr structure. 2603 * 2604 * IN: vp - vnode of file to be modified. 2605 * vap - new attribute values. 2606 * If AT_XVATTR set, then optional attrs are being set 2607 * flags - ATTR_UTIME set if non-default time values provided. 2608 * - ATTR_NOACLCHECK (CIFS context only). 2609 * cr - credentials of caller. 2610 * ct - caller context 2611 * 2612 * RETURN: 0 if success 2613 * error code if failure 2614 * 2615 * Timestamps: 2616 * vp - ctime updated, mtime updated if size changed. 2617 */ 2618 /* ARGSUSED */ 2619 static int 2620 zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, 2621 caller_context_t *ct) 2622 { 2623 znode_t *zp = VTOZ(vp); 2624 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2625 zilog_t *zilog; 2626 dmu_tx_t *tx; 2627 vattr_t oldva; 2628 xvattr_t tmpxvattr; 2629 uint_t mask = vap->va_mask; 2630 uint_t saved_mask; 2631 int trim_mask = 0; 2632 uint64_t new_mode; 2633 uint64_t new_uid, new_gid; 2634 uint64_t xattr_obj; 2635 uint64_t mtime[2], ctime[2]; 2636 znode_t *attrzp; 2637 int need_policy = FALSE; 2638 int err, err2; 2639 zfs_fuid_info_t *fuidp = NULL; 2640 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ 2641 xoptattr_t *xoap; 2642 zfs_acl_t *aclp; 2643 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; 2644 boolean_t fuid_dirtied = B_FALSE; 2645 sa_bulk_attr_t bulk[7], xattr_bulk[7]; 2646 int count = 0, xattr_count = 0; 2647 2648 if (mask == 0) 2649 return (0); 2650 2651 if (mask & AT_NOSET) 2652 return (EINVAL); 2653 2654 ZFS_ENTER(zfsvfs); 2655 ZFS_VERIFY_ZP(zp); 2656 2657 zilog = zfsvfs->z_log; 2658 2659 /* 2660 * Make sure that if we have ephemeral uid/gid or xvattr specified 2661 * that file system is at proper version level 2662 */ 2663 2664 if (zfsvfs->z_use_fuids == B_FALSE && 2665 (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) || 2666 ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) || 2667 (mask & AT_XVATTR))) { 2668 ZFS_EXIT(zfsvfs); 2669 return (EINVAL); 2670 } 2671 2672 if (mask & AT_SIZE && vp->v_type == VDIR) { 2673 ZFS_EXIT(zfsvfs); 2674 return (EISDIR); 2675 } 2676 2677 if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) { 2678 ZFS_EXIT(zfsvfs); 2679 return (EINVAL); 2680 } 2681 2682 /* 2683 * If this is an xvattr_t, then get a pointer to the structure of 2684 * optional attributes. If this is NULL, then we have a vattr_t. 2685 */ 2686 xoap = xva_getxoptattr(xvap); 2687 2688 xva_init(&tmpxvattr); 2689 2690 /* 2691 * Immutable files can only alter immutable bit and atime 2692 */ 2693 if ((zp->z_pflags & ZFS_IMMUTABLE) && 2694 ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) || 2695 ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) { 2696 ZFS_EXIT(zfsvfs); 2697 return (EPERM); 2698 } 2699 2700 if ((mask & AT_SIZE) && (zp->z_pflags & ZFS_READONLY)) { 2701 ZFS_EXIT(zfsvfs); 2702 return (EPERM); 2703 } 2704 2705 /* 2706 * Verify timestamps doesn't overflow 32 bits. 
2707 * ZFS can handle large timestamps, but 32bit syscalls can't 2708 * handle times greater than 2039. This check should be removed 2709 * once large timestamps are fully supported. 2710 */ 2711 if (mask & (AT_ATIME | AT_MTIME)) { 2712 if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) || 2713 ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) { 2714 ZFS_EXIT(zfsvfs); 2715 return (EOVERFLOW); 2716 } 2717 } 2718 2719 top: 2720 attrzp = NULL; 2721 aclp = NULL; 2722 2723 /* Can this be moved to before the top label? */ 2724 if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) { 2725 ZFS_EXIT(zfsvfs); 2726 return (EROFS); 2727 } 2728 2729 /* 2730 * First validate permissions 2731 */ 2732 2733 if (mask & AT_SIZE) { 2734 err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr); 2735 if (err) { 2736 ZFS_EXIT(zfsvfs); 2737 return (err); 2738 } 2739 /* 2740 * XXX - Note, we are not providing any open 2741 * mode flags here (like FNDELAY), so we may 2742 * block if there are locks present... this 2743 * should be addressed in openat(). 2744 */ 2745 /* XXX - would it be OK to generate a log record here? */ 2746 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE); 2747 if (err) { 2748 ZFS_EXIT(zfsvfs); 2749 return (err); 2750 } 2751 } 2752 2753 if (mask & (AT_ATIME|AT_MTIME) || 2754 ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) || 2755 XVA_ISSET_REQ(xvap, XAT_READONLY) || 2756 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) || 2757 XVA_ISSET_REQ(xvap, XAT_OFFLINE) || 2758 XVA_ISSET_REQ(xvap, XAT_SPARSE) || 2759 XVA_ISSET_REQ(xvap, XAT_CREATETIME) || 2760 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) { 2761 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0, 2762 skipaclchk, cr); 2763 } 2764 2765 if (mask & (AT_UID|AT_GID)) { 2766 int idmask = (mask & (AT_UID|AT_GID)); 2767 int take_owner; 2768 int take_group; 2769 2770 /* 2771 * NOTE: even if a new mode is being set, 2772 * we may clear S_ISUID/S_ISGID bits. 2773 */ 2774 2775 if (!(mask & AT_MODE)) 2776 vap->va_mode = zp->z_mode; 2777 2778 /* 2779 * Take ownership or chgrp to group we are a member of 2780 */ 2781 2782 take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr)); 2783 take_group = (mask & AT_GID) && 2784 zfs_groupmember(zfsvfs, vap->va_gid, cr); 2785 2786 /* 2787 * If both AT_UID and AT_GID are set then take_owner and 2788 * take_group must both be set in order to allow taking 2789 * ownership. 2790 * 2791 * Otherwise, send the check through secpolicy_vnode_setattr() 2792 * 2793 */ 2794 2795 if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) || 2796 ((idmask == AT_UID) && take_owner) || 2797 ((idmask == AT_GID) && take_group)) { 2798 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0, 2799 skipaclchk, cr) == 0) { 2800 /* 2801 * Remove setuid/setgid for non-privileged users 2802 */ 2803 secpolicy_setid_clear(vap, cr); 2804 trim_mask = (mask & (AT_UID|AT_GID)); 2805 } else { 2806 need_policy = TRUE; 2807 } 2808 } else { 2809 need_policy = TRUE; 2810 } 2811 } 2812 2813 mutex_enter(&zp->z_lock); 2814 oldva.va_mode = zp->z_mode; 2815 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid); 2816 if (mask & AT_XVATTR) { 2817 /* 2818 * Update xvattr mask to include only those attributes 2819 * that are actually changing. 2820 * 2821 * the bits will be restored prior to actually setting 2822 * the attributes so the caller thinks they were set. 
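		 * tmpxvattr records which requested bits turned out to be
		 * no-ops; they are OR'd back into xvap after the policy
		 * checks (see the "restore trimmed off masks" block below).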
2823 */ 2824 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { 2825 if (xoap->xoa_appendonly != 2826 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) { 2827 need_policy = TRUE; 2828 } else { 2829 XVA_CLR_REQ(xvap, XAT_APPENDONLY); 2830 XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY); 2831 } 2832 } 2833 2834 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { 2835 if (xoap->xoa_nounlink != 2836 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) { 2837 need_policy = TRUE; 2838 } else { 2839 XVA_CLR_REQ(xvap, XAT_NOUNLINK); 2840 XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK); 2841 } 2842 } 2843 2844 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { 2845 if (xoap->xoa_immutable != 2846 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) { 2847 need_policy = TRUE; 2848 } else { 2849 XVA_CLR_REQ(xvap, XAT_IMMUTABLE); 2850 XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE); 2851 } 2852 } 2853 2854 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { 2855 if (xoap->xoa_nodump != 2856 ((zp->z_pflags & ZFS_NODUMP) != 0)) { 2857 need_policy = TRUE; 2858 } else { 2859 XVA_CLR_REQ(xvap, XAT_NODUMP); 2860 XVA_SET_REQ(&tmpxvattr, XAT_NODUMP); 2861 } 2862 } 2863 2864 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { 2865 if (xoap->xoa_av_modified != 2866 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) { 2867 need_policy = TRUE; 2868 } else { 2869 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED); 2870 XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED); 2871 } 2872 } 2873 2874 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { 2875 if ((vp->v_type != VREG && 2876 xoap->xoa_av_quarantined) || 2877 xoap->xoa_av_quarantined != 2878 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) { 2879 need_policy = TRUE; 2880 } else { 2881 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED); 2882 XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED); 2883 } 2884 } 2885 2886 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) { 2887 mutex_exit(&zp->z_lock); 2888 ZFS_EXIT(zfsvfs); 2889 return (EPERM); 2890 } 2891 2892 if (need_policy == FALSE && 2893 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) || 2894 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) { 2895 need_policy = TRUE; 2896 } 2897 } 2898 2899 mutex_exit(&zp->z_lock); 2900 2901 if (mask & AT_MODE) { 2902 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) { 2903 err = secpolicy_setid_setsticky_clear(vp, vap, 2904 &oldva, cr); 2905 if (err) { 2906 ZFS_EXIT(zfsvfs); 2907 return (err); 2908 } 2909 trim_mask |= AT_MODE; 2910 } else { 2911 need_policy = TRUE; 2912 } 2913 } 2914 2915 if (need_policy) { 2916 /* 2917 * If trim_mask is set then take ownership 2918 * has been granted or write_acl is present and user 2919 * has the ability to modify mode. In that case remove 2920 * UID|GID and or MODE from mask so that 2921 * secpolicy_vnode_setattr() doesn't revoke it. 
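		 * The caller's va_mask is saved before trimming and
		 * restored after secpolicy_vnode_setattr() returns, so the
		 * trim is never visible to the caller.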
2922 */ 2923 2924 if (trim_mask) { 2925 saved_mask = vap->va_mask; 2926 vap->va_mask &= ~trim_mask; 2927 } 2928 err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags, 2929 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp); 2930 if (err) { 2931 ZFS_EXIT(zfsvfs); 2932 return (err); 2933 } 2934 2935 if (trim_mask) 2936 vap->va_mask |= saved_mask; 2937 } 2938 2939 /* 2940 * secpolicy_vnode_setattr, or take ownership may have 2941 * changed va_mask 2942 */ 2943 mask = vap->va_mask; 2944 2945 if ((mask & (AT_UID | AT_GID))) { 2946 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), 2947 &xattr_obj, sizeof (xattr_obj)); 2948 2949 if (err == 0 && xattr_obj) { 2950 err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp); 2951 if (err) 2952 goto out2; 2953 } 2954 if (mask & AT_UID) { 2955 new_uid = zfs_fuid_create(zfsvfs, 2956 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp); 2957 if (new_uid != zp->z_uid && 2958 zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) { 2959 if (attrzp) 2960 VN_RELE(ZTOV(attrzp)); 2961 err = EDQUOT; 2962 goto out2; 2963 } 2964 } 2965 2966 if (mask & AT_GID) { 2967 new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid, 2968 cr, ZFS_GROUP, &fuidp); 2969 if (new_gid != zp->z_gid && 2970 zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) { 2971 if (attrzp) 2972 VN_RELE(ZTOV(attrzp)); 2973 err = EDQUOT; 2974 goto out2; 2975 } 2976 } 2977 } 2978 tx = dmu_tx_create(zfsvfs->z_os); 2979 2980 if (mask & AT_MODE) { 2981 uint64_t pmode = zp->z_mode; 2982 uint64_t acl_obj; 2983 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT); 2984 2985 if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED && 2986 !(zp->z_pflags & ZFS_ACL_TRIVIAL)) { 2987 err = EPERM; 2988 goto out; 2989 } 2990 2991 if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)) 2992 goto out; 2993 2994 mutex_enter(&zp->z_lock); 2995 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) { 2996 /* 2997 * Are we upgrading ACL from old V0 format 2998 * to V1 format? 2999 */ 3000 if (zfsvfs->z_version >= ZPL_VERSION_FUID && 3001 zfs_znode_acl_version(zp) == 3002 ZFS_ACL_VERSION_INITIAL) { 3003 dmu_tx_hold_free(tx, acl_obj, 0, 3004 DMU_OBJECT_END); 3005 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 3006 0, aclp->z_acl_bytes); 3007 } else { 3008 dmu_tx_hold_write(tx, acl_obj, 0, 3009 aclp->z_acl_bytes); 3010 } 3011 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) { 3012 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 3013 0, aclp->z_acl_bytes); 3014 } 3015 mutex_exit(&zp->z_lock); 3016 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); 3017 } else { 3018 if ((mask & AT_XVATTR) && 3019 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) 3020 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); 3021 else 3022 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 3023 } 3024 3025 if (attrzp) { 3026 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE); 3027 } 3028 3029 fuid_dirtied = zfsvfs->z_fuid_dirty; 3030 if (fuid_dirtied) 3031 zfs_fuid_txhold(zfsvfs, tx); 3032 3033 zfs_sa_upgrade_txholds(tx, zp); 3034 3035 err = dmu_tx_assign(tx, TXG_NOWAIT); 3036 if (err) { 3037 if (err == ERESTART) 3038 dmu_tx_wait(tx); 3039 goto out; 3040 } 3041 3042 count = 0; 3043 /* 3044 * Set each attribute requested. 3045 * We group settings according to the locks they need to acquire. 3046 * 3047 * Note: you cannot set ctime directly, although it will be 3048 * updated as a side-effect of calling this function. 
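	 * Lock ordering: when uid, gid or mode change, z_acl_lock is
	 * taken before z_lock, and attrzp's locks are taken after the
	 * primary znode's.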
3049 */ 3050 3051 3052 if (mask & (AT_UID|AT_GID|AT_MODE)) 3053 mutex_enter(&zp->z_acl_lock); 3054 mutex_enter(&zp->z_lock); 3055 3056 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, 3057 &zp->z_pflags, sizeof (zp->z_pflags)); 3058 3059 if (attrzp) { 3060 if (mask & (AT_UID|AT_GID|AT_MODE)) 3061 mutex_enter(&attrzp->z_acl_lock); 3062 mutex_enter(&attrzp->z_lock); 3063 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, 3064 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags, 3065 sizeof (attrzp->z_pflags)); 3066 } 3067 3068 if (mask & (AT_UID|AT_GID)) { 3069 3070 if (mask & AT_UID) { 3071 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, 3072 &new_uid, sizeof (new_uid)); 3073 zp->z_uid = new_uid; 3074 if (attrzp) { 3075 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, 3076 SA_ZPL_UID(zfsvfs), NULL, &new_uid, 3077 sizeof (new_uid)); 3078 attrzp->z_uid = new_uid; 3079 } 3080 } 3081 3082 if (mask & AT_GID) { 3083 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), 3084 NULL, &new_gid, sizeof (new_gid)); 3085 zp->z_gid = new_gid; 3086 if (attrzp) { 3087 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, 3088 SA_ZPL_GID(zfsvfs), NULL, &new_gid, 3089 sizeof (new_gid)); 3090 attrzp->z_gid = new_gid; 3091 } 3092 } 3093 if (!(mask & AT_MODE)) { 3094 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), 3095 NULL, &new_mode, sizeof (new_mode)); 3096 new_mode = zp->z_mode; 3097 } 3098 err = zfs_acl_chown_setattr(zp); 3099 ASSERT(err == 0); 3100 if (attrzp) { 3101 err = zfs_acl_chown_setattr(attrzp); 3102 ASSERT(err == 0); 3103 } 3104 } 3105 3106 if (mask & AT_MODE) { 3107 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, 3108 &new_mode, sizeof (new_mode)); 3109 zp->z_mode = new_mode; 3110 ASSERT3U((uintptr_t)aclp, !=, NULL); 3111 err = zfs_aclset_common(zp, aclp, cr, tx); 3112 ASSERT0(err); 3113 if (zp->z_acl_cached) 3114 zfs_acl_free(zp->z_acl_cached); 3115 zp->z_acl_cached = aclp; 3116 aclp = NULL; 3117 } 3118 3119 3120 if (mask & AT_ATIME) { 3121 ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime); 3122 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, 3123 &zp->z_atime, sizeof (zp->z_atime)); 3124 } 3125 3126 if (mask & AT_MTIME) { 3127 ZFS_TIME_ENCODE(&vap->va_mtime, mtime); 3128 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, 3129 mtime, sizeof (mtime)); 3130 } 3131 3132 /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */ 3133 if (mask & AT_SIZE && !(mask & AT_MTIME)) { 3134 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), 3135 NULL, mtime, sizeof (mtime)); 3136 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, 3137 &ctime, sizeof (ctime)); 3138 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, 3139 B_TRUE); 3140 } else if (mask != 0) { 3141 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, 3142 &ctime, sizeof (ctime)); 3143 zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime, 3144 B_TRUE); 3145 if (attrzp) { 3146 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, 3147 SA_ZPL_CTIME(zfsvfs), NULL, 3148 &ctime, sizeof (ctime)); 3149 zfs_tstamp_update_setup(attrzp, STATE_CHANGED, 3150 mtime, ctime, B_TRUE); 3151 } 3152 } 3153 /* 3154 * Do this after setting timestamps to prevent timestamp 3155 * update from toggling bit 3156 */ 3157 3158 if (xoap && (mask & AT_XVATTR)) { 3159 3160 /* 3161 * restore trimmed off masks 3162 * so that return masks can be set for caller. 
3163 */ 3164 3165 if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) { 3166 XVA_SET_REQ(xvap, XAT_APPENDONLY); 3167 } 3168 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) { 3169 XVA_SET_REQ(xvap, XAT_NOUNLINK); 3170 } 3171 if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) { 3172 XVA_SET_REQ(xvap, XAT_IMMUTABLE); 3173 } 3174 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) { 3175 XVA_SET_REQ(xvap, XAT_NODUMP); 3176 } 3177 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) { 3178 XVA_SET_REQ(xvap, XAT_AV_MODIFIED); 3179 } 3180 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) { 3181 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED); 3182 } 3183 3184 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) 3185 ASSERT(vp->v_type == VREG); 3186 3187 zfs_xvattr_set(zp, xvap, tx); 3188 } 3189 3190 if (fuid_dirtied) 3191 zfs_fuid_sync(zfsvfs, tx); 3192 3193 if (mask != 0) 3194 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp); 3195 3196 mutex_exit(&zp->z_lock); 3197 if (mask & (AT_UID|AT_GID|AT_MODE)) 3198 mutex_exit(&zp->z_acl_lock); 3199 3200 if (attrzp) { 3201 if (mask & (AT_UID|AT_GID|AT_MODE)) 3202 mutex_exit(&attrzp->z_acl_lock); 3203 mutex_exit(&attrzp->z_lock); 3204 } 3205 out: 3206 if (err == 0 && attrzp) { 3207 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk, 3208 xattr_count, tx); 3209 ASSERT(err2 == 0); 3210 } 3211 3212 if (attrzp) 3213 VN_RELE(ZTOV(attrzp)); 3214 if (aclp) 3215 zfs_acl_free(aclp); 3216 3217 if (fuidp) { 3218 zfs_fuid_info_free(fuidp); 3219 fuidp = NULL; 3220 } 3221 3222 if (err) { 3223 dmu_tx_abort(tx); 3224 if (err == ERESTART) 3225 goto top; 3226 } else { 3227 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx); 3228 dmu_tx_commit(tx); 3229 } 3230 3231 out2: 3232 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 3233 zil_commit(zilog, 0); 3234 3235 ZFS_EXIT(zfsvfs); 3236 return (err); 3237 } 3238 3239 typedef struct zfs_zlock { 3240 krwlock_t *zl_rwlock; /* lock we acquired */ 3241 znode_t *zl_znode; /* znode we held */ 3242 struct zfs_zlock *zl_next; /* next in list */ 3243 } zfs_zlock_t; 3244 3245 /* 3246 * Drop locks and release vnodes that were held by zfs_rename_lock(). 3247 */ 3248 static void 3249 zfs_rename_unlock(zfs_zlock_t **zlpp) 3250 { 3251 zfs_zlock_t *zl; 3252 3253 while ((zl = *zlpp) != NULL) { 3254 if (zl->zl_znode != NULL) 3255 VN_RELE(ZTOV(zl->zl_znode)); 3256 rw_exit(zl->zl_rwlock); 3257 *zlpp = zl->zl_next; 3258 kmem_free(zl, sizeof (*zl)); 3259 } 3260 } 3261 3262 /* 3263 * Search back through the directory tree, using the ".." entries. 3264 * Lock each directory in the chain to prevent concurrent renames. 3265 * Fail any attempt to move a directory into one of its own descendants. 3266 * XXX - z_parent_lock can overlap with map or grow locks 3267 */ 3268 static int 3269 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp) 3270 { 3271 zfs_zlock_t *zl; 3272 znode_t *zp = tdzp; 3273 uint64_t rootid = zp->z_zfsvfs->z_root; 3274 uint64_t oidp = zp->z_id; 3275 krwlock_t *rwlp = &szp->z_parent_lock; 3276 krw_t rw = RW_WRITER; 3277 3278 /* 3279 * First pass write-locks szp and compares to zp->z_id. 3280 * Later passes read-lock zp and compare to zp->z_parent. 3281 */ 3282 do { 3283 if (!rw_tryenter(rwlp, rw)) { 3284 /* 3285 * Another thread is renaming in this path. 3286 * Note that if we are a WRITER, we don't have any 3287 * parent_locks held yet. 
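			 * Deadlock avoidance: on later passes, if the znode
			 * we are blocked on has a higher object id than the
			 * znode being renamed, drop everything and restart
			 * from the target directory; otherwise simply wait
			 * for the other rename to finish.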
3288 */ 3289 if (rw == RW_READER && zp->z_id > szp->z_id) { 3290 /* 3291 * Drop our locks and restart 3292 */ 3293 zfs_rename_unlock(&zl); 3294 *zlpp = NULL; 3295 zp = tdzp; 3296 oidp = zp->z_id; 3297 rwlp = &szp->z_parent_lock; 3298 rw = RW_WRITER; 3299 continue; 3300 } else { 3301 /* 3302 * Wait for other thread to drop its locks 3303 */ 3304 rw_enter(rwlp, rw); 3305 } 3306 } 3307 3308 zl = kmem_alloc(sizeof (*zl), KM_SLEEP); 3309 zl->zl_rwlock = rwlp; 3310 zl->zl_znode = NULL; 3311 zl->zl_next = *zlpp; 3312 *zlpp = zl; 3313 3314 if (oidp == szp->z_id) /* We're a descendant of szp */ 3315 return (EINVAL); 3316 3317 if (oidp == rootid) /* We've hit the top */ 3318 return (0); 3319 3320 if (rw == RW_READER) { /* i.e. not the first pass */ 3321 int error = zfs_zget(zp->z_zfsvfs, oidp, &zp); 3322 if (error) 3323 return (error); 3324 zl->zl_znode = zp; 3325 } 3326 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zp->z_zfsvfs), 3327 &oidp, sizeof (oidp)); 3328 rwlp = &zp->z_parent_lock; 3329 rw = RW_READER; 3330 3331 } while (zp->z_id != sdzp->z_id); 3332 3333 return (0); 3334 } 3335 3336 /* 3337 * Move an entry from the provided source directory to the target 3338 * directory. Change the entry name as indicated. 3339 * 3340 * IN: sdvp - Source directory containing the "old entry". 3341 * snm - Old entry name. 3342 * tdvp - Target directory to contain the "new entry". 3343 * tnm - New entry name. 3344 * cr - credentials of caller. 3345 * ct - caller context 3346 * flags - case flags 3347 * 3348 * RETURN: 0 if success 3349 * error code if failure 3350 * 3351 * Timestamps: 3352 * sdvp,tdvp - ctime|mtime updated 3353 */ 3354 /*ARGSUSED*/ 3355 static int 3356 zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr, 3357 caller_context_t *ct, int flags) 3358 { 3359 znode_t *tdzp, *szp, *tzp; 3360 znode_t *sdzp = VTOZ(sdvp); 3361 zfsvfs_t *zfsvfs = sdzp->z_zfsvfs; 3362 zilog_t *zilog; 3363 vnode_t *realvp; 3364 zfs_dirlock_t *sdl, *tdl; 3365 dmu_tx_t *tx; 3366 zfs_zlock_t *zl; 3367 int cmp, serr, terr; 3368 int error = 0; 3369 int zflg = 0; 3370 3371 ZFS_ENTER(zfsvfs); 3372 ZFS_VERIFY_ZP(sdzp); 3373 zilog = zfsvfs->z_log; 3374 3375 /* 3376 * Make sure we have the real vp for the target directory. 3377 */ 3378 if (VOP_REALVP(tdvp, &realvp, ct) == 0) 3379 tdvp = realvp; 3380 3381 if (tdvp->v_vfsp != sdvp->v_vfsp || zfsctl_is_node(tdvp)) { 3382 ZFS_EXIT(zfsvfs); 3383 return (EXDEV); 3384 } 3385 3386 tdzp = VTOZ(tdvp); 3387 ZFS_VERIFY_ZP(tdzp); 3388 if (zfsvfs->z_utf8 && u8_validate(tnm, 3389 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 3390 ZFS_EXIT(zfsvfs); 3391 return (EILSEQ); 3392 } 3393 3394 if (flags & FIGNORECASE) 3395 zflg |= ZCILOOK; 3396 3397 top: 3398 szp = NULL; 3399 tzp = NULL; 3400 zl = NULL; 3401 3402 /* 3403 * This is to prevent the creation of links into attribute space 3404 * by renaming a linked file into/outof an attribute directory. 3405 * See the comment in zfs_link() for why this is considered bad. 3406 */ 3407 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) { 3408 ZFS_EXIT(zfsvfs); 3409 return (EINVAL); 3410 } 3411 3412 /* 3413 * Lock source and target directory entries. To prevent deadlock, 3414 * a lock ordering must be defined. We lock the directory with 3415 * the smallest object id first, or if it's a tie, the one with 3416 * the lexically first name. 
3417 */ 3418 if (sdzp->z_id < tdzp->z_id) { 3419 cmp = -1; 3420 } else if (sdzp->z_id > tdzp->z_id) { 3421 cmp = 1; 3422 } else { 3423 /* 3424 * First compare the two name arguments without 3425 * considering any case folding. 3426 */ 3427 int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER); 3428 3429 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error); 3430 ASSERT(error == 0 || !zfsvfs->z_utf8); 3431 if (cmp == 0) { 3432 /* 3433 * POSIX: "If the old argument and the new argument 3434 * both refer to links to the same existing file, 3435 * the rename() function shall return successfully 3436 * and perform no other action." 3437 */ 3438 ZFS_EXIT(zfsvfs); 3439 return (0); 3440 } 3441 /* 3442 * If the file system is case-folding, then we may 3443 * have some more checking to do. A case-folding file 3444 * system is either supporting mixed case sensitivity 3445 * access or is completely case-insensitive. Note 3446 * that the file system is always case preserving. 3447 * 3448 * In mixed sensitivity mode case sensitive behavior 3449 * is the default. FIGNORECASE must be used to 3450 * explicitly request case insensitive behavior. 3451 * 3452 * If the source and target names provided differ only 3453 * by case (e.g., a request to rename 'tim' to 'Tim'), 3454 * we will treat this as a special case in the 3455 * case-insensitive mode: as long as the source name 3456 * is an exact match, we will allow this to proceed as 3457 * a name-change request. 3458 */ 3459 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE || 3460 (zfsvfs->z_case == ZFS_CASE_MIXED && 3461 flags & FIGNORECASE)) && 3462 u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST, 3463 &error) == 0) { 3464 /* 3465 * case preserving rename request, require exact 3466 * name matches 3467 */ 3468 zflg |= ZCIEXACT; 3469 zflg &= ~ZCILOOK; 3470 } 3471 } 3472 3473 /* 3474 * If the source and destination directories are the same, we should 3475 * grab the z_name_lock of that directory only once. 3476 */ 3477 if (sdzp == tdzp) { 3478 zflg |= ZHAVELOCK; 3479 rw_enter(&sdzp->z_name_lock, RW_READER); 3480 } 3481 3482 if (cmp < 0) { 3483 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp, 3484 ZEXISTS | zflg, NULL, NULL); 3485 terr = zfs_dirent_lock(&tdl, 3486 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL); 3487 } else { 3488 terr = zfs_dirent_lock(&tdl, 3489 tdzp, tnm, &tzp, zflg, NULL, NULL); 3490 serr = zfs_dirent_lock(&sdl, 3491 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg, 3492 NULL, NULL); 3493 } 3494 3495 if (serr) { 3496 /* 3497 * Source entry invalid or not there. 3498 */ 3499 if (!terr) { 3500 zfs_dirent_unlock(tdl); 3501 if (tzp) 3502 VN_RELE(ZTOV(tzp)); 3503 } 3504 3505 if (sdzp == tdzp) 3506 rw_exit(&sdzp->z_name_lock); 3507 3508 if (strcmp(snm, "..") == 0) 3509 serr = EINVAL; 3510 ZFS_EXIT(zfsvfs); 3511 return (serr); 3512 } 3513 if (terr) { 3514 zfs_dirent_unlock(sdl); 3515 VN_RELE(ZTOV(szp)); 3516 3517 if (sdzp == tdzp) 3518 rw_exit(&sdzp->z_name_lock); 3519 3520 if (strcmp(tnm, "..") == 0) 3521 terr = EINVAL; 3522 ZFS_EXIT(zfsvfs); 3523 return (terr); 3524 } 3525 3526 /* 3527 * Must have write access at the source to remove the old entry 3528 * and write access at the target to create the new entry. 3529 * Note that if target and source are the same, this can be 3530 * done in a single check. 3531 */ 3532 3533 if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)) 3534 goto out; 3535 3536 if (ZTOV(szp)->v_type == VDIR) { 3537 /* 3538 * Check to make sure rename is valid. 
3539 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d 3540 */ 3541 if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl)) 3542 goto out; 3543 } 3544 3545 /* 3546 * Does target exist? 3547 */ 3548 if (tzp) { 3549 /* 3550 * Source and target must be the same type. 3551 */ 3552 if (ZTOV(szp)->v_type == VDIR) { 3553 if (ZTOV(tzp)->v_type != VDIR) { 3554 error = ENOTDIR; 3555 goto out; 3556 } 3557 } else { 3558 if (ZTOV(tzp)->v_type == VDIR) { 3559 error = EISDIR; 3560 goto out; 3561 } 3562 } 3563 /* 3564 * POSIX dictates that when the source and target 3565 * entries refer to the same file object, rename 3566 * must do nothing and exit without error. 3567 */ 3568 if (szp->z_id == tzp->z_id) { 3569 error = 0; 3570 goto out; 3571 } 3572 } 3573 3574 vnevent_rename_src(ZTOV(szp), sdvp, snm, ct); 3575 if (tzp) 3576 vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct); 3577 3578 /* 3579 * notify the target directory if it is not the same 3580 * as source directory. 3581 */ 3582 if (tdvp != sdvp) { 3583 vnevent_rename_dest_dir(tdvp, ct); 3584 } 3585 3586 tx = dmu_tx_create(zfsvfs->z_os); 3587 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE); 3588 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE); 3589 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm); 3590 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm); 3591 if (sdzp != tdzp) { 3592 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE); 3593 zfs_sa_upgrade_txholds(tx, tdzp); 3594 } 3595 if (tzp) { 3596 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE); 3597 zfs_sa_upgrade_txholds(tx, tzp); 3598 } 3599 3600 zfs_sa_upgrade_txholds(tx, szp); 3601 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); 3602 error = dmu_tx_assign(tx, TXG_NOWAIT); 3603 if (error) { 3604 if (zl != NULL) 3605 zfs_rename_unlock(&zl); 3606 zfs_dirent_unlock(sdl); 3607 zfs_dirent_unlock(tdl); 3608 3609 if (sdzp == tdzp) 3610 rw_exit(&sdzp->z_name_lock); 3611 3612 VN_RELE(ZTOV(szp)); 3613 if (tzp) 3614 VN_RELE(ZTOV(tzp)); 3615 if (error == ERESTART) { 3616 dmu_tx_wait(tx); 3617 dmu_tx_abort(tx); 3618 goto top; 3619 } 3620 dmu_tx_abort(tx); 3621 ZFS_EXIT(zfsvfs); 3622 return (error); 3623 } 3624 3625 if (tzp) /* Attempt to remove the existing target */ 3626 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL); 3627 3628 if (error == 0) { 3629 error = zfs_link_create(tdl, szp, tx, ZRENAMING); 3630 if (error == 0) { 3631 szp->z_pflags |= ZFS_AV_MODIFIED; 3632 3633 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs), 3634 (void *)&szp->z_pflags, sizeof (uint64_t), tx); 3635 ASSERT0(error); 3636 3637 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL); 3638 if (error == 0) { 3639 zfs_log_rename(zilog, tx, TX_RENAME | 3640 (flags & FIGNORECASE ? TX_CI : 0), sdzp, 3641 sdl->dl_name, tdzp, tdl->dl_name, szp); 3642 3643 /* 3644 * Update path information for the target vnode 3645 */ 3646 vn_renamepath(tdvp, ZTOV(szp), tnm, 3647 strlen(tnm)); 3648 } else { 3649 /* 3650 * At this point, we have successfully created 3651 * the target name, but have failed to remove 3652 * the source name. Since the create was done 3653 * with the ZRENAMING flag, there are 3654 * complications; for one, the link count is 3655 * wrong. The easiest way to deal with this 3656 * is to remove the newly created target, and 3657 * return the original error. This must 3658 * succeed; fortunately, it is very unlikely to 3659 * fail, since we just created it. 
3660 */ 3661 VERIFY3U(zfs_link_destroy(tdl, szp, tx, 3662 ZRENAMING, NULL), ==, 0); 3663 } 3664 } 3665 } 3666 3667 dmu_tx_commit(tx); 3668 out: 3669 if (zl != NULL) 3670 zfs_rename_unlock(&zl); 3671 3672 zfs_dirent_unlock(sdl); 3673 zfs_dirent_unlock(tdl); 3674 3675 if (sdzp == tdzp) 3676 rw_exit(&sdzp->z_name_lock); 3677 3678 3679 VN_RELE(ZTOV(szp)); 3680 if (tzp) 3681 VN_RELE(ZTOV(tzp)); 3682 3683 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 3684 zil_commit(zilog, 0); 3685 3686 ZFS_EXIT(zfsvfs); 3687 return (error); 3688 } 3689 3690 /* 3691 * Insert the indicated symbolic reference entry into the directory. 3692 * 3693 * IN: dvp - Directory to contain new symbolic link. 3694 * link - Name for new symlink entry. 3695 * vap - Attributes of new entry. 3696 * target - Target path of new symlink. 3697 * cr - credentials of caller. 3698 * ct - caller context 3699 * flags - case flags 3700 * 3701 * RETURN: 0 if success 3702 * error code if failure 3703 * 3704 * Timestamps: 3705 * dvp - ctime|mtime updated 3706 */ 3707 /*ARGSUSED*/ 3708 static int 3709 zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr, 3710 caller_context_t *ct, int flags) 3711 { 3712 znode_t *zp, *dzp = VTOZ(dvp); 3713 zfs_dirlock_t *dl; 3714 dmu_tx_t *tx; 3715 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 3716 zilog_t *zilog; 3717 uint64_t len = strlen(link); 3718 int error; 3719 int zflg = ZNEW; 3720 zfs_acl_ids_t acl_ids; 3721 boolean_t fuid_dirtied; 3722 uint64_t txtype = TX_SYMLINK; 3723 3724 ASSERT(vap->va_type == VLNK); 3725 3726 ZFS_ENTER(zfsvfs); 3727 ZFS_VERIFY_ZP(dzp); 3728 zilog = zfsvfs->z_log; 3729 3730 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), 3731 NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 3732 ZFS_EXIT(zfsvfs); 3733 return (EILSEQ); 3734 } 3735 if (flags & FIGNORECASE) 3736 zflg |= ZCILOOK; 3737 3738 if (len > MAXPATHLEN) { 3739 ZFS_EXIT(zfsvfs); 3740 return (ENAMETOOLONG); 3741 } 3742 3743 if ((error = zfs_acl_ids_create(dzp, 0, 3744 vap, cr, NULL, &acl_ids)) != 0) { 3745 ZFS_EXIT(zfsvfs); 3746 return (error); 3747 } 3748 top: 3749 /* 3750 * Attempt to lock directory; fail if entry already exists. 
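	 * (zflg contains ZNEW, which is what requests the
	 * fail-if-the-name-already-exists behavior from zfs_dirent_lock().)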
3751 */ 3752 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL); 3753 if (error) { 3754 zfs_acl_ids_free(&acl_ids); 3755 ZFS_EXIT(zfsvfs); 3756 return (error); 3757 } 3758 3759 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { 3760 zfs_acl_ids_free(&acl_ids); 3761 zfs_dirent_unlock(dl); 3762 ZFS_EXIT(zfsvfs); 3763 return (error); 3764 } 3765 3766 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { 3767 zfs_acl_ids_free(&acl_ids); 3768 zfs_dirent_unlock(dl); 3769 ZFS_EXIT(zfsvfs); 3770 return (EDQUOT); 3771 } 3772 tx = dmu_tx_create(zfsvfs->z_os); 3773 fuid_dirtied = zfsvfs->z_fuid_dirty; 3774 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len)); 3775 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); 3776 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + 3777 ZFS_SA_BASE_ATTR_SIZE + len); 3778 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE); 3779 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { 3780 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 3781 acl_ids.z_aclp->z_acl_bytes); 3782 } 3783 if (fuid_dirtied) 3784 zfs_fuid_txhold(zfsvfs, tx); 3785 error = dmu_tx_assign(tx, TXG_NOWAIT); 3786 if (error) { 3787 zfs_dirent_unlock(dl); 3788 if (error == ERESTART) { 3789 dmu_tx_wait(tx); 3790 dmu_tx_abort(tx); 3791 goto top; 3792 } 3793 zfs_acl_ids_free(&acl_ids); 3794 dmu_tx_abort(tx); 3795 ZFS_EXIT(zfsvfs); 3796 return (error); 3797 } 3798 3799 /* 3800 * Create a new object for the symlink. 3801 * for version 4 ZPL datsets the symlink will be an SA attribute 3802 */ 3803 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); 3804 3805 if (fuid_dirtied) 3806 zfs_fuid_sync(zfsvfs, tx); 3807 3808 mutex_enter(&zp->z_lock); 3809 if (zp->z_is_sa) 3810 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs), 3811 link, len, tx); 3812 else 3813 zfs_sa_symlink(zp, link, len, tx); 3814 mutex_exit(&zp->z_lock); 3815 3816 zp->z_size = len; 3817 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs), 3818 &zp->z_size, sizeof (zp->z_size), tx); 3819 /* 3820 * Insert the new object into the directory. 3821 */ 3822 (void) zfs_link_create(dl, zp, tx, ZNEW); 3823 3824 if (flags & FIGNORECASE) 3825 txtype |= TX_CI; 3826 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link); 3827 3828 zfs_acl_ids_free(&acl_ids); 3829 3830 dmu_tx_commit(tx); 3831 3832 zfs_dirent_unlock(dl); 3833 3834 VN_RELE(ZTOV(zp)); 3835 3836 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 3837 zil_commit(zilog, 0); 3838 3839 ZFS_EXIT(zfsvfs); 3840 return (error); 3841 } 3842 3843 /* 3844 * Return, in the buffer contained in the provided uio structure, 3845 * the symbolic path referred to by vp. 3846 * 3847 * IN: vp - vnode of symbolic link. 3848 * uoip - structure to contain the link path. 3849 * cr - credentials of caller. 3850 * ct - caller context 3851 * 3852 * OUT: uio - structure to contain the link path. 
3853 * 3854 * RETURN: 0 if success 3855 * error code if failure 3856 * 3857 * Timestamps: 3858 * vp - atime updated 3859 */ 3860 /* ARGSUSED */ 3861 static int 3862 zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct) 3863 { 3864 znode_t *zp = VTOZ(vp); 3865 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 3866 int error; 3867 3868 ZFS_ENTER(zfsvfs); 3869 ZFS_VERIFY_ZP(zp); 3870 3871 mutex_enter(&zp->z_lock); 3872 if (zp->z_is_sa) 3873 error = sa_lookup_uio(zp->z_sa_hdl, 3874 SA_ZPL_SYMLINK(zfsvfs), uio); 3875 else 3876 error = zfs_sa_readlink(zp, uio); 3877 mutex_exit(&zp->z_lock); 3878 3879 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 3880 3881 ZFS_EXIT(zfsvfs); 3882 return (error); 3883 } 3884 3885 /* 3886 * Insert a new entry into directory tdvp referencing svp. 3887 * 3888 * IN: tdvp - Directory to contain new entry. 3889 * svp - vnode of new entry. 3890 * name - name of new entry. 3891 * cr - credentials of caller. 3892 * ct - caller context 3893 * 3894 * RETURN: 0 if success 3895 * error code if failure 3896 * 3897 * Timestamps: 3898 * tdvp - ctime|mtime updated 3899 * svp - ctime updated 3900 */ 3901 /* ARGSUSED */ 3902 static int 3903 zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr, 3904 caller_context_t *ct, int flags) 3905 { 3906 znode_t *dzp = VTOZ(tdvp); 3907 znode_t *tzp, *szp; 3908 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 3909 zilog_t *zilog; 3910 zfs_dirlock_t *dl; 3911 dmu_tx_t *tx; 3912 vnode_t *realvp; 3913 int error; 3914 int zf = ZNEW; 3915 uint64_t parent; 3916 uid_t owner; 3917 3918 ASSERT(tdvp->v_type == VDIR); 3919 3920 ZFS_ENTER(zfsvfs); 3921 ZFS_VERIFY_ZP(dzp); 3922 zilog = zfsvfs->z_log; 3923 3924 if (VOP_REALVP(svp, &realvp, ct) == 0) 3925 svp = realvp; 3926 3927 /* 3928 * POSIX dictates that we return EPERM here. 3929 * Better choices include ENOTSUP or EISDIR. 3930 */ 3931 if (svp->v_type == VDIR) { 3932 ZFS_EXIT(zfsvfs); 3933 return (EPERM); 3934 } 3935 3936 if (svp->v_vfsp != tdvp->v_vfsp || zfsctl_is_node(svp)) { 3937 ZFS_EXIT(zfsvfs); 3938 return (EXDEV); 3939 } 3940 3941 szp = VTOZ(svp); 3942 ZFS_VERIFY_ZP(szp); 3943 3944 /* Prevent links to .zfs/shares files */ 3945 3946 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs), 3947 &parent, sizeof (uint64_t))) != 0) { 3948 ZFS_EXIT(zfsvfs); 3949 return (error); 3950 } 3951 if (parent == zfsvfs->z_shares_dir) { 3952 ZFS_EXIT(zfsvfs); 3953 return (EPERM); 3954 } 3955 3956 if (zfsvfs->z_utf8 && u8_validate(name, 3957 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 3958 ZFS_EXIT(zfsvfs); 3959 return (EILSEQ); 3960 } 3961 if (flags & FIGNORECASE) 3962 zf |= ZCILOOK; 3963 3964 /* 3965 * We do not support links between attributes and non-attributes 3966 * because of the potential security risk of creating links 3967 * into "normal" file space in order to circumvent restrictions 3968 * imposed in attribute space. 3969 */ 3970 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) { 3971 ZFS_EXIT(zfsvfs); 3972 return (EINVAL); 3973 } 3974 3975 3976 owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER); 3977 if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) { 3978 ZFS_EXIT(zfsvfs); 3979 return (EPERM); 3980 } 3981 3982 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { 3983 ZFS_EXIT(zfsvfs); 3984 return (error); 3985 } 3986 3987 top: 3988 /* 3989 * Attempt to lock directory; fail if entry already exists. 
3990 */ 3991 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL); 3992 if (error) { 3993 ZFS_EXIT(zfsvfs); 3994 return (error); 3995 } 3996 3997 tx = dmu_tx_create(zfsvfs->z_os); 3998 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE); 3999 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); 4000 zfs_sa_upgrade_txholds(tx, szp); 4001 zfs_sa_upgrade_txholds(tx, dzp); 4002 error = dmu_tx_assign(tx, TXG_NOWAIT); 4003 if (error) { 4004 zfs_dirent_unlock(dl); 4005 if (error == ERESTART) { 4006 dmu_tx_wait(tx); 4007 dmu_tx_abort(tx); 4008 goto top; 4009 } 4010 dmu_tx_abort(tx); 4011 ZFS_EXIT(zfsvfs); 4012 return (error); 4013 } 4014 4015 error = zfs_link_create(dl, szp, tx, 0); 4016 4017 if (error == 0) { 4018 uint64_t txtype = TX_LINK; 4019 if (flags & FIGNORECASE) 4020 txtype |= TX_CI; 4021 zfs_log_link(zilog, tx, txtype, dzp, szp, name); 4022 } 4023 4024 dmu_tx_commit(tx); 4025 4026 zfs_dirent_unlock(dl); 4027 4028 if (error == 0) { 4029 vnevent_link(svp, ct); 4030 } 4031 4032 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 4033 zil_commit(zilog, 0); 4034 4035 ZFS_EXIT(zfsvfs); 4036 return (error); 4037 } 4038 4039 /* 4040 * zfs_null_putapage() is used when the file system has been force 4041 * unmounted. It just drops the pages. 4042 */ 4043 /* ARGSUSED */ 4044 static int 4045 zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, 4046 size_t *lenp, int flags, cred_t *cr) 4047 { 4048 pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR); 4049 return (0); 4050 } 4051 4052 /* 4053 * Push a page out to disk, klustering if possible. 4054 * 4055 * IN: vp - file to push page to. 4056 * pp - page to push. 4057 * flags - additional flags. 4058 * cr - credentials of caller. 4059 * 4060 * OUT: offp - start of range pushed. 4061 * lenp - len of range pushed. 4062 * 4063 * RETURN: 0 if success 4064 * error code if failure 4065 * 4066 * NOTE: callers must have locked the page to be pushed. On 4067 * exit, the page (and all other pages in the kluster) must be 4068 * unlocked. 4069 */ 4070 /* ARGSUSED */ 4071 static int 4072 zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, 4073 size_t *lenp, int flags, cred_t *cr) 4074 { 4075 znode_t *zp = VTOZ(vp); 4076 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4077 dmu_tx_t *tx; 4078 u_offset_t off, koff; 4079 size_t len, klen; 4080 int err; 4081 4082 off = pp->p_offset; 4083 len = PAGESIZE; 4084 /* 4085 * If our blocksize is bigger than the page size, try to kluster 4086 * multiple pages so that we write a full block (thus avoiding 4087 * a read-modify-write). 4088 */ 4089 if (off < zp->z_size && zp->z_blksz > PAGESIZE) { 4090 klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE); 4091 koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0; 4092 ASSERT(koff <= zp->z_size); 4093 if (koff + klen > zp->z_size) 4094 klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE); 4095 pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags); 4096 } 4097 ASSERT3U(btop(len), ==, btopr(len)); 4098 4099 /* 4100 * Can't push pages past end-of-file. 
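	 * Pages that lie entirely beyond z_size are simply dropped; a
	 * kluster that straddles EOF is split below so that only the
	 * in-file portion is written.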
4101 */ 4102 if (off >= zp->z_size) { 4103 /* ignore all pages */ 4104 err = 0; 4105 goto out; 4106 } else if (off + len > zp->z_size) { 4107 int npages = btopr(zp->z_size - off); 4108 page_t *trunc; 4109 4110 page_list_break(&pp, &trunc, npages); 4111 /* ignore pages past end of file */ 4112 if (trunc) 4113 pvn_write_done(trunc, flags); 4114 len = zp->z_size - off; 4115 } 4116 4117 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) || 4118 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) { 4119 err = EDQUOT; 4120 goto out; 4121 } 4122 top: 4123 tx = dmu_tx_create(zfsvfs->z_os); 4124 dmu_tx_hold_write(tx, zp->z_id, off, len); 4125 4126 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 4127 zfs_sa_upgrade_txholds(tx, zp); 4128 err = dmu_tx_assign(tx, TXG_NOWAIT); 4129 if (err != 0) { 4130 if (err == ERESTART) { 4131 dmu_tx_wait(tx); 4132 dmu_tx_abort(tx); 4133 goto top; 4134 } 4135 dmu_tx_abort(tx); 4136 goto out; 4137 } 4138 4139 if (zp->z_blksz <= PAGESIZE) { 4140 caddr_t va = zfs_map_page(pp, S_READ); 4141 ASSERT3U(len, <=, PAGESIZE); 4142 dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx); 4143 zfs_unmap_page(pp, va); 4144 } else { 4145 err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx); 4146 } 4147 4148 if (err == 0) { 4149 uint64_t mtime[2], ctime[2]; 4150 sa_bulk_attr_t bulk[3]; 4151 int count = 0; 4152 4153 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, 4154 &mtime, 16); 4155 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, 4156 &ctime, 16); 4157 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, 4158 &zp->z_pflags, 8); 4159 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, 4160 B_TRUE); 4161 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0); 4162 } 4163 dmu_tx_commit(tx); 4164 4165 out: 4166 pvn_write_done(pp, (err ? B_ERROR : 0) | flags); 4167 if (offp) 4168 *offp = off; 4169 if (lenp) 4170 *lenp = len; 4171 4172 return (err); 4173 } 4174 4175 /* 4176 * Copy the portion of the file indicated from pages into the file. 4177 * The pages are stored in a page list attached to the files vnode. 4178 * 4179 * IN: vp - vnode of file to push page data to. 4180 * off - position in file to put data. 4181 * len - amount of data to write. 4182 * flags - flags to control the operation. 4183 * cr - credentials of caller. 4184 * ct - caller context. 4185 * 4186 * RETURN: 0 if success 4187 * error code if failure 4188 * 4189 * Timestamps: 4190 * vp - ctime|mtime updated 4191 */ 4192 /*ARGSUSED*/ 4193 static int 4194 zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr, 4195 caller_context_t *ct) 4196 { 4197 znode_t *zp = VTOZ(vp); 4198 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4199 page_t *pp; 4200 size_t io_len; 4201 u_offset_t io_off; 4202 uint_t blksz; 4203 rl_t *rl; 4204 int error = 0; 4205 4206 ZFS_ENTER(zfsvfs); 4207 ZFS_VERIFY_ZP(zp); 4208 4209 /* 4210 * There's nothing to do if no data is cached. 4211 */ 4212 if (!vn_has_cached_data(vp)) { 4213 ZFS_EXIT(zfsvfs); 4214 return (0); 4215 } 4216 4217 /* 4218 * Align this request to the file block size in case we kluster. 4219 * XXX - this can result in pretty aggresive locking, which can 4220 * impact simultanious read/write access. One option might be 4221 * to break up long requests (len == 0) into block-by-block 4222 * operations to get narrower locking. 
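	 * (A len of 0 is the "flush everything from off onward" case; it
	 * takes a whole-file range lock and walks the vnode's dirty page
	 * list below.)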
4223 */ 4224 blksz = zp->z_blksz; 4225 if (ISP2(blksz)) 4226 io_off = P2ALIGN_TYPED(off, blksz, u_offset_t); 4227 else 4228 io_off = 0; 4229 if (len > 0 && ISP2(blksz)) 4230 io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t); 4231 else 4232 io_len = 0; 4233 4234 if (io_len == 0) { 4235 /* 4236 * Search the entire vp list for pages >= io_off. 4237 */ 4238 rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER); 4239 error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr); 4240 goto out; 4241 } 4242 rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER); 4243 4244 if (off > zp->z_size) { 4245 /* past end of file */ 4246 zfs_range_unlock(rl); 4247 ZFS_EXIT(zfsvfs); 4248 return (0); 4249 } 4250 4251 len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off); 4252 4253 for (off = io_off; io_off < off + len; io_off += io_len) { 4254 if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) { 4255 pp = page_lookup(vp, io_off, 4256 (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED); 4257 } else { 4258 pp = page_lookup_nowait(vp, io_off, 4259 (flags & B_FREE) ? SE_EXCL : SE_SHARED); 4260 } 4261 4262 if (pp != NULL && pvn_getdirty(pp, flags)) { 4263 int err; 4264 4265 /* 4266 * Found a dirty page to push 4267 */ 4268 err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr); 4269 if (err) 4270 error = err; 4271 } else { 4272 io_len = PAGESIZE; 4273 } 4274 } 4275 out: 4276 zfs_range_unlock(rl); 4277 if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 4278 zil_commit(zfsvfs->z_log, zp->z_id); 4279 ZFS_EXIT(zfsvfs); 4280 return (error); 4281 } 4282 4283 /*ARGSUSED*/ 4284 void 4285 zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) 4286 { 4287 znode_t *zp = VTOZ(vp); 4288 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4289 int error; 4290 4291 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER); 4292 if (zp->z_sa_hdl == NULL) { 4293 /* 4294 * The fs has been unmounted, or we did a 4295 * suspend/resume and this file no longer exists. 4296 */ 4297 if (vn_has_cached_data(vp)) { 4298 (void) pvn_vplist_dirty(vp, 0, zfs_null_putapage, 4299 B_INVAL, cr); 4300 } 4301 4302 mutex_enter(&zp->z_lock); 4303 mutex_enter(&vp->v_lock); 4304 ASSERT(vp->v_count == 1); 4305 vp->v_count = 0; 4306 mutex_exit(&vp->v_lock); 4307 mutex_exit(&zp->z_lock); 4308 rw_exit(&zfsvfs->z_teardown_inactive_lock); 4309 zfs_znode_free(zp); 4310 return; 4311 } 4312 4313 /* 4314 * Attempt to push any data in the page cache. If this fails 4315 * we will get kicked out later in zfs_zinactive(). 4316 */ 4317 if (vn_has_cached_data(vp)) { 4318 (void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC, 4319 cr); 4320 } 4321 4322 if (zp->z_atime_dirty && zp->z_unlinked == 0) { 4323 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os); 4324 4325 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 4326 zfs_sa_upgrade_txholds(tx, zp); 4327 error = dmu_tx_assign(tx, TXG_WAIT); 4328 if (error) { 4329 dmu_tx_abort(tx); 4330 } else { 4331 mutex_enter(&zp->z_lock); 4332 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs), 4333 (void *)&zp->z_atime, sizeof (zp->z_atime), tx); 4334 zp->z_atime_dirty = 0; 4335 mutex_exit(&zp->z_lock); 4336 dmu_tx_commit(tx); 4337 } 4338 } 4339 4340 zfs_zinactive(zp); 4341 rw_exit(&zfsvfs->z_teardown_inactive_lock); 4342 } 4343 4344 /* 4345 * Bounds-check the seek operation. 
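 * Directory offsets are opaque ZAP cursor values rather than byte
 * offsets (see zfs_readdir() above), so directories are not
 * bounds-checked here.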
4346 * 4347 * IN: vp - vnode seeking within 4348 * ooff - old file offset 4349 * noffp - pointer to new file offset 4350 * ct - caller context 4351 * 4352 * RETURN: 0 if success 4353 * EINVAL if new offset invalid 4354 */ 4355 /* ARGSUSED */ 4356 static int 4357 zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, 4358 caller_context_t *ct) 4359 { 4360 if (vp->v_type == VDIR) 4361 return (0); 4362 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0); 4363 } 4364 4365 /* 4366 * Pre-filter the generic locking function to trap attempts to place 4367 * a mandatory lock on a memory mapped file. 4368 */ 4369 static int 4370 zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset, 4371 flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct) 4372 { 4373 znode_t *zp = VTOZ(vp); 4374 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4375 4376 ZFS_ENTER(zfsvfs); 4377 ZFS_VERIFY_ZP(zp); 4378 4379 /* 4380 * We are following the UFS semantics with respect to mapcnt 4381 * here: If we see that the file is mapped already, then we will 4382 * return an error, but we don't worry about races between this 4383 * function and zfs_map(). 4384 */ 4385 if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) { 4386 ZFS_EXIT(zfsvfs); 4387 return (EAGAIN); 4388 } 4389 ZFS_EXIT(zfsvfs); 4390 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct)); 4391 } 4392 4393 /* 4394 * If we can't find a page in the cache, we will create a new page 4395 * and fill it with file data. For efficiency, we may try to fill 4396 * multiple pages at once (klustering) to fill up the supplied page 4397 * list. Note that the pages to be filled are held with an exclusive 4398 * lock to prevent access by other threads while they are being filled. 4399 */ 4400 static int 4401 zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg, 4402 caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw) 4403 { 4404 znode_t *zp = VTOZ(vp); 4405 page_t *pp, *cur_pp; 4406 objset_t *os = zp->z_zfsvfs->z_os; 4407 u_offset_t io_off, total; 4408 size_t io_len; 4409 int err; 4410 4411 if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) { 4412 /* 4413 * We only have a single page, don't bother klustering 4414 */ 4415 io_off = off; 4416 io_len = PAGESIZE; 4417 pp = page_create_va(vp, io_off, io_len, 4418 PG_EXCL | PG_WAIT, seg, addr); 4419 } else { 4420 /* 4421 * Try to find enough pages to fill the page list 4422 */ 4423 pp = pvn_read_kluster(vp, off, seg, addr, &io_off, 4424 &io_len, off, plsz, 0); 4425 } 4426 if (pp == NULL) { 4427 /* 4428 * The page already exists, nothing to do here. 4429 */ 4430 *pl = NULL; 4431 return (0); 4432 } 4433 4434 /* 4435 * Fill the pages in the kluster. 4436 */ 4437 cur_pp = pp; 4438 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) { 4439 caddr_t va; 4440 4441 ASSERT3U(io_off, ==, cur_pp->p_offset); 4442 va = zfs_map_page(cur_pp, S_WRITE); 4443 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va, 4444 DMU_READ_PREFETCH); 4445 zfs_unmap_page(cur_pp, va); 4446 if (err) { 4447 /* On error, toss the entire kluster */ 4448 pvn_read_done(pp, B_ERROR); 4449 /* convert checksum errors into IO errors */ 4450 if (err == ECKSUM) 4451 err = EIO; 4452 return (err); 4453 } 4454 cur_pp = cur_pp->p_next; 4455 } 4456 4457 /* 4458 * Fill in the page list array from the kluster starting 4459 * from the desired offset `off'. 4460 * NOTE: the page list will always be null terminated. 
4461 */ 4462 pvn_plist_init(pp, pl, plsz, off, io_len, rw); 4463 ASSERT(pl == NULL || (*pl)->p_offset == off); 4464 4465 return (0); 4466 } 4467 4468 /* 4469 * Return pointers to the pages for the file region [off, off + len] 4470 * in the pl array. If plsz is greater than len, this function may 4471 * also return page pointers from after the specified region 4472 * (i.e. the region [off, off + plsz]). These additional pages are 4473 * only returned if they are already in the cache, or were created as 4474 * part of a klustered read. 4475 * 4476 * IN: vp - vnode of file to get data from. 4477 * off - position in file to get data from. 4478 * len - amount of data to retrieve. 4479 * plsz - length of provided page list. 4480 * seg - segment to obtain pages for. 4481 * addr - virtual address of fault. 4482 * rw - mode of created pages. 4483 * cr - credentials of caller. 4484 * ct - caller context. 4485 * 4486 * OUT: protp - protection mode of created pages. 4487 * pl - list of pages created. 4488 * 4489 * RETURN: 0 if success 4490 * error code if failure 4491 * 4492 * Timestamps: 4493 * vp - atime updated 4494 */ 4495 /* ARGSUSED */ 4496 static int 4497 zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp, 4498 page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, 4499 enum seg_rw rw, cred_t *cr, caller_context_t *ct) 4500 { 4501 znode_t *zp = VTOZ(vp); 4502 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4503 page_t **pl0 = pl; 4504 int err = 0; 4505 4506 /* we do our own caching, faultahead is unnecessary */ 4507 if (pl == NULL) 4508 return (0); 4509 else if (len > plsz) 4510 len = plsz; 4511 else 4512 len = P2ROUNDUP(len, PAGESIZE); 4513 ASSERT(plsz >= len); 4514 4515 ZFS_ENTER(zfsvfs); 4516 ZFS_VERIFY_ZP(zp); 4517 4518 if (protp) 4519 *protp = PROT_ALL; 4520 4521 /* 4522 * Loop through the requested range [off, off + len) looking 4523 * for pages. If we don't find a page, we will need to create 4524 * a new page and fill it with data from the file. 4525 */ 4526 while (len > 0) { 4527 if (*pl = page_lookup(vp, off, SE_SHARED)) 4528 *(pl+1) = NULL; 4529 else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw)) 4530 goto out; 4531 while (*pl) { 4532 ASSERT3U((*pl)->p_offset, ==, off); 4533 off += PAGESIZE; 4534 addr += PAGESIZE; 4535 if (len > 0) { 4536 ASSERT3U(len, >=, PAGESIZE); 4537 len -= PAGESIZE; 4538 } 4539 ASSERT3U(plsz, >=, PAGESIZE); 4540 plsz -= PAGESIZE; 4541 pl++; 4542 } 4543 } 4544 4545 /* 4546 * Fill out the page array with any pages already in the cache. 4547 */ 4548 while (plsz > 0 && 4549 (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) { 4550 off += PAGESIZE; 4551 plsz -= PAGESIZE; 4552 } 4553 out: 4554 if (err) { 4555 /* 4556 * Release any pages we have previously locked. 4557 */ 4558 while (pl > pl0) 4559 page_unlock(*--pl); 4560 } else { 4561 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 4562 } 4563 4564 *pl = NULL; 4565 4566 ZFS_EXIT(zfsvfs); 4567 return (err); 4568 } 4569 4570 /* 4571 * Request a memory map for a section of a file. 
This code interacts
4572  * with common code and the VM system as follows:
4573  *
4574  * common code calls mmap(), which ends up in smmap_common()
4575  *
4576  * this calls VOP_MAP(), which takes you into (say) zfs
4577  *
4578  * zfs_map() calls as_map(), passing segvn_create() as the callback
4579  *
4580  * segvn_create() creates the new segment and calls VOP_ADDMAP()
4581  *
4582  * zfs_addmap() updates z_mapcnt
4583  */
4584 /*ARGSUSED*/
4585 static int
4586 zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4587     size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4588     caller_context_t *ct)
4589 {
4590 	znode_t *zp = VTOZ(vp);
4591 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4592 	segvn_crargs_t vn_a;
4593 	int error;
4594 
4595 	ZFS_ENTER(zfsvfs);
4596 	ZFS_VERIFY_ZP(zp);
4597 
4598 	if ((prot & PROT_WRITE) && (zp->z_pflags &
4599 	    (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4600 		ZFS_EXIT(zfsvfs);
4601 		return (EPERM);
4602 	}
4603 
4604 	if ((prot & (PROT_READ | PROT_EXEC)) &&
4605 	    (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4606 		ZFS_EXIT(zfsvfs);
4607 		return (EACCES);
4608 	}
4609 
4610 	if (vp->v_flag & VNOMAP) {
4611 		ZFS_EXIT(zfsvfs);
4612 		return (ENOSYS);
4613 	}
4614 
4615 	if (off < 0 || len > MAXOFFSET_T - off) {
4616 		ZFS_EXIT(zfsvfs);
4617 		return (ENXIO);
4618 	}
4619 
4620 	if (vp->v_type != VREG) {
4621 		ZFS_EXIT(zfsvfs);
4622 		return (ENODEV);
4623 	}
4624 
4625 	/*
4626 	 * If file is locked, disallow mapping.
4627 	 */
4628 	if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
4629 		ZFS_EXIT(zfsvfs);
4630 		return (EAGAIN);
4631 	}
4632 
4633 	as_rangelock(as);
4634 	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4635 	if (error != 0) {
4636 		as_rangeunlock(as);
4637 		ZFS_EXIT(zfsvfs);
4638 		return (error);
4639 	}
4640 
4641 	vn_a.vp = vp;
4642 	vn_a.offset = (u_offset_t)off;
4643 	vn_a.type = flags & MAP_TYPE;
4644 	vn_a.prot = prot;
4645 	vn_a.maxprot = maxprot;
4646 	vn_a.cred = cr;
4647 	vn_a.amp = NULL;
4648 	vn_a.flags = flags & ~MAP_TYPE;
4649 	vn_a.szc = 0;
4650 	vn_a.lgrp_mem_policy_flags = 0;
4651 
4652 	error = as_map(as, *addrp, len, segvn_create, &vn_a);
4653 
4654 	as_rangeunlock(as);
4655 	ZFS_EXIT(zfsvfs);
4656 	return (error);
4657 }
4658 
4659 /* ARGSUSED */
4660 static int
4661 zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4662     size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4663     caller_context_t *ct)
4664 {
4665 	uint64_t pages = btopr(len);
4666 
4667 	atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
4668 	return (0);
4669 }
4670 
4671 /*
4672  * The reason we push dirty pages as part of zfs_delmap() is so that we get a
4673  * more accurate mtime for the associated file. Since we don't have a way of
4674  * detecting when the data was actually modified, we have to resort to
4675  * heuristics. If an explicit msync() is done, then we mark the mtime when the
4676  * last page is pushed. The problem occurs when the msync() call is omitted,
4677  * which is by far the most common case:
4678  *
4679  * open()
4680  * mmap()
4681  * <modify memory>
4682  * munmap()
4683  * close()
4684  * <time lapse>
4685  * putpage() via fsflush
4686  *
4687  * If we wait for fsflush to come along, we can have a modification time that
4688  * is some arbitrary point in the future. In order to prevent this in the
4689  * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is
4690  * torn down.
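 *
 * Note that the push issued below is asynchronous (B_ASYNC): tearing down
 * the mapping is not delayed, and the mtime becomes reasonably accurate,
 * but the data is not guaranteed to be on stable storage. Applications
 * that need durability must still call msync(MS_SYNC) or fsync().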
4691 */ 4692 /* ARGSUSED */ 4693 static int 4694 zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr, 4695 size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr, 4696 caller_context_t *ct) 4697 { 4698 uint64_t pages = btopr(len); 4699 4700 ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages); 4701 atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages); 4702 4703 if ((flags & MAP_SHARED) && (prot & PROT_WRITE) && 4704 vn_has_cached_data(vp)) 4705 (void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct); 4706 4707 return (0); 4708 } 4709 4710 /* 4711 * Free or allocate space in a file. Currently, this function only 4712 * supports the `F_FREESP' command. However, this command is somewhat 4713 * misnamed, as its functionality includes the ability to allocate as 4714 * well as free space. 4715 * 4716 * IN: vp - vnode of file to free data in. 4717 * cmd - action to take (only F_FREESP supported). 4718 * bfp - section of file to free/alloc. 4719 * flag - current file open mode flags. 4720 * offset - current file offset. 4721 * cr - credentials of caller [UNUSED]. 4722 * ct - caller context. 4723 * 4724 * RETURN: 0 if success 4725 * error code if failure 4726 * 4727 * Timestamps: 4728 * vp - ctime|mtime updated 4729 */ 4730 /* ARGSUSED */ 4731 static int 4732 zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag, 4733 offset_t offset, cred_t *cr, caller_context_t *ct) 4734 { 4735 znode_t *zp = VTOZ(vp); 4736 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4737 uint64_t off, len; 4738 int error; 4739 4740 ZFS_ENTER(zfsvfs); 4741 ZFS_VERIFY_ZP(zp); 4742 4743 if (cmd != F_FREESP) { 4744 ZFS_EXIT(zfsvfs); 4745 return (EINVAL); 4746 } 4747 4748 if (error = convoff(vp, bfp, 0, offset)) { 4749 ZFS_EXIT(zfsvfs); 4750 return (error); 4751 } 4752 4753 if (bfp->l_len < 0) { 4754 ZFS_EXIT(zfsvfs); 4755 return (EINVAL); 4756 } 4757 4758 off = bfp->l_start; 4759 len = bfp->l_len; /* 0 means from off to end of file */ 4760 4761 error = zfs_freesp(zp, off, len, flag, TRUE); 4762 4763 ZFS_EXIT(zfsvfs); 4764 return (error); 4765 } 4766 4767 /*ARGSUSED*/ 4768 static int 4769 zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct) 4770 { 4771 znode_t *zp = VTOZ(vp); 4772 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4773 uint32_t gen; 4774 uint64_t gen64; 4775 uint64_t object = zp->z_id; 4776 zfid_short_t *zfid; 4777 int size, i, error; 4778 4779 ZFS_ENTER(zfsvfs); 4780 ZFS_VERIFY_ZP(zp); 4781 4782 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), 4783 &gen64, sizeof (uint64_t))) != 0) { 4784 ZFS_EXIT(zfsvfs); 4785 return (error); 4786 } 4787 4788 gen = (uint32_t)gen64; 4789 4790 size = (zfsvfs->z_parent != zfsvfs) ? 
LONG_FID_LEN : SHORT_FID_LEN; 4791 if (fidp->fid_len < size) { 4792 fidp->fid_len = size; 4793 ZFS_EXIT(zfsvfs); 4794 return (ENOSPC); 4795 } 4796 4797 zfid = (zfid_short_t *)fidp; 4798 4799 zfid->zf_len = size; 4800 4801 for (i = 0; i < sizeof (zfid->zf_object); i++) 4802 zfid->zf_object[i] = (uint8_t)(object >> (8 * i)); 4803 4804 /* Must have a non-zero generation number to distinguish from .zfs */ 4805 if (gen == 0) 4806 gen = 1; 4807 for (i = 0; i < sizeof (zfid->zf_gen); i++) 4808 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i)); 4809 4810 if (size == LONG_FID_LEN) { 4811 uint64_t objsetid = dmu_objset_id(zfsvfs->z_os); 4812 zfid_long_t *zlfid; 4813 4814 zlfid = (zfid_long_t *)fidp; 4815 4816 for (i = 0; i < sizeof (zlfid->zf_setid); i++) 4817 zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i)); 4818 4819 /* XXX - this should be the generation number for the objset */ 4820 for (i = 0; i < sizeof (zlfid->zf_setgen); i++) 4821 zlfid->zf_setgen[i] = 0; 4822 } 4823 4824 ZFS_EXIT(zfsvfs); 4825 return (0); 4826 } 4827 4828 static int 4829 zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr, 4830 caller_context_t *ct) 4831 { 4832 znode_t *zp, *xzp; 4833 zfsvfs_t *zfsvfs; 4834 zfs_dirlock_t *dl; 4835 int error; 4836 4837 switch (cmd) { 4838 case _PC_LINK_MAX: 4839 *valp = ULONG_MAX; 4840 return (0); 4841 4842 case _PC_FILESIZEBITS: 4843 *valp = 64; 4844 return (0); 4845 4846 case _PC_XATTR_EXISTS: 4847 zp = VTOZ(vp); 4848 zfsvfs = zp->z_zfsvfs; 4849 ZFS_ENTER(zfsvfs); 4850 ZFS_VERIFY_ZP(zp); 4851 *valp = 0; 4852 error = zfs_dirent_lock(&dl, zp, "", &xzp, 4853 ZXATTR | ZEXISTS | ZSHARED, NULL, NULL); 4854 if (error == 0) { 4855 zfs_dirent_unlock(dl); 4856 if (!zfs_dirempty(xzp)) 4857 *valp = 1; 4858 VN_RELE(ZTOV(xzp)); 4859 } else if (error == ENOENT) { 4860 /* 4861 * If there aren't extended attributes, it's the 4862 * same as having zero of them. 4863 */ 4864 error = 0; 4865 } 4866 ZFS_EXIT(zfsvfs); 4867 return (error); 4868 4869 case _PC_SATTR_ENABLED: 4870 case _PC_SATTR_EXISTS: 4871 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) && 4872 (vp->v_type == VREG || vp->v_type == VDIR); 4873 return (0); 4874 4875 case _PC_ACCESS_FILTERING: 4876 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_ACCESS_FILTER) && 4877 vp->v_type == VDIR; 4878 return (0); 4879 4880 case _PC_ACL_ENABLED: 4881 *valp = _ACL_ACE_ENABLED; 4882 return (0); 4883 4884 case _PC_MIN_HOLE_SIZE: 4885 *valp = (ulong_t)SPA_MINBLOCKSIZE; 4886 return (0); 4887 4888 case _PC_TIMESTAMP_RESOLUTION: 4889 /* nanosecond timestamp resolution */ 4890 *valp = 1L; 4891 return (0); 4892 4893 default: 4894 return (fs_pathconf(vp, cmd, valp, cr, ct)); 4895 } 4896 } 4897 4898 /*ARGSUSED*/ 4899 static int 4900 zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr, 4901 caller_context_t *ct) 4902 { 4903 znode_t *zp = VTOZ(vp); 4904 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4905 int error; 4906 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; 4907 4908 ZFS_ENTER(zfsvfs); 4909 ZFS_VERIFY_ZP(zp); 4910 error = zfs_getacl(zp, vsecp, skipaclchk, cr); 4911 ZFS_EXIT(zfsvfs); 4912 4913 return (error); 4914 } 4915 4916 /*ARGSUSED*/ 4917 static int 4918 zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr, 4919 caller_context_t *ct) 4920 { 4921 znode_t *zp = VTOZ(vp); 4922 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4923 int error; 4924 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? 
B_TRUE : B_FALSE; 4925 zilog_t *zilog = zfsvfs->z_log; 4926 4927 ZFS_ENTER(zfsvfs); 4928 ZFS_VERIFY_ZP(zp); 4929 4930 error = zfs_setacl(zp, vsecp, skipaclchk, cr); 4931 4932 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 4933 zil_commit(zilog, 0); 4934 4935 ZFS_EXIT(zfsvfs); 4936 return (error); 4937 } 4938 4939 /* 4940 * Tunable, both must be a power of 2. 4941 * 4942 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf 4943 * zcr_blksz_max: if set to less than the file block size, allow loaning out of 4944 * an arcbuf for a partial block read 4945 */ 4946 int zcr_blksz_min = (1 << 10); /* 1K */ 4947 int zcr_blksz_max = (1 << 17); /* 128K */ 4948 4949 /*ARGSUSED*/ 4950 static int 4951 zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr, 4952 caller_context_t *ct) 4953 { 4954 znode_t *zp = VTOZ(vp); 4955 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4956 int max_blksz = zfsvfs->z_max_blksz; 4957 uio_t *uio = &xuio->xu_uio; 4958 ssize_t size = uio->uio_resid; 4959 offset_t offset = uio->uio_loffset; 4960 int blksz; 4961 int fullblk, i; 4962 arc_buf_t *abuf; 4963 ssize_t maxsize; 4964 int preamble, postamble; 4965 4966 if (xuio->xu_type != UIOTYPE_ZEROCOPY) 4967 return (EINVAL); 4968 4969 ZFS_ENTER(zfsvfs); 4970 ZFS_VERIFY_ZP(zp); 4971 switch (ioflag) { 4972 case UIO_WRITE: 4973 /* 4974 * Loan out an arc_buf for write if write size is bigger than 4975 * max_blksz, and the file's block size is also max_blksz. 4976 */ 4977 blksz = max_blksz; 4978 if (size < blksz || zp->z_blksz != blksz) { 4979 ZFS_EXIT(zfsvfs); 4980 return (EINVAL); 4981 } 4982 /* 4983 * Caller requests buffers for write before knowing where the 4984 * write offset might be (e.g. NFS TCP write). 4985 */ 4986 if (offset == -1) { 4987 preamble = 0; 4988 } else { 4989 preamble = P2PHASE(offset, blksz); 4990 if (preamble) { 4991 preamble = blksz - preamble; 4992 size -= preamble; 4993 } 4994 } 4995 4996 postamble = P2PHASE(size, blksz); 4997 size -= postamble; 4998 4999 fullblk = size / blksz; 5000 (void) dmu_xuio_init(xuio, 5001 (preamble != 0) + fullblk + (postamble != 0)); 5002 DTRACE_PROBE3(zfs_reqzcbuf_align, int, preamble, 5003 int, postamble, int, 5004 (preamble != 0) + fullblk + (postamble != 0)); 5005 5006 /* 5007 * Have to fix iov base/len for partial buffers. They 5008 * currently represent full arc_buf's. 5009 */ 5010 if (preamble) { 5011 /* data begins in the middle of the arc_buf */ 5012 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), 5013 blksz); 5014 ASSERT(abuf); 5015 (void) dmu_xuio_add(xuio, abuf, 5016 blksz - preamble, preamble); 5017 } 5018 5019 for (i = 0; i < fullblk; i++) { 5020 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), 5021 blksz); 5022 ASSERT(abuf); 5023 (void) dmu_xuio_add(xuio, abuf, 0, blksz); 5024 } 5025 5026 if (postamble) { 5027 /* data ends in the middle of the arc_buf */ 5028 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), 5029 blksz); 5030 ASSERT(abuf); 5031 (void) dmu_xuio_add(xuio, abuf, 0, postamble); 5032 } 5033 break; 5034 case UIO_READ: 5035 /* 5036 * Loan out an arc_buf for read if the read size is larger than 5037 * the current file block size. Block alignment is not 5038 * considered. Partial arc_buf will be loaned out for read. 
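 * The loaned buffer size is clamped to [zcr_blksz_min, zcr_blksz_max]
 * (see the tunables above); reads smaller than that, or files that have
 * pages in the VM cache, get EINVAL so the caller falls back to a normal
 * copied read, since a loaned arc_buf bypasses the page cache and could
 * miss updates made through mmap().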
5039 */ 5040 blksz = zp->z_blksz; 5041 if (blksz < zcr_blksz_min) 5042 blksz = zcr_blksz_min; 5043 if (blksz > zcr_blksz_max) 5044 blksz = zcr_blksz_max; 5045 /* avoid potential complexity of dealing with it */ 5046 if (blksz > max_blksz) { 5047 ZFS_EXIT(zfsvfs); 5048 return (EINVAL); 5049 } 5050 5051 maxsize = zp->z_size - uio->uio_loffset; 5052 if (size > maxsize) 5053 size = maxsize; 5054 5055 if (size < blksz || vn_has_cached_data(vp)) { 5056 ZFS_EXIT(zfsvfs); 5057 return (EINVAL); 5058 } 5059 break; 5060 default: 5061 ZFS_EXIT(zfsvfs); 5062 return (EINVAL); 5063 } 5064 5065 uio->uio_extflg = UIO_XUIO; 5066 XUIO_XUZC_RW(xuio) = ioflag; 5067 ZFS_EXIT(zfsvfs); 5068 return (0); 5069 } 5070 5071 /*ARGSUSED*/ 5072 static int 5073 zfs_retzcbuf(vnode_t *vp, xuio_t *xuio, cred_t *cr, caller_context_t *ct) 5074 { 5075 int i; 5076 arc_buf_t *abuf; 5077 int ioflag = XUIO_XUZC_RW(xuio); 5078 5079 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY); 5080 5081 i = dmu_xuio_cnt(xuio); 5082 while (i-- > 0) { 5083 abuf = dmu_xuio_arcbuf(xuio, i); 5084 /* 5085 * if abuf == NULL, it must be a write buffer 5086 * that has been returned in zfs_write(). 5087 */ 5088 if (abuf) 5089 dmu_return_arcbuf(abuf); 5090 ASSERT(abuf || ioflag == UIO_WRITE); 5091 } 5092 5093 dmu_xuio_fini(xuio); 5094 return (0); 5095 } 5096 5097 /* 5098 * Predeclare these here so that the compiler assumes that 5099 * this is an "old style" function declaration that does 5100 * not include arguments => we won't get type mismatch errors 5101 * in the initializations that follow. 5102 */ 5103 static int zfs_inval(); 5104 static int zfs_isdir(); 5105 5106 static int 5107 zfs_inval() 5108 { 5109 return (EINVAL); 5110 } 5111 5112 static int 5113 zfs_isdir() 5114 { 5115 return (EISDIR); 5116 } 5117 /* 5118 * Directory vnode operations template 5119 */ 5120 vnodeops_t *zfs_dvnodeops; 5121 const fs_operation_def_t zfs_dvnodeops_template[] = { 5122 VOPNAME_OPEN, { .vop_open = zfs_open }, 5123 VOPNAME_CLOSE, { .vop_close = zfs_close }, 5124 VOPNAME_READ, { .error = zfs_isdir }, 5125 VOPNAME_WRITE, { .error = zfs_isdir }, 5126 VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl }, 5127 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr }, 5128 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr }, 5129 VOPNAME_ACCESS, { .vop_access = zfs_access }, 5130 VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup }, 5131 VOPNAME_CREATE, { .vop_create = zfs_create }, 5132 VOPNAME_REMOVE, { .vop_remove = zfs_remove }, 5133 VOPNAME_LINK, { .vop_link = zfs_link }, 5134 VOPNAME_RENAME, { .vop_rename = zfs_rename }, 5135 VOPNAME_MKDIR, { .vop_mkdir = zfs_mkdir }, 5136 VOPNAME_RMDIR, { .vop_rmdir = zfs_rmdir }, 5137 VOPNAME_READDIR, { .vop_readdir = zfs_readdir }, 5138 VOPNAME_SYMLINK, { .vop_symlink = zfs_symlink }, 5139 VOPNAME_FSYNC, { .vop_fsync = zfs_fsync }, 5140 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive }, 5141 VOPNAME_FID, { .vop_fid = zfs_fid }, 5142 VOPNAME_SEEK, { .vop_seek = zfs_seek }, 5143 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf }, 5144 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr }, 5145 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr }, 5146 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support }, 5147 NULL, NULL 5148 }; 5149 5150 /* 5151 * Regular file vnode operations template 5152 */ 5153 vnodeops_t *zfs_fvnodeops; 5154 const fs_operation_def_t zfs_fvnodeops_template[] = { 5155 VOPNAME_OPEN, { .vop_open = zfs_open }, 5156 VOPNAME_CLOSE, { .vop_close = zfs_close }, 5157 VOPNAME_READ, { .vop_read = zfs_read }, 5158 VOPNAME_WRITE, { .vop_write = zfs_write }, 
5159 VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl }, 5160 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr }, 5161 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr }, 5162 VOPNAME_ACCESS, { .vop_access = zfs_access }, 5163 VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup }, 5164 VOPNAME_RENAME, { .vop_rename = zfs_rename }, 5165 VOPNAME_FSYNC, { .vop_fsync = zfs_fsync }, 5166 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive }, 5167 VOPNAME_FID, { .vop_fid = zfs_fid }, 5168 VOPNAME_SEEK, { .vop_seek = zfs_seek }, 5169 VOPNAME_FRLOCK, { .vop_frlock = zfs_frlock }, 5170 VOPNAME_SPACE, { .vop_space = zfs_space }, 5171 VOPNAME_GETPAGE, { .vop_getpage = zfs_getpage }, 5172 VOPNAME_PUTPAGE, { .vop_putpage = zfs_putpage }, 5173 VOPNAME_MAP, { .vop_map = zfs_map }, 5174 VOPNAME_ADDMAP, { .vop_addmap = zfs_addmap }, 5175 VOPNAME_DELMAP, { .vop_delmap = zfs_delmap }, 5176 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf }, 5177 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr }, 5178 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr }, 5179 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support }, 5180 VOPNAME_REQZCBUF, { .vop_reqzcbuf = zfs_reqzcbuf }, 5181 VOPNAME_RETZCBUF, { .vop_retzcbuf = zfs_retzcbuf }, 5182 NULL, NULL 5183 }; 5184 5185 /* 5186 * Symbolic link vnode operations template 5187 */ 5188 vnodeops_t *zfs_symvnodeops; 5189 const fs_operation_def_t zfs_symvnodeops_template[] = { 5190 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr }, 5191 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr }, 5192 VOPNAME_ACCESS, { .vop_access = zfs_access }, 5193 VOPNAME_RENAME, { .vop_rename = zfs_rename }, 5194 VOPNAME_READLINK, { .vop_readlink = zfs_readlink }, 5195 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive }, 5196 VOPNAME_FID, { .vop_fid = zfs_fid }, 5197 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf }, 5198 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support }, 5199 NULL, NULL 5200 }; 5201 5202 /* 5203 * special share hidden files vnode operations template 5204 */ 5205 vnodeops_t *zfs_sharevnodeops; 5206 const fs_operation_def_t zfs_sharevnodeops_template[] = { 5207 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr }, 5208 VOPNAME_ACCESS, { .vop_access = zfs_access }, 5209 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive }, 5210 VOPNAME_FID, { .vop_fid = zfs_fid }, 5211 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf }, 5212 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr }, 5213 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr }, 5214 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support }, 5215 NULL, NULL 5216 }; 5217 5218 /* 5219 * Extended attribute directory vnode operations template 5220 * This template is identical to the directory vnodes 5221 * operation template except for restricted operations: 5222 * VOP_MKDIR() 5223 * VOP_SYMLINK() 5224 * Note that there are other restrictions embedded in: 5225 * zfs_create() - restrict type to VREG 5226 * zfs_link() - no links into/out of attribute space 5227 * zfs_rename() - no moves into/out of attribute space 5228 */ 5229 vnodeops_t *zfs_xdvnodeops; 5230 const fs_operation_def_t zfs_xdvnodeops_template[] = { 5231 VOPNAME_OPEN, { .vop_open = zfs_open }, 5232 VOPNAME_CLOSE, { .vop_close = zfs_close }, 5233 VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl }, 5234 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr }, 5235 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr }, 5236 VOPNAME_ACCESS, { .vop_access = zfs_access }, 5237 VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup }, 5238 VOPNAME_CREATE, { .vop_create = zfs_create }, 5239 VOPNAME_REMOVE, { .vop_remove = 
zfs_remove }, 5240 VOPNAME_LINK, { .vop_link = zfs_link }, 5241 VOPNAME_RENAME, { .vop_rename = zfs_rename }, 5242 VOPNAME_MKDIR, { .error = zfs_inval }, 5243 VOPNAME_RMDIR, { .vop_rmdir = zfs_rmdir }, 5244 VOPNAME_READDIR, { .vop_readdir = zfs_readdir }, 5245 VOPNAME_SYMLINK, { .error = zfs_inval }, 5246 VOPNAME_FSYNC, { .vop_fsync = zfs_fsync }, 5247 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive }, 5248 VOPNAME_FID, { .vop_fid = zfs_fid }, 5249 VOPNAME_SEEK, { .vop_seek = zfs_seek }, 5250 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf }, 5251 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr }, 5252 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr }, 5253 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support }, 5254 NULL, NULL 5255 }; 5256 5257 /* 5258 * Error vnode operations template 5259 */ 5260 vnodeops_t *zfs_evnodeops; 5261 const fs_operation_def_t zfs_evnodeops_template[] = { 5262 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive }, 5263 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf }, 5264 NULL, NULL 5265 }; 5266
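/*
 * These templates are turned into vnodeops_t tables with vn_make_ops() when
 * the file system module initializes (in illumos this happens in
 * zfs_create_op_tables() in zfs_znode.c); shown here only as an illustrative
 * sketch of how a template becomes a usable ops vector:
 *
 *	error = vn_make_ops(MNTTYPE_ZFS, zfs_dvnodeops_template,
 *	    &zfs_dvnodeops);
 *
 * after which the VOP_*() macros on ZFS vnodes dispatch into the zfs_*
 * functions defined above.
 */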