/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/policy.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>


static ulong_t zfs_fsync_sync_cnt = 4;

int
zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);

	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);
		zil_commit(zfsvfs->z_log, zp->z_id);
		ZFS_EXIT(zfsvfs);
	}
	tsd_set(zfs_fsyncer_key, NULL);

	return (0);
}


#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
 * Lseek support for finding holes (cmd == SEEK_HOLE) and
 * data (cmd == SEEK_DATA). "off" is an in/out parameter.
 */
static int
zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
{
	uint64_t noff = (uint64_t)*off; /* new offset */
	uint64_t file_sz;
	int error;
	boolean_t hole;

	file_sz = zp->z_size;
	if (noff >= file_sz) {
		return (SET_ERROR(ENXIO));
	}

	if (cmd == F_SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);

	if (error == ESRCH)
		return (SET_ERROR(ENXIO));

	/* file was dirty, so fall back to using generic logic */
	if (error == EBUSY) {
		if (hole)
			*off = file_sz;

		return (0);
	}

	/*
	 * We could find a hole that begins after the logical end-of-file,
	 * because dmu_offset_next() only works on whole blocks. If the
	 * EOF falls mid-block, then indicate that the "virtual hole"
	 * at the end of the file begins at the logical EOF, rather than
	 * at the end of the last block.
	 */
	if (noff > file_sz) {
		ASSERT(hole);
		noff = file_sz;
	}

	if (noff < *off)
		return (error);
	*off = noff;
	return (error);
}

int
zfs_holey(znode_t *zp, ulong_t cmd, loff_t *off)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	error = zfs_holey_common(zp, cmd, off);

	ZFS_EXIT(zfsvfs);
	return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
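
/*
 * For illustration only (not part of the kernel interface): a userspace
 * caller reaches zfs_holey() via lseek(2). A minimal sketch, assuming an
 * open file descriptor "fd" on a ZFS file:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	   // first data at/after 0
 *	off_t hole = lseek(fd, data, SEEK_HOLE);   // next hole at/after data
 *
 * Both calls return -1 with errno == ENXIO when the offset is at or past
 * end-of-file, matching the ENXIO returns above.
 */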

/*ARGSUSED*/
int
zfs_access(znode_t *zp, int mode, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

	ZFS_EXIT(zfsvfs);
	return (error);
}

static unsigned long zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */

/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	zp	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- O_SYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Side Effects:
 *	inode - atime updated if byte count > 0
 */
/* ARGSUSED */
int
zfs_read(struct znode *zp, uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0;
	boolean_t frsync = B_FALSE;

	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EACCES));
	}

	/* We don't copy out anything useful for directories. */
	if (Z_ISDIR(ZTOTYPE(zp))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EISDIR));
	}

	/*
	 * Validate file offset
	 */
	if (uio->uio_loffset < (offset_t)0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Fasttrack empty reads
	 */
	if (uio->uio_resid == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

#ifdef FRSYNC
	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 * Only do this for non-snapshots.
	 *
	 * Some platforms do not support FRSYNC and instead map it
	 * to O_SYNC, which results in unnecessary calls to zil_commit. We
	 * only honor FRSYNC requests on platforms which support it.
	 */
	frsync = !!(ioflag & FRSYNC);
#endif
	if (zfsvfs->z_log &&
	    (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
		zil_commit(zfsvfs->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
	    uio->uio_loffset, uio->uio_resid, RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (uio->uio_loffset >= zp->z_size) {
		error = 0;
		goto out;
	}

	ASSERT(uio->uio_loffset < zp->z_size);
	ssize_t n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
	ssize_t start_resid = n;

	while (n > 0) {
		ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size -
		    P2PHASE(uio->uio_loffset, zfs_vnops_read_chunk_size));
#ifdef UIO_NOCOPY
		if (uio->uio_segflg == UIO_NOCOPY)
			error = mappedread_sf(zp, nbytes, uio);
		else
#endif
		if (zn_has_cached_data(zp) && !(ioflag & O_DIRECT)) {
			error = mappedread(zp, nbytes, uio);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes);
		}

		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}

		n -= nbytes;
	}

	int64_t nread = start_resid - n;
	dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
	task_io_account_read(nread);
out:
	zfs_rangelock_exit(lr);

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
	ZFS_EXIT(zfsvfs);
	return (error);
}
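
/*
 * Worked example of the read-chunking arithmetic above (illustrative
 * numbers, default 1 MiB zfs_vnops_read_chunk_size): for a 3 MiB read
 * starting at offset 1.5 MiB, P2PHASE() yields 0.5 MiB, so the first
 * chunk is 0.5 MiB; later chunks then start on 1 MiB boundaries:
 * 1 MiB, 1 MiB, and a final 0.5 MiB to exhaust the residual.
 */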

/*
 * Write the bytes to a file.
 *
 *	IN:	zp	- znode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- O_APPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated if byte count > 0
 */

/* ARGSUSED */
int
zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0;
	ssize_t start_resid = uio->uio_resid;

	/*
	 * Fasttrack empty write
	 */
	ssize_t n = start_resid;
	if (n == 0)
		return (0);

	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	sa_bulk_attr_t bulk[4];
	int count = 0;
	uint64_t mtime[2], ctime[2];
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);

	/*
	 * Callers might not be able to detect properly that we are read-only,
	 * so check it explicitly here.
	 */
	if (zfs_is_readonly(zfsvfs)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EROFS));
	}

	/*
	 * If the file is immutable or read-only, or append-only and this
	 * write is not a proper append, return EPERM.
	 */
	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
	    (uio->uio_loffset < zp->z_size))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	/*
	 * Validate file offset
	 */
	offset_t woff = ioflag & O_APPEND ? zp->z_size : uio->uio_loffset;
	if (woff < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	const uint64_t max_blksz = zfsvfs->z_max_blksz;

	/*
	 * Pre-fault the pages to ensure slow (e.g., NFS) pages
	 * don't hold up txg.
	 * Skip this if uio contains loaned arc_buf.
	 */
	if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFAULT));
	}

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	zfs_locked_range_t *lr;
	if (ioflag & O_APPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics. We reset the write offset once we have the lock.
		 */
		lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
		woff = lr->lr_offset;
		if (lr->lr_length == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		uio->uio_loffset = woff;
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
	}
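
	/*
	 * Illustration of the append-lock semantics above: two threads
	 * appending concurrently both pass O_APPEND, and each gets its
	 * range lock at whatever the file size is once its turn comes.
	 * That is why woff is taken from the lock (lr_offset or z_size)
	 * rather than from the offset the caller supplied.
	 */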

	if (zn_rlimit_fsize(zp, uio, uio->uio_td)) {
		zfs_rangelock_exit(lr);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFBIG));
	}

	const rlim64_t limit = MAXOFFSET_T;

	if (woff >= limit) {
		zfs_rangelock_exit(lr);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFBIG));
	}

	if (n > limit - woff)
		n = limit - woff;

	uint64_t end_size = MAX(zp->z_size, woff + n);
	zilog_t *zilog = zfsvfs->z_log;

	const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
	const uint64_t gid = KGID_TO_SGID(ZTOGID(zp));
	const uint64_t projid = zp->z_projid;

	/*
	 * Write the file in reasonable size chunks. Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
	while (n > 0) {
		woff = uio->uio_loffset;

		if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
		    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
		    (projid != ZFS_DEFAULT_PROJID &&
		    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
		    projid))) {
			error = SET_ERROR(EDQUOT);
			break;
		}

		arc_buf_t *abuf = NULL;
		if (n >= max_blksz && woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block. "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction. This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			size_t cbytes;

			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    max_blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if ((error = uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes))) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT3S(cbytes, ==, max_blksz);
		}

		/*
		 * Start a transaction.
		 */
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
		DB_DNODE_ENTER(db);
		dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff,
		    MIN(n, max_blksz));
		DB_DNODE_EXIT(db);
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * If rangelock_enter() over-locked we grow the blocksize
		 * and then reduce the lock range. This will only happen
		 * on the first iteration since rangelock_reduce() will
		 * shrink down lr_length to the appropriate size.
		 */
		if (lr->lr_length == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				/*
				 * File's blocksize is already larger than the
				 * "recordsize" property. Only let it grow to
				 * the next power of 2.
				 */
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size,
				    1 << highbit64(zp->z_blksz));
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_rangelock_reduce(lr, woff, n);
		}
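
		/*
		 * Worked example for the growth above (illustrative): with
		 * recordsize=128K and z_blksz already 192K (a pre-existing
		 * non-power-of-2 size), highbit64(192K) is 18, so the block
		 * may only grow to 1 << 18 = 256K, the next power of 2;
		 * otherwise it simply grows toward recordsize (max_blksz),
		 * capped in both cases by end_size.
		 */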

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		const ssize_t nbytes =
		    MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		ssize_t tx_bytes;
		if (abuf == NULL) {
			tx_bytes = uio->uio_resid;
			uio_fault_disable(uio, B_TRUE);
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			uio_fault_disable(uio, B_FALSE);
#ifdef __linux__
			if (error == EFAULT) {
				dmu_tx_commit(tx);
				/*
				 * Account for partial writes before
				 * continuing the loop.
				 * Update needs to occur before the next
				 * uio_prefaultpages, or prefaultpages may
				 * error, and we may break the loop early.
				 */
				if (tx_bytes != uio->uio_resid)
					n -= tx_bytes - uio->uio_resid;
				if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
					break;
				}
				continue;
			}
#endif
			if (error != 0) {
				dmu_tx_commit(tx);
				break;
			}
			tx_bytes -= uio->uio_resid;
		} else {
			/* Implied by abuf != NULL: */
			ASSERT3S(n, >=, max_blksz);
			ASSERT0(P2PHASE(woff, max_blksz));
			/*
			 * We can simplify nbytes to MIN(n, max_blksz) since
			 * P2PHASE(woff, max_blksz) is 0, and knowing
			 * n >= max_blksz lets us simplify further:
			 */
			ASSERT3S(nbytes, ==, max_blksz);
			/*
			 * Thus, we're writing a full block at a block-aligned
			 * offset and extending the file past EOF.
			 *
			 * dmu_assign_arcbuf_by_dbuf() will directly assign the
			 * arc buffer to a dbuf.
			 */
			error = dmu_assign_arcbuf_by_dbuf(
			    sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
			if (error != 0) {
				dmu_return_arcbuf(abuf);
				dmu_tx_commit(tx);
				break;
			}
			ASSERT3S(nbytes, <=, uio->uio_resid);
			uioskip(uio, nbytes);
			tx_bytes = nbytes;
		}
		if (tx_bytes && zn_has_cached_data(zp) &&
		    !(ioflag & O_DIRECT)) {
			update_pages(zp, woff, tx_bytes, zfsvfs->z_os);
		}
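
		/*
		 * Note (a reading of the code above): update_pages() keeps
		 * the page cache coherent for files that are also mmap'd,
		 * mirroring the mappedread() path taken by zfs_read();
		 * O_DIRECT writes skip it just as O_DIRECT reads bypass
		 * mappedread().
		 */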

		/*
		 * If we made no progress, we're done. If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
		mutex_enter(&zp->z_acl_lock);
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(zp, cr,
		    ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
			uint64_t newmode;

			zp->z_mode &= ~(S_ISUID | S_ISGID);
			newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
			    (void *)&newmode, sizeof (uint64_t), tx);
		}
		mutex_exit(&zp->z_acl_lock);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < uio->uio_loffset) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    uio->uio_loffset);
			ASSERT(error == 0);
		}
		/*
		 * If we are replaying and eof is non-zero then force
		 * the file size to the specified eof. Note, there's no
		 * concurrency during replay.
		 */
		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
			zp->z_size = zfsvfs->z_replay_eof;

		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
		    NULL, NULL);
		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT3S(tx_bytes, ==, nbytes);
		n -= nbytes;

		if (n > 0) {
			if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
				error = SET_ERROR(EFAULT);
				break;
			}
		}
	}

	zfs_inode_update(zp);
	zfs_rangelock_exit(lr);

	/*
	 * If we're in replay mode, or we made no progress, or the
	 * uio data is inaccessible, return an error. Otherwise, it's
	 * at least a partial write, so it's successful.
	 */
	if (zfsvfs->z_replay || uio->uio_resid == start_resid ||
	    error == EFAULT) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (ioflag & (O_SYNC | O_DSYNC) ||
	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	const int64_t nwritten = start_resid - uio->uio_resid;
	dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
	task_io_account_write(nwritten);

	ZFS_EXIT(zfsvfs);
	return (0);
}
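
/*
 * Durability note (a reading of the code above, not new behavior): a
 * successful zfs_write() is durable immediately only when O_SYNC/O_DSYNC
 * was passed or the dataset has sync=always, since only those paths call
 * zil_commit() here; otherwise the data becomes durable when its txg is
 * later synced, or earlier via zfs_fsync().
 */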

/*ARGSUSED*/
int
zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
	ZFS_EXIT(zfsvfs);

	return (error);
}

/*ARGSUSED*/
int
zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	zilog_t *zilog = zfsvfs->z_log;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	error = zfs_setacl(zp, vsecp, skipaclchk, cr);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}

#ifdef ZFS_DEBUG
static int zil_fault_io = 0;
#endif

static void zfs_get_done(zgd_t *zgd, int error);

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
{
	zfsvfs_t *zfsvfs = arg;
	objset_t *os = zfsvfs->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error = 0;

	ASSERT3P(lwb, !=, NULL);
	ASSERT3P(zio, !=, NULL);
	ASSERT3U(size, !=, 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zfsvfs, object, &zp) != 0)
		return (SET_ERROR(ENOENT));
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		zfs_zrele_async(zp);
		return (SET_ERROR(ENOENT));
	}

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_lwb = lwb;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
		    offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = SET_ERROR(ENOENT);
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data. We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
		for (;;) {
			uint64_t blkoff;
			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
			    offset, size, RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			zfs_rangelock_exit(zgd->zgd_lr);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = SET_ERROR(ENOENT);
#ifdef ZFS_DEBUG
		if (zil_fault_io) {
			error = SET_ERROR(EIO);
			zil_fault_io = 0;
		}
#endif
		if (error == 0)
			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			blkptr_t *bp = &lr->lr_blkptr;

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zfs_get_done, zgd);
			ASSERT(error || lr->lr_length <= size);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf. We will finish everything up
			 * in the zfs_get_done() callback.
			 */
			if (error == 0)
				return (0);

			if (error == EALREADY) {
				lr->lr_common.lrc_txtype = TX_WRITE2;
				/*
				 * TX_WRITE2 relies on the data previously
				 * written by the TX_WRITE that caused
				 * EALREADY. We zero out the BP because
				 * it is the old, currently-on-disk BP.
				 */
				zgd->zgd_bp = NULL;
				BP_ZERO(bp);
				error = 0;
			}
		}
	}

	zfs_get_done(zgd, error);

	return (error);
}
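
/*
 * Informational note: the "immediate" case above (buf != NULL) corresponds
 * to log records whose data is copied into the ZIL at commit time, while
 * buf == NULL corresponds to indirect records that dmu_sync() points at
 * in place; zfs_log_write() makes that choice when it builds the record.
 * (Record-state naming, e.g. WR_NEED_COPY vs. WR_INDIRECT, follows the
 * ZIL implementation and is mentioned here only for orientation.)
 */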

/* ARGSUSED */
static void
zfs_get_done(zgd_t *zgd, int error)
{
	znode_t *zp = zgd->zgd_private;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_rangelock_exit(zgd->zgd_lr);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	zfs_zrele_async(zp);

	kmem_free(zgd, sizeof (zgd_t));
}

EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_holey);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);

ZFS_MODULE_PARAM(zfs_vnops, zfs_vnops_, read_chunk_size, ULONG, ZMOD_RW,
	"Bytes to read per chunk");
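
/*
 * Tuning sketch (assumes a Linux build, where ZFS_MODULE_PARAM exposes the
 * tunable as a module parameter):
 *
 *	# cat /sys/module/zfs/parameters/zfs_vnops_read_chunk_size
 *	1048576
 *	# echo 2097152 > /sys/module/zfs/parameters/zfs_vnops_read_chunk_size
 *
 * Other platforms surface the same tunable through their own sysctl-style
 * mechanisms.
 */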