/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2016, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2019, 2023, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_racct.h>
#include <sys/zfs_rlock.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif

/*
 * Enable/disable nopwrite feature.
 */
static int zfs_nopwrite_enabled = 1;

/*
 * Tunable to control percentage of dirtied L1 blocks from frees allowed into
 * one TXG. After this threshold is crossed, additional dirty blocks from frees
 * will wait until the next TXG.
 * A value of zero will disable this throttle.
 */
static uint_t zfs_per_txg_dirty_frees_percent = 30;

/*
 * Enable/disable forcing txg sync when dirty checking for holes with lseek().
 * By default this is enabled to ensure accurate hole reporting, but it can
 * result in a significant performance penalty for lseek(SEEK_HOLE) heavy
 * workloads. Disabling this option will result in holes never being reported
 * in dirty files, which is always safe.
 */
static int zfs_dmu_offset_next_sync = 1;
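
/*
 * Worked example (not part of the original source): the free throttle above
 * is applied in dmu_free_long_range_impl() as
 * dirty_frees_threshold = zfs_per_txg_dirty_frees_percent *
 * zfs_dirty_data_max / 100.  So with the default of 30 and, for example,
 * zfs_dirty_data_max of 4 GiB, roughly 1.2 GiB of L1 blocks may be dirtied
 * by frees in one TXG before further frees wait for the next TXG.
 */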

/*
 * Limit the amount we can prefetch with one call to this amount.  This
 * helps to limit the amount of memory that can be used by prefetching.
 * Larger objects should be prefetched a bit at a time.
 */
#ifdef _ILP32
uint_t dmu_prefetch_max = 8 * 1024 * 1024;
#else
uint_t dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
#endif

const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "object array" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "packed nvlist" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "packed nvlist size" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj header" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map header" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, TRUE, "ZIL intent log" },
	{DMU_BSWAP_DNODE, TRUE, FALSE, TRUE, "DMU dnode" },
	{DMU_BSWAP_OBJSET, TRUE, TRUE, FALSE, "DMU objset" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL directory" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL directory child map" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset snap map" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL props" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL dataset" },
	{DMU_BSWAP_ZNODE, TRUE, FALSE, FALSE, "ZFS znode" },
	{DMU_BSWAP_OLDACL, TRUE, FALSE, TRUE, "ZFS V0 ACL" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "ZFS plain file" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS directory" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "ZFS master node" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS delete queue" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "zvol object" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "zvol prop" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "other uint8[]" },
	{DMU_BSWAP_UINT64, FALSE, FALSE, TRUE, "other uint64[]" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "other ZAP" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "persistent error log" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "SPA history" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA history offsets" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "Pool properties" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL permissions" },
	{DMU_BSWAP_ACL, TRUE, FALSE, TRUE, "ZFS ACL" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "ZFS SYSACL" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "FUID table" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "FUID table size" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset next clones" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan work queue" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project used" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project quota" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "snapshot refcount tags" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT ZAP algorithm" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT statistics" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "System attributes" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA master node" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr registration" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr layouts" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan translations" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "deduplicated block" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL deadlist map" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL deadlist map hdr" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dir clones" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj subobj" }
};
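
/*
 * Illustrative sketch (not part of the original source): a consumer could map
 * a dmu_object_type_t to its human-readable name and byteswap routine through
 * the two tables here (dmu_ot above, dmu_ot_byteswap below), e.g. for debug
 * output.  "type" is a hypothetical valid object type and the field names
 * (ot_name, ot_byteswap, ob_name) are assumed to match dmu.h:
 *
 *	const char *type_name = dmu_ot[type].ot_name;
 *	const char *bswap_name =
 *	    dmu_ot_byteswap[dmu_ot[type].ot_byteswap].ob_name;
 */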

dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
	{ byteswap_uint8_array, "uint8" },
	{ byteswap_uint16_array, "uint16" },
	{ byteswap_uint32_array, "uint32" },
	{ byteswap_uint64_array, "uint64" },
	{ zap_byteswap, "zap" },
	{ dnode_buf_byteswap, "dnode" },
	{ dmu_objset_byteswap, "objset" },
	{ zfs_znode_byteswap, "znode" },
	{ zfs_oldacl_byteswap, "oldacl" },
	{ zfs_acl_byteswap, "acl" }
};

int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
    const void *tag, dmu_buf_t **dbp)
{
	uint64_t blkid;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (0);
}

int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
    const void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (err);
}

int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
    const void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}

int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    const void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}
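
/*
 * Illustrative sketch (not part of the original source): the typical pairing
 * of dmu_buf_hold() with dmu_buf_rele().  "os", "object" and "off" are
 * hypothetical; the hold must be released with the same tag that took it.
 *
 *	dmu_buf_t *db;
 *	int err = dmu_buf_hold(os, object, off, FTAG, &db,
 *	    DMU_READ_PREFETCH);
 *	if (err != 0)
 *		return (err);
 *	... inspect db->db_data, db->db_size ...
 *	dmu_buf_rele(db, FTAG);
 */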

int
dmu_bonus_max(void)
{
	return (DN_OLD_MAX_BONUSLEN);
}

int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	if (newsize < 0 || newsize > db_fake->db_size)
		return (SET_ERROR(EINVAL));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonuslen(dn, newsize, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	if (!DMU_OT_IS_VALID(type))
		return (SET_ERROR(EINVAL));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonus_type(dn, type, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dmu_object_type_t type;

	DB_DNODE_ENTER(db);
	type = DB_DNODE(db)->dn_bonustype;
	DB_DNODE_EXIT(db);

	return (type);
}

int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error != 0)
		return (error);
	dbuf_rm_spill(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_rm_spill(dn, tx);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}

/*
 * Lookup and hold the bonus buffer for the provided dnode.  If the dnode
 * has not yet been allocated a bonus dbuf, one will be allocated.
 * Returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold_by_dnode(dnode_t *dn, const void *tag, dmu_buf_t **dbp,
    uint32_t flags)
{
	dmu_buf_impl_t *db;
	int error;
	uint32_t db_flags = DB_RF_MUST_SUCCEED;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
			rw_exit(&dn->dn_struct_rwlock);
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		}
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;

	/* as long as the bonus buf is held, the dnode will be held */
	if (zfs_refcount_add(&db->db_holds, tag) == 1) {
		VERIFY(dnode_add_ref(dn, db));
		atomic_inc_32(&dn->dn_dbufs_count);
	}

	/*
	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
	 * hold and incrementing the dbuf count to ensure that dnode_move()
	 * sees a dnode hold for every dbuf.
	 */
	rw_exit(&dn->dn_struct_rwlock);

	error = dbuf_read(db, NULL, db_flags);
	if (error) {
		dnode_evict_bonus(dn);
		dbuf_rele(db, tag);
		*dbp = NULL;
		return (error);
	}

	*dbp = &db->db;
	return (0);
}

int
dmu_bonus_hold(objset_t *os, uint64_t object, const void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);

	error = dmu_bonus_hold_by_dnode(dn, tag, dbp, DMU_READ_NO_PREFETCH);
	dnode_rele(dn, FTAG);

	return (error);
}
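
/*
 * Illustrative sketch (not part of the original source): holding and
 * releasing a bonus buffer ("os" and "object" are hypothetical).  The bonus
 * buffer carries per-object metadata such as a znode_phys_t or SA data.
 *
 *	dmu_buf_t *bonus;
 *	int err = dmu_bonus_hold(os, object, FTAG, &bonus);
 *	if (err != 0)
 *		return (err);
 *	... read bonus->db_data (bonus->db_size bytes) ...
 *	dmu_buf_rele(bonus, FTAG);
 */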

/*
 * Returns ENOENT, EIO, or 0.
 *
 * This interface will allocate a blank spill dbuf when a spill blk
 * doesn't already exist on the dnode.
 *
 * If you only want to find an already existing spill db, then
 * dmu_spill_hold_existing() should be used.
 */
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, const void *tag,
    dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = NULL;
	int err;

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}
	err = dbuf_read(db, NULL, flags);
	if (err == 0)
		*dbp = &db->db;
	else {
		dbuf_rele(db, tag);
		*dbp = NULL;
	}
	return (err);
}

int
dmu_spill_hold_existing(dmu_buf_t *bonus, const void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
		err = SET_ERROR(EINVAL);
	} else {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

		if (!dn->dn_have_spill) {
			err = SET_ERROR(ENOENT);
		} else {
			err = dmu_spill_hold_by_dnode(dn,
			    DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
		}

		rw_exit(&dn->dn_struct_rwlock);
	}

	DB_DNODE_EXIT(db);
	return (err);
}

int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, const void *tag,
    dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	int err;
	uint32_t db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	DB_DNODE_ENTER(db);
	err = dmu_spill_hold_by_dnode(DB_DNODE(db), db_flags, tag, dbp);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    boolean_t read, const void *tag, int *numbufsp, dmu_buf_t ***dbpp,
    uint32_t flags)
{
	dmu_buf_t **dbp;
	zstream_t *zs = NULL;
	uint64_t blkid, nblks, i;
	uint32_t dbuf_flags;
	int err;
	zio_t *zio = NULL;
	boolean_t missed = B_FALSE;

	ASSERT(!read || length <= DMU_MAX_ACCESS);

	/*
	 * Note: We directly notify the prefetch code of this read, so that
	 * we can tell it about the multi-block read.  dbuf_read() only knows
	 * about the one block it is accessing.
	 */
	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
	    DB_RF_NOPREFETCH;

	if ((flags & DMU_READ_NO_DECRYPT) != 0)
		dbuf_flags |= DB_RF_NO_DECRYPT;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
		    P2ALIGN_TYPED(offset, 1ULL << blkshift, uint64_t))
		    >> blkshift;
	} else {
		if (offset + length > dn->dn_datablksz) {
			zfs_panic_recover("zfs: accessing past end of object "
			    "%llx/%llx (size=%u access=%llu+%llu)",
			    (longlong_t)dn->dn_objset->
			    os_dsl_dataset->ds_object,
			    (longlong_t)dn->dn_object, dn->dn_datablksz,
			    (longlong_t)offset, (longlong_t)length);
			rw_exit(&dn->dn_struct_rwlock);
			return (SET_ERROR(EIO));
		}
		nblks = 1;
	}
	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

	if (read)
		zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	blkid = dbuf_whichblock(dn, 0, offset);
	if ((flags & DMU_READ_NO_PREFETCH) == 0) {
		/*
		 * Prepare the zfetch before initiating the demand reads, so
		 * that if multiple threads block on same indirect block, we
		 * base predictions on the original less racy request order.
		 */
		zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks, read,
		    B_TRUE);
	}
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
		if (db == NULL) {
			if (zs) {
				dmu_zfetch_run(&dn->dn_zfetch, zs, missed,
				    B_TRUE);
			}
			rw_exit(&dn->dn_struct_rwlock);
			dmu_buf_rele_array(dbp, nblks, tag);
			if (read)
				zio_nowait(zio);
			return (SET_ERROR(EIO));
		}

		/*
		 * Initiate async demand data read.
		 * We check the db_state after calling dbuf_read() because
		 * (1) dbuf_read() may change the state to CACHED due to a
		 * hit in the ARC, and (2) on a cache miss, a child will
		 * have been added to "zio" but not yet completed, so the
		 * state will not yet be CACHED.
		 */
		if (read) {
			if (i == nblks - 1 && blkid + i < dn->dn_maxblkid &&
			    offset + length < db->db.db_offset +
			    db->db.db_size) {
				if (offset <= db->db.db_offset)
					dbuf_flags |= DB_RF_PARTIAL_FIRST;
				else
					dbuf_flags |= DB_RF_PARTIAL_MORE;
			}
			(void) dbuf_read(db, zio, dbuf_flags);
			if (db->db_state != DB_CACHED)
				missed = B_TRUE;
		}
		dbp[i] = &db->db;
	}

	if (!read)
		zfs_racct_write(length, nblks);

	if (zs)
		dmu_zfetch_run(&dn->dn_zfetch, zs, missed, B_TRUE);
	rw_exit(&dn->dn_struct_rwlock);

	if (read) {
		/* wait for async read i/o */
		err = zio_wait(zio);
		if (err) {
			dmu_buf_rele_array(dbp, nblks, tag);
			return (err);
		}

		/* wait for other io to complete */
		for (i = 0; i < nblks; i++) {
			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
			mutex_enter(&db->db_mtx);
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL)
				cv_wait(&db->db_changed, &db->db_mtx);
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
			mutex_exit(&db->db_mtx);
			if (err) {
				dmu_buf_rele_array(dbp, nblks, tag);
				return (err);
			}
		}
	}

	*numbufsp = nblks;
	*dbpp = dbp;
	return (0);
}

int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, const void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
    uint64_t length, boolean_t read, const void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	int err;

	DB_DNODE_ENTER(db);
	err = dmu_buf_hold_array_by_dnode(DB_DNODE(db), offset, length, read,
	    tag, numbufsp, dbpp, DMU_READ_PREFETCH);
	DB_DNODE_EXIT(db);

	return (err);
}

void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, const void *tag)
{
	int i;
	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

	if (numbufs == 0)
		return;

	for (i = 0; i < numbufs; i++) {
		if (dbp[i])
			dbuf_rele(dbp[i], tag);
	}

	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}
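
/*
 * Illustrative sketch (not part of the original source): holding a range of
 * buffers for reading and releasing them as an array ("os", "object", "off"
 * and "len" are hypothetical).
 *
 *	dmu_buf_t **dbp;
 *	int numbufs;
 *	int err = dmu_buf_hold_array(os, object, off, len, TRUE, FTAG,
 *	    &numbufs, &dbp);
 *	if (err != 0)
 *		return (err);
 *	for (int i = 0; i < numbufs; i++)
 *		... copy out of dbp[i]->db_data ...
 *	dmu_buf_rele_array(dbp, numbufs, FTAG);
 */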

/*
 * Issue prefetch I/Os for the given blocks.  If level is greater than 0, the
 * indirect blocks prefetched will be those that point to the blocks containing
 * the data starting at offset, and continuing to offset + len.  If the range
 * is too long, prefetch the first dmu_prefetch_max bytes as requested, while
 * for the rest only a higher level, also fitting within dmu_prefetch_max.  It
 * should primarily help random reads, since for long sequential reads there is
 * a speculative prefetcher.
 *
 * Note that if the indirect blocks above the blocks being prefetched are not
 * in cache, they will be asynchronously read in.  Dnode read by dnode_hold()
 * is currently synchronous.
 */
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, zio_priority_t pri)
{
	dnode_t *dn;

	if (dmu_prefetch_max == 0 || len == 0) {
		dmu_prefetch_dnode(os, object, pri);
		return;
	}

	if (dnode_hold(os, object, FTAG, &dn) != 0)
		return;

	dmu_prefetch_by_dnode(dn, level, offset, len, pri);

	dnode_rele(dn, FTAG);
}

void
dmu_prefetch_by_dnode(dnode_t *dn, int64_t level, uint64_t offset,
    uint64_t len, zio_priority_t pri)
{
	int64_t level2 = level;
	uint64_t start, end, start2, end2;

	/*
	 * Depending on len we may do two prefetches: blocks [start, end) at
	 * level, and following blocks [start2, end2) at higher level2.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift != 0) {
		/*
		 * The object has multiple blocks.  Calculate the full range
		 * of blocks [start, end2) and then split it into two parts,
		 * so that the first [start, end) fits into dmu_prefetch_max.
		 */
		start = dbuf_whichblock(dn, level, offset);
		end2 = dbuf_whichblock(dn, level, offset + len - 1) + 1;
		uint8_t ibs = dn->dn_indblkshift;
		uint8_t bs = (level == 0) ? dn->dn_datablkshift : ibs;
		uint_t limit = P2ROUNDUP(dmu_prefetch_max, 1 << bs) >> bs;
		start2 = end = MIN(end2, start + limit);

		/*
		 * Find level2 where [start2, end2) fits into dmu_prefetch_max.
		 */
		uint8_t ibps = ibs - SPA_BLKPTRSHIFT;
		limit = P2ROUNDUP(dmu_prefetch_max, 1 << ibs) >> ibs;
		do {
			level2++;
			start2 = P2ROUNDUP(start2, 1 << ibps) >> ibps;
			end2 = P2ROUNDUP(end2, 1 << ibps) >> ibps;
		} while (end2 - start2 > limit);
	} else {
		/* There is only one block.  Prefetch it or nothing. */
		start = start2 = end2 = 0;
		end = start + (level == 0 && offset < dn->dn_datablksz);
	}

	for (uint64_t i = start; i < end; i++)
		dbuf_prefetch(dn, level, i, pri, 0);
	for (uint64_t i = start2; i < end2; i++)
		dbuf_prefetch(dn, level2, i, pri, 0);
	rw_exit(&dn->dn_struct_rwlock);
}
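
/*
 * Illustrative sketch (not part of the original source): warming the ARC
 * ahead of a random-access workload by asynchronously prefetching the first
 * 1 MiB of L0 data in an object ("os" and "object" are hypothetical).  The
 * call returns immediately.
 *
 *	dmu_prefetch(os, object, 0, 0, 1 << 20, ZIO_PRIORITY_ASYNC_READ);
 */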

typedef struct {
	kmutex_t dpa_lock;
	kcondvar_t dpa_cv;
	uint64_t dpa_pending_io;
} dmu_prefetch_arg_t;

static void
dmu_prefetch_done(void *arg, uint64_t level, uint64_t blkid, boolean_t issued)
{
	(void) level; (void) blkid; (void) issued;
	dmu_prefetch_arg_t *dpa = arg;

	ASSERT0(level);

	mutex_enter(&dpa->dpa_lock);
	ASSERT3U(dpa->dpa_pending_io, >, 0);
	if (--dpa->dpa_pending_io == 0)
		cv_broadcast(&dpa->dpa_cv);
	mutex_exit(&dpa->dpa_lock);
}

static void
dmu_prefetch_wait_by_dnode(dnode_t *dn, uint64_t offset, uint64_t len)
{
	dmu_prefetch_arg_t dpa;

	mutex_init(&dpa.dpa_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dpa.dpa_cv, NULL, CV_DEFAULT, NULL);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);

	uint64_t start = dbuf_whichblock(dn, 0, offset);
	uint64_t end = dbuf_whichblock(dn, 0, offset + len - 1) + 1;
	dpa.dpa_pending_io = end - start;

	for (uint64_t blk = start; blk < end; blk++) {
		(void) dbuf_prefetch_impl(dn, 0, blk, ZIO_PRIORITY_ASYNC_READ,
		    0, dmu_prefetch_done, &dpa);
	}

	rw_exit(&dn->dn_struct_rwlock);

	/* wait for prefetch L0 reads to finish */
	mutex_enter(&dpa.dpa_lock);
	while (dpa.dpa_pending_io > 0) {
		cv_wait(&dpa.dpa_cv, &dpa.dpa_lock);
	}
	mutex_exit(&dpa.dpa_lock);

	mutex_destroy(&dpa.dpa_lock);
	cv_destroy(&dpa.dpa_cv);
}

/*
 * Issue prefetch I/Os for the given L0 block range and wait for the I/O
 * to complete.  This does not enforce dmu_prefetch_max and will prefetch
 * the entire range.  The blocks are read from disk into the ARC but no
 * decompression occurs (i.e., the dbuf cache is not required).
 */
int
dmu_prefetch_wait(objset_t *os, uint64_t object, uint64_t offset, uint64_t size)
{
	dnode_t *dn;
	int err = 0;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	/*
	 * Chunk the requests (16 indirects worth) so that we can be
	 * interrupted.
	 */
	uint64_t chunksize;
	if (dn->dn_indblkshift) {
		uint64_t nbps = bp_span_in_blocks(dn->dn_indblkshift, 1);
		chunksize = (nbps * 16) << dn->dn_datablkshift;
	} else {
		chunksize = dn->dn_datablksz;
	}

	while (size > 0) {
		uint64_t mylen = MIN(size, chunksize);

		dmu_prefetch_wait_by_dnode(dn, offset, mylen);

		offset += mylen;
		size -= mylen;

		if (issig()) {
			err = SET_ERROR(EINTR);
			break;
		}
	}

	dnode_rele(dn, FTAG);

	return (err);
}

/*
 * Issue prefetch I/Os for the given object's dnode.
 */
void
dmu_prefetch_dnode(objset_t *os, uint64_t object, zio_priority_t pri)
{
	if (object == 0 || object >= DN_MAX_OBJECT)
		return;

	dnode_t *dn = DMU_META_DNODE(os);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	uint64_t blkid = dbuf_whichblock(dn, 0,
	    object * sizeof (dnode_phys_t));
	dbuf_prefetch(dn, 0, blkid, pri, 0);
	rw_exit(&dn->dn_struct_rwlock);
}

/*
 * Get the next "chunk" of file data to free.  We traverse the file from
 * the end so that the file gets shorter over time (if we crash in the
 * middle, this will leave us in a better state).  We find allocated file
 * data by simply searching the allocated level 1 indirects.
 *
 * On input, *start should be the first offset that does not need to be
 * freed (e.g. "offset + length").  On return, *start will be the first
 * offset that should be freed and l1blks is set to the number of level 1
 * indirect blocks found within the chunk.
 */
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
{
	uint64_t blks;
	uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
	/* bytes of data covered by a level-1 indirect block */
	uint64_t iblkrange = (uint64_t)dn->dn_datablksz *
	    EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);

	ASSERT3U(minimum, <=, *start);

	/* dn_nlevels == 1 means we don't have any L1 blocks */
	if (dn->dn_nlevels <= 1) {
		*l1blks = 0;
		*start = minimum;
		return (0);
	}

	/*
	 * Check if we can free the entire range assuming that all of the
	 * L1 blocks in this range have data.  If we can, we use this
	 * worst case value as an estimate so we can avoid having to look
	 * at the object's actual data.
	 */
	uint64_t total_l1blks =
	    (roundup(*start, iblkrange) - (minimum / iblkrange * iblkrange)) /
	    iblkrange;
	if (total_l1blks <= maxblks) {
		*l1blks = total_l1blks;
		*start = minimum;
		return (0);
	}
	ASSERT(ISP2(iblkrange));

	for (blks = 0; *start > minimum && blks < maxblks; blks++) {
		int err;

		/*
		 * dnode_next_offset(BACKWARDS) will find an allocated L1
		 * indirect block at or before the input offset.  We must
		 * decrement *start so that it is at the end of the region
		 * to search.
		 */
		(*start)--;

		err = dnode_next_offset(dn,
		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);

		/* if there are no indirect blocks before start, we are done */
		if (err == ESRCH) {
			*start = minimum;
			break;
		} else if (err != 0) {
			*l1blks = blks;
			return (err);
		}

		/* set start to the beginning of this L1 indirect */
		*start = P2ALIGN_TYPED(*start, iblkrange, uint64_t);
	}
	if (*start < minimum)
		*start = minimum;
	*l1blks = blks;

	return (0);
}
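
/*
 * Worked example (not part of the original source), assuming the common
 * defaults of 128 KiB data blocks and 128 KiB indirect blocks: each L1
 * indirect holds 128 KiB / sizeof (blkptr_t) = 1024 block pointers, so
 * iblkrange = 128 KiB * 1024 = 128 MiB of file data per L1 block.  A single
 * call to get_next_chunk() therefore walks back over at most maxblks such
 * 128 MiB regions before the chunk is handed to dnode_free_range().
 */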

/*
 * If this objset is of type DMU_OST_ZFS, return true if the vfs's unmounted
 * flag is set; otherwise return false.
 *
 * Used below in dmu_free_long_range_impl() to enable abort when unmounting.
 */
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS)
		return (zfs_get_vfs_flag_unmounted(os));
#else
	(void) os;
#endif
	return (B_FALSE);
}

static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
    uint64_t length)
{
	uint64_t object_size;
	int err;
	uint64_t dirty_frees_threshold;
	dsl_pool_t *dp = dmu_objset_pool(os);

	if (dn == NULL)
		return (SET_ERROR(EINVAL));

	object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	if (offset >= object_size)
		return (0);

	if (zfs_per_txg_dirty_frees_percent <= 100)
		dirty_frees_threshold =
		    zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
	else
		dirty_frees_threshold = zfs_dirty_data_max / 20;

	if (length == DMU_OBJECT_END || offset + length > object_size)
		length = object_size - offset;

	while (length != 0) {
		uint64_t chunk_end, chunk_begin, chunk_len;
		uint64_t l1blks;
		dmu_tx_t *tx;

		if (dmu_objset_zfs_unmounting(dn->dn_objset))
			return (SET_ERROR(EINTR));

		chunk_end = chunk_begin = offset + length;

		/* move chunk_begin backwards to the beginning of this chunk */
		err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
		if (err)
			return (err);
		ASSERT3U(chunk_begin, >=, offset);
		ASSERT3U(chunk_begin, <=, chunk_end);

		chunk_len = chunk_end - chunk_begin;

		tx = dmu_tx_create(os);
		dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);

		/*
		 * Mark this transaction as typically resulting in a net
		 * reduction in space used.
		 */
		dmu_tx_mark_netfree(tx);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}

		uint64_t txg = dmu_tx_get_txg(tx);

		mutex_enter(&dp->dp_lock);
		uint64_t long_free_dirty =
		    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK];
		mutex_exit(&dp->dp_lock);

		/*
		 * To avoid filling up a TXG with just frees, wait for
		 * the next TXG to open before freeing more chunks if
		 * we have reached the threshold of frees.
		 */
		if (dirty_frees_threshold != 0 &&
		    long_free_dirty >= dirty_frees_threshold) {
			DMU_TX_STAT_BUMP(dmu_tx_dirty_frees_delay);
			dmu_tx_commit(tx);
			txg_wait_open(dp, 0, B_TRUE);
			continue;
		}

		/*
		 * In order to prevent unnecessary write throttling, for each
		 * TXG, we track the cumulative size of L1 blocks being dirtied
		 * in dnode_free_range() below.  We compare this number to a
		 * tunable threshold, past which we prevent new L1 dirty
		 * freeing blocks from being added into the open TXG.  See
		 * dmu_free_long_range_impl() for details.  The threshold
		 * prevents write throttle activation due to dirty freeing L1
		 * blocks taking up a large percentage of zfs_dirty_data_max.
		 */
		mutex_enter(&dp->dp_lock);
		dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] +=
		    l1blks << dn->dn_indblkshift;
		mutex_exit(&dp->dp_lock);
		DTRACE_PROBE3(free__long__range,
		    uint64_t, long_free_dirty, uint64_t, chunk_len,
		    uint64_t, txg);
		dnode_free_range(dn, chunk_begin, chunk_len, tx);

		dmu_tx_commit(tx);

		length -= chunk_len;
	}
	return (0);
}

int
dmu_free_long_range(objset_t *os, uint64_t object,
    uint64_t offset, uint64_t length)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);
	err = dmu_free_long_range_impl(os, dn, offset, length);

	/*
	 * It is important to zero out the maxblkid when freeing the entire
	 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
	 * will take the fast path, and (b) dnode_reallocate() can verify
	 * that the entire file has been freed.
	 */
	if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
		dn->dn_maxblkid = 0;

	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_free_long_object(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx;
	int err;

	err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, object);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
	dmu_tx_mark_netfree(tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err == 0) {
		err = dmu_object_free(os, object, tx);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}

	return (err);
}

int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	ASSERT(offset < UINT64_MAX);
	ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
	dnode_free_range(dn, offset, size, tx);
	dnode_rele(dn, FTAG);
	return (0);
}
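
/*
 * Illustrative sketch (not part of the original source): two common uses of
 * the interfaces above ("os", "object" and "new_size" are hypothetical).
 *
 * Freeing a whole object's data and then the object itself:
 *
 *	int err = dmu_free_long_object(os, object);
 *
 * Truncating an object down to new_size without a caller-supplied tx:
 *
 *	int err = dmu_free_long_range(os, object, new_size, DMU_OBJECT_END);
 */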

static int
dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dmu_buf_t **dbp;
	int numbufs, err = 0;

	/*
	 * Deal with odd block sizes, where there can't be data past the first
	 * block.  If we ever do the tail block optimization, we will need to
	 * handle that here as well.
	 */
	if (dn->dn_maxblkid == 0) {
		uint64_t newsz = offset > dn->dn_datablksz ? 0 :
		    MIN(size, dn->dn_datablksz - offset);
		memset((char *)buf + newsz, 0, size - newsz);
		size = newsz;
	}

	while (size > 0) {
		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
		int i;

		/*
		 * NB: we could do this block-at-a-time, but it's nice
		 * to be reading in parallel.
		 */
		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
		    TRUE, FTAG, &numbufs, &dbp, flags);
		if (err)
			break;

		for (i = 0; i < numbufs; i++) {
			uint64_t tocpy;
			int64_t bufoff;
			dmu_buf_t *db = dbp[i];

			ASSERT(size > 0);

			bufoff = offset - db->db_offset;
			tocpy = MIN(db->db_size - bufoff, size);

			(void) memcpy(buf, (char *)db->db_data + bufoff, tocpy);

			offset += tocpy;
			size -= tocpy;
			buf = (char *)buf + tocpy;
		}
		dmu_buf_rele_array(dbp, numbufs, FTAG);
	}
	return (err);
}

int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	err = dmu_read_impl(dn, offset, size, buf, flags);
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
    uint32_t flags)
{
	return (dmu_read_impl(dn, offset, size, buf, flags));
}

static void
dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	int i;

	for (i = 0; i < numbufs; i++) {
		uint64_t tocpy;
		int64_t bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = offset - db->db_offset;
		tocpy = MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx, B_FALSE);
		else
			dmu_buf_will_dirty(db, tx);

		(void) memcpy((char *)db->db_data + bufoff, buf, tocpy);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx, B_FALSE);

		offset += tocpy;
		size -= tocpy;
		buf = (char *)buf + tocpy;
	}
}

void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}
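
/*
 * Illustrative sketch (not part of the original source): the usual
 * transactional pattern around dmu_write().  The write must be covered by an
 * assigned transaction; "os", "object", "off", "len" and "buf" are
 * hypothetical.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	int err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 *
 * Reads need no transaction:
 *
 *	err = dmu_read(os, object, off, len, buf, DMU_READ_PREFETCH);
 */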

/*
 * Note: Lustre is an external consumer of this interface.
 */
void
dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		dmu_buf_t *db = dbp[i];

		dmu_buf_will_not_fill(db, tx);
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
    void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
    int compressed_size, int byteorder, dmu_tx_t *tx)
{
	dmu_buf_t *db;

	ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
	ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
	VERIFY0(dmu_buf_hold_noread(os, object, offset,
	    FTAG, &db));

	dmu_buf_write_embedded(db,
	    data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
	    uncompressed_size, compressed_size, byteorder, tx);

	dmu_buf_rele(db, FTAG);
}

void
dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
	int numbufs, i;
	dmu_buf_t **dbp;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
	    &numbufs, &dbp));
	for (i = 0; i < numbufs; i++)
		dmu_buf_redact(dbp[i], tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

#ifdef _KERNEL
int
dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size)
{
	dmu_buf_t **dbp;
	int numbufs, i, err;

	/*
	 * NB: we could do this block-at-a-time, but it's nice
	 * to be reading in parallel.
	 */
	err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
	    TRUE, FTAG, &numbufs, &dbp, 0);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		uint64_t tocpy;
		int64_t bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = zfs_uio_offset(uio) - db->db_offset;
		tocpy = MIN(db->db_size - bufoff, size);

		err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy,
		    UIO_READ, uio);

		if (err)
			break;

		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);

	return (err);
}

/*
 * Read 'size' bytes into the uio buffer.
 * From object zdb->db_object.
 * Starting at zfs_uio_offset(uio).
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
	int err;

	if (size == 0)
		return (0);

	DB_DNODE_ENTER(db);
	err = dmu_read_uio_dnode(DB_DNODE(db), uio, size);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Read 'size' bytes into the uio buffer.
 * From the specified object
 * Starting at offset zfs_uio_offset(uio).
 */
int
dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size)
{
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_read_uio_dnode(dn, uio, size);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;
	int err = 0;
	int i;

	err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		uint64_t tocpy;
		int64_t bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		offset_t off = zfs_uio_offset(uio);
		bufoff = off - db->db_offset;
		tocpy = MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx, B_TRUE);
		else
			dmu_buf_will_dirty(db, tx);

		err = zfs_uio_fault_move((char *)db->db_data + bufoff,
		    tocpy, UIO_WRITE, uio);

		if (tocpy == db->db_size && dmu_buf_fill_done(db, tx, err)) {
			/* The fill was reverted.  Undo any uio progress. */
			zfs_uio_advance(uio, off - zfs_uio_offset(uio));
		}

		if (err)
			break;

		size -= tocpy;
	}

	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}

/*
 * Write 'size' bytes from the uio buffer.
 * To object zdb->db_object.
 * Starting at offset zfs_uio_offset(uio).
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
	int err;

	if (size == 0)
		return (0);

	DB_DNODE_ENTER(db);
	err = dmu_write_uio_dnode(DB_DNODE(db), uio, size, tx);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Write 'size' bytes from the uio buffer.
 * To the specified object.
 * Starting at offset zfs_uio_offset(uio).
 */
int
dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_write_uio_dnode(dn, uio, size, tx);

	dnode_rele(dn, FTAG);

	return (err);
}
#endif /* _KERNEL */

static void
dmu_cached_bps(spa_t *spa, blkptr_t *bps, uint_t nbps,
    uint64_t *l1sz, uint64_t *l2sz)
{
	int cached_flags;

	if (bps == NULL)
		return;

	for (size_t blk_off = 0; blk_off < nbps; blk_off++) {
		blkptr_t *bp = &bps[blk_off];

		if (BP_IS_HOLE(bp))
			continue;

		cached_flags = arc_cached(spa, bp);
		if (cached_flags == 0)
			continue;

		if ((cached_flags & (ARC_CACHED_IN_L1 | ARC_CACHED_IN_L2)) ==
		    ARC_CACHED_IN_L2)
			*l2sz += BP_GET_LSIZE(bp);
		else
			*l1sz += BP_GET_LSIZE(bp);
	}
}

/*
 * Estimate DMU object cached size.
 */
int
dmu_object_cached_size(objset_t *os, uint64_t object,
    uint64_t *l1sz, uint64_t *l2sz)
{
	dnode_t *dn;
	dmu_object_info_t doi;
	int err = 0;

	*l1sz = *l2sz = 0;

	if (dnode_hold(os, object, FTAG, &dn) != 0)
		return (0);

	if (dn->dn_nlevels < 2) {
		dnode_rele(dn, FTAG);
		return (0);
	}

	dmu_object_info_from_dnode(dn, &doi);

	for (uint64_t off = 0; off < doi.doi_max_offset;
	    off += dmu_prefetch_max) {
		/* dbuf_read doesn't prefetch L1 blocks. */
		dmu_prefetch_by_dnode(dn, 1, off,
		    dmu_prefetch_max, ZIO_PRIORITY_SYNC_READ);
	}

	/*
	 * Hold all valid L1 blocks, asking ARC the status of each BP
	 * contained in each such L1 block.
	 */
	uint_t nbps = bp_span_in_blocks(dn->dn_indblkshift, 1);
	uint64_t l1blks = 1 + (dn->dn_maxblkid / nbps);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	for (uint64_t blk = 0; blk < l1blks; blk++) {
		dmu_buf_impl_t *db = NULL;

		if (issig()) {
			/*
			 * On interrupt, get out, and bubble up EINTR
			 */
			err = EINTR;
			break;
		}

		/*
		 * If we get an i/o error here, the L1 can't be read,
		 * and nothing under it could be cached, so we just
		 * continue.  Ignoring the error from dbuf_hold_impl
		 * or from dbuf_read is then a reasonable choice.
		 */
		err = dbuf_hold_impl(dn, 1, blk, B_TRUE, B_FALSE, FTAG, &db);
		if (err != 0) {
			/*
			 * ignore error and continue
			 */
			err = 0;
			continue;
		}

		err = dbuf_read(db, NULL, DB_RF_CANFAIL);
		if (err == 0) {
			dmu_cached_bps(dmu_objset_spa(os), db->db.db_data,
			    nbps, l1sz, l2sz);
		}
		/*
		 * error may be ignored, and we continue
		 */
		err = 0;
		dbuf_rele(db, FTAG);
	}
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
	return (err);
}

/*
 * Allocate a loaned anonymous arc buffer.
 */
arc_buf_t *
dmu_request_arcbuf(dmu_buf_t *handle, int size)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;

	return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
}
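
/*
 * Illustrative sketch (not part of the original source): the loaned-buffer
 * write path used by zero-copy consumers.  A buffer is loaned from the ARC,
 * filled by the caller, and then either assigned to a dbuf or returned
 * ("bonus_db", "off" and "data_len" are hypothetical; on success the buffer
 * is consumed by the assign call).
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(bonus_db, data_len);
 *	... copy data_len bytes into abuf->b_data ...
 *	if (dmu_assign_arcbuf_by_dbuf(bonus_db, off, abuf, tx) != 0)
 *		dmu_return_arcbuf(abuf);
 */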

/*
 * Free a loaned arc buffer.
 */
void
dmu_return_arcbuf(arc_buf_t *buf)
{
	arc_return_buf(buf, FTAG);
	arc_buf_destroy(buf, FTAG);
}

/*
 * A "lightweight" write is faster than a regular write (e.g.
 * dmu_write_by_dnode() or dmu_assign_arcbuf_by_dnode()), because it avoids the
 * CPU cost of creating a dmu_buf_impl_t and arc_buf_[hdr_]_t.  However, the
 * data can not be read or overwritten until the transaction's txg has been
 * synced.  This makes it appropriate for workloads that are known to be
 * (temporarily) write-only, like "zfs receive".
 *
 * A single block is written, starting at the specified offset in bytes.  If
 * the call is successful, it returns 0 and the provided abd has been
 * consumed (the caller should not free it).
 */
int
dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
    const zio_prop_t *zp, zio_flag_t flags, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr =
	    dbuf_dirty_lightweight(dn, dbuf_whichblock(dn, 0, offset), tx);
	if (dr == NULL)
		return (SET_ERROR(EIO));
	dr->dt.dll.dr_abd = abd;
	dr->dt.dll.dr_props = *zp;
	dr->dt.dll.dr_flags = flags;
	return (0);
}

/*
 * When possible directly assign passed loaned arc buffer to a dbuf.
 * If this is not possible copy the contents of passed arc buf via
 * dmu_write().
 */
int
dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	objset_t *os = dn->dn_objset;
	uint64_t object = dn->dn_object;
	uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
	uint64_t blkid;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));

	/*
	 * We can only assign if the offset is aligned and the arc buf is the
	 * same size as the dbuf.
	 */
	if (offset == db->db.db_offset && blksz == db->db.db_size) {
		zfs_racct_write(blksz, 1);
		dbuf_assign_arcbuf(db, buf, tx);
		dbuf_rele(db, FTAG);
	} else {
		/* compressed bufs must always be assignable to their dbuf */
		ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
		ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));

		dbuf_rele(db, FTAG);
		dmu_write(os, object, offset, blksz, buf->b_data, tx);
		dmu_return_arcbuf(buf);
	}

	return (0);
}

int
dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
    dmu_tx_t *tx)
{
	int err;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;

	DB_DNODE_ENTER(db);
	err = dmu_assign_arcbuf_by_dnode(DB_DNODE(db), offset, buf, tx);
	DB_DNODE_EXIT(db);

	return (err);
}

typedef struct {
	dbuf_dirty_record_t	*dsa_dr;
	dmu_sync_cb_t		*dsa_done;
	zgd_t			*dsa_zgd;
	dmu_tx_t		*dsa_tx;
} dmu_sync_arg_t;

static void
dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
{
	(void) buf;
	dmu_sync_arg_t *dsa = varg;
	dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
	blkptr_t *bp = zio->io_bp;

	if (zio->io_error == 0) {
		if (BP_IS_HOLE(bp)) {
			/*
			 * A block of zeros may compress to a hole, but the
			 * block size still needs to be known for replay.
			 */
			BP_SET_LSIZE(bp, db->db_size);
		} else if (!BP_IS_EMBEDDED(bp)) {
			ASSERT(BP_GET_LEVEL(bp) == 0);
			BP_SET_FILL(bp, 1);
		}
	}
}

static void
dmu_sync_late_arrival_ready(zio_t *zio)
{
	dmu_sync_ready(zio, NULL, zio->io_private);
}

static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
	(void) buf;
	dmu_sync_arg_t *dsa = varg;
	dbuf_dirty_record_t *dr = dsa->dsa_dr;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	zgd_t *zgd = dsa->dsa_zgd;

	/*
	 * Record the vdev(s) backing this blkptr so they can be flushed after
	 * the writes for the lwb have completed.
	 */
	if (zio->io_error == 0) {
		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
	}

	mutex_enter(&db->db_mtx);
	ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
	if (zio->io_error == 0) {
		dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
		if (dr->dt.dl.dr_nopwrite) {
			blkptr_t *bp = zio->io_bp;
			blkptr_t *bp_orig = &zio->io_bp_orig;
			uint8_t chksum = BP_GET_CHECKSUM(bp_orig);

			ASSERT(BP_EQUAL(bp, bp_orig));
			VERIFY(BP_EQUAL(bp, db->db_blkptr));
			ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
			VERIFY(zio_checksum_table[chksum].ci_flags &
			    ZCHECKSUM_FLAG_NOPWRITE);
		}
		dr->dt.dl.dr_overridden_by = *zio->io_bp;
		dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
		dr->dt.dl.dr_copies = zio->io_prop.zp_copies;

		/*
		 * Old style holes are filled with all zeros, whereas
		 * new-style holes maintain their lsize, type, level,
		 * and birth time (see zio_write_compress).  While we
		 * need to reset the BP_SET_LSIZE() call that happened
		 * in dmu_sync_ready for old style holes, we do *not*
		 * want to wipe out the information contained in new
		 * style holes.  Thus, only zero out the block pointer if
		 * it's an old style hole.
		 */
		if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
		    BP_GET_LOGICAL_BIRTH(&dr->dt.dl.dr_overridden_by) == 0)
			BP_ZERO(&dr->dt.dl.dr_overridden_by);
	} else {
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	}
	cv_broadcast(&db->db_changed);
	mutex_exit(&db->db_mtx);

	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

	kmem_free(dsa, sizeof (*dsa));
}

static void
dmu_sync_late_arrival_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	dmu_sync_arg_t *dsa = zio->io_private;
	zgd_t *zgd = dsa->dsa_zgd;

	if (zio->io_error == 0) {
		/*
		 * Record the vdev(s) backing this blkptr so they can be
		 * flushed after the writes for the lwb have completed.
		 */
		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);

		if (!BP_IS_HOLE(bp)) {
			blkptr_t *bp_orig __maybe_unused = &zio->io_bp_orig;
			ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
			ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
			ASSERT(BP_GET_LOGICAL_BIRTH(zio->io_bp) == zio->io_txg);
			ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
			zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
		}
	}

	dmu_tx_commit(dsa->dsa_tx);

	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

	abd_free(zio->io_abd);
	kmem_free(dsa, sizeof (*dsa));
}

static int
dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
    zio_prop_t *zp, zbookmark_phys_t *zb)
{
	dmu_sync_arg_t *dsa;
	dmu_tx_t *tx;
	int error;

	error = dbuf_read((dmu_buf_impl_t *)zgd->zgd_db, NULL,
	    DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	if (error != 0)
		return (error);

	tx = dmu_tx_create(os);
	dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
	/*
	 * This transaction does not produce any dirty data or log blocks, so
	 * it should not be throttled.  All other cases wait for TXG sync, by
	 * which time the log block we are writing will be obsolete, so we can
	 * skip waiting and just return error here instead.
	 */
	if (dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE) != 0) {
		dmu_tx_abort(tx);
		/* Make zl_get_data do txg_wait_synced() */
		return (SET_ERROR(EIO));
	}

	/*
	 * In order to prevent the zgd's lwb from being free'd prior to
	 * dmu_sync_late_arrival_done() being called, we have to ensure
	 * the lwb's "max txg" takes this tx's txg into account.
	 */
	zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));

	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	dsa->dsa_dr = NULL;
	dsa->dsa_done = done;
	dsa->dsa_zgd = zgd;
	dsa->dsa_tx = tx;

	/*
	 * Since we are currently syncing this txg, it's nontrivial to
	 * determine what BP to nopwrite against, so we disable nopwrite.
	 *
	 * When syncing, the db_blkptr is initially the BP of the previous
	 * txg.  We can not nopwrite against it because it will be changed
	 * (this is similar to the non-late-arrival case where the dbuf is
	 * dirty in a future txg).
	 *
	 * Then dbuf_write_ready() sets db_blkptr to the location we will
	 * write.  We can not nopwrite against it because although the BP
	 * will not (typically) be changed, the data has not yet been
	 * persisted to this location.
	 *
	 * Finally, when dbuf_write_done() is called, it is theoretically
	 * possible to always nopwrite, because the data that was written in
	 * this txg is the same data that we are trying to write.  However we
	 * would need to check that this dbuf is not dirty in any future
	 * txg's (as we do in the normal dmu_sync() path).  For simplicity, we
	 * don't nopwrite in this case.
	 */
	zp->zp_nopwrite = B_FALSE;

	zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
	    abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
	    zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
	    dmu_sync_late_arrival_ready, NULL, dmu_sync_late_arrival_done,
	    dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));

	return (0);
}

/*
 * Intent log support: sync the block associated with db to disk.
 * N.B. and XXX: the caller is responsible for making sure that the
 * data isn't changing while dmu_sync() is writing it.
 *
 * Return values:
 *
 * EEXIST: this txg has already been synced, so there's nothing to do.
 *	The caller should not log the write.
 *
 * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
 *	The caller should not log the write.
 *
 * EALREADY: this block is already in the process of being synced.
 *	The caller should track its progress (somehow).
 *
 * EIO: could not do the I/O.
 *	The caller should do a txg_wait_synced().
 *
 * 0: the I/O has been initiated.
 *	The caller should log this blkptr in the done callback.
 *	It is possible that the I/O will fail, in which case
 *	the error will be reported to the done callback and
 *	propagated to pio from zio_done().
 */
int
dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
	objset_t *os = db->db_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	dbuf_dirty_record_t *dr, *dr_next;
	dmu_sync_arg_t *dsa;
	zbookmark_phys_t zb;
	zio_prop_t zp;

	ASSERT(pio != NULL);
	ASSERT(txg != 0);

	SET_BOOKMARK(&zb, ds->ds_object,
	    db->db.db_object, db->db_level, db->db_blkid);

	DB_DNODE_ENTER(db);
	dmu_write_policy(os, DB_DNODE(db), db->db_level, WP_DMU_SYNC, &zp);
	DB_DNODE_EXIT(db);

	/*
	 * If we're frozen (running ziltest), we always need to generate a bp.
	 */
	if (txg > spa_freeze_txg(os->os_spa))
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));

	/*
	 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
	 * and us.  If we determine that this txg is not yet syncing,
	 * but it begins to sync a moment later, that's OK because the
	 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
	 */
	mutex_enter(&db->db_mtx);

	if (txg <= spa_last_synced_txg(os->os_spa)) {
		/*
		 * This txg has already synced.  There's nothing to do.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(EEXIST));
	}

	if (txg <= spa_syncing_txg(os->os_spa)) {
		/*
		 * This txg is currently syncing, so we can't mess with
		 * the dirty record anymore; just write a new log block.
		 */
		mutex_exit(&db->db_mtx);
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
	}

	dr = dbuf_find_dirty_eq(db, txg);

	if (dr == NULL) {
		/*
		 * There's no dr for this dbuf, so it must have been freed.
		 * There's no need to log writes to freed blocks, so we're
		 * done.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(ENOENT));
	}

	dr_next = list_next(&db->db_dirty_records, dr);
	ASSERT(dr_next == NULL || dr_next->dr_txg < txg);

	if (db->db_blkptr != NULL) {
		/*
		 * We need to fill in zgd_bp with the current blkptr so that
		 * the nopwrite code can check if we're writing the same
		 * data that's already on disk.  We can only nopwrite if we
		 * are sure that after making the copy, db_blkptr will not
		 * change until our i/o completes.  We ensure this by
		 * holding the db_mtx, and only allowing nopwrite if the
		 * block is not already dirty (see below).  This is verified
		 * by dmu_sync_done(), which VERIFYs that the db_blkptr has
		 * not changed.
2059 */ 2060 *zgd->zgd_bp = *db->db_blkptr; 2061 } 2062 2063 /* 2064 * Assume the on-disk data is X, the current syncing data (in 2065 * txg - 1) is Y, and the current in-memory data is Z (currently 2066 * in dmu_sync). 2067 * 2068 * We usually want to perform a nopwrite if X and Z are the 2069 * same. However, if Y is different (i.e. the BP is going to 2070 * change before this write takes effect), then a nopwrite will 2071 * be incorrect - we would override with X, which could have 2072 * been freed when Y was written. 2073 * 2074 * (Note that this is not a concern when we are nop-writing from 2075 * syncing context, because X and Y must be identical, because 2076 * all previous txgs have been synced.) 2077 * 2078 * Therefore, we disable nopwrite if the current BP could change 2079 * before this TXG. There are two ways it could change: by 2080 * being dirty (dr_next is non-NULL), or by being freed 2081 * (dnode_block_freed()). This behavior is verified by 2082 * zio_done(), which VERIFYs that the override BP is identical 2083 * to the on-disk BP. 2084 */ 2085 if (dr_next != NULL) { 2086 zp.zp_nopwrite = B_FALSE; 2087 } else { 2088 DB_DNODE_ENTER(db); 2089 if (dnode_block_freed(DB_DNODE(db), db->db_blkid)) 2090 zp.zp_nopwrite = B_FALSE; 2091 DB_DNODE_EXIT(db); 2092 } 2093 2094 ASSERT(dr->dr_txg == txg); 2095 if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC || 2096 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 2097 /* 2098 * We have already issued a sync write for this buffer, 2099 * or this buffer has already been synced. It could not 2100 * have been dirtied since, or we would have cleared the state. 2101 */ 2102 mutex_exit(&db->db_mtx); 2103 return (SET_ERROR(EALREADY)); 2104 } 2105 2106 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 2107 dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC; 2108 mutex_exit(&db->db_mtx); 2109 2110 dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); 2111 dsa->dsa_dr = dr; 2112 dsa->dsa_done = done; 2113 dsa->dsa_zgd = zgd; 2114 dsa->dsa_tx = NULL; 2115 2116 zio_nowait(arc_write(pio, os->os_spa, txg, zgd->zgd_bp, 2117 dr->dt.dl.dr_data, !DBUF_IS_CACHEABLE(db), dbuf_is_l2cacheable(db), 2118 &zp, dmu_sync_ready, NULL, dmu_sync_done, dsa, 2119 ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb)); 2120 2121 return (0); 2122 } 2123 2124 int 2125 dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx) 2126 { 2127 dnode_t *dn; 2128 int err; 2129 2130 err = dnode_hold(os, object, FTAG, &dn); 2131 if (err) 2132 return (err); 2133 err = dnode_set_nlevels(dn, nlevels, tx); 2134 dnode_rele(dn, FTAG); 2135 return (err); 2136 } 2137 2138 int 2139 dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs, 2140 dmu_tx_t *tx) 2141 { 2142 dnode_t *dn; 2143 int err; 2144 2145 err = dnode_hold(os, object, FTAG, &dn); 2146 if (err) 2147 return (err); 2148 err = dnode_set_blksz(dn, size, ibs, tx); 2149 dnode_rele(dn, FTAG); 2150 return (err); 2151 } 2152 2153 int 2154 dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid, 2155 dmu_tx_t *tx) 2156 { 2157 dnode_t *dn; 2158 int err; 2159 2160 err = dnode_hold(os, object, FTAG, &dn); 2161 if (err) 2162 return (err); 2163 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 2164 dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE); 2165 rw_exit(&dn->dn_struct_rwlock); 2166 dnode_rele(dn, FTAG); 2167 return (0); 2168 } 2169 2170 void 2171 dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum, 2172 dmu_tx_t *tx) 2173 { 2174 dnode_t *dn; 2175 2176 /* 2177 * Send 
streams include each object's checksum function. This 2178 * check ensures that the receiving system can understand the 2179 * checksum function transmitted. 2180 */ 2181 ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS); 2182 2183 VERIFY0(dnode_hold(os, object, FTAG, &dn)); 2184 ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS); 2185 dn->dn_checksum = checksum; 2186 dnode_setdirty(dn, tx); 2187 dnode_rele(dn, FTAG); 2188 } 2189 2190 void 2191 dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress, 2192 dmu_tx_t *tx) 2193 { 2194 dnode_t *dn; 2195 2196 /* 2197 * Send streams include each object's compression function. This 2198 * check ensures that the receiving system can understand the 2199 * compression function transmitted. 2200 */ 2201 ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS); 2202 2203 VERIFY0(dnode_hold(os, object, FTAG, &dn)); 2204 dn->dn_compress = compress; 2205 dnode_setdirty(dn, tx); 2206 dnode_rele(dn, FTAG); 2207 } 2208 2209 /* 2210 * When the "redundant_metadata" property is set to "most", only indirect 2211 * blocks of this level and higher will have an additional ditto block. 2212 */ 2213 static const int zfs_redundant_metadata_most_ditto_level = 2; 2214 2215 void 2216 dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp) 2217 { 2218 dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET; 2219 boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) || 2220 (wp & WP_SPILL)); 2221 enum zio_checksum checksum = os->os_checksum; 2222 enum zio_compress compress = os->os_compress; 2223 uint8_t complevel = os->os_complevel; 2224 enum zio_checksum dedup_checksum = os->os_dedup_checksum; 2225 boolean_t dedup = B_FALSE; 2226 boolean_t nopwrite = B_FALSE; 2227 boolean_t dedup_verify = os->os_dedup_verify; 2228 boolean_t encrypt = B_FALSE; 2229 int copies = os->os_copies; 2230 2231 /* 2232 * We maintain different write policies for each of the following 2233 * types of data: 2234 * 1. metadata 2235 * 2. preallocated blocks (i.e. level-0 blocks of a dump device) 2236 * 3. all other level 0 blocks 2237 */ 2238 if (ismd) { 2239 /* 2240 * XXX -- we should design a compression algorithm 2241 * that specializes in arrays of bps. 2242 */ 2243 compress = zio_compress_select(os->os_spa, 2244 ZIO_COMPRESS_ON, ZIO_COMPRESS_ON); 2245 2246 /* 2247 * Metadata always gets checksummed. If the data 2248 * checksum is multi-bit correctable, and it's not a 2249 * ZBT-style checksum, then it's suitable for metadata 2250 * as well. Otherwise, the metadata checksum defaults 2251 * to fletcher4. 2252 */ 2253 if (!(zio_checksum_table[checksum].ci_flags & 2254 ZCHECKSUM_FLAG_METADATA) || 2255 (zio_checksum_table[checksum].ci_flags & 2256 ZCHECKSUM_FLAG_EMBEDDED)) 2257 checksum = ZIO_CHECKSUM_FLETCHER_4; 2258 2259 switch (os->os_redundant_metadata) { 2260 case ZFS_REDUNDANT_METADATA_ALL: 2261 copies++; 2262 break; 2263 case ZFS_REDUNDANT_METADATA_MOST: 2264 if (level >= zfs_redundant_metadata_most_ditto_level || 2265 DMU_OT_IS_METADATA(type) || (wp & WP_SPILL)) 2266 copies++; 2267 break; 2268 case ZFS_REDUNDANT_METADATA_SOME: 2269 if (DMU_OT_IS_CRITICAL(type)) 2270 copies++; 2271 break; 2272 case ZFS_REDUNDANT_METADATA_NONE: 2273 break; 2274 } 2275 } else if (wp & WP_NOFILL) { 2276 ASSERT(level == 0); 2277 2278 /* 2279 * If we're writing preallocated blocks, we aren't actually 2280 * writing them so don't set any policy properties. These 2281 * blocks are currently only used by an external subsystem 2282 * outside of zfs (i.e. 
dump) and not written by the zio 2283 * pipeline. 2284 */ 2285 compress = ZIO_COMPRESS_OFF; 2286 checksum = ZIO_CHECKSUM_OFF; 2287 } else { 2288 compress = zio_compress_select(os->os_spa, dn->dn_compress, 2289 compress); 2290 complevel = zio_complevel_select(os->os_spa, compress, 2291 complevel, complevel); 2292 2293 checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ? 2294 zio_checksum_select(dn->dn_checksum, checksum) : 2295 dedup_checksum; 2296 2297 /* 2298 * Determine dedup setting. If we are in dmu_sync(), 2299 * we won't actually dedup now because that's all 2300 * done in syncing context; but we do want to use the 2301 * dedup checksum. If the checksum is not strong 2302 * enough to ensure unique signatures, force 2303 * dedup_verify. 2304 */ 2305 if (dedup_checksum != ZIO_CHECKSUM_OFF) { 2306 dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE; 2307 if (!(zio_checksum_table[checksum].ci_flags & 2308 ZCHECKSUM_FLAG_DEDUP)) 2309 dedup_verify = B_TRUE; 2310 } 2311 2312 /* 2313 * Enable nopwrite if we have secure enough checksum 2314 * algorithm (see comment in zio_nop_write) and 2315 * compression is enabled. We don't enable nopwrite if 2316 * dedup is enabled as the two features are mutually 2317 * exclusive. 2318 */ 2319 nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags & 2320 ZCHECKSUM_FLAG_NOPWRITE) && 2321 compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled); 2322 } 2323 2324 /* 2325 * All objects in an encrypted objset are protected from modification 2326 * via a MAC. Encrypted objects store their IV and salt in the last DVA 2327 * in the bp, so we cannot use all copies. Encrypted objects are also 2328 * not subject to nopwrite since writing the same data will still 2329 * result in a new ciphertext. Only encrypted blocks can be dedup'd 2330 * to avoid ambiguity in the dedup code since the DDT does not store 2331 * object types. 2332 */ 2333 if (os->os_encrypted && (wp & WP_NOFILL) == 0) { 2334 encrypt = B_TRUE; 2335 2336 if (DMU_OT_IS_ENCRYPTED(type)) { 2337 copies = MIN(copies, SPA_DVAS_PER_BP - 1); 2338 nopwrite = B_FALSE; 2339 } else { 2340 dedup = B_FALSE; 2341 } 2342 2343 if (level <= 0 && 2344 (type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) { 2345 compress = ZIO_COMPRESS_EMPTY; 2346 } 2347 } 2348 2349 zp->zp_compress = compress; 2350 zp->zp_complevel = complevel; 2351 zp->zp_checksum = checksum; 2352 zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type; 2353 zp->zp_level = level; 2354 zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa)); 2355 zp->zp_dedup = dedup; 2356 zp->zp_dedup_verify = dedup && dedup_verify; 2357 zp->zp_nopwrite = nopwrite; 2358 zp->zp_encrypt = encrypt; 2359 zp->zp_byteorder = ZFS_HOST_BYTEORDER; 2360 memset(zp->zp_salt, 0, ZIO_DATA_SALT_LEN); 2361 memset(zp->zp_iv, 0, ZIO_DATA_IV_LEN); 2362 memset(zp->zp_mac, 0, ZIO_DATA_MAC_LEN); 2363 zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ? 2364 os->os_zpl_special_smallblock : 0; 2365 zp->zp_storage_type = dn ? dn->dn_storage_type : DMU_OT_NONE; 2366 2367 ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT); 2368 } 2369 2370 /* 2371 * Reports the location of data and holes in an object. In order to 2372 * accurately report holes all dirty data must be synced to disk. This 2373 * causes extremely poor performance when seeking for holes in a dirty file. 2374 * As a compromise, only provide hole data when the dnode is clean. When 2375 * a dnode is dirty report the dnode as having no holes by returning EBUSY 2376 * which is always safe to do. 
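 *
 * As an illustrative sketch only (the real handling lives in the
 * lseek(SEEK_HOLE/SEEK_DATA) code, e.g. zfs_holey_common()), a caller
 * might consume the result roughly like this, where "file_size" stands
 * for the object's current logical size:
 *
 *	error = dmu_offset_next(os, object, B_TRUE, &off);
 *	if (error == EBUSY)
 *		off = file_size;	/* dirty file: report no hole */
 *	else if (error == ESRCH)
 *		error = SET_ERROR(ENXIO);	/* nothing found past off */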
2377 */ 2378 int 2379 dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off) 2380 { 2381 dnode_t *dn; 2382 int restarted = 0, err; 2383 2384 restart: 2385 err = dnode_hold(os, object, FTAG, &dn); 2386 if (err) 2387 return (err); 2388 2389 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2390 2391 if (dnode_is_dirty(dn)) { 2392 /* 2393 * If the zfs_dmu_offset_next_sync module option is enabled 2394 * then hole reporting has been requested. Dirty dnodes 2395 * must be synced to disk to accurately report holes. 2396 * 2397 * Provided a RL_READER rangelock spanning 0-UINT64_MAX is 2398 * held by the caller, only a single restart will be required. 2399 * We tolerate callers which do not hold the rangelock by 2400 * returning EBUSY and not reporting holes after one restart. 2401 */ 2402 if (zfs_dmu_offset_next_sync) { 2403 rw_exit(&dn->dn_struct_rwlock); 2404 dnode_rele(dn, FTAG); 2405 2406 if (restarted) 2407 return (SET_ERROR(EBUSY)); 2408 2409 txg_wait_synced(dmu_objset_pool(os), 0); 2410 restarted = 1; 2411 goto restart; 2412 } 2413 2414 err = SET_ERROR(EBUSY); 2415 } else { 2416 err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK | 2417 (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0); 2418 } 2419 2420 rw_exit(&dn->dn_struct_rwlock); 2421 dnode_rele(dn, FTAG); 2422 2423 return (err); 2424 } 2425 2426 int 2427 dmu_read_l0_bps(objset_t *os, uint64_t object, uint64_t offset, uint64_t length, 2428 blkptr_t *bps, size_t *nbpsp) 2429 { 2430 dmu_buf_t **dbp, *dbuf; 2431 dmu_buf_impl_t *db; 2432 blkptr_t *bp; 2433 int error, numbufs; 2434 2435 error = dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG, 2436 &numbufs, &dbp); 2437 if (error != 0) { 2438 if (error == ESRCH) { 2439 error = SET_ERROR(ENXIO); 2440 } 2441 return (error); 2442 } 2443 2444 ASSERT3U(numbufs, <=, *nbpsp); 2445 2446 for (int i = 0; i < numbufs; i++) { 2447 dbuf = dbp[i]; 2448 db = (dmu_buf_impl_t *)dbuf; 2449 2450 mutex_enter(&db->db_mtx); 2451 2452 if (!list_is_empty(&db->db_dirty_records)) { 2453 dbuf_dirty_record_t *dr; 2454 2455 dr = list_head(&db->db_dirty_records); 2456 if (dr->dt.dl.dr_brtwrite) { 2457 /* 2458 * This is a very special case: we clone a 2459 * block and, in the same transaction group, 2460 * read its BP (most likely to clone the clone). 2461 */ 2462 bp = &dr->dt.dl.dr_overridden_by; 2463 } else { 2464 /* 2465 * The block was modified in the same 2466 * transaction group. 2467 */ 2468 mutex_exit(&db->db_mtx); 2469 error = SET_ERROR(EAGAIN); 2470 goto out; 2471 } 2472 } else { 2473 bp = db->db_blkptr; 2474 } 2475 2476 mutex_exit(&db->db_mtx); 2477 2478 if (bp == NULL) { 2479 /* 2480 * The file size was increased, but the block was never 2481 * written, otherwise we would either have the block 2482 * pointer or the dirty record and would not get here. 2483 * It is effectively a hole, so report it as such. 2484 */ 2485 BP_ZERO(&bps[i]); 2486 continue; 2487 } 2488 /* 2489 * Make sure we clone only data blocks. 2490 */ 2491 if (BP_IS_METADATA(bp) && !BP_IS_HOLE(bp)) { 2492 error = SET_ERROR(EINVAL); 2493 goto out; 2494 } 2495 2496 /* 2497 * If the block was allocated in a transaction group that is not 2498 * yet synced, we could clone it, but we could not write this 2499 * operation into the ZIL, as it may be impossible to replay: at 2500 * that point the block may appear not yet allocated. 
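 *
 * In that case EAGAIN is returned below. As a hedged illustration (the
 * exact policy belongs to the caller, e.g. the block cloning entry
 * points), a caller may respond by waiting for the txg to sync and
 * retrying, or by falling back to an ordinary copy:
 *
 *	error = dmu_read_l0_bps(os, object, off, len, bps, &nbps);
 *	if (error == EAGAIN) {
 *		txg_wait_synced(dmu_objset_pool(os), 0);
 *		... retry the call, or fall back to a regular copy ...
 *	}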
2501 */ 2502 if (BP_GET_BIRTH(bp) > spa_freeze_txg(os->os_spa)) { 2503 error = SET_ERROR(EINVAL); 2504 goto out; 2505 } 2506 if (BP_GET_BIRTH(bp) > spa_last_synced_txg(os->os_spa)) { 2507 error = SET_ERROR(EAGAIN); 2508 goto out; 2509 } 2510 2511 bps[i] = *bp; 2512 } 2513 2514 *nbpsp = numbufs; 2515 out: 2516 dmu_buf_rele_array(dbp, numbufs, FTAG); 2517 2518 return (error); 2519 } 2520 2521 int 2522 dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset, uint64_t length, 2523 dmu_tx_t *tx, const blkptr_t *bps, size_t nbps) 2524 { 2525 spa_t *spa; 2526 dmu_buf_t **dbp, *dbuf; 2527 dmu_buf_impl_t *db; 2528 struct dirty_leaf *dl; 2529 dbuf_dirty_record_t *dr; 2530 const blkptr_t *bp; 2531 int error = 0, i, numbufs; 2532 2533 spa = os->os_spa; 2534 2535 VERIFY0(dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG, 2536 &numbufs, &dbp)); 2537 ASSERT3U(nbps, ==, numbufs); 2538 2539 /* 2540 * Before we start cloning, make sure that the dbuf sizes match the 2541 * new BP sizes. If they don't, that's a no-go, as we are not able to 2542 * shrink dbufs. 2543 */ 2544 for (i = 0; i < numbufs; i++) { 2545 dbuf = dbp[i]; 2546 db = (dmu_buf_impl_t *)dbuf; 2547 bp = &bps[i]; 2548 2549 ASSERT0(db->db_level); 2550 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2551 ASSERT(db->db_blkid != DMU_SPILL_BLKID); 2552 2553 if (!BP_IS_HOLE(bp) && BP_GET_LSIZE(bp) != dbuf->db_size) { 2554 error = SET_ERROR(EXDEV); 2555 goto out; 2556 } 2557 } 2558 2559 for (i = 0; i < numbufs; i++) { 2560 dbuf = dbp[i]; 2561 db = (dmu_buf_impl_t *)dbuf; 2562 bp = &bps[i]; 2563 2564 ASSERT0(db->db_level); 2565 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2566 ASSERT(db->db_blkid != DMU_SPILL_BLKID); 2567 ASSERT(BP_IS_HOLE(bp) || dbuf->db_size == BP_GET_LSIZE(bp)); 2568 2569 dmu_buf_will_clone(dbuf, tx); 2570 2571 mutex_enter(&db->db_mtx); 2572 2573 dr = list_head(&db->db_dirty_records); 2574 VERIFY(dr != NULL); 2575 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2576 dl = &dr->dt.dl; 2577 dl->dr_overridden_by = *bp; 2578 if (!BP_IS_HOLE(bp) || BP_GET_LOGICAL_BIRTH(bp) != 0) { 2579 if (!BP_IS_EMBEDDED(bp)) { 2580 BP_SET_BIRTH(&dl->dr_overridden_by, dr->dr_txg, 2581 BP_GET_BIRTH(bp)); 2582 } else { 2583 BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, 2584 dr->dr_txg); 2585 } 2586 } 2587 dl->dr_brtwrite = B_TRUE; 2588 dl->dr_override_state = DR_OVERRIDDEN; 2589 2590 mutex_exit(&db->db_mtx); 2591 2592 /* 2593 * When the data is embedded into the BP, there is no need to 2594 * create a BRT entry, as there is no data block. Just copy the 2595 * BP, as it contains the data. 2596 */ 2597 if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) { 2598 brt_pending_add(spa, bp, tx); 2599 } 2600 } 2601 out: 2602 dmu_buf_rele_array(dbp, numbufs, FTAG); 2603 2604 return (error); 2605 } 2606 2607 void 2608 __dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi) 2609 { 2610 dnode_phys_t *dnp = dn->dn_phys; 2611 2612 doi->doi_data_block_size = dn->dn_datablksz; 2613 doi->doi_metadata_block_size = dn->dn_indblkshift ? 
2614 1ULL << dn->dn_indblkshift : 0; 2615 doi->doi_type = dn->dn_type; 2616 doi->doi_bonus_type = dn->dn_bonustype; 2617 doi->doi_bonus_size = dn->dn_bonuslen; 2618 doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT; 2619 doi->doi_indirection = dn->dn_nlevels; 2620 doi->doi_checksum = dn->dn_checksum; 2621 doi->doi_compress = dn->dn_compress; 2622 doi->doi_nblkptr = dn->dn_nblkptr; 2623 doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9; 2624 doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz; 2625 doi->doi_fill_count = 0; 2626 for (int i = 0; i < dnp->dn_nblkptr; i++) 2627 doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]); 2628 } 2629 2630 void 2631 dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi) 2632 { 2633 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2634 mutex_enter(&dn->dn_mtx); 2635 2636 __dmu_object_info_from_dnode(dn, doi); 2637 2638 mutex_exit(&dn->dn_mtx); 2639 rw_exit(&dn->dn_struct_rwlock); 2640 } 2641 2642 /* 2643 * Get information on a DMU object. 2644 * If doi is NULL, just indicates whether the object exists. 2645 */ 2646 int 2647 dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi) 2648 { 2649 dnode_t *dn; 2650 int err = dnode_hold(os, object, FTAG, &dn); 2651 2652 if (err) 2653 return (err); 2654 2655 if (doi != NULL) 2656 dmu_object_info_from_dnode(dn, doi); 2657 2658 dnode_rele(dn, FTAG); 2659 return (0); 2660 } 2661 2662 /* 2663 * As above, but faster; can be used when you have a held dbuf in hand. 2664 */ 2665 void 2666 dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi) 2667 { 2668 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2669 2670 DB_DNODE_ENTER(db); 2671 dmu_object_info_from_dnode(DB_DNODE(db), doi); 2672 DB_DNODE_EXIT(db); 2673 } 2674 2675 /* 2676 * Faster still when you only care about the size. 
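 *
 * For example (sketch only; assumes the caller already holds a dbuf of
 * the object, e.g. a znode's SA dbuf):
 *
 *	uint32_t blksize;
 *	u_longlong_t nblk512;
 *	dmu_object_size_from_db(db, &blksize, &nblk512);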
2677 */ 2678 void 2679 dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize, 2680 u_longlong_t *nblk512) 2681 { 2682 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2683 dnode_t *dn; 2684 2685 DB_DNODE_ENTER(db); 2686 dn = DB_DNODE(db); 2687 2688 *blksize = dn->dn_datablksz; 2689 /* add in number of slots used for the dnode itself */ 2690 *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >> 2691 SPA_MINBLOCKSHIFT) + dn->dn_num_slots; 2692 DB_DNODE_EXIT(db); 2693 } 2694 2695 void 2696 dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize) 2697 { 2698 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2699 2700 DB_DNODE_ENTER(db); 2701 *dnsize = DB_DNODE(db)->dn_num_slots << DNODE_SHIFT; 2702 DB_DNODE_EXIT(db); 2703 } 2704 2705 void 2706 byteswap_uint64_array(void *vbuf, size_t size) 2707 { 2708 uint64_t *buf = vbuf; 2709 size_t count = size >> 3; 2710 int i; 2711 2712 ASSERT((size & 7) == 0); 2713 2714 for (i = 0; i < count; i++) 2715 buf[i] = BSWAP_64(buf[i]); 2716 } 2717 2718 void 2719 byteswap_uint32_array(void *vbuf, size_t size) 2720 { 2721 uint32_t *buf = vbuf; 2722 size_t count = size >> 2; 2723 int i; 2724 2725 ASSERT((size & 3) == 0); 2726 2727 for (i = 0; i < count; i++) 2728 buf[i] = BSWAP_32(buf[i]); 2729 } 2730 2731 void 2732 byteswap_uint16_array(void *vbuf, size_t size) 2733 { 2734 uint16_t *buf = vbuf; 2735 size_t count = size >> 1; 2736 int i; 2737 2738 ASSERT((size & 1) == 0); 2739 2740 for (i = 0; i < count; i++) 2741 buf[i] = BSWAP_16(buf[i]); 2742 } 2743 2744 void 2745 byteswap_uint8_array(void *vbuf, size_t size) 2746 { 2747 (void) vbuf, (void) size; 2748 } 2749 2750 void 2751 dmu_init(void) 2752 { 2753 abd_init(); 2754 zfs_dbgmsg_init(); 2755 sa_cache_init(); 2756 dmu_objset_init(); 2757 dnode_init(); 2758 zfetch_init(); 2759 dmu_tx_init(); 2760 l2arc_init(); 2761 arc_init(); 2762 dbuf_init(); 2763 } 2764 2765 void 2766 dmu_fini(void) 2767 { 2768 arc_fini(); /* arc depends on l2arc, so arc must go first */ 2769 l2arc_fini(); 2770 dmu_tx_fini(); 2771 zfetch_fini(); 2772 dbuf_fini(); 2773 dnode_fini(); 2774 dmu_objset_fini(); 2775 sa_cache_fini(); 2776 zfs_dbgmsg_fini(); 2777 abd_fini(); 2778 } 2779 2780 EXPORT_SYMBOL(dmu_bonus_hold); 2781 EXPORT_SYMBOL(dmu_bonus_hold_by_dnode); 2782 EXPORT_SYMBOL(dmu_buf_hold_array_by_bonus); 2783 EXPORT_SYMBOL(dmu_buf_rele_array); 2784 EXPORT_SYMBOL(dmu_prefetch); 2785 EXPORT_SYMBOL(dmu_prefetch_by_dnode); 2786 EXPORT_SYMBOL(dmu_prefetch_dnode); 2787 EXPORT_SYMBOL(dmu_free_range); 2788 EXPORT_SYMBOL(dmu_free_long_range); 2789 EXPORT_SYMBOL(dmu_free_long_object); 2790 EXPORT_SYMBOL(dmu_read); 2791 EXPORT_SYMBOL(dmu_read_by_dnode); 2792 EXPORT_SYMBOL(dmu_write); 2793 EXPORT_SYMBOL(dmu_write_by_dnode); 2794 EXPORT_SYMBOL(dmu_prealloc); 2795 EXPORT_SYMBOL(dmu_object_info); 2796 EXPORT_SYMBOL(dmu_object_info_from_dnode); 2797 EXPORT_SYMBOL(dmu_object_info_from_db); 2798 EXPORT_SYMBOL(dmu_object_size_from_db); 2799 EXPORT_SYMBOL(dmu_object_dnsize_from_db); 2800 EXPORT_SYMBOL(dmu_object_set_nlevels); 2801 EXPORT_SYMBOL(dmu_object_set_blocksize); 2802 EXPORT_SYMBOL(dmu_object_set_maxblkid); 2803 EXPORT_SYMBOL(dmu_object_set_checksum); 2804 EXPORT_SYMBOL(dmu_object_set_compress); 2805 EXPORT_SYMBOL(dmu_offset_next); 2806 EXPORT_SYMBOL(dmu_write_policy); 2807 EXPORT_SYMBOL(dmu_sync); 2808 EXPORT_SYMBOL(dmu_request_arcbuf); 2809 EXPORT_SYMBOL(dmu_return_arcbuf); 2810 EXPORT_SYMBOL(dmu_assign_arcbuf_by_dnode); 2811 EXPORT_SYMBOL(dmu_assign_arcbuf_by_dbuf); 2812 EXPORT_SYMBOL(dmu_buf_hold); 2813 EXPORT_SYMBOL(dmu_ot); 2814 
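/*
 * A minimal usage sketch for in-kernel consumers of the interfaces
 * exported above; "os" and "object" are assumed to be a held objset
 * and a valid object number obtained elsewhere:
 *
 *	dmu_object_info_t doi;
 *	int err = dmu_object_info(os, object, &doi);
 *	if (err == 0)
 *		... use doi.doi_data_block_size, doi.doi_max_offset ...
 */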
2815 ZFS_MODULE_PARAM(zfs, zfs_, nopwrite_enabled, INT, ZMOD_RW, 2816 "Enable NOP writes"); 2817 2818 ZFS_MODULE_PARAM(zfs, zfs_, per_txg_dirty_frees_percent, UINT, ZMOD_RW, 2819 "Percentage of dirtied blocks from frees in one TXG"); 2820 2821 ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW, 2822 "Enable forcing txg sync to find holes"); 2823 2824 /* CSTYLED */ 2825 ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, UINT, ZMOD_RW, 2826 "Limit one prefetch call to this size"); 2827