/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2016, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2019, 2023, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_racct.h>
#include <sys/zfs_rlock.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif

/*
 * Enable/disable nopwrite feature.
 */
static int zfs_nopwrite_enabled = 1;

/*
 * Tunable to control percentage of dirtied L1 blocks from frees allowed into
 * one TXG. After this threshold is crossed, additional dirty blocks from frees
 * will wait until the next TXG.
 * A value of zero will disable this throttle.
 */
static uint_t zfs_per_txg_dirty_frees_percent = 30;

/*
 * Enable/disable forcing txg sync when dirty checking for holes with lseek().
 * By default this is enabled to ensure accurate hole reporting, but it can
 * result in a significant performance penalty for lseek(SEEK_HOLE) heavy
 * workloads. Disabling this option will result in holes never being reported
 * in dirty files, which is always safe.
 */
static int zfs_dmu_offset_next_sync = 1;

/*
 * Limit the amount we can prefetch with one call to this amount. This
 * helps to limit the amount of memory that can be used by prefetching.
 * Larger objects should be prefetched a bit at a time.
 */
#ifdef _ILP32
uint_t dmu_prefetch_max = 8 * 1024 * 1024;
#else
uint_t dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
#endif

/*
 * Override copies= for dedup state objects.
 * 0 means the traditional behaviour (ie the default for the containing
 * objset ie 3 for the MOS).
 */
uint_t dmu_ddt_copies = 0;

const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "object array" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "packed nvlist" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "packed nvlist size" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj header" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map header" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, TRUE, "ZIL intent log" },
	{DMU_BSWAP_DNODE, TRUE, FALSE, TRUE, "DMU dnode" },
	{DMU_BSWAP_OBJSET, TRUE, TRUE, FALSE, "DMU objset" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL directory" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL directory child map"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset snap map" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL props" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL dataset" },
	{DMU_BSWAP_ZNODE, TRUE, FALSE, FALSE, "ZFS znode" },
	{DMU_BSWAP_OLDACL, TRUE, FALSE, TRUE, "ZFS V0 ACL" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "ZFS plain file" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS directory" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "ZFS master node" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS delete queue" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "zvol object" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "zvol prop" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "other uint8[]" },
	{DMU_BSWAP_UINT64, FALSE, FALSE, TRUE, "other uint64[]" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "other ZAP" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "persistent error log" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "SPA history" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA history offsets" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "Pool properties" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL permissions" },
	{DMU_BSWAP_ACL, TRUE, FALSE, TRUE, "ZFS ACL" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "ZFS SYSACL" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "FUID table" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "FUID table size" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset next clones"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan work queue" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project used" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project quota"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "snapshot refcount tags"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT ZAP algorithm" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT statistics" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "System attributes" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA master node" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr registration" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr layouts" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan translations" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "deduplicated block" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL deadlist map" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL deadlist map hdr" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dir clones" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj subobj" }
};

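/*
 * Illustrative sketch (not part of the original source): consumers normally
 * reach the dmu_ot table above through the DMU_OT_* macros rather than
 * indexing it directly, e.g. something like:
 *
 *	if (DMU_OT_IS_VALID(type) && dmu_ot[type].ot_metadata)
 *		metadata_hint = B_TRUE;
 *
 * The ot_metadata field name follows the dmu_object_type_info_t definition
 * in sys/dmu.h; treat the snippet as a usage hint only.
 */
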
dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
	{ byteswap_uint8_array, "uint8" },
	{ byteswap_uint16_array, "uint16" },
	{ byteswap_uint32_array, "uint32" },
	{ byteswap_uint64_array, "uint64" },
	{ zap_byteswap, "zap" },
	{ dnode_buf_byteswap, "dnode" },
	{ dmu_objset_byteswap, "objset" },
	{ zfs_znode_byteswap, "znode" },
	{ zfs_oldacl_byteswap, "oldacl" },
	{ zfs_acl_byteswap, "acl" }
};

int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
    const void *tag, dmu_buf_t **dbp)
{
	uint64_t blkid;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (0);
}

int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
    const void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (err);
}

int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
    const void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}

int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    const void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}

int
dmu_bonus_max(void)
{
	return (DN_OLD_MAX_BONUSLEN);
}

int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	if (newsize < 0 || newsize > db_fake->db_size)
		return (SET_ERROR(EINVAL));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonuslen(dn, newsize, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	if (!DMU_OT_IS_VALID(type))
		return (SET_ERROR(EINVAL));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonus_type(dn, type, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dmu_object_type_t type;

	DB_DNODE_ENTER(db);
	type = DB_DNODE(db)->dn_bonustype;
	DB_DNODE_EXIT(db);

	return (type);
}

int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	dbuf_rm_spill(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_rm_spill(dn, tx);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}

/*
 * Lookup and hold the bonus buffer for the provided dnode. If the dnode
 * has not yet been allocated a bonus dbuf, one will be allocated.
 * Returns ENOENT, EIO, or 0.
 */
int dmu_bonus_hold_by_dnode(dnode_t *dn, const void *tag, dmu_buf_t **dbp,
    uint32_t flags)
{
	dmu_buf_impl_t *db;
	int error;
	uint32_t db_flags = DB_RF_MUST_SUCCEED;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
			rw_exit(&dn->dn_struct_rwlock);
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		}
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;

	/* as long as the bonus buf is held, the dnode will be held */
	if (zfs_refcount_add(&db->db_holds, tag) == 1) {
		VERIFY(dnode_add_ref(dn, db));
		atomic_inc_32(&dn->dn_dbufs_count);
	}

	/*
	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
	 * a dnode hold for every dbuf.
	 */
	rw_exit(&dn->dn_struct_rwlock);

	error = dbuf_read(db, NULL, db_flags);
	if (error) {
		dnode_evict_bonus(dn);
		dbuf_rele(db, tag);
		*dbp = NULL;
		return (error);
	}

	*dbp = &db->db;
	return (0);
}

int
dmu_bonus_hold(objset_t *os, uint64_t object, const void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);

	error = dmu_bonus_hold_by_dnode(dn, tag, dbp, DMU_READ_NO_PREFETCH);
	dnode_rele(dn, FTAG);

	return (error);
}

/*
 * returns ENOENT, EIO, or 0.
 *
 * This interface will allocate a blank spill dbuf when a spill blk
 * doesn't already exist on the dnode.
 *
 * if you only want to find an already existing spill db, then
 * dmu_spill_hold_existing() should be used.
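 *
 * Illustrative only (a sketch, not part of the original source): a caller
 * that already holds the bonus buffer might look up an existing spill
 * block like:
 *
 *	dmu_buf_t *spill_db;
 *	error = dmu_spill_hold_existing(bonus_db, FTAG, &spill_db);
 *	if (error == 0)
 *		dmu_buf_rele(spill_db, FTAG);
 *
 * where bonus_db is assumed to come from a prior dmu_bonus_hold().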
 */
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, const void *tag,
    dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = NULL;
	int err;

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}
	err = dbuf_read(db, NULL, flags);
	if (err == 0)
		*dbp = &db->db;
	else {
		dbuf_rele(db, tag);
		*dbp = NULL;
	}
	return (err);
}

int
dmu_spill_hold_existing(dmu_buf_t *bonus, const void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
		err = SET_ERROR(EINVAL);
	} else {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

		if (!dn->dn_have_spill) {
			err = SET_ERROR(ENOENT);
		} else {
			err = dmu_spill_hold_by_dnode(dn,
			    DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
		}

		rw_exit(&dn->dn_struct_rwlock);
	}

	DB_DNODE_EXIT(db);
	return (err);
}

int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, const void *tag,
    dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	int err;
	uint32_t db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	DB_DNODE_ENTER(db);
	err = dmu_spill_hold_by_dnode(DB_DNODE(db), db_flags, tag, dbp);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    boolean_t read, const void *tag, int *numbufsp, dmu_buf_t ***dbpp,
    uint32_t flags)
{
	dmu_buf_t **dbp;
	zstream_t *zs = NULL;
	uint64_t blkid, nblks, i;
	uint32_t dbuf_flags;
	int err;
	zio_t *zio = NULL;
	boolean_t missed = B_FALSE;

	ASSERT(!read || length <= DMU_MAX_ACCESS);

	/*
	 * Note: We directly notify the prefetch code of this read, so that
	 * we can tell it about the multi-block read. dbuf_read() only knows
	 * about the one block it is accessing.
	 */
	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
	    DB_RF_NOPREFETCH;

	if ((flags & DMU_READ_NO_DECRYPT) != 0)
		dbuf_flags |= DB_RF_NO_DECRYPT;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
		    P2ALIGN_TYPED(offset, 1ULL << blkshift, uint64_t))
		    >> blkshift;
	} else {
		if (offset + length > dn->dn_datablksz) {
			zfs_panic_recover("zfs: accessing past end of object "
			    "%llx/%llx (size=%u access=%llu+%llu)",
			    (longlong_t)dn->dn_objset->
			    os_dsl_dataset->ds_object,
			    (longlong_t)dn->dn_object, dn->dn_datablksz,
			    (longlong_t)offset, (longlong_t)length);
			rw_exit(&dn->dn_struct_rwlock);
			return (SET_ERROR(EIO));
		}
		nblks = 1;
	}
	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

	if (read)
		zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	blkid = dbuf_whichblock(dn, 0, offset);
	if ((flags & DMU_READ_NO_PREFETCH) == 0) {
		/*
		 * Prepare the zfetch before initiating the demand reads, so
		 * that if multiple threads block on same indirect block, we
		 * base predictions on the original less racy request order.
		 */
		zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks, read,
		    B_TRUE);
	}
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
		if (db == NULL) {
			if (zs) {
				dmu_zfetch_run(&dn->dn_zfetch, zs, missed,
				    B_TRUE);
			}
			rw_exit(&dn->dn_struct_rwlock);
			dmu_buf_rele_array(dbp, nblks, tag);
			if (read)
				zio_nowait(zio);
			return (SET_ERROR(EIO));
		}

		/*
		 * Initiate async demand data read.
		 * We check the db_state after calling dbuf_read() because
		 * (1) dbuf_read() may change the state to CACHED due to a
		 * hit in the ARC, and (2) on a cache miss, a child will
		 * have been added to "zio" but not yet completed, so the
		 * state will not yet be CACHED.
		 */
		if (read) {
			if (i == nblks - 1 && blkid + i < dn->dn_maxblkid &&
			    offset + length < db->db.db_offset +
			    db->db.db_size) {
				if (offset <= db->db.db_offset)
					dbuf_flags |= DB_RF_PARTIAL_FIRST;
				else
					dbuf_flags |= DB_RF_PARTIAL_MORE;
			}
			(void) dbuf_read(db, zio, dbuf_flags);
			if (db->db_state != DB_CACHED)
				missed = B_TRUE;
		}
		dbp[i] = &db->db;
	}

	if (!read)
		zfs_racct_write(length, nblks);

	if (zs)
		dmu_zfetch_run(&dn->dn_zfetch, zs, missed, B_TRUE);
	rw_exit(&dn->dn_struct_rwlock);

	if (read) {
		/* wait for async read i/o */
		err = zio_wait(zio);
		if (err) {
			dmu_buf_rele_array(dbp, nblks, tag);
			return (err);
		}

		/* wait for other io to complete */
		for (i = 0; i < nblks; i++) {
			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
			mutex_enter(&db->db_mtx);
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL)
				cv_wait(&db->db_changed, &db->db_mtx);
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
			mutex_exit(&db->db_mtx);
			if (err) {
				dmu_buf_rele_array(dbp, nblks, tag);
				return (err);
			}
		}
	}

	*numbufsp = nblks;
	*dbpp = dbp;
	return (0);
}

int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, const void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
    uint64_t length, boolean_t read, const void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	int err;

	DB_DNODE_ENTER(db);
	err = dmu_buf_hold_array_by_dnode(DB_DNODE(db), offset, length, read,
	    tag, numbufsp, dbpp, DMU_READ_PREFETCH);
	DB_DNODE_EXIT(db);

	return (err);
}

void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, const void *tag)
{
	int i;
	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

	if (numbufs == 0)
		return;

	for (i = 0; i < numbufs; i++) {
		if (dbp[i])
			dbuf_rele(dbp[i], tag);
	}

	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}

/*
 * Issue prefetch I/Os for the given blocks. If level is greater than 0, the
 * indirect blocks prefetched will be those that point to the blocks containing
 * the data starting at offset, and continuing to offset + len. If the range
 * is too long, prefetch the first dmu_prefetch_max bytes as requested, while
 * for the rest only a higher level, also fitting within dmu_prefetch_max. It
 * should primarily help random reads, since for long sequential reads there is
 * a speculative prefetcher.
 *
 * Note that if the indirect blocks above the blocks being prefetched are not
 * in cache, they will be asynchronously read in. Dnode read by dnode_hold()
 * is currently synchronous.
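 *
 * Illustrative only (a sketch, not part of the original source): to warm
 * the ARC with the first megabyte of an object's level-0 data, a caller
 * might issue:
 *
 *	dmu_prefetch(os, object, 0, 0, 1 << 20, ZIO_PRIORITY_ASYNC_READ);
 *
 * The call only issues the prefetch I/Os; see dmu_prefetch_wait() below for
 * a variant that waits for them to complete.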
 */
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, zio_priority_t pri)
{
	dnode_t *dn;

	if (dmu_prefetch_max == 0 || len == 0) {
		dmu_prefetch_dnode(os, object, pri);
		return;
	}

	if (dnode_hold(os, object, FTAG, &dn) != 0)
		return;

	dmu_prefetch_by_dnode(dn, level, offset, len, pri);

	dnode_rele(dn, FTAG);
}

void
dmu_prefetch_by_dnode(dnode_t *dn, int64_t level, uint64_t offset,
    uint64_t len, zio_priority_t pri)
{
	int64_t level2 = level;
	uint64_t start, end, start2, end2;

	/*
	 * Depending on len we may do two prefetches: blocks [start, end) at
	 * level, and following blocks [start2, end2) at higher level2.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift != 0) {
		/*
		 * The object has multiple blocks. Calculate the full range
		 * of blocks [start, end2) and then split it into two parts,
		 * so that the first [start, end) fits into dmu_prefetch_max.
		 */
		start = dbuf_whichblock(dn, level, offset);
		end2 = dbuf_whichblock(dn, level, offset + len - 1) + 1;
		uint8_t ibs = dn->dn_indblkshift;
		uint8_t bs = (level == 0) ? dn->dn_datablkshift : ibs;
		uint_t limit = P2ROUNDUP(dmu_prefetch_max, 1 << bs) >> bs;
		start2 = end = MIN(end2, start + limit);

		/*
		 * Find level2 where [start2, end2) fits into dmu_prefetch_max.
		 */
		uint8_t ibps = ibs - SPA_BLKPTRSHIFT;
		limit = P2ROUNDUP(dmu_prefetch_max, 1 << ibs) >> ibs;
		do {
			level2++;
			start2 = P2ROUNDUP(start2, 1 << ibps) >> ibps;
			end2 = P2ROUNDUP(end2, 1 << ibps) >> ibps;
		} while (end2 - start2 > limit);
	} else {
		/* There is only one block. Prefetch it or nothing.
		 */
		start = start2 = end2 = 0;
		end = start + (level == 0 && offset < dn->dn_datablksz);
	}

	for (uint64_t i = start; i < end; i++)
		dbuf_prefetch(dn, level, i, pri, 0);
	for (uint64_t i = start2; i < end2; i++)
		dbuf_prefetch(dn, level2, i, pri, 0);
	rw_exit(&dn->dn_struct_rwlock);
}

typedef struct {
	kmutex_t dpa_lock;
	kcondvar_t dpa_cv;
	uint64_t dpa_pending_io;
} dmu_prefetch_arg_t;

static void
dmu_prefetch_done(void *arg, uint64_t level, uint64_t blkid, boolean_t issued)
{
	(void) level; (void) blkid; (void)issued;
	dmu_prefetch_arg_t *dpa = arg;

	ASSERT0(level);

	mutex_enter(&dpa->dpa_lock);
	ASSERT3U(dpa->dpa_pending_io, >, 0);
	if (--dpa->dpa_pending_io == 0)
		cv_broadcast(&dpa->dpa_cv);
	mutex_exit(&dpa->dpa_lock);
}

static void
dmu_prefetch_wait_by_dnode(dnode_t *dn, uint64_t offset, uint64_t len)
{
	dmu_prefetch_arg_t dpa;

	mutex_init(&dpa.dpa_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dpa.dpa_cv, NULL, CV_DEFAULT, NULL);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);

	uint64_t start = dbuf_whichblock(dn, 0, offset);
	uint64_t end = dbuf_whichblock(dn, 0, offset + len - 1) + 1;
	dpa.dpa_pending_io = end - start;

	for (uint64_t blk = start; blk < end; blk++) {
		(void) dbuf_prefetch_impl(dn, 0, blk, ZIO_PRIORITY_ASYNC_READ,
		    0, dmu_prefetch_done, &dpa);
	}

	rw_exit(&dn->dn_struct_rwlock);

	/* wait for prefetch L0 reads to finish */
	mutex_enter(&dpa.dpa_lock);
	while (dpa.dpa_pending_io > 0) {
		cv_wait(&dpa.dpa_cv, &dpa.dpa_lock);

	}
	mutex_exit(&dpa.dpa_lock);

	mutex_destroy(&dpa.dpa_lock);
	cv_destroy(&dpa.dpa_cv);
}

/*
 * Issue prefetch I/Os for the given L0 block range and wait for the I/O
 * to complete. This does not enforce dmu_prefetch_max and will prefetch
 * the entire range. The blocks are read from disk into the ARC but no
 * decompression occurs (i.e., the dbuf cache is not required).
 */
int
dmu_prefetch_wait(objset_t *os, uint64_t object, uint64_t offset, uint64_t size)
{
	dnode_t *dn;
	int err = 0;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	/*
	 * Chunk the requests (16 indirects worth) so that we can be interrupted
	 */
	uint64_t chunksize;
	if (dn->dn_indblkshift) {
		uint64_t nbps = bp_span_in_blocks(dn->dn_indblkshift, 1);
		chunksize = (nbps * 16) << dn->dn_datablkshift;
	} else {
		chunksize = dn->dn_datablksz;
	}

	while (size > 0) {
		uint64_t mylen = MIN(size, chunksize);

		dmu_prefetch_wait_by_dnode(dn, offset, mylen);

		offset += mylen;
		size -= mylen;

		if (issig()) {
			err = SET_ERROR(EINTR);
			break;
		}
	}

	dnode_rele(dn, FTAG);

	return (err);
}

/*
 * Issue prefetch I/Os for the given object's dnode.
 */
void
dmu_prefetch_dnode(objset_t *os, uint64_t object, zio_priority_t pri)
{
	if (object == 0 || object >= DN_MAX_OBJECT)
		return;

	dnode_t *dn = DMU_META_DNODE(os);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	uint64_t blkid = dbuf_whichblock(dn, 0, object * sizeof (dnode_phys_t));
	dbuf_prefetch(dn, 0, blkid, pri, 0);
	rw_exit(&dn->dn_struct_rwlock);
}

/*
 * Get the next "chunk" of file data to free.
 * We traverse the file from the end so that the file gets shorter over time
 * (if we crash in the middle, this will leave us in a better state). We find
 * allocated file data by simply searching the allocated level 1 indirects.
 *
 * On input, *start should be the first offset that does not need to be
 * freed (e.g. "offset + length"). On return, *start will be the first
 * offset that should be freed and l1blks is set to the number of level 1
 * indirect blocks found within the chunk.
 */
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
{
	uint64_t blks;
	uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
	/* bytes of data covered by a level-1 indirect block */
	uint64_t iblkrange = (uint64_t)dn->dn_datablksz *
	    EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);

	ASSERT3U(minimum, <=, *start);

	/* dn_nlevels == 1 means we don't have any L1 blocks */
	if (dn->dn_nlevels <= 1) {
		*l1blks = 0;
		*start = minimum;
		return (0);
	}

	/*
	 * Check if we can free the entire range assuming that all of the
	 * L1 blocks in this range have data. If we can, we use this
	 * worst case value as an estimate so we can avoid having to look
	 * at the object's actual data.
	 */
	uint64_t total_l1blks =
	    (roundup(*start, iblkrange) - (minimum / iblkrange * iblkrange)) /
	    iblkrange;
	if (total_l1blks <= maxblks) {
		*l1blks = total_l1blks;
		*start = minimum;
		return (0);
	}
	ASSERT(ISP2(iblkrange));

	for (blks = 0; *start > minimum && blks < maxblks; blks++) {
		int err;

		/*
		 * dnode_next_offset(BACKWARDS) will find an allocated L1
		 * indirect block at or before the input offset. We must
		 * decrement *start so that it is at the end of the region
		 * to search.
		 */
		(*start)--;

		err = dnode_next_offset(dn,
		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);

		/* if there are no indirect blocks before start, we are done */
		if (err == ESRCH) {
			*start = minimum;
			break;
		} else if (err != 0) {
			*l1blks = blks;
			return (err);
		}

		/* set start to the beginning of this L1 indirect */
		*start = P2ALIGN_TYPED(*start, iblkrange, uint64_t);
	}
	if (*start < minimum)
		*start = minimum;
	*l1blks = blks;

	return (0);
}

/*
 * If this objset is of type OST_ZFS return true if vfs's unmounted flag is set,
 * otherwise return false.
 * Used below in dmu_free_long_range_impl() to enable abort when unmounting
 */
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS)
		return (zfs_get_vfs_flag_unmounted(os));
#else
	(void) os;
#endif
	return (B_FALSE);
}

static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
    uint64_t length)
{
	uint64_t object_size;
	int err;
	uint64_t dirty_frees_threshold;
	dsl_pool_t *dp = dmu_objset_pool(os);

	if (dn == NULL)
		return (SET_ERROR(EINVAL));

	object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	if (offset >= object_size)
		return (0);

	if (zfs_per_txg_dirty_frees_percent <= 100)
		dirty_frees_threshold =
		    zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
	else
		dirty_frees_threshold = zfs_dirty_data_max / 20;

	if (length == DMU_OBJECT_END || offset + length > object_size)
		length = object_size - offset;

	while (length != 0) {
		uint64_t chunk_end, chunk_begin, chunk_len;
		uint64_t l1blks;
		dmu_tx_t *tx;

		if (dmu_objset_zfs_unmounting(dn->dn_objset))
			return (SET_ERROR(EINTR));

		chunk_end = chunk_begin = offset + length;

		/* move chunk_begin backwards to the beginning of this chunk */
		err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
		if (err)
			return (err);
		ASSERT3U(chunk_begin, >=, offset);
		ASSERT3U(chunk_begin, <=, chunk_end);

		chunk_len = chunk_end - chunk_begin;

		tx = dmu_tx_create(os);
		dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);

		/*
		 * Mark this transaction as typically resulting in a net
		 * reduction in space used.
		 */
		dmu_tx_mark_netfree(tx);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}

		uint64_t txg = dmu_tx_get_txg(tx);

		mutex_enter(&dp->dp_lock);
		uint64_t long_free_dirty =
		    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK];
		mutex_exit(&dp->dp_lock);

		/*
		 * To avoid filling up a TXG with just frees, wait for
		 * the next TXG to open before freeing more chunks if
		 * we have reached the threshold of frees.
		 */
		if (dirty_frees_threshold != 0 &&
		    long_free_dirty >= dirty_frees_threshold) {
			DMU_TX_STAT_BUMP(dmu_tx_dirty_frees_delay);
			dmu_tx_commit(tx);
			txg_wait_open(dp, 0, B_TRUE);
			continue;
		}

		/*
		 * In order to prevent unnecessary write throttling, for each
		 * TXG, we track the cumulative size of L1 blocks being dirtied
		 * in dnode_free_range() below. We compare this number to a
		 * tunable threshold, past which we prevent new L1 dirty freeing
		 * blocks from being added into the open TXG. See
		 * dmu_free_long_range_impl() for details. The threshold
		 * prevents write throttle activation due to dirty freeing L1
		 * blocks taking up a large percentage of zfs_dirty_data_max.
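		 *
		 * As a worked example (illustrative numbers only): with
		 * zfs_dirty_data_max = 4 GiB and
		 * zfs_per_txg_dirty_frees_percent = 30, dirty_frees_threshold
		 * computed above is 1.2 GiB, so once roughly that much L1
		 * metadata has been dirtied by frees in the open TXG, further
		 * chunks wait in txg_wait_open() above.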
		 */
		mutex_enter(&dp->dp_lock);
		dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] +=
		    l1blks << dn->dn_indblkshift;
		mutex_exit(&dp->dp_lock);
		DTRACE_PROBE3(free__long__range,
		    uint64_t, long_free_dirty, uint64_t, chunk_len,
		    uint64_t, txg);
		dnode_free_range(dn, chunk_begin, chunk_len, tx);

		dmu_tx_commit(tx);

		length -= chunk_len;
	}
	return (0);
}

int
dmu_free_long_range(objset_t *os, uint64_t object,
    uint64_t offset, uint64_t length)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);
	err = dmu_free_long_range_impl(os, dn, offset, length);

	/*
	 * It is important to zero out the maxblkid when freeing the entire
	 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
	 * will take the fast path, and (b) dnode_reallocate() can verify
	 * that the entire file has been freed.
	 */
	if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
		dn->dn_maxblkid = 0;

	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_free_long_object(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx;
	int err;

	err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, object);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
	dmu_tx_mark_netfree(tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err == 0) {
		err = dmu_object_free(os, object, tx);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}

	return (err);
}

int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	ASSERT(offset < UINT64_MAX);
	ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
	dnode_free_range(dn, offset, size, tx);
	dnode_rele(dn, FTAG);
	return (0);
}

static int
dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dmu_buf_t **dbp;
	int numbufs, err = 0;

	/*
	 * Deal with odd block sizes, where there can't be data past the first
	 * block. If we ever do the tail block optimization, we will need to
	 * handle that here as well.
	 */
	if (dn->dn_maxblkid == 0) {
		uint64_t newsz = offset > dn->dn_datablksz ? 0 :
		    MIN(size, dn->dn_datablksz - offset);
		memset((char *)buf + newsz, 0, size - newsz);
		size = newsz;
	}

	while (size > 0) {
		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
		int i;

		/*
		 * NB: we could do this block-at-a-time, but it's nice
		 * to be reading in parallel.
		 */
		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
		    TRUE, FTAG, &numbufs, &dbp, flags);
		if (err)
			break;

		for (i = 0; i < numbufs; i++) {
			uint64_t tocpy;
			int64_t bufoff;
			dmu_buf_t *db = dbp[i];

			ASSERT(size > 0);

			bufoff = offset - db->db_offset;
			tocpy = MIN(db->db_size - bufoff, size);

			(void) memcpy(buf, (char *)db->db_data + bufoff, tocpy);

			offset += tocpy;
			size -= tocpy;
			buf = (char *)buf + tocpy;
		}
		dmu_buf_rele_array(dbp, numbufs, FTAG);
	}
	return (err);
}

int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	err = dmu_read_impl(dn, offset, size, buf, flags);
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
    uint32_t flags)
{
	return (dmu_read_impl(dn, offset, size, buf, flags));
}

static void
dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	int i;

	for (i = 0; i < numbufs; i++) {
		uint64_t tocpy;
		int64_t bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = offset - db->db_offset;
		tocpy = MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx, B_FALSE);
		else
			dmu_buf_will_dirty(db, tx);

		(void) memcpy((char *)db->db_data + bufoff, buf, tocpy);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx, B_FALSE);

		offset += tocpy;
		size -= tocpy;
		buf = (char *)buf + tocpy;
	}
}

void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

/*
 * Note: Lustre is an external consumer of this interface.
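 *
 * Illustrative only (a sketch, not part of the original source): with a
 * held dnode, a caller might write a small buffer as:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write_by_dnode(tx, dn, off, len);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		dmu_write_by_dnode(dn, off, len, buf, tx);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 *
 * where os, dn, off, len, and buf are assumed to be set up by the caller.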
 */
void
dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;

	if (size == 0)
		return;

	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		dmu_buf_t *db = dbp[i];

		dmu_buf_will_not_fill(db, tx);
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
    void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
    int compressed_size, int byteorder, dmu_tx_t *tx)
{
	dmu_buf_t *db;

	ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
	ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
	VERIFY0(dmu_buf_hold_noread(os, object, offset,
	    FTAG, &db));

	dmu_buf_write_embedded(db,
	    data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
	    uncompressed_size, compressed_size, byteorder, tx);

	dmu_buf_rele(db, FTAG);
}

void
dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
	int numbufs, i;
	dmu_buf_t **dbp;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
	    &numbufs, &dbp));
	for (i = 0; i < numbufs; i++)
		dmu_buf_redact(dbp[i], tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

#ifdef _KERNEL
int
dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size)
{
	dmu_buf_t **dbp;
	int numbufs, i, err;

	/*
	 * NB: we could do this block-at-a-time, but it's nice
	 * to be reading in parallel.
	 */
	err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
	    TRUE, FTAG, &numbufs, &dbp, 0);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		uint64_t tocpy;
		int64_t bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = zfs_uio_offset(uio) - db->db_offset;
		tocpy = MIN(db->db_size - bufoff, size);

		err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy,
		    UIO_READ, uio);

		if (err)
			break;

		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);

	return (err);
}

/*
 * Read 'size' bytes into the uio buffer.
 * From object zdb->db_object.
 * Starting at zfs_uio_offset(uio).
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
	int err;

	if (size == 0)
		return (0);

	DB_DNODE_ENTER(db);
	err = dmu_read_uio_dnode(DB_DNODE(db), uio, size);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Read 'size' bytes into the uio buffer.
 * From the specified object
 * Starting at offset zfs_uio_offset(uio).
 */
int
dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size)
{
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_read_uio_dnode(dn, uio, size);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;
	int err = 0;
	int i;

	err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		uint64_t tocpy;
		int64_t bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		offset_t off = zfs_uio_offset(uio);
		bufoff = off - db->db_offset;
		tocpy = MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx, B_TRUE);
		else
			dmu_buf_will_dirty(db, tx);

		err = zfs_uio_fault_move((char *)db->db_data + bufoff,
		    tocpy, UIO_WRITE, uio);

		if (tocpy == db->db_size && dmu_buf_fill_done(db, tx, err)) {
			/* The fill was reverted. Undo any uio progress. */
			zfs_uio_advance(uio, off - zfs_uio_offset(uio));
		}

		if (err)
			break;

		size -= tocpy;
	}

	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}

/*
 * Write 'size' bytes from the uio buffer.
 * To object zdb->db_object.
 * Starting at offset zfs_uio_offset(uio).
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
	int err;

	if (size == 0)
		return (0);

	DB_DNODE_ENTER(db);
	err = dmu_write_uio_dnode(DB_DNODE(db), uio, size, tx);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Write 'size' bytes from the uio buffer.
 * To the specified object.
 * Starting at offset zfs_uio_offset(uio).
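 *
 * Illustrative note (not part of the original source): the caller is
 * expected to have assigned a transaction covering the range first, e.g.
 *
 *	dmu_tx_hold_write(tx, object, zfs_uio_offset(uio), size);
 *
 * before calling this function.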
 */
int
dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_write_uio_dnode(dn, uio, size, tx);

	dnode_rele(dn, FTAG);

	return (err);
}
#endif /* _KERNEL */

static void
dmu_cached_bps(spa_t *spa, blkptr_t *bps, uint_t nbps,
    uint64_t *l1sz, uint64_t *l2sz)
{
	int cached_flags;

	if (bps == NULL)
		return;

	for (size_t blk_off = 0; blk_off < nbps; blk_off++) {
		blkptr_t *bp = &bps[blk_off];

		if (BP_IS_HOLE(bp))
			continue;

		cached_flags = arc_cached(spa, bp);
		if (cached_flags == 0)
			continue;

		if ((cached_flags & (ARC_CACHED_IN_L1 | ARC_CACHED_IN_L2)) ==
		    ARC_CACHED_IN_L2)
			*l2sz += BP_GET_LSIZE(bp);
		else
			*l1sz += BP_GET_LSIZE(bp);
	}
}

/*
 * Estimate DMU object cached size.
 */
int
dmu_object_cached_size(objset_t *os, uint64_t object,
    uint64_t *l1sz, uint64_t *l2sz)
{
	dnode_t *dn;
	dmu_object_info_t doi;
	int err = 0;

	*l1sz = *l2sz = 0;

	if (dnode_hold(os, object, FTAG, &dn) != 0)
		return (0);

	if (dn->dn_nlevels < 2) {
		dnode_rele(dn, FTAG);
		return (0);
	}

	dmu_object_info_from_dnode(dn, &doi);

	for (uint64_t off = 0; off < doi.doi_max_offset;
	    off += dmu_prefetch_max) {
		/* dbuf_read doesn't prefetch L1 blocks. */
		dmu_prefetch_by_dnode(dn, 1, off,
		    dmu_prefetch_max, ZIO_PRIORITY_SYNC_READ);
	}

	/*
	 * Hold all valid L1 blocks, asking ARC the status of each BP
	 * contained in each such L1 block.
	 */
	uint_t nbps = bp_span_in_blocks(dn->dn_indblkshift, 1);
	uint64_t l1blks = 1 + (dn->dn_maxblkid / nbps);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	for (uint64_t blk = 0; blk < l1blks; blk++) {
		dmu_buf_impl_t *db = NULL;

		if (issig()) {
			/*
			 * On interrupt, get out, and bubble up EINTR
			 */
			err = EINTR;
			break;
		}

		/*
		 * If we get an i/o error here, the L1 can't be read,
		 * and nothing under it could be cached, so we just
		 * continue. Ignoring the error from dbuf_hold_impl
		 * or from dbuf_read is then a reasonable choice.
		 */
		err = dbuf_hold_impl(dn, 1, blk, B_TRUE, B_FALSE, FTAG, &db);
		if (err != 0) {
			/*
			 * ignore error and continue
			 */
			err = 0;
			continue;
		}

		err = dbuf_read(db, NULL, DB_RF_CANFAIL);
		if (err == 0) {
			dmu_cached_bps(dmu_objset_spa(os), db->db.db_data,
			    nbps, l1sz, l2sz);
		}
		/*
		 * error may be ignored, and we continue
		 */
		err = 0;
		dbuf_rele(db, FTAG);
	}
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
	return (err);
}

/*
 * Allocate a loaned anonymous arc buffer.
 */
arc_buf_t *
dmu_request_arcbuf(dmu_buf_t *handle, int size)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;

	return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
}

/*
 * Free a loaned arc buffer.
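 *
 * Illustrative only (a sketch, not part of the original source): the usual
 * loaned-buffer cycle pairs dmu_request_arcbuf() with either an assignment
 * or a return:
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(bonus_db, blksz);
 *	// ... copy data into abuf->b_data ...
 *	if (err == 0)
 *		err = dmu_assign_arcbuf_by_dbuf(bonus_db, off, abuf, tx);
 *	else
 *		dmu_return_arcbuf(abuf);
 *
 * where bonus_db, blksz, off, err, and tx are assumed to be set up by the
 * caller.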
 */
void
dmu_return_arcbuf(arc_buf_t *buf)
{
	arc_return_buf(buf, FTAG);
	arc_buf_destroy(buf, FTAG);
}

/*
 * A "lightweight" write is faster than a regular write (e.g.
 * dmu_write_by_dnode() or dmu_assign_arcbuf_by_dnode()), because it avoids the
 * CPU cost of creating a dmu_buf_impl_t and arc_buf_[hdr_]_t. However, the
 * data can not be read or overwritten until the transaction's txg has been
 * synced. This makes it appropriate for workloads that are known to be
 * (temporarily) write-only, like "zfs receive".
 *
 * A single block is written, starting at the specified offset in bytes. If
 * the call is successful, it returns 0 and the provided abd has been
 * consumed (the caller should not free it).
 */
int
dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
    const zio_prop_t *zp, zio_flag_t flags, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr =
	    dbuf_dirty_lightweight(dn, dbuf_whichblock(dn, 0, offset), tx);
	if (dr == NULL)
		return (SET_ERROR(EIO));
	dr->dt.dll.dr_abd = abd;
	dr->dt.dll.dr_props = *zp;
	dr->dt.dll.dr_flags = flags;
	return (0);
}

/*
 * When possible directly assign passed loaned arc buffer to a dbuf.
 * If this is not possible copy the contents of passed arc buf via
 * dmu_write().
 */
int
dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	objset_t *os = dn->dn_objset;
	uint64_t object = dn->dn_object;
	uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
	uint64_t blkid;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));

	/*
	 * We can only assign if the offset is aligned and the arc buf is the
	 * same size as the dbuf.
	 */
	if (offset == db->db.db_offset && blksz == db->db.db_size) {
		zfs_racct_write(blksz, 1);
		dbuf_assign_arcbuf(db, buf, tx);
		dbuf_rele(db, FTAG);
	} else {
		/* compressed bufs must always be assignable to their dbuf */
		ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
		ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));

		dbuf_rele(db, FTAG);
		dmu_write(os, object, offset, blksz, buf->b_data, tx);
		dmu_return_arcbuf(buf);
	}

	return (0);
}

int
dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
    dmu_tx_t *tx)
{
	int err;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;

	DB_DNODE_ENTER(db);
	err = dmu_assign_arcbuf_by_dnode(DB_DNODE(db), offset, buf, tx);
	DB_DNODE_EXIT(db);

	return (err);
}

typedef struct {
	dbuf_dirty_record_t *dsa_dr;
	dmu_sync_cb_t *dsa_done;
	zgd_t *dsa_zgd;
	dmu_tx_t *dsa_tx;
} dmu_sync_arg_t;

static void
dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
{
	(void) buf;
	dmu_sync_arg_t *dsa = varg;
	dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
	blkptr_t *bp = zio->io_bp;

	if (zio->io_error == 0) {
		if (BP_IS_HOLE(bp)) {
			/*
			 * A block of zeros may compress to a hole, but the
			 * block size still needs to be known for replay.
			 */
			BP_SET_LSIZE(bp, db->db_size);
		} else if (!BP_IS_EMBEDDED(bp)) {
			ASSERT(BP_GET_LEVEL(bp) == 0);
			BP_SET_FILL(bp, 1);
		}
	}
}

static void
dmu_sync_late_arrival_ready(zio_t *zio)
{
	dmu_sync_ready(zio, NULL, zio->io_private);
}

static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
	(void) buf;
	dmu_sync_arg_t *dsa = varg;
	dbuf_dirty_record_t *dr = dsa->dsa_dr;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	zgd_t *zgd = dsa->dsa_zgd;

	/*
	 * Record the vdev(s) backing this blkptr so they can be flushed after
	 * the writes for the lwb have completed.
	 */
	if (zio->io_error == 0) {
		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
	}

	mutex_enter(&db->db_mtx);
	ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
	if (zio->io_error == 0) {
		dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
		if (dr->dt.dl.dr_nopwrite) {
			blkptr_t *bp = zio->io_bp;
			blkptr_t *bp_orig = &zio->io_bp_orig;
			uint8_t chksum = BP_GET_CHECKSUM(bp_orig);

			ASSERT(BP_EQUAL(bp, bp_orig));
			VERIFY(BP_EQUAL(bp, db->db_blkptr));
			ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
			VERIFY(zio_checksum_table[chksum].ci_flags &
			    ZCHECKSUM_FLAG_NOPWRITE);
		}
		dr->dt.dl.dr_overridden_by = *zio->io_bp;
		dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
		dr->dt.dl.dr_copies = zio->io_prop.zp_copies;

		/*
		 * Old style holes are filled with all zeros, whereas
		 * new-style holes maintain their lsize, type, level,
		 * and birth time (see zio_write_compress). While we
		 * need to reset the BP_SET_LSIZE() call that happened
		 * in dmu_sync_ready for old style holes, we do *not*
		 * want to wipe out the information contained in new
		 * style holes. Thus, only zero out the block pointer if
		 * it's an old style hole.
		 */
		if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
		    BP_GET_LOGICAL_BIRTH(&dr->dt.dl.dr_overridden_by) == 0)
			BP_ZERO(&dr->dt.dl.dr_overridden_by);
	} else {
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	}
	cv_broadcast(&db->db_changed);
	mutex_exit(&db->db_mtx);

	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

	kmem_free(dsa, sizeof (*dsa));
}

static void
dmu_sync_late_arrival_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	dmu_sync_arg_t *dsa = zio->io_private;
	zgd_t *zgd = dsa->dsa_zgd;

	if (zio->io_error == 0) {
		/*
		 * Record the vdev(s) backing this blkptr so they can be
		 * flushed after the writes for the lwb have completed.
		 */
		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);

		if (!BP_IS_HOLE(bp)) {
			blkptr_t *bp_orig __maybe_unused = &zio->io_bp_orig;
			ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
			ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
			ASSERT(BP_GET_LOGICAL_BIRTH(zio->io_bp) == zio->io_txg);
			ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
			zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
		}
	}

	dmu_tx_commit(dsa->dsa_tx);

	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

	abd_free(zio->io_abd);
	kmem_free(dsa, sizeof (*dsa));
}

static int
dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
    zio_prop_t *zp, zbookmark_phys_t *zb)
{
	dmu_sync_arg_t *dsa;
	dmu_tx_t *tx;
	int error;

	error = dbuf_read((dmu_buf_impl_t *)zgd->zgd_db, NULL,
	    DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	if (error != 0)
		return (error);

	tx = dmu_tx_create(os);
	dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
	/*
	 * This transaction does not produce any dirty data or log blocks, so
	 * it should not be throttled. All other cases wait for TXG sync, by
	 * which time the log block we are writing will be obsolete, so we can
	 * skip waiting and just return error here instead.
	 */
	if (dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE) != 0) {
		dmu_tx_abort(tx);
		/* Make zl_get_data do txg_waited_synced() */
		return (SET_ERROR(EIO));
	}

	/*
	 * In order to prevent the zgd's lwb from being free'd prior to
	 * dmu_sync_late_arrival_done() being called, we have to ensure
	 * the lwb's "max txg" takes this tx's txg into account.
	 */
	zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));

	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	dsa->dsa_dr = NULL;
	dsa->dsa_done = done;
	dsa->dsa_zgd = zgd;
	dsa->dsa_tx = tx;

	/*
	 * Since we are currently syncing this txg, it's nontrivial to
	 * determine what BP to nopwrite against, so we disable nopwrite.
	 *
	 * When syncing, the db_blkptr is initially the BP of the previous
	 * txg. We can not nopwrite against it because it will be changed
	 * (this is similar to the non-late-arrival case where the dbuf is
	 * dirty in a future txg).
	 *
	 * Then dbuf_write_ready() sets bp_blkptr to the location we will write.
	 * We can not nopwrite against it because although the BP will not
	 * (typically) be changed, the data has not yet been persisted to this
	 * location.
	 *
	 * Finally, when dbuf_write_done() is called, it is theoretically
	 * possible to always nopwrite, because the data that was written in
	 * this txg is the same data that we are trying to write. However we
	 * would need to check that this dbuf is not dirty in any future
	 * txg's (as we do in the normal dmu_sync() path). For simplicity, we
	 * don't nopwrite in this case.
	 */
	zp->zp_nopwrite = B_FALSE;

	zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
	    abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
	    zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
	    dmu_sync_late_arrival_ready, NULL, dmu_sync_late_arrival_done,
	    dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));

	return (0);
}

/*
 * Intent log support: sync the block associated with db to disk. N.B.
 * and XXX: the caller is responsible for making sure that the data isn't
 * changing while dmu_sync() is writing it.
 *
 * Return values:
 *
 * EEXIST: this txg has already been synced, so there's nothing to do.
 *	The caller should not log the write.
 *
 * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
 *	The caller should not log the write.
 *
 * EALREADY: this block is already in the process of being synced.
 *	The caller should track its progress (somehow).
 *
 * EIO: could not do the I/O.
 *	The caller should do a txg_wait_synced().
 *
 * 0: the I/O has been initiated.
 *	The caller should log this blkptr in the done callback.
 *	It is possible that the I/O will fail, in which case
 *	the error will be reported to the done callback and
 *	propagated to pio from zio_done().
 */
int
dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
	objset_t *os = db->db_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	dbuf_dirty_record_t *dr, *dr_next;
	dmu_sync_arg_t *dsa;
	zbookmark_phys_t zb;
	zio_prop_t zp;

	ASSERT(pio != NULL);
	ASSERT(txg != 0);

	SET_BOOKMARK(&zb, ds->ds_object,
	    db->db.db_object, db->db_level, db->db_blkid);

	DB_DNODE_ENTER(db);
	dmu_write_policy(os, DB_DNODE(db), db->db_level, WP_DMU_SYNC, &zp);
	DB_DNODE_EXIT(db);

	/*
	 * If we're frozen (running ziltest), we always need to generate a bp.
	 */
	if (txg > spa_freeze_txg(os->os_spa))
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));

	/*
	 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
	 * and us. If we determine that this txg is not yet syncing,
	 * but it begins to sync a moment later, that's OK because the
	 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
	 */
	mutex_enter(&db->db_mtx);

	if (txg <= spa_last_synced_txg(os->os_spa)) {
		/*
		 * This txg has already synced. There's nothing to do.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(EEXIST));
	}

	if (txg <= spa_syncing_txg(os->os_spa)) {
		/*
		 * This txg is currently syncing, so we can't mess with
		 * the dirty record anymore; just write a new log block.
		 */
		mutex_exit(&db->db_mtx);
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
	}

	dr = dbuf_find_dirty_eq(db, txg);

	if (dr == NULL) {
		/*
		 * There's no dr for this dbuf, so it must have been freed.
		 * There's no need to log writes to freed blocks, so we're done.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(ENOENT));
	}

	dr_next = list_next(&db->db_dirty_records, dr);
	ASSERT(dr_next == NULL || dr_next->dr_txg < txg);

	if (db->db_blkptr != NULL) {
		/*
		 * We need to fill in zgd_bp with the current blkptr so that
		 * the nopwrite code can check if we're writing the same
		 * data that's already on disk. We can only nopwrite if we
		 * are sure that after making the copy, db_blkptr will not
		 * change until our i/o completes. We ensure this by
		 * holding the db_mtx, and only allowing nopwrite if the
		 * block is not already dirty (see below). This is verified
		 * by dmu_sync_done(), which VERIFYs that the db_blkptr has
		 * not changed.
2065 */ 2066 *zgd->zgd_bp = *db->db_blkptr; 2067 } 2068 2069 /* 2070 * Assume the on-disk data is X, the current syncing data (in 2071 * txg - 1) is Y, and the current in-memory data is Z (currently 2072 * in dmu_sync). 2073 * 2074 * We usually want to perform a nopwrite if X and Z are the 2075 * same. However, if Y is different (i.e. the BP is going to 2076 * change before this write takes effect), then a nopwrite will 2077 * be incorrect - we would override with X, which could have 2078 * been freed when Y was written. 2079 * 2080 * (Note that this is not a concern when we are nop-writing from 2081 * syncing context, because X and Y must be identical, because 2082 * all previous txgs have been synced.) 2083 * 2084 * Therefore, we disable nopwrite if the current BP could change 2085 * before this TXG. There are two ways it could change: by 2086 * being dirty (dr_next is non-NULL), or by being freed 2087 * (dnode_block_freed()). This behavior is verified by 2088 * zio_done(), which VERIFYs that the override BP is identical 2089 * to the on-disk BP. 2090 */ 2091 if (dr_next != NULL) { 2092 zp.zp_nopwrite = B_FALSE; 2093 } else { 2094 DB_DNODE_ENTER(db); 2095 if (dnode_block_freed(DB_DNODE(db), db->db_blkid)) 2096 zp.zp_nopwrite = B_FALSE; 2097 DB_DNODE_EXIT(db); 2098 } 2099 2100 ASSERT(dr->dr_txg == txg); 2101 if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC || 2102 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 2103 /* 2104 * We have already issued a sync write for this buffer, 2105 * or this buffer has already been synced. It could not 2106 * have been dirtied since, or we would have cleared the state. 2107 */ 2108 mutex_exit(&db->db_mtx); 2109 return (SET_ERROR(EALREADY)); 2110 } 2111 2112 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 2113 dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC; 2114 mutex_exit(&db->db_mtx); 2115 2116 dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); 2117 dsa->dsa_dr = dr; 2118 dsa->dsa_done = done; 2119 dsa->dsa_zgd = zgd; 2120 dsa->dsa_tx = NULL; 2121 2122 zio_nowait(arc_write(pio, os->os_spa, txg, zgd->zgd_bp, 2123 dr->dt.dl.dr_data, !DBUF_IS_CACHEABLE(db), dbuf_is_l2cacheable(db), 2124 &zp, dmu_sync_ready, NULL, dmu_sync_done, dsa, 2125 ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb)); 2126 2127 return (0); 2128 } 2129 2130 int 2131 dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx) 2132 { 2133 dnode_t *dn; 2134 int err; 2135 2136 err = dnode_hold(os, object, FTAG, &dn); 2137 if (err) 2138 return (err); 2139 err = dnode_set_nlevels(dn, nlevels, tx); 2140 dnode_rele(dn, FTAG); 2141 return (err); 2142 } 2143 2144 int 2145 dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs, 2146 dmu_tx_t *tx) 2147 { 2148 dnode_t *dn; 2149 int err; 2150 2151 err = dnode_hold(os, object, FTAG, &dn); 2152 if (err) 2153 return (err); 2154 err = dnode_set_blksz(dn, size, ibs, tx); 2155 dnode_rele(dn, FTAG); 2156 return (err); 2157 } 2158 2159 int 2160 dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid, 2161 dmu_tx_t *tx) 2162 { 2163 dnode_t *dn; 2164 int err; 2165 2166 err = dnode_hold(os, object, FTAG, &dn); 2167 if (err) 2168 return (err); 2169 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 2170 dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE); 2171 rw_exit(&dn->dn_struct_rwlock); 2172 dnode_rele(dn, FTAG); 2173 return (0); 2174 } 2175 2176 void 2177 dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum, 2178 dmu_tx_t *tx) 2179 { 2180 dnode_t *dn; 2181 2182 /* 2183 * Send 
streams include each object's checksum function. This 2184 * check ensures that the receiving system can understand the 2185 * checksum function transmitted. 2186 */ 2187 ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS); 2188 2189 VERIFY0(dnode_hold(os, object, FTAG, &dn)); 2190 ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS); 2191 dn->dn_checksum = checksum; 2192 dnode_setdirty(dn, tx); 2193 dnode_rele(dn, FTAG); 2194 } 2195 2196 void 2197 dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress, 2198 dmu_tx_t *tx) 2199 { 2200 dnode_t *dn; 2201 2202 /* 2203 * Send streams include each object's compression function. This 2204 * check ensures that the receiving system can understand the 2205 * compression function transmitted. 2206 */ 2207 ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS); 2208 2209 VERIFY0(dnode_hold(os, object, FTAG, &dn)); 2210 dn->dn_compress = compress; 2211 dnode_setdirty(dn, tx); 2212 dnode_rele(dn, FTAG); 2213 } 2214 2215 /* 2216 * When the "redundant_metadata" property is set to "most", only indirect 2217 * blocks of this level and higher will have an additional ditto block. 2218 */ 2219 static const int zfs_redundant_metadata_most_ditto_level = 2; 2220 2221 void 2222 dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp) 2223 { 2224 dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET; 2225 boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) || 2226 (wp & WP_SPILL)); 2227 enum zio_checksum checksum = os->os_checksum; 2228 enum zio_compress compress = os->os_compress; 2229 uint8_t complevel = os->os_complevel; 2230 enum zio_checksum dedup_checksum = os->os_dedup_checksum; 2231 boolean_t dedup = B_FALSE; 2232 boolean_t nopwrite = B_FALSE; 2233 boolean_t dedup_verify = os->os_dedup_verify; 2234 boolean_t encrypt = B_FALSE; 2235 int copies = os->os_copies; 2236 2237 /* 2238 * We maintain different write policies for each of the following 2239 * types of data: 2240 * 1. metadata 2241 * 2. preallocated blocks (i.e. level-0 blocks of a dump device) 2242 * 3. all other level 0 blocks 2243 */ 2244 if (ismd) { 2245 /* 2246 * XXX -- we should design a compression algorithm 2247 * that specializes in arrays of bps. 2248 */ 2249 compress = zio_compress_select(os->os_spa, 2250 ZIO_COMPRESS_ON, ZIO_COMPRESS_ON); 2251 2252 /* 2253 * Metadata always gets checksummed. If the data 2254 * checksum is multi-bit correctable, and it's not a 2255 * ZBT-style checksum, then it's suitable for metadata 2256 * as well. Otherwise, the metadata checksum defaults 2257 * to fletcher4. 2258 */ 2259 if (!(zio_checksum_table[checksum].ci_flags & 2260 ZCHECKSUM_FLAG_METADATA) || 2261 (zio_checksum_table[checksum].ci_flags & 2262 ZCHECKSUM_FLAG_EMBEDDED)) 2263 checksum = ZIO_CHECKSUM_FLETCHER_4; 2264 2265 switch (os->os_redundant_metadata) { 2266 case ZFS_REDUNDANT_METADATA_ALL: 2267 copies++; 2268 break; 2269 case ZFS_REDUNDANT_METADATA_MOST: 2270 if (level >= zfs_redundant_metadata_most_ditto_level || 2271 DMU_OT_IS_METADATA(type) || (wp & WP_SPILL)) 2272 copies++; 2273 break; 2274 case ZFS_REDUNDANT_METADATA_SOME: 2275 if (DMU_OT_IS_CRITICAL(type)) 2276 copies++; 2277 break; 2278 case ZFS_REDUNDANT_METADATA_NONE: 2279 break; 2280 } 2281 2282 if (dmu_ddt_copies > 0) { 2283 /* 2284 * If this tuneable is set, and this is a write for a 2285 * dedup entry store (zap or log), then we treat it 2286 * something like ZFS_REDUNDANT_METADATA_MOST on a 2287 * regular dataset: this many copies, and one more for 2288 * "higher" indirect blocks. 
This specific exception is 2289 * necessary because dedup objects are stored in the 2290 * MOS, which always has the highest possible copies. 2291 */ 2292 dmu_object_type_t stype = 2293 dn ? dn->dn_storage_type : DMU_OT_NONE; 2294 if (stype == DMU_OT_NONE) 2295 stype = type; 2296 if (stype == DMU_OT_DDT_ZAP) { 2297 copies = dmu_ddt_copies; 2298 if (level >= 2299 zfs_redundant_metadata_most_ditto_level) 2300 copies++; 2301 } 2302 } 2303 } else if (wp & WP_NOFILL) { 2304 ASSERT(level == 0); 2305 2306 /* 2307 * If we're writing preallocated blocks, we aren't actually 2308 * writing them so don't set any policy properties. These 2309 * blocks are currently only used by an external subsystem 2310 * outside of zfs (i.e. dump) and not written by the zio 2311 * pipeline. 2312 */ 2313 compress = ZIO_COMPRESS_OFF; 2314 checksum = ZIO_CHECKSUM_OFF; 2315 } else { 2316 compress = zio_compress_select(os->os_spa, dn->dn_compress, 2317 compress); 2318 complevel = zio_complevel_select(os->os_spa, compress, 2319 complevel, complevel); 2320 2321 checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ? 2322 zio_checksum_select(dn->dn_checksum, checksum) : 2323 dedup_checksum; 2324 2325 /* 2326 * Determine dedup setting. If we are in dmu_sync(), 2327 * we won't actually dedup now because that's all 2328 * done in syncing context; but we do want to use the 2329 * dedup checksum. If the checksum is not strong 2330 * enough to ensure unique signatures, force 2331 * dedup_verify. 2332 */ 2333 if (dedup_checksum != ZIO_CHECKSUM_OFF) { 2334 dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE; 2335 if (!(zio_checksum_table[checksum].ci_flags & 2336 ZCHECKSUM_FLAG_DEDUP)) 2337 dedup_verify = B_TRUE; 2338 } 2339 2340 /* 2341 * Enable nopwrite if we have secure enough checksum 2342 * algorithm (see comment in zio_nop_write) and 2343 * compression is enabled. We don't enable nopwrite if 2344 * dedup is enabled as the two features are mutually 2345 * exclusive. 2346 */ 2347 nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags & 2348 ZCHECKSUM_FLAG_NOPWRITE) && 2349 compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled); 2350 } 2351 2352 /* 2353 * All objects in an encrypted objset are protected from modification 2354 * via a MAC. Encrypted objects store their IV and salt in the last DVA 2355 * in the bp, so we cannot use all copies. Encrypted objects are also 2356 * not subject to nopwrite since writing the same data will still 2357 * result in a new ciphertext. Only encrypted blocks can be dedup'd 2358 * to avoid ambiguity in the dedup code since the DDT does not store 2359 * object types. 2360 */ 2361 if (os->os_encrypted && (wp & WP_NOFILL) == 0) { 2362 encrypt = B_TRUE; 2363 2364 if (DMU_OT_IS_ENCRYPTED(type)) { 2365 copies = MIN(copies, SPA_DVAS_PER_BP - 1); 2366 nopwrite = B_FALSE; 2367 } else { 2368 dedup = B_FALSE; 2369 } 2370 2371 if (level <= 0 && 2372 (type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) { 2373 compress = ZIO_COMPRESS_EMPTY; 2374 } 2375 } 2376 2377 zp->zp_compress = compress; 2378 zp->zp_complevel = complevel; 2379 zp->zp_checksum = checksum; 2380 zp->zp_type = (wp & WP_SPILL) ? 
dn->dn_bonustype : type; 2381 zp->zp_level = level; 2382 zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa)); 2383 zp->zp_dedup = dedup; 2384 zp->zp_dedup_verify = dedup && dedup_verify; 2385 zp->zp_nopwrite = nopwrite; 2386 zp->zp_encrypt = encrypt; 2387 zp->zp_byteorder = ZFS_HOST_BYTEORDER; 2388 memset(zp->zp_salt, 0, ZIO_DATA_SALT_LEN); 2389 memset(zp->zp_iv, 0, ZIO_DATA_IV_LEN); 2390 memset(zp->zp_mac, 0, ZIO_DATA_MAC_LEN); 2391 zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ? 2392 os->os_zpl_special_smallblock : 0; 2393 zp->zp_storage_type = dn ? dn->dn_storage_type : DMU_OT_NONE; 2394 2395 ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT); 2396 } 2397 2398 /* 2399 * Reports the location of data and holes in an object. In order to 2400 * accurately report holes all dirty data must be synced to disk. This 2401 * causes extremely poor performance when seeking for holes in a dirty file. 2402 * As a compromise, only provide hole data when the dnode is clean. When 2403 * a dnode is dirty report the dnode as having no holes by returning EBUSY 2404 * which is always safe to do. 2405 */ 2406 int 2407 dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off) 2408 { 2409 dnode_t *dn; 2410 int restarted = 0, err; 2411 2412 restart: 2413 err = dnode_hold(os, object, FTAG, &dn); 2414 if (err) 2415 return (err); 2416 2417 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2418 2419 if (dnode_is_dirty(dn)) { 2420 /* 2421 * If the zfs_dmu_offset_next_sync module option is enabled 2422 * then hole reporting has been requested. Dirty dnodes 2423 * must be synced to disk to accurately report holes. 2424 * 2425 * Provided a RL_READER rangelock spanning 0-UINT64_MAX is 2426 * held by the caller only a single restart will be required. 2427 * We tolerate callers which do not hold the rangelock by 2428 * returning EBUSY and not reporting holes after one restart. 2429 */ 2430 if (zfs_dmu_offset_next_sync) { 2431 rw_exit(&dn->dn_struct_rwlock); 2432 dnode_rele(dn, FTAG); 2433 2434 if (restarted) 2435 return (SET_ERROR(EBUSY)); 2436 2437 txg_wait_synced(dmu_objset_pool(os), 0); 2438 restarted = 1; 2439 goto restart; 2440 } 2441 2442 err = SET_ERROR(EBUSY); 2443 } else { 2444 err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK | 2445 (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0); 2446 } 2447 2448 rw_exit(&dn->dn_struct_rwlock); 2449 dnode_rele(dn, FTAG); 2450 2451 return (err); 2452 } 2453 2454 int 2455 dmu_read_l0_bps(objset_t *os, uint64_t object, uint64_t offset, uint64_t length, 2456 blkptr_t *bps, size_t *nbpsp) 2457 { 2458 dmu_buf_t **dbp, *dbuf; 2459 dmu_buf_impl_t *db; 2460 blkptr_t *bp; 2461 int error, numbufs; 2462 2463 error = dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG, 2464 &numbufs, &dbp); 2465 if (error != 0) { 2466 if (error == ESRCH) { 2467 error = SET_ERROR(ENXIO); 2468 } 2469 return (error); 2470 } 2471 2472 ASSERT3U(numbufs, <=, *nbpsp); 2473 2474 for (int i = 0; i < numbufs; i++) { 2475 dbuf = dbp[i]; 2476 db = (dmu_buf_impl_t *)dbuf; 2477 2478 mutex_enter(&db->db_mtx); 2479 2480 if (!list_is_empty(&db->db_dirty_records)) { 2481 dbuf_dirty_record_t *dr; 2482 2483 dr = list_head(&db->db_dirty_records); 2484 if (dr->dt.dl.dr_brtwrite) { 2485 /* 2486 * This is very special case where we clone a 2487 * block and in the same transaction group we 2488 * read its BP (most likely to clone the clone). 2489 */ 2490 bp = &dr->dt.dl.dr_overridden_by; 2491 } else { 2492 /* 2493 * The block was modified in the same 2494 * transaction group. 
2495 */ 2496 mutex_exit(&db->db_mtx); 2497 error = SET_ERROR(EAGAIN); 2498 goto out; 2499 } 2500 } else { 2501 bp = db->db_blkptr; 2502 } 2503 2504 mutex_exit(&db->db_mtx); 2505 2506 if (bp == NULL) { 2507 /* 2508 * The file size was increased, but the block was never 2509 * written, otherwise we would either have the block 2510 * pointer or the dirty record and would not get here. 2511 * It is effectively a hole, so report it as such. 2512 */ 2513 BP_ZERO(&bps[i]); 2514 continue; 2515 } 2516 /* 2517 * Make sure we clone only data blocks. 2518 */ 2519 if (BP_IS_METADATA(bp) && !BP_IS_HOLE(bp)) { 2520 error = SET_ERROR(EINVAL); 2521 goto out; 2522 } 2523 2524 /* 2525 * If the block was allocated in a transaction group that is not 2526 * yet synced, we could clone it, but we couldn't write this 2527 * operation into the ZIL, or it may be impossible to replay, since 2528 * the block may appear not yet allocated at that point. 2529 */ 2530 if (BP_GET_BIRTH(bp) > spa_freeze_txg(os->os_spa)) { 2531 error = SET_ERROR(EINVAL); 2532 goto out; 2533 } 2534 if (BP_GET_BIRTH(bp) > spa_last_synced_txg(os->os_spa)) { 2535 error = SET_ERROR(EAGAIN); 2536 goto out; 2537 } 2538 2539 bps[i] = *bp; 2540 } 2541 2542 *nbpsp = numbufs; 2543 out: 2544 dmu_buf_rele_array(dbp, numbufs, FTAG); 2545 2546 return (error); 2547 } 2548 2549 int 2550 dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset, uint64_t length, 2551 dmu_tx_t *tx, const blkptr_t *bps, size_t nbps) 2552 { 2553 spa_t *spa; 2554 dmu_buf_t **dbp, *dbuf; 2555 dmu_buf_impl_t *db; 2556 struct dirty_leaf *dl; 2557 dbuf_dirty_record_t *dr; 2558 const blkptr_t *bp; 2559 int error = 0, i, numbufs; 2560 2561 spa = os->os_spa; 2562 2563 VERIFY0(dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG, 2564 &numbufs, &dbp)); 2565 ASSERT3U(nbps, ==, numbufs); 2566 2567 /* 2568 * Before we start cloning, make sure that the dbuf sizes match the 2569 * new BPs' sizes. If they don't, that's a no-go, as we are not able 2570 * to shrink dbufs. 2571 */ 2572 for (i = 0; i < numbufs; i++) { 2573 dbuf = dbp[i]; 2574 db = (dmu_buf_impl_t *)dbuf; 2575 bp = &bps[i]; 2576 2577 ASSERT0(db->db_level); 2578 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2579 ASSERT(db->db_blkid != DMU_SPILL_BLKID); 2580 2581 if (!BP_IS_HOLE(bp) && BP_GET_LSIZE(bp) != dbuf->db_size) { 2582 error = SET_ERROR(EXDEV); 2583 goto out; 2584 } 2585 } 2586 2587 for (i = 0; i < numbufs; i++) { 2588 dbuf = dbp[i]; 2589 db = (dmu_buf_impl_t *)dbuf; 2590 bp = &bps[i]; 2591 2592 ASSERT0(db->db_level); 2593 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2594 ASSERT(db->db_blkid != DMU_SPILL_BLKID); 2595 ASSERT(BP_IS_HOLE(bp) || dbuf->db_size == BP_GET_LSIZE(bp)); 2596 2597 dmu_buf_will_clone(dbuf, tx); 2598 2599 mutex_enter(&db->db_mtx); 2600 2601 dr = list_head(&db->db_dirty_records); 2602 VERIFY(dr != NULL); 2603 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2604 dl = &dr->dt.dl; 2605 dl->dr_overridden_by = *bp; 2606 if (!BP_IS_HOLE(bp) || BP_GET_LOGICAL_BIRTH(bp) != 0) { 2607 if (!BP_IS_EMBEDDED(bp)) { 2608 BP_SET_BIRTH(&dl->dr_overridden_by, dr->dr_txg, 2609 BP_GET_BIRTH(bp)); 2610 } else { 2611 BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, 2612 dr->dr_txg); 2613 } 2614 } 2615 dl->dr_brtwrite = B_TRUE; 2616 dl->dr_override_state = DR_OVERRIDDEN; 2617 2618 mutex_exit(&db->db_mtx); 2619 2620 /* 2621 * When data is embedded into the BP there is no need to create 2622 * a BRT entry as there is no data block. Just copy the BP as 2623 * it contains the data.
2624 */ 2625 if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) { 2626 brt_pending_add(spa, bp, tx); 2627 } 2628 } 2629 out: 2630 dmu_buf_rele_array(dbp, numbufs, FTAG); 2631 2632 return (error); 2633 } 2634 2635 void 2636 __dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi) 2637 { 2638 dnode_phys_t *dnp = dn->dn_phys; 2639 2640 doi->doi_data_block_size = dn->dn_datablksz; 2641 doi->doi_metadata_block_size = dn->dn_indblkshift ? 2642 1ULL << dn->dn_indblkshift : 0; 2643 doi->doi_type = dn->dn_type; 2644 doi->doi_bonus_type = dn->dn_bonustype; 2645 doi->doi_bonus_size = dn->dn_bonuslen; 2646 doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT; 2647 doi->doi_indirection = dn->dn_nlevels; 2648 doi->doi_checksum = dn->dn_checksum; 2649 doi->doi_compress = dn->dn_compress; 2650 doi->doi_nblkptr = dn->dn_nblkptr; 2651 doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9; 2652 doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz; 2653 doi->doi_fill_count = 0; 2654 for (int i = 0; i < dnp->dn_nblkptr; i++) 2655 doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]); 2656 } 2657 2658 void 2659 dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi) 2660 { 2661 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2662 mutex_enter(&dn->dn_mtx); 2663 2664 __dmu_object_info_from_dnode(dn, doi); 2665 2666 mutex_exit(&dn->dn_mtx); 2667 rw_exit(&dn->dn_struct_rwlock); 2668 } 2669 2670 /* 2671 * Get information on a DMU object. 2672 * If doi is NULL, just indicates whether the object exists. 2673 */ 2674 int 2675 dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi) 2676 { 2677 dnode_t *dn; 2678 int err = dnode_hold(os, object, FTAG, &dn); 2679 2680 if (err) 2681 return (err); 2682 2683 if (doi != NULL) 2684 dmu_object_info_from_dnode(dn, doi); 2685 2686 dnode_rele(dn, FTAG); 2687 return (0); 2688 } 2689 2690 /* 2691 * As above, but faster; can be used when you have a held dbuf in hand. 2692 */ 2693 void 2694 dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi) 2695 { 2696 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2697 2698 DB_DNODE_ENTER(db); 2699 dmu_object_info_from_dnode(DB_DNODE(db), doi); 2700 DB_DNODE_EXIT(db); 2701 } 2702 2703 /* 2704 * Faster still when you only care about the size. 
2705 */ 2706 void 2707 dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize, 2708 u_longlong_t *nblk512) 2709 { 2710 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2711 dnode_t *dn; 2712 2713 DB_DNODE_ENTER(db); 2714 dn = DB_DNODE(db); 2715 2716 *blksize = dn->dn_datablksz; 2717 /* add in number of slots used for the dnode itself */ 2718 *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >> 2719 SPA_MINBLOCKSHIFT) + dn->dn_num_slots; 2720 DB_DNODE_EXIT(db); 2721 } 2722 2723 void 2724 dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize) 2725 { 2726 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2727 2728 DB_DNODE_ENTER(db); 2729 *dnsize = DB_DNODE(db)->dn_num_slots << DNODE_SHIFT; 2730 DB_DNODE_EXIT(db); 2731 } 2732 2733 void 2734 byteswap_uint64_array(void *vbuf, size_t size) 2735 { 2736 uint64_t *buf = vbuf; 2737 size_t count = size >> 3; 2738 int i; 2739 2740 ASSERT((size & 7) == 0); 2741 2742 for (i = 0; i < count; i++) 2743 buf[i] = BSWAP_64(buf[i]); 2744 } 2745 2746 void 2747 byteswap_uint32_array(void *vbuf, size_t size) 2748 { 2749 uint32_t *buf = vbuf; 2750 size_t count = size >> 2; 2751 int i; 2752 2753 ASSERT((size & 3) == 0); 2754 2755 for (i = 0; i < count; i++) 2756 buf[i] = BSWAP_32(buf[i]); 2757 } 2758 2759 void 2760 byteswap_uint16_array(void *vbuf, size_t size) 2761 { 2762 uint16_t *buf = vbuf; 2763 size_t count = size >> 1; 2764 int i; 2765 2766 ASSERT((size & 1) == 0); 2767 2768 for (i = 0; i < count; i++) 2769 buf[i] = BSWAP_16(buf[i]); 2770 } 2771 2772 void 2773 byteswap_uint8_array(void *vbuf, size_t size) 2774 { 2775 (void) vbuf, (void) size; 2776 } 2777 2778 void 2779 dmu_init(void) 2780 { 2781 abd_init(); 2782 zfs_dbgmsg_init(); 2783 sa_cache_init(); 2784 dmu_objset_init(); 2785 dnode_init(); 2786 zfetch_init(); 2787 dmu_tx_init(); 2788 l2arc_init(); 2789 arc_init(); 2790 dbuf_init(); 2791 } 2792 2793 void 2794 dmu_fini(void) 2795 { 2796 arc_fini(); /* arc depends on l2arc, so arc must go first */ 2797 l2arc_fini(); 2798 dmu_tx_fini(); 2799 zfetch_fini(); 2800 dbuf_fini(); 2801 dnode_fini(); 2802 dmu_objset_fini(); 2803 sa_cache_fini(); 2804 zfs_dbgmsg_fini(); 2805 abd_fini(); 2806 } 2807 2808 EXPORT_SYMBOL(dmu_bonus_hold); 2809 EXPORT_SYMBOL(dmu_bonus_hold_by_dnode); 2810 EXPORT_SYMBOL(dmu_buf_hold_array_by_bonus); 2811 EXPORT_SYMBOL(dmu_buf_rele_array); 2812 EXPORT_SYMBOL(dmu_prefetch); 2813 EXPORT_SYMBOL(dmu_prefetch_by_dnode); 2814 EXPORT_SYMBOL(dmu_prefetch_dnode); 2815 EXPORT_SYMBOL(dmu_free_range); 2816 EXPORT_SYMBOL(dmu_free_long_range); 2817 EXPORT_SYMBOL(dmu_free_long_object); 2818 EXPORT_SYMBOL(dmu_read); 2819 EXPORT_SYMBOL(dmu_read_by_dnode); 2820 EXPORT_SYMBOL(dmu_write); 2821 EXPORT_SYMBOL(dmu_write_by_dnode); 2822 EXPORT_SYMBOL(dmu_prealloc); 2823 EXPORT_SYMBOL(dmu_object_info); 2824 EXPORT_SYMBOL(dmu_object_info_from_dnode); 2825 EXPORT_SYMBOL(dmu_object_info_from_db); 2826 EXPORT_SYMBOL(dmu_object_size_from_db); 2827 EXPORT_SYMBOL(dmu_object_dnsize_from_db); 2828 EXPORT_SYMBOL(dmu_object_set_nlevels); 2829 EXPORT_SYMBOL(dmu_object_set_blocksize); 2830 EXPORT_SYMBOL(dmu_object_set_maxblkid); 2831 EXPORT_SYMBOL(dmu_object_set_checksum); 2832 EXPORT_SYMBOL(dmu_object_set_compress); 2833 EXPORT_SYMBOL(dmu_offset_next); 2834 EXPORT_SYMBOL(dmu_write_policy); 2835 EXPORT_SYMBOL(dmu_sync); 2836 EXPORT_SYMBOL(dmu_request_arcbuf); 2837 EXPORT_SYMBOL(dmu_return_arcbuf); 2838 EXPORT_SYMBOL(dmu_assign_arcbuf_by_dnode); 2839 EXPORT_SYMBOL(dmu_assign_arcbuf_by_dbuf); 2840 EXPORT_SYMBOL(dmu_buf_hold); 2841 EXPORT_SYMBOL(dmu_ot); 2842 
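/*
 * Illustrative sketch (not compiled): dmu_sync(), exported above, documents
 * a small return-value contract (see the comment above dmu_sync()). The
 * hypothetical helper below shows how a ZIL get_data-style caller might map
 * those codes to actions; real callers such as zfs_get_data() also manage
 * range locks and zgd buffer lifetimes, which are omitted here.
 */
#if 0
static int
example_dmu_sync_usage(zio_t *lwb_zio, uint64_t txg, dmu_sync_cb_t *done,
    zgd_t *zgd)
{
	int error = dmu_sync(lwb_zio, txg, done, zgd);

	switch (error) {
	case 0:
		/* I/O issued; the done callback will log the blkptr. */
		return (0);
	case EEXIST:
	case ENOENT:
		/* Already synced or freed; nothing to log. */
		return (0);
	case EIO:
		/* Caller is expected to txg_wait_synced() and retry. */
		return (SET_ERROR(EIO));
	default:
		/* EALREADY or an unexpected error; let the ZIL decide. */
		return (error);
	}
}
#endif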
2843 ZFS_MODULE_PARAM(zfs, zfs_, nopwrite_enabled, INT, ZMOD_RW, 2844 "Enable NOP writes"); 2845 2846 ZFS_MODULE_PARAM(zfs, zfs_, per_txg_dirty_frees_percent, UINT, ZMOD_RW, 2847 "Percentage of dirtied blocks from frees in one TXG"); 2848 2849 ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW, 2850 "Enable forcing txg sync to find holes"); 2851 2852 /* CSTYLED */ 2853 ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, UINT, ZMOD_RW, 2854 "Limit one prefetch call to this size"); 2855 2856 /* CSTYLED */ 2857 ZFS_MODULE_PARAM(zfs, , dmu_ddt_copies, UINT, ZMOD_RW, 2858 "Override copies= for dedup objects"); 2859
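/*
 * Illustrative sketch (not compiled): one way a hole-reporting caller might
 * use dmu_offset_next(), following the behaviour documented above it. EBUSY
 * means the dnode was dirty and holes could not be reported, which is always
 * safe to treat as "no hole before EOF". The helper name and its fallback
 * policy are hypothetical; lseek(SEEK_HOLE) callers typically fall back to
 * the end of the file in that case.
 */
#if 0
static int
example_find_next_hole(objset_t *os, uint64_t object, uint64_t *offp,
    uint64_t file_size)
{
	int error = dmu_offset_next(os, object, B_TRUE /* hole */, offp);

	if (error == EBUSY) {
		/* Dirty dnode: holes cannot be reported; use EOF instead. */
		*offp = file_size;
		error = 0;
	}
	return (error);
}
#endif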