/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
 */
/* Copyright (c) 2013 by Saso Kiselkov. All rights reserved. */
/* Copyright (c) 2013, Joyent, Inc. All rights reserved. */
/* Copyright 2016 Nexenta Systems, Inc. All rights reserved. */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif

/*
 * Enable/disable nopwrite feature.
 */
int zfs_nopwrite_enabled = 1;

const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{ DMU_BSWAP_UINT8, TRUE, "unallocated" },
	{ DMU_BSWAP_ZAP, TRUE, "object directory" },
	{ DMU_BSWAP_UINT64, TRUE, "object array" },
	{ DMU_BSWAP_UINT8, TRUE, "packed nvlist" },
	{ DMU_BSWAP_UINT64, TRUE, "packed nvlist size" },
	{ DMU_BSWAP_UINT64, TRUE, "bpobj" },
	{ DMU_BSWAP_UINT64, TRUE, "bpobj header" },
	{ DMU_BSWAP_UINT64, TRUE, "SPA space map header" },
	{ DMU_BSWAP_UINT64, TRUE, "SPA space map" },
	{ DMU_BSWAP_UINT64, TRUE, "ZIL intent log" },
	{ DMU_BSWAP_DNODE, TRUE, "DMU dnode" },
	{ DMU_BSWAP_OBJSET, TRUE, "DMU objset" },
	{ DMU_BSWAP_UINT64, TRUE, "DSL directory" },
	{ DMU_BSWAP_ZAP, TRUE, "DSL directory child map"},
	{ DMU_BSWAP_ZAP, TRUE, "DSL dataset snap map" },
	{ DMU_BSWAP_ZAP, TRUE, "DSL props" },
	{ DMU_BSWAP_UINT64, TRUE, "DSL dataset" },
	{ DMU_BSWAP_ZNODE, TRUE, "ZFS znode" },
	{ DMU_BSWAP_OLDACL, TRUE, "ZFS V0 ACL" },
	{ DMU_BSWAP_UINT8, FALSE, "ZFS plain file" },
	{ DMU_BSWAP_ZAP, TRUE, "ZFS directory" },
	{ DMU_BSWAP_ZAP, TRUE, "ZFS master node" },
	{ DMU_BSWAP_ZAP, TRUE, "ZFS delete queue" },
	{ DMU_BSWAP_UINT8, FALSE, "zvol object" },
	{ DMU_BSWAP_ZAP, TRUE, "zvol prop" },
	{ DMU_BSWAP_UINT8, FALSE, "other uint8[]" },
	{ DMU_BSWAP_UINT64, FALSE, "other uint64[]" },
	{ DMU_BSWAP_ZAP, TRUE, "other ZAP" },
	{ DMU_BSWAP_ZAP, TRUE, "persistent error log" },
	{ DMU_BSWAP_UINT8, TRUE, "SPA history" },
	{ DMU_BSWAP_UINT64, TRUE, "SPA history offsets" },
	{ DMU_BSWAP_ZAP, TRUE, "Pool properties" },
	{ DMU_BSWAP_ZAP, TRUE, "DSL permissions" },
	{ DMU_BSWAP_ACL, TRUE, "ZFS ACL" },
	{ DMU_BSWAP_UINT8, TRUE, "ZFS SYSACL" },
	{ DMU_BSWAP_UINT8, TRUE, "FUID table" },
	{ DMU_BSWAP_UINT64, TRUE, "FUID table size" },
	{ DMU_BSWAP_ZAP, TRUE, "DSL dataset next clones"},
	{ DMU_BSWAP_ZAP, TRUE, "scan work queue" },
	{ DMU_BSWAP_ZAP, TRUE, "ZFS user/group used" },
	{ DMU_BSWAP_ZAP, TRUE, "ZFS user/group quota" },
	{ DMU_BSWAP_ZAP, TRUE, "snapshot refcount tags"},
	{ DMU_BSWAP_ZAP, TRUE, "DDT ZAP algorithm" },
	{ DMU_BSWAP_ZAP, TRUE, "DDT statistics" },
	{ DMU_BSWAP_UINT8, TRUE, "System attributes" },
	{ DMU_BSWAP_ZAP, TRUE, "SA master node" },
	{ DMU_BSWAP_ZAP, TRUE, "SA attr registration" },
	{ DMU_BSWAP_ZAP, TRUE, "SA attr layouts" },
	{ DMU_BSWAP_ZAP, TRUE, "scan translations" },
	{ DMU_BSWAP_UINT8, FALSE, "deduplicated block" },
	{ DMU_BSWAP_ZAP, TRUE, "DSL deadlist map" },
	{ DMU_BSWAP_UINT64, TRUE, "DSL deadlist map hdr" },
	{ DMU_BSWAP_ZAP, TRUE, "DSL dir clones" },
	{ DMU_BSWAP_UINT64, TRUE, "bpobj subobj" }
};

const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
	{ byteswap_uint8_array, "uint8" },
	{ byteswap_uint16_array, "uint16" },
	{ byteswap_uint32_array, "uint32" },
	{ byteswap_uint64_array, "uint64" },
	{ zap_byteswap, "zap" },
	{ dnode_buf_byteswap, "dnode" },
	{ dmu_objset_byteswap, "objset" },
	{ zfs_znode_byteswap, "znode" },
	{ zfs_oldacl_byteswap, "oldacl" },
	{ zfs_acl_byteswap, "acl" }
};

int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	uint64_t blkid;
	dmu_buf_impl_t *db;

	blkid = dbuf_whichblock(dn, 0, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (0);
}

int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	blkid = dbuf_whichblock(dn, 0, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (err);
}

int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;

	err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}

int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;

	err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}

int
dmu_bonus_max(void)
{
	return (DN_MAX_BONUSLEN);
}

int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else if (newsize < 0 || newsize > db_fake->db_size) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonuslen(dn, newsize, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (!DMU_OT_IS_VALID(type)) {
		error = SET_ERROR(EINVAL);
	} else if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonus_type(dn, type, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	dmu_object_type_t type;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	type = dn->dn_bonustype;
	DB_DNODE_EXIT(db);

	return (type);
}

int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	dbuf_rm_spill(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_rm_spill(dn, tx);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}
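
/*
 * Usage sketch (a minimal example, not taken from this file; os, object
 * and offset are assumed to be valid): consumers that want to look at a
 * single block of an object typically pair dmu_buf_hold() with
 * dmu_buf_rele():
 *
 *	dmu_buf_t *db;
 *	int err = dmu_buf_hold(os, object, offset, FTAG, &db, 0);
 *	if (err == 0) {
 *		... inspect db->db_data for db->db_size bytes ...
 *		dmu_buf_rele(db, FTAG);
 *	}
 */
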
/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	dmu_buf_impl_t *db;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		rw_exit(&dn->dn_struct_rwlock);
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;

	/* as long as the bonus buf is held, the dnode will be held */
	if (refcount_add(&db->db_holds, tag) == 1) {
		VERIFY(dnode_add_ref(dn, db));
		atomic_inc_32(&dn->dn_dbufs_count);
	}

	/*
	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
	 * a dnode hold for every dbuf.
	 */
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);

	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH));

	*dbp = &db->db;
	return (0);
}

/*
 * returns ENOENT, EIO, or 0.
 *
 * This interface will allocate a blank spill dbuf when a spill blk
 * doesn't already exist on the dnode.
 *
 * if you only want to find an already existing spill db, then
 * dmu_spill_hold_existing() should be used.
 */
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = NULL;
	int err;

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_exit(&dn->dn_struct_rwlock);

	ASSERT(db != NULL);
	err = dbuf_read(db, NULL, flags);
	if (err == 0)
		*dbp = &db->db;
	else
		dbuf_rele(db, tag);
	return (err);
}

int
dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
		err = SET_ERROR(EINVAL);
	} else {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

		if (!dn->dn_have_spill) {
			err = SET_ERROR(ENOENT);
		} else {
			err = dmu_spill_hold_by_dnode(dn,
			    DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
		}

		rw_exit(&dn->dn_struct_rwlock);
	}

	DB_DNODE_EXIT(db);
	return (err);
}

int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
	dmu_buf_t **dbp;
	uint64_t blkid, nblks, i;
	uint32_t dbuf_flags;
	int err;
	zio_t *zio;

	ASSERT(length <= DMU_MAX_ACCESS);

	/*
	 * Note: We directly notify the prefetch code of this read, so that
	 * we can tell it about the multi-block read.  dbuf_read() only knows
	 * about the one block it is accessing.
	 */
	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
	    DB_RF_NOPREFETCH;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
		    P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
	} else {
		if (offset + length > dn->dn_datablksz) {
			zfs_panic_recover("zfs: accessing past end of object "
			    "%llx/%llx (size=%u access=%llu+%llu)",
			    (longlong_t)dn->dn_objset->
			    os_dsl_dataset->ds_object,
			    (longlong_t)dn->dn_object, dn->dn_datablksz,
			    (longlong_t)offset, (longlong_t)length);
			rw_exit(&dn->dn_struct_rwlock);
			return (SET_ERROR(EIO));
		}
		nblks = 1;
	}
	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

	zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
	blkid = dbuf_whichblock(dn, 0, offset);
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
		if (db == NULL) {
			rw_exit(&dn->dn_struct_rwlock);
			dmu_buf_rele_array(dbp, nblks, tag);
			zio_nowait(zio);
			return (SET_ERROR(EIO));
		}

		/* initiate async i/o */
		if (read)
			(void) dbuf_read(db, zio, dbuf_flags);
		dbp[i] = &db->db;
	}

	if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
	    DNODE_META_IS_CACHEABLE(dn) && length <= zfetch_array_rd_sz) {
		dmu_zfetch(&dn->dn_zfetch, blkid, nblks,
		    read && DNODE_IS_CACHEABLE(dn));
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* wait for async i/o */
	err = zio_wait(zio);
	if (err) {
		dmu_buf_rele_array(dbp, nblks, tag);
		return (err);
	}

	/* wait for other io to complete */
	if (read) {
		for (i = 0; i < nblks; i++) {
			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
			mutex_enter(&db->db_mtx);
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL)
				cv_wait(&db->db_changed, &db->db_mtx);
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
			mutex_exit(&db->db_mtx);
			if (err) {
				dmu_buf_rele_array(dbp, nblks, tag);
				return (err);
			}
		}
	}

	*numbufsp = nblks;
	*dbpp = dbp;
	return (0);
}

static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
    uint64_t length, boolean_t read, void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);
	DB_DNODE_EXIT(db);

	return (err);
}

void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
	int i;
	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

	if (numbufs == 0)
		return;

	for (i = 0; i < numbufs; i++) {
		if (dbp[i])
			dbuf_rele(dbp[i], tag);
	}

	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}

/*
 * Issue prefetch i/os for the given blocks.  If level is greater than 0, the
 * indirect blocks prefetched will be those that point to the blocks containing
 * the data starting at offset, and continuing to offset + len.
 *
 * Note that if the indirect blocks above the blocks being prefetched are not
 * in cache, they will be asynchronously read in.
 */
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, zio_priority_t pri)
{
	dnode_t *dn;
	uint64_t blkid;
	int nblks, err;

	if (len == 0) {	/* they're interested in the bonus buffer */
		dn = DMU_META_DNODE(os);

		if (object == 0 || object >= DN_MAX_OBJECT)
			return;

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		blkid = dbuf_whichblock(dn, level,
		    object * sizeof (dnode_phys_t));
		dbuf_prefetch(dn, level, blkid, pri, 0);
		rw_exit(&dn->dn_struct_rwlock);
		return;
	}

	/*
	 * XXX - Note, if the dnode for the requested object is not
	 * already cached, we will do a *synchronous* read in the
	 * dnode_hold() call.  The same is true for any indirects.
	 */
	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	/*
	 * offset + len - 1 is the last byte we want to prefetch for, and
	 * offset is the first.  Then dbuf_whichblock(dn, level, offset +
	 * len - 1) is the last block we want to prefetch, and
	 * dbuf_whichblock(dn, level, offset) is the first.  Then the number
	 * we need to prefetch is last - first + 1.
	 */
	if (level > 0 || dn->dn_datablkshift != 0) {
		nblks = dbuf_whichblock(dn, level, offset + len - 1) -
		    dbuf_whichblock(dn, level, offset) + 1;
	} else {
		nblks = (offset < dn->dn_datablksz);
	}

	if (nblks != 0) {
		blkid = dbuf_whichblock(dn, level, offset);
		for (int i = 0; i < nblks; i++)
			dbuf_prefetch(dn, level, blkid + i, pri, 0);
	}

	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
}

/*
 * Get the next "chunk" of file data to free.  We traverse the file from
 * the end so that the file gets shorter over time (if we crash in the
 * middle, this will leave us in a better state).  We find allocated file
 * data by simply searching the allocated level 1 indirects.
 *
 * On input, *start should be the first offset that does not need to be
 * freed (e.g. "offset + length").  On return, *start will be the first
 * offset that should be freed.
 */
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
{
	uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
	/* bytes of data covered by a level-1 indirect block */
	uint64_t iblkrange =
	    dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);

	ASSERT3U(minimum, <=, *start);

	if (*start - minimum <= iblkrange * maxblks) {
		*start = minimum;
		return (0);
	}
	ASSERT(ISP2(iblkrange));

	for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) {
		int err;

		/*
		 * dnode_next_offset(BACKWARDS) will find an allocated L1
		 * indirect block at or before the input offset.  We must
		 * decrement *start so that it is at the end of the region
		 * to search.
		 */
		(*start)--;
		err = dnode_next_offset(dn,
		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);

		/* if there are no indirect blocks before start, we are done */
		if (err == ESRCH) {
			*start = minimum;
			break;
		} else if (err != 0) {
			return (err);
		}

		/* set start to the beginning of this L1 indirect */
		*start = P2ALIGN(*start, iblkrange);
	}
	if (*start < minimum)
		*start = minimum;
	return (0);
}

/*
 * If this objset is of type DMU_OST_ZFS, return true if the vfs's unmounted
 * flag is set; otherwise return false.
 * Used below in dmu_free_long_range_impl() to enable abort when unmounting.
 */
/*ARGSUSED*/
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS)
		return (zfs_get_vfs_flag_unmounted(os));
#endif
	return (B_FALSE);
}

static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
    uint64_t length)
{
	uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	int err;

	if (offset >= object_size)
		return (0);

	if (length == DMU_OBJECT_END || offset + length > object_size)
		length = object_size - offset;

	while (length != 0) {
		uint64_t chunk_end, chunk_begin;

		if (dmu_objset_zfs_unmounting(dn->dn_objset))
			return (SET_ERROR(EINTR));

		chunk_end = chunk_begin = offset + length;

		/* move chunk_begin backwards to the beginning of this chunk */
		err = get_next_chunk(dn, &chunk_begin, offset);
		if (err)
			return (err);
		ASSERT3U(chunk_begin, >=, offset);
		ASSERT3U(chunk_begin, <=, chunk_end);

		dmu_tx_t *tx = dmu_tx_create(os);
		dmu_tx_hold_free(tx, dn->dn_object,
		    chunk_begin, chunk_end - chunk_begin);

		/*
		 * Mark this transaction as typically resulting in a net
		 * reduction in space used.
		 */
		dmu_tx_mark_netfree(tx);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}
		dnode_free_range(dn, chunk_begin, chunk_end - chunk_begin, tx);
		dmu_tx_commit(tx);

		length -= chunk_end - chunk_begin;
	}
	return (0);
}

int
dmu_free_long_range(objset_t *os, uint64_t object,
    uint64_t offset, uint64_t length)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);
	err = dmu_free_long_range_impl(os, dn, offset, length);

	/*
	 * It is important to zero out the maxblkid when freeing the entire
	 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
	 * will take the fast path, and (b) dnode_reallocate() can verify
	 * that the entire file has been freed.
	 */
	if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
		dn->dn_maxblkid = 0;

	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_free_long_object(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx;
	int err;

	err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, object);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
	dmu_tx_mark_netfree(tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err == 0) {
		err = dmu_object_free(os, object, tx);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}

	return (err);
}

int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	ASSERT(offset < UINT64_MAX);
	ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
	dnode_free_range(dn, offset, size, tx);
	dnode_rele(dn, FTAG);
	return (0);
}

int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dnode_t *dn;
	dmu_buf_t **dbp;
	int numbufs, err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	/*
	 * Deal with odd block sizes, where there can't be data past the first
	 * block.  If we ever do the tail block optimization, we will need to
	 * handle that here as well.
	 */
	if (dn->dn_maxblkid == 0) {
		int newsz = offset > dn->dn_datablksz ? 0 :
		    MIN(size, dn->dn_datablksz - offset);
		bzero((char *)buf + newsz, size - newsz);
		size = newsz;
	}

	while (size > 0) {
		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
		int i;

		/*
		 * NB: we could do this block-at-a-time, but it's nice
		 * to be reading in parallel.
		 */
		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
		    TRUE, FTAG, &numbufs, &dbp, flags);
		if (err)
			break;

		for (i = 0; i < numbufs; i++) {
			int tocpy;
			int bufoff;
			dmu_buf_t *db = dbp[i];

			ASSERT(size > 0);

			bufoff = offset - db->db_offset;
			tocpy = (int)MIN(db->db_size - bufoff, size);

			bcopy((char *)db->db_data + bufoff, buf, tocpy);

			offset += tocpy;
			size -= tocpy;
			buf = (char *)buf + tocpy;
		}
		dmu_buf_rele_array(dbp, numbufs, FTAG);
	}
	dnode_rele(dn, FTAG);
	return (err);
}

void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;

	if (size == 0)
		return;

	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		bcopy(buf, (char *)db->db_data + bufoff, tocpy);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
		buf = (char *)buf + tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;

	if (size == 0)
		return;

	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		dmu_buf_t *db = dbp[i];

		dmu_buf_will_not_fill(db, tx);
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
    void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
    int compressed_size, int byteorder, dmu_tx_t *tx)
{
	dmu_buf_t *db;

	ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
	ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
	VERIFY0(dmu_buf_hold_noread(os, object, offset,
	    FTAG, &db));

	dmu_buf_write_embedded(db,
	    data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
	    uncompressed_size, compressed_size, byteorder, tx);

	dmu_buf_rele(db, FTAG);
}

/*
 * DMU support for xuio
 */
kstat_t *xuio_ksp = NULL;

int
dmu_xuio_init(xuio_t *xuio, int nblk)
{
	dmu_xuio_t *priv;
	uio_t *uio = &xuio->xu_uio;

	uio->uio_iovcnt = nblk;
	uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP);

	priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP);
	priv->cnt = nblk;
	priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP);
	priv->iovp = uio->uio_iov;
	XUIO_XUZC_PRIV(xuio) = priv;

	if (XUIO_XUZC_RW(xuio) == UIO_READ)
		XUIOSTAT_INCR(xuiostat_onloan_rbuf, nblk);
	else
		XUIOSTAT_INCR(xuiostat_onloan_wbuf, nblk);

	return (0);
}

void
dmu_xuio_fini(xuio_t *xuio)
{
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
	int nblk = priv->cnt;

	kmem_free(priv->iovp, nblk * sizeof (iovec_t));
	kmem_free(priv->bufs, nblk * sizeof (arc_buf_t *));
	kmem_free(priv, sizeof (dmu_xuio_t));

	if (XUIO_XUZC_RW(xuio) == UIO_READ)
		XUIOSTAT_INCR(xuiostat_onloan_rbuf, -nblk);
	else
		XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk);
}

/*
 * Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf }
 * and increase priv->next by 1.
 */
int
dmu_xuio_add(xuio_t *xuio, arc_buf_t *abuf, offset_t off, size_t n)
{
	struct iovec *iov;
	uio_t *uio = &xuio->xu_uio;
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
	int i = priv->next++;

	ASSERT(i < priv->cnt);
	ASSERT(off + n <= arc_buf_size(abuf));
	iov = uio->uio_iov + i;
	iov->iov_base = (char *)abuf->b_data + off;
	iov->iov_len = n;
	priv->bufs[i] = abuf;
	return (0);
}

int
dmu_xuio_cnt(xuio_t *xuio)
{
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
	return (priv->cnt);
}

arc_buf_t *
dmu_xuio_arcbuf(xuio_t *xuio, int i)
{
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);

	ASSERT(i < priv->cnt);
	return (priv->bufs[i]);
}

void
dmu_xuio_clear(xuio_t *xuio, int i)
{
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);

	ASSERT(i < priv->cnt);
	priv->bufs[i] = NULL;
}

static void
xuio_stat_init(void)
{
	xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (xuio_ksp != NULL) {
		xuio_ksp->ks_data = &xuio_stats;
		kstat_install(xuio_ksp);
	}
}

static void
xuio_stat_fini(void)
{
	if (xuio_ksp != NULL) {
		kstat_delete(xuio_ksp);
		xuio_ksp = NULL;
	}
}

void
xuio_stat_wbuf_copied()
{
	XUIOSTAT_BUMP(xuiostat_wbuf_copied);
}

void
xuio_stat_wbuf_nocopy()
{
	XUIOSTAT_BUMP(xuiostat_wbuf_nocopy);
}

#ifdef _KERNEL
static int
dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
{
	dmu_buf_t **dbp;
	int numbufs, i, err;
	xuio_t *xuio = NULL;

	/*
	 * NB: we could do this block-at-a-time, but it's nice
	 * to be reading in parallel.
	 */
	err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
	    TRUE, FTAG, &numbufs, &dbp, 0);
	if (err)
		return (err);

	if (uio->uio_extflg == UIO_XUIO)
		xuio = (xuio_t *)uio;

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = uio->uio_loffset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		if (xuio) {
			dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
			arc_buf_t *dbuf_abuf = dbi->db_buf;
			arc_buf_t *abuf = dbuf_loan_arcbuf(dbi);
			err = dmu_xuio_add(xuio, abuf, bufoff, tocpy);
			if (!err) {
				uio->uio_resid -= tocpy;
				uio->uio_loffset += tocpy;
			}

			if (abuf == dbuf_abuf)
				XUIOSTAT_BUMP(xuiostat_rbuf_nocopy);
			else
				XUIOSTAT_BUMP(xuiostat_rbuf_copied);
		} else {
			err = uiomove((char *)db->db_data + bufoff, tocpy,
			    UIO_READ, uio);
		}
		if (err)
			break;

		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);

	return (err);
}

/*
 * Read 'size' bytes into the uio buffer.
 * From object zdb->db_object.
 * Starting at offset uio->uio_loffset.
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_read_uio_dnode(dn, uio, size);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Read 'size' bytes into the uio buffer.
 * From the specified object
 * Starting at offset uio->uio_loffset.
 */
int
dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
{
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_read_uio_dnode(dn, uio, size);

	dnode_rele(dn, FTAG);

	return (err);
}

static int
dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;
	int err = 0;
	int i;

	err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = uio->uio_loffset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		/*
		 * XXX uiomove could block forever (eg. nfs-backed
		 * pages).  There needs to be a uiolockdown() function
		 * to lock the pages in memory, so that uiomove won't
		 * block.
		 */
		err = uiomove((char *)db->db_data + bufoff, tocpy,
		    UIO_WRITE, uio);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		if (err)
			break;

		size -= tocpy;
	}

	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}

/*
 * Write 'size' bytes from the uio buffer.
 * To object zdb->db_object.
 * Starting at offset uio->uio_loffset.
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_write_uio_dnode(dn, uio, size, tx);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Write 'size' bytes from the uio buffer.
 * To the specified object.
 * Starting at offset uio->uio_loffset.
 */
int
dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_write_uio_dnode(dn, uio, size, tx);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    page_t *pp, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;
	int err;

	if (size == 0)
		return (0);

	err = dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		int tocpy, copied, thiscpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];
		caddr_t va;

		ASSERT(size > 0);
		ASSERT3U(db->db_size, >=, PAGESIZE);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		for (copied = 0; copied < tocpy; copied += PAGESIZE) {
			ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
			thiscpy = MIN(PAGESIZE, tocpy - copied);
			va = zfs_map_page(pp, S_READ);
			bcopy(va, (char *)db->db_data + bufoff, thiscpy);
			zfs_unmap_page(pp, va);
			pp = pp->p_next;
			bufoff += PAGESIZE;
		}

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}
#endif

/*
 * Allocate a loaned anonymous arc buffer.
 */
arc_buf_t *
dmu_request_arcbuf(dmu_buf_t *handle, int size)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;

	return (arc_loan_buf(db->db_objset->os_spa, size));
}

/*
 * Free a loaned arc buffer.
 */
void
dmu_return_arcbuf(arc_buf_t *buf)
{
	arc_return_buf(buf, FTAG);
	arc_buf_destroy(buf, FTAG);
}

/*
 * When possible, directly assign the passed loaned arc buffer to a dbuf.
 * If this is not possible, copy the contents of the passed arc buf via
 * dmu_write().
 */
void
dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
	dnode_t *dn;
	dmu_buf_impl_t *db;
	uint32_t blksz = (uint32_t)arc_buf_size(buf);
	uint64_t blkid;

	DB_DNODE_ENTER(dbuf);
	dn = DB_DNODE(dbuf);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
	rw_exit(&dn->dn_struct_rwlock);
	DB_DNODE_EXIT(dbuf);

	/*
	 * We can only assign if the offset is aligned, the arc buf is the
	 * same size as the dbuf, and the dbuf is not metadata.  It
	 * can't be metadata because the loaned arc buf comes from the
	 * user-data kmem arena.
	 */
	if (offset == db->db.db_offset && blksz == db->db.db_size &&
	    DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA) {
		dbuf_assign_arcbuf(db, buf, tx);
		dbuf_rele(db, FTAG);
	} else {
		objset_t *os;
		uint64_t object;

		DB_DNODE_ENTER(dbuf);
		dn = DB_DNODE(dbuf);
		os = dn->dn_objset;
		object = dn->dn_object;
		DB_DNODE_EXIT(dbuf);

		dbuf_rele(db, FTAG);
		dmu_write(os, object, offset, blksz, buf->b_data, tx);
		dmu_return_arcbuf(buf);
		XUIOSTAT_BUMP(xuiostat_wbuf_copied);
	}
}

typedef struct {
	dbuf_dirty_record_t	*dsa_dr;
	dmu_sync_cb_t		*dsa_done;
	zgd_t			*dsa_zgd;
	dmu_tx_t		*dsa_tx;
} dmu_sync_arg_t;

/* ARGSUSED */
static void
dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
{
	dmu_sync_arg_t *dsa = varg;
	dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
	blkptr_t *bp = zio->io_bp;

	if (zio->io_error == 0) {
		if (BP_IS_HOLE(bp)) {
			/*
			 * A block of zeros may compress to a hole, but the
			 * block size still needs to be known for replay.
			 */
			BP_SET_LSIZE(bp, db->db_size);
		} else if (!BP_IS_EMBEDDED(bp)) {
			ASSERT(BP_GET_LEVEL(bp) == 0);
			bp->blk_fill = 1;
		}
	}
}

static void
dmu_sync_late_arrival_ready(zio_t *zio)
{
	dmu_sync_ready(zio, NULL, zio->io_private);
}

/* ARGSUSED */
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
	dmu_sync_arg_t *dsa = varg;
	dbuf_dirty_record_t *dr = dsa->dsa_dr;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	mutex_enter(&db->db_mtx);
	ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
	if (zio->io_error == 0) {
		dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
		if (dr->dt.dl.dr_nopwrite) {
			blkptr_t *bp = zio->io_bp;
			blkptr_t *bp_orig = &zio->io_bp_orig;
			uint8_t chksum = BP_GET_CHECKSUM(bp_orig);

			ASSERT(BP_EQUAL(bp, bp_orig));
			ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
			ASSERT(zio_checksum_table[chksum].ci_flags &
			    ZCHECKSUM_FLAG_NOPWRITE);
		}
		dr->dt.dl.dr_overridden_by = *zio->io_bp;
		dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
		dr->dt.dl.dr_copies = zio->io_prop.zp_copies;

		/*
		 * Old style holes are filled with all zeros, whereas
		 * new-style holes maintain their lsize, type, level,
		 * and birth time (see zio_write_compress).  While we
		 * need to reset the BP_SET_LSIZE() call that happened
		 * in dmu_sync_ready for old style holes, we do *not*
		 * want to wipe out the information contained in new
		 * style holes.  Thus, only zero out the block pointer if
		 * it's an old style hole.
		 */
		if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
		    dr->dt.dl.dr_overridden_by.blk_birth == 0)
			BP_ZERO(&dr->dt.dl.dr_overridden_by);
	} else {
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	}
	cv_broadcast(&db->db_changed);
	mutex_exit(&db->db_mtx);

	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

	kmem_free(dsa, sizeof (*dsa));
}

static void
dmu_sync_late_arrival_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	dmu_sync_arg_t *dsa = zio->io_private;
	blkptr_t *bp_orig = &zio->io_bp_orig;

	if (zio->io_error == 0 && !BP_IS_HOLE(bp)) {
		/*
		 * If we didn't allocate a new block (i.e. ZIO_FLAG_NOPWRITE)
		 * then there is nothing to do here.
		 * Otherwise, free the newly allocated block in this txg.
		 */
		if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
			ASSERT(BP_EQUAL(bp, bp_orig));
		} else {
			ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
			ASSERT(zio->io_bp->blk_birth == zio->io_txg);
			ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
			zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
		}
	}

	dmu_tx_commit(dsa->dsa_tx);

	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

	kmem_free(dsa, sizeof (*dsa));
}

static int
dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
    zio_prop_t *zp, zbookmark_phys_t *zb)
{
	dmu_sync_arg_t *dsa;
	dmu_tx_t *tx;

	tx = dmu_tx_create(os);
	dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
		dmu_tx_abort(tx);
		/* Make zl_get_data do txg_wait_synced() */
		return (SET_ERROR(EIO));
	}

	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	dsa->dsa_dr = NULL;
	dsa->dsa_done = done;
	dsa->dsa_zgd = zgd;
	dsa->dsa_tx = tx;

	zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx),
	    zgd->zgd_bp, zgd->zgd_db->db_data, zgd->zgd_db->db_size,
	    zp, dmu_sync_late_arrival_ready, NULL,
	    NULL, dmu_sync_late_arrival_done, dsa, ZIO_PRIORITY_SYNC_WRITE,
	    ZIO_FLAG_CANFAIL, zb));

	return (0);
}

/*
 * Intent log support: sync the block associated with db to disk.
 * N.B. and XXX: the caller is responsible for making sure that the
 * data isn't changing while dmu_sync() is writing it.
 *
 * Return values:
 *
 *	EEXIST: this txg has already been synced, so there's nothing to do.
 *		The caller should not log the write.
 *
 *	ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
 *		The caller should not log the write.
 *
 *	EALREADY: this block is already in the process of being synced.
 *		The caller should track its progress (somehow).
 *
 *	EIO: could not do the I/O.
 *		The caller should do a txg_wait_synced().
 *
 *	0: the I/O has been initiated.
 *		The caller should log this blkptr in the done callback.
 *		It is possible that the I/O will fail, in which case
 *		the error will be reported to the done callback and
 *		propagated to pio from zio_done().
 */
int
dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
{
	blkptr_t *bp = zgd->zgd_bp;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
	objset_t *os = db->db_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	dbuf_dirty_record_t *dr;
	dmu_sync_arg_t *dsa;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	dnode_t *dn;

	ASSERT(pio != NULL);
	ASSERT(txg != 0);

	SET_BOOKMARK(&zb, ds->ds_object,
	    db->db.db_object, db->db_level, db->db_blkid);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
	DB_DNODE_EXIT(db);

	/*
	 * If we're frozen (running ziltest), we always need to generate a bp.
	 */
	if (txg > spa_freeze_txg(os->os_spa))
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));

	/*
	 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
	 * and us.
	 * If we determine that this txg is not yet syncing,
	 * but it begins to sync a moment later, that's OK because the
	 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
	 */
	mutex_enter(&db->db_mtx);

	if (txg <= spa_last_synced_txg(os->os_spa)) {
		/*
		 * This txg has already synced.  There's nothing to do.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(EEXIST));
	}

	if (txg <= spa_syncing_txg(os->os_spa)) {
		/*
		 * This txg is currently syncing, so we can't mess with
		 * the dirty record anymore; just write a new log block.
		 */
		mutex_exit(&db->db_mtx);
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
	}

	dr = db->db_last_dirty;
	while (dr && dr->dr_txg != txg)
		dr = dr->dr_next;

	if (dr == NULL) {
		/*
		 * There's no dr for this dbuf, so it must have been freed.
		 * There's no need to log writes to freed blocks, so we're done.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(ENOENT));
	}

	ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg);

	/*
	 * Assume the on-disk data is X, the current syncing data (in
	 * txg - 1) is Y, and the current in-memory data is Z (currently
	 * in dmu_sync).
	 *
	 * We usually want to perform a nopwrite if X and Z are the
	 * same.  However, if Y is different (i.e. the BP is going to
	 * change before this write takes effect), then a nopwrite will
	 * be incorrect - we would override with X, which could have
	 * been freed when Y was written.
	 *
	 * (Note that this is not a concern when we are nop-writing from
	 * syncing context, because X and Y must be identical, because
	 * all previous txgs have been synced.)
	 *
	 * Therefore, we disable nopwrite if the current BP could change
	 * before this TXG.  There are two ways it could change: by
	 * being dirty (dr_next is non-NULL), or by being freed
	 * (dnode_block_freed()).  This behavior is verified by
	 * zio_done(), which VERIFYs that the override BP is identical
	 * to the on-disk BP.
	 */
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dr->dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
		zp.zp_nopwrite = B_FALSE;
	DB_DNODE_EXIT(db);

	ASSERT(dr->dr_txg == txg);
	if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * We have already issued a sync write for this buffer,
		 * or this buffer has already been synced.  It could not
		 * have been dirtied since, or we would have cleared the state.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(EALREADY));
	}

	ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
	dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
	mutex_exit(&db->db_mtx);

	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	dsa->dsa_dr = dr;
	dsa->dsa_done = done;
	dsa->dsa_zgd = zgd;
	dsa->dsa_tx = NULL;

	zio_nowait(arc_write(pio, os->os_spa, txg,
	    bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
	    &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));

	return (0);
}

int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	err = dnode_set_blksz(dn, size, ibs, tx);
	dnode_rele(dn, FTAG);
	return (err);
}

void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/*
	 * Send streams include each object's checksum function.  This
	 * check ensures that the receiving system can understand the
	 * checksum function transmitted.
	 */
	ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);

	VERIFY0(dnode_hold(os, object, FTAG, &dn));
	ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
	dn->dn_checksum = checksum;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}

void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/*
	 * Send streams include each object's compression function.  This
	 * check ensures that the receiving system can understand the
	 * compression function transmitted.
	 */
	ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);

	VERIFY0(dnode_hold(os, object, FTAG, &dn));
	dn->dn_compress = compress;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}

int zfs_mdcomp_disable = 0;

/*
 * When the "redundant_metadata" property is set to "most", only indirect
 * blocks of this level and higher will have an additional ditto block.
 */
int zfs_redundant_metadata_most_ditto_level = 2;

void
dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
{
	dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
	boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
	    (wp & WP_SPILL));
	enum zio_checksum checksum = os->os_checksum;
	enum zio_compress compress = os->os_compress;
	enum zio_checksum dedup_checksum = os->os_dedup_checksum;
	boolean_t dedup = B_FALSE;
	boolean_t nopwrite = B_FALSE;
	boolean_t dedup_verify = os->os_dedup_verify;
	int copies = os->os_copies;

	/*
	 * We maintain different write policies for each of the following
	 * types of data:
	 *	1. metadata
	 *	2. preallocated blocks (i.e. level-0 blocks of a dump device)
	 *	3. all other level 0 blocks
	 */
	if (ismd) {
		if (zfs_mdcomp_disable) {
			compress = ZIO_COMPRESS_EMPTY;
		} else {
			/*
			 * XXX -- we should design a compression algorithm
			 * that specializes in arrays of bps.
			 */
			compress = zio_compress_select(os->os_spa,
			    ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);
		}

		/*
		 * Metadata always gets checksummed.
		 * If the data checksum is multi-bit correctable, and it's not
		 * a ZBT-style checksum, then it's suitable for metadata
		 * as well.  Otherwise, the metadata checksum defaults
		 * to fletcher4.
		 */
		if (!(zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_METADATA) ||
		    (zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_EMBEDDED))
			checksum = ZIO_CHECKSUM_FLETCHER_4;

		if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
		    (os->os_redundant_metadata ==
		    ZFS_REDUNDANT_METADATA_MOST &&
		    (level >= zfs_redundant_metadata_most_ditto_level ||
		    DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))))
			copies++;
	} else if (wp & WP_NOFILL) {
		ASSERT(level == 0);

		/*
		 * If we're writing preallocated blocks, we aren't actually
		 * writing them so don't set any policy properties.  These
		 * blocks are currently only used by an external subsystem
		 * outside of zfs (i.e. dump) and not written by the zio
		 * pipeline.
		 */
		compress = ZIO_COMPRESS_OFF;
		checksum = ZIO_CHECKSUM_NOPARITY;
	} else {
		compress = zio_compress_select(os->os_spa, dn->dn_compress,
		    compress);

		checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
		    zio_checksum_select(dn->dn_checksum, checksum) :
		    dedup_checksum;

		/*
		 * Determine dedup setting.  If we are in dmu_sync(),
		 * we won't actually dedup now because that's all
		 * done in syncing context; but we do want to use the
		 * dedup checksum.  If the checksum is not strong
		 * enough to ensure unique signatures, force
		 * dedup_verify.
		 */
		if (dedup_checksum != ZIO_CHECKSUM_OFF) {
			dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
			if (!(zio_checksum_table[checksum].ci_flags &
			    ZCHECKSUM_FLAG_DEDUP))
				dedup_verify = B_TRUE;
		}

		/*
		 * Enable nopwrite if we have a secure enough checksum
		 * algorithm (see comment in zio_nop_write) and
		 * compression is enabled.  We don't enable nopwrite if
		 * dedup is enabled as the two features are mutually
		 * exclusive.
		 */
		nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_NOPWRITE) &&
		    compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
	}

	zp->zp_checksum = checksum;
	zp->zp_compress = compress;
	zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
	zp->zp_level = level;
	zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
	zp->zp_dedup = dedup;
	zp->zp_dedup_verify = dedup && dedup_verify;
	zp->zp_nopwrite = nopwrite;
}

int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
	dnode_t *dn;
	int err;

	/*
	 * Sync any current changes before
	 * we go trundling through the block pointers.
	 */
	err = dmu_object_wait_synced(os, object);
	if (err) {
		return (err);
	}

	err = dnode_hold(os, object, FTAG, &dn);
	if (err) {
		return (err);
	}

	err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
	dnode_rele(dn, FTAG);

	return (err);
}

/*
 * Given the ZFS object, if it contains any dirty nodes
 * this function flushes all dirty blocks to disk.  This
 * ensures the DMU object info is updated.  A more efficient
 * future version might just find the TXG with the maximum
 * ID and wait for that to be synced.
 */
int
dmu_object_wait_synced(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	int error, i;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error) {
		return (error);
	}

	for (i = 0; i < TXG_SIZE; i++) {
		if (list_link_active(&dn->dn_dirty_link[i])) {
			break;
		}
	}
	dnode_rele(dn, FTAG);
	if (i != TXG_SIZE) {
		txg_wait_synced(dmu_objset_pool(os), 0);
	}

	return (0);
}

void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
	dnode_phys_t *dnp;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	mutex_enter(&dn->dn_mtx);

	dnp = dn->dn_phys;

	doi->doi_data_block_size = dn->dn_datablksz;
	doi->doi_metadata_block_size = dn->dn_indblkshift ?
	    1ULL << dn->dn_indblkshift : 0;
	doi->doi_type = dn->dn_type;
	doi->doi_bonus_type = dn->dn_bonustype;
	doi->doi_bonus_size = dn->dn_bonuslen;
	doi->doi_indirection = dn->dn_nlevels;
	doi->doi_checksum = dn->dn_checksum;
	doi->doi_compress = dn->dn_compress;
	doi->doi_nblkptr = dn->dn_nblkptr;
	doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
	doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	doi->doi_fill_count = 0;
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);

	mutex_exit(&dn->dn_mtx);
	rw_exit(&dn->dn_struct_rwlock);
}

/*
 * Get information on a DMU object.
 * If doi is NULL, just indicates whether the object exists.
 */
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);

	if (err)
		return (err);

	if (doi != NULL)
		dmu_object_info_from_dnode(dn, doi);

	dnode_rele(dn, FTAG);
	return (0);
}

/*
 * As above, but faster; can be used when you have a held dbuf in hand.
 */
void
dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	DB_DNODE_ENTER(db);
	dmu_object_info_from_dnode(DB_DNODE(db), doi);
	DB_DNODE_EXIT(db);
}

/*
 * Faster still when you only care about the size.
 * This is specifically optimized for zfs_getattr().
 */
void
dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
    u_longlong_t *nblk512)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	*blksize = dn->dn_datablksz;
	/* add 1 for dnode space */
	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
	    SPA_MINBLOCKSHIFT) + 1;
	DB_DNODE_EXIT(db);
}

void
byteswap_uint64_array(void *vbuf, size_t size)
{
	uint64_t *buf = vbuf;
	size_t count = size >> 3;
	int i;

	ASSERT((size & 7) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_64(buf[i]);
}

void
byteswap_uint32_array(void *vbuf, size_t size)
{
	uint32_t *buf = vbuf;
	size_t count = size >> 2;
	int i;

	ASSERT((size & 3) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_32(buf[i]);
}

void
byteswap_uint16_array(void *vbuf, size_t size)
{
	uint16_t *buf = vbuf;
	size_t count = size >> 1;
	int i;

	ASSERT((size & 1) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_16(buf[i]);
}

/* ARGSUSED */
void
byteswap_uint8_array(void *vbuf, size_t size)
{
}

void
dmu_init(void)
{
	zfs_dbgmsg_init();
	sa_cache_init();
	xuio_stat_init();
	dmu_objset_init();
	dnode_init();
	zfetch_init();
	l2arc_init();
	arc_init();
	dbuf_init();
}

void
dmu_fini(void)
{
	arc_fini(); /* arc depends on l2arc, so arc must go first */
	l2arc_fini();
	zfetch_fini();
	dbuf_fini();
	dnode_fini();
	dmu_objset_fini();
	xuio_stat_fini();
	sa_cache_fini();
	zfs_dbgmsg_fini();
}
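
/*
 * Usage sketch (a minimal example, not taken from this file; os, object,
 * offset, size and buf are assumed to be set up by the caller): writes go
 * through a transaction, reads do not:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
 *		dmu_tx_abort(tx);
 *	} else {
 *		dmu_write(os, object, offset, size, buf, tx);
 *		dmu_tx_commit(tx);
 *	}
 *
 *	(void) dmu_read(os, object, offset, size, buf, DMU_READ_PREFETCH);
 */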