/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/sa.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif

const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{ byteswap_uint8_array, TRUE, "unallocated" },
	{ zap_byteswap, TRUE, "object directory" },
	{ byteswap_uint64_array, TRUE, "object array" },
	{ byteswap_uint8_array, TRUE, "packed nvlist" },
	{ byteswap_uint64_array, TRUE, "packed nvlist size" },
	{ byteswap_uint64_array, TRUE, "bpobj" },
	{ byteswap_uint64_array, TRUE, "bpobj header" },
	{ byteswap_uint64_array, TRUE, "SPA space map header" },
	{ byteswap_uint64_array, TRUE, "SPA space map" },
	{ byteswap_uint64_array, TRUE, "ZIL intent log" },
	{ dnode_buf_byteswap, TRUE, "DMU dnode" },
	{ dmu_objset_byteswap, TRUE, "DMU objset" },
	{ byteswap_uint64_array, TRUE, "DSL directory" },
	{ zap_byteswap, TRUE, "DSL directory child map" },
	{ zap_byteswap, TRUE, "DSL dataset snap map" },
	{ zap_byteswap, TRUE, "DSL props" },
	{ byteswap_uint64_array, TRUE, "DSL dataset" },
	{ zfs_znode_byteswap, TRUE, "ZFS znode" },
	{ zfs_oldacl_byteswap, TRUE, "ZFS V0 ACL" },
	{ byteswap_uint8_array, FALSE, "ZFS plain file" },
	{ zap_byteswap, TRUE, "ZFS directory" },
	{ zap_byteswap, TRUE, "ZFS master node" },
	{ zap_byteswap, TRUE, "ZFS delete queue" },
	{ byteswap_uint8_array, FALSE, "zvol object" },
	{ zap_byteswap, TRUE, "zvol prop" },
	{ byteswap_uint8_array, FALSE, "other uint8[]" },
	{ byteswap_uint64_array, FALSE, "other uint64[]" },
	{ zap_byteswap, TRUE, "other ZAP" },
	{ zap_byteswap, TRUE, "persistent error log" },
	{ byteswap_uint8_array, TRUE, "SPA history" },
	{ byteswap_uint64_array, TRUE, "SPA history offsets" },
	{ zap_byteswap, TRUE, "Pool properties" },
	{ zap_byteswap, TRUE, "DSL permissions" },
	{ zfs_acl_byteswap, TRUE, "ZFS ACL" },
	{ byteswap_uint8_array, TRUE, "ZFS SYSACL" },
	{ byteswap_uint8_array, TRUE, "FUID table" },
	{ byteswap_uint64_array, TRUE, "FUID table size" },
	{ zap_byteswap, TRUE, "DSL dataset next clones" },
	{ zap_byteswap, TRUE, "scan work queue" },
	{ zap_byteswap, TRUE, "ZFS user/group used" },
	{ zap_byteswap, TRUE, "ZFS user/group quota" },
	{ zap_byteswap, TRUE, "snapshot refcount tags" },
	{ zap_byteswap, TRUE, "DDT ZAP algorithm" },
	{ zap_byteswap, TRUE, "DDT statistics" },
	{ byteswap_uint8_array, TRUE, "System attributes" },
	{ zap_byteswap, TRUE, "SA master node" },
	{ zap_byteswap, TRUE, "SA attr registration" },
	{ zap_byteswap, TRUE, "SA attr layouts" },
	{ zap_byteswap, TRUE, "scan translations" },
	{ byteswap_uint8_array, FALSE, "deduplicated block" },
	{ zap_byteswap, TRUE, "DSL deadlist map" },
	{ byteswap_uint64_array, TRUE, "DSL deadlist map hdr" },
	{ zap_byteswap, TRUE, "DSL dir clones" },
	{ byteswap_uint64_array, TRUE, "bpobj subobj" },
};

/*
 * Hold the buffer that contains <offset> within <object> and read it in.
 * On success *dbp is held and must be released with dmu_buf_rele();
 * on failure *dbp is NULL.
 */
int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	blkid = dbuf_whichblock(dn, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL) {
		err = EIO;
	} else {
		err = dbuf_read(db, NULL, db_flags);
		if (err) {
			dbuf_rele(db, tag);
			db = NULL;
		}
	}

	dnode_rele(dn, FTAG);
	/* db is NULL on error; don't manufacture a pointer from it */
	*dbp = (db != NULL) ? &db->db : NULL;
	return (err);
}

int
dmu_bonus_max(void)
{
	return (DN_MAX_BONUSLEN);
}

int
dmu_set_bonus(dmu_buf_t *db, int newsize, dmu_tx_t *tx)
{
	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;

	if (dn->dn_bonus != (dmu_buf_impl_t *)db)
		return (EINVAL);
	if (newsize < 0 || newsize > db->db_size)
		return (EINVAL);
	dnode_setbonuslen(dn, newsize, tx);
	return (0);
}

int
dmu_set_bonustype(dmu_buf_t *db, dmu_object_type_t type, dmu_tx_t *tx)
{
	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;

	/* valid object types are 0 .. DMU_OT_NUMTYPES - 1 */
	if (type >= DMU_OT_NUMTYPES)
		return (EINVAL);

	if (dn->dn_bonus != (dmu_buf_impl_t *)db)
		return (EINVAL);

	dnode_setbonus_type(dn, type, tx);
	return (0);
}

int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);
	dbuf_rm_spill(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_rm_spill(dn, tx);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}
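/*
 * Illustrative usage sketch (hypothetical caller): the basic
 * hold/inspect/release pattern for dmu_buf_hold().  With
 * DB_RF_CANFAIL semantics, an EIO return must be handled.
 *
 *	dmu_buf_t *db;
 *	int err = dmu_buf_hold(os, object, offset, FTAG, &db,
 *	    DMU_READ_PREFETCH);
 *	if (err == 0) {
 *		... db->db_data and db->db_size are valid here ...
 *		dmu_buf_rele(db, FTAG);
 *	}
 */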
/*
 * Returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	dmu_buf_impl_t *db;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		rw_exit(&dn->dn_struct_rwlock);
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;
	rw_exit(&dn->dn_struct_rwlock);

	/* as long as the bonus buf is held, the dnode will be held */
	if (refcount_add(&db->db_holds, tag) == 1)
		VERIFY(dnode_add_ref(dn, db));

	dnode_rele(dn, FTAG);

	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH));

	*dbp = &db->db;
	return (0);
}

/*
 * Returns ENOENT, EIO, or 0.
 *
 * This interface will allocate a blank spill dbuf when a spill blk
 * doesn't already exist on the dnode.
 *
 * If you only want to find an already-existing spill db, use
 * dmu_spill_hold_existing() instead.
 */
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = NULL;
	int err;

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_exit(&dn->dn_struct_rwlock);

	ASSERT(db != NULL);
	err = dbuf_read(db, NULL, flags);
	if (err == 0)
		*dbp = &db->db;
	else
		dbuf_rele(db, tag);
	return (err);
}

int
dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn = ((dmu_buf_impl_t *)bonus)->db_dnode;
	int err;

	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA)
		return (EINVAL);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);

	if (!dn->dn_have_spill) {
		rw_exit(&dn->dn_struct_rwlock);
		return (ENOENT);
	}
	err = dmu_spill_hold_by_dnode(dn,
	    DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
	rw_exit(&dn->dn_struct_rwlock);
	return (err);
}

int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
	return (dmu_spill_hold_by_dnode(((dmu_buf_impl_t *)bonus)->db_dnode,
	    DB_RF_CANFAIL, tag, dbp));
}
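/*
 * Illustrative sketch (hypothetical caller): the bonus buffer carries
 * per-object metadata (e.g. a znode), and holding it keeps the dnode
 * held.
 *
 *	dmu_buf_t *db;
 *	if (dmu_bonus_hold(os, object, FTAG, &db) == 0) {
 *		... interpret db->db_data as the bonus payload ...
 *		dmu_buf_rele(db, FTAG);
 *	}
 */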
/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
	dsl_pool_t *dp = NULL;
	dmu_buf_t **dbp;
	uint64_t blkid, nblks, i;
	uint32_t dbuf_flags;
	int err;
	zio_t *zio;
	hrtime_t start;

	ASSERT(length <= DMU_MAX_ACCESS);

	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT;
	if (flags & DMU_READ_NO_PREFETCH || length > zfetch_array_rd_sz)
		dbuf_flags |= DB_RF_NOPREFETCH;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
		    P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
	} else {
		if (offset + length > dn->dn_datablksz) {
			zfs_panic_recover("zfs: accessing past end of object "
			    "%llx/%llx (size=%u access=%llu+%llu)",
			    (longlong_t)dn->dn_objset->
			    os_dsl_dataset->ds_object,
			    (longlong_t)dn->dn_object, dn->dn_datablksz,
			    (longlong_t)offset, (longlong_t)length);
			rw_exit(&dn->dn_struct_rwlock);
			return (EIO);
		}
		nblks = 1;
	}
	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

	if (dn->dn_objset->os_dsl_dataset)
		dp = dn->dn_objset->os_dsl_dataset->ds_dir->dd_pool;
	if (dp && dsl_pool_sync_context(dp))
		start = gethrtime();
	zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
	blkid = dbuf_whichblock(dn, offset);
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = dbuf_hold(dn, blkid+i, tag);
		if (db == NULL) {
			rw_exit(&dn->dn_struct_rwlock);
			dmu_buf_rele_array(dbp, nblks, tag);
			zio_nowait(zio);
			return (EIO);
		}
		/* initiate async i/o */
		if (read) {
			(void) dbuf_read(db, zio, dbuf_flags);
		}
		dbp[i] = &db->db;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* wait for async i/o */
	err = zio_wait(zio);
	/* track read overhead when we are in sync context */
	if (dp && dsl_pool_sync_context(dp))
		dp->dp_read_overhead += gethrtime() - start;
	if (err) {
		dmu_buf_rele_array(dbp, nblks, tag);
		return (err);
	}

	/* wait for other io to complete */
	if (read) {
		for (i = 0; i < nblks; i++) {
			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
			mutex_enter(&db->db_mtx);
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL)
				cv_wait(&db->db_changed, &db->db_mtx);
			if (db->db_state == DB_UNCACHED)
				err = EIO;
			mutex_exit(&db->db_mtx);
			if (err) {
				dmu_buf_rele_array(dbp, nblks, tag);
				return (err);
			}
		}
	}

	*numbufsp = nblks;
	*dbpp = dbp;
	return (0);
}

static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
	int err;

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);

	return (err);
}
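/*
 * Illustrative sketch: the pattern dmu_read()/dmu_write() below follow.
 * A multi-block access becomes one hold-array call (reads are issued in
 * parallel) and a single release of the whole array.
 *
 *	dmu_buf_t **dbp;
 *	int numbufs, i;
 *	if (dmu_buf_hold_array(os, object, offset, length, TRUE, FTAG,
 *	    &numbufs, &dbp) == 0) {
 *		for (i = 0; i < numbufs; i++)
 *			... copy out of dbp[i]->db_data ...
 *		dmu_buf_rele_array(dbp, numbufs, FTAG);
 *	}
 */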
void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
	int i;
	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

	if (numbufs == 0)
		return;

	for (i = 0; i < numbufs; i++) {
		if (dbp[i])
			dbuf_rele(dbp[i], tag);
	}

	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}

void
dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len)
{
	dnode_t *dn;
	uint64_t blkid;
	int nblks, i, err;

	if (zfs_prefetch_disable)
		return;

	if (len == 0) {		/* they're interested in the bonus buffer */
		dn = os->os_meta_dnode;

		if (object == 0 || object >= DN_MAX_OBJECT)
			return;

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		blkid = dbuf_whichblock(dn, object * sizeof (dnode_phys_t));
		dbuf_prefetch(dn, blkid);
		rw_exit(&dn->dn_struct_rwlock);
		return;
	}

	/*
	 * XXX - Note, if the dnode for the requested object is not
	 * already cached, we will do a *synchronous* read in the
	 * dnode_hold() call.  The same is true for any indirects.
	 */
	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset+len, 1<<blkshift) -
		    P2ALIGN(offset, 1<<blkshift)) >> blkshift;
	} else {
		nblks = (offset < dn->dn_datablksz);
	}

	if (nblks != 0) {
		blkid = dbuf_whichblock(dn, offset);
		for (i = 0; i < nblks; i++)
			dbuf_prefetch(dn, blkid+i);
	}

	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
}

/*
 * Get the next "chunk" of file data to free.  We traverse the file from
 * the end so that the file gets shorter over time (if we crash in the
 * middle, this will leave us in a better state).  We find allocated file
 * data by simply searching the allocated level 1 indirects.
 */
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t limit)
{
	uint64_t len = *start - limit;
	uint64_t blkcnt = 0;
	uint64_t maxblks = DMU_MAX_ACCESS / (1ULL << (dn->dn_indblkshift + 1));
	uint64_t iblkrange =
	    dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);

	ASSERT(limit <= *start);

	if (len <= iblkrange * maxblks) {
		*start = limit;
		return (0);
	}
	ASSERT(ISP2(iblkrange));

	while (*start > limit && blkcnt < maxblks) {
		int err;

		/* find next allocated L1 indirect */
		err = dnode_next_offset(dn,
		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);

		/* if there are no more, then we are done */
		if (err == ESRCH) {
			*start = limit;
			return (0);
		} else if (err) {
			return (err);
		}
		blkcnt += 1;

		/* reset offset to end of "next" block back */
		*start = P2ALIGN(*start, iblkrange);
		if (*start <= limit)
			*start = limit;
		else
			*start -= 1;
	}
	return (0);
}
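/*
 * Worked example for the arithmetic above (illustrative numbers):
 * with 128K data blocks and 16K indirect blocks (dn_indblkshift 14),
 * EPB(14, SPA_BLKPTRSHIFT) = 1 << (14 - 7) = 128 block pointers per
 * indirect, so each L1 indirect spans iblkrange = 128 * 128K = 16M of
 * file data, and get_next_chunk() steps backwards one such 16M-aligned
 * region at a time.
 */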
static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
    uint64_t length, boolean_t free_dnode)
{
	dmu_tx_t *tx;
	uint64_t object_size, start, end, len;
	boolean_t trunc = (length == DMU_OBJECT_END);
	int align, err;

	align = 1 << dn->dn_datablkshift;
	ASSERT(align > 0);
	object_size = align == 1 ? dn->dn_datablksz :
	    (dn->dn_maxblkid + 1) << dn->dn_datablkshift;

	end = offset + length;
	if (trunc || end > object_size)
		end = object_size;
	if (end <= offset)
		return (0);
	length = end - offset;

	while (length) {
		start = end;
		/* assert(offset <= start) */
		err = get_next_chunk(dn, &start, offset);
		if (err)
			return (err);
		len = trunc ? DMU_OBJECT_END : end - start;

		tx = dmu_tx_create(os);
		dmu_tx_hold_free(tx, dn->dn_object, start, len);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}

		dnode_free_range(dn, start, trunc ? -1 : len, tx);

		if (start == 0 && free_dnode) {
			ASSERT(trunc);
			dnode_free(dn, tx);
		}

		length -= end - start;

		dmu_tx_commit(tx);
		end = start;
	}
	return (0);
}

int
dmu_free_long_range(objset_t *os, uint64_t object,
    uint64_t offset, uint64_t length)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);
	err = dmu_free_long_range_impl(os, dn, offset, length, FALSE);
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_free_object(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_t *tx;
	int err;

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED,
	    FTAG, &dn);
	if (err != 0)
		return (err);
	if (dn->dn_nlevels == 1) {
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, object);
		dmu_tx_hold_free(tx, dn->dn_object, 0, DMU_OBJECT_END);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err == 0) {
			dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
			dnode_free(dn, tx);
			dmu_tx_commit(tx);
		} else {
			dmu_tx_abort(tx);
		}
	} else {
		err = dmu_free_long_range_impl(os, dn, 0, DMU_OBJECT_END, TRUE);
	}
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	ASSERT(offset < UINT64_MAX);
	ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
	dnode_free_range(dn, offset, size, tx);
	dnode_rele(dn, FTAG);
	return (0);
}
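/*
 * Illustrative sketch (hypothetical caller): freeing a range inside a
 * caller-managed transaction, mirroring the hold-assign-commit pattern
 * used elsewhere in this file.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, offset, size);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *	} else {
 *		(void) dmu_free_range(os, object, offset, size, tx);
 *		dmu_tx_commit(tx);
 *	}
 */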
int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dnode_t *dn;
	dmu_buf_t **dbp;
	int numbufs, err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	/*
	 * Deal with odd block sizes, where there can't be data past the first
	 * block.  If we ever do the tail block optimization, we will need to
	 * handle that here as well.
	 */
	if (dn->dn_maxblkid == 0) {
		int newsz = offset > dn->dn_datablksz ? 0 :
		    MIN(size, dn->dn_datablksz - offset);
		bzero((char *)buf + newsz, size - newsz);
		size = newsz;
	}

	while (size > 0) {
		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
		int i;

		/*
		 * NB: we could do this block-at-a-time, but it's nice
		 * to be reading in parallel.
		 */
		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
		    TRUE, FTAG, &numbufs, &dbp, flags);
		if (err)
			break;

		for (i = 0; i < numbufs; i++) {
			int tocpy;
			int bufoff;
			dmu_buf_t *db = dbp[i];

			ASSERT(size > 0);

			bufoff = offset - db->db_offset;
			tocpy = (int)MIN(db->db_size - bufoff, size);

			bcopy((char *)db->db_data + bufoff, buf, tocpy);

			offset += tocpy;
			size -= tocpy;
			buf = (char *)buf + tocpy;
		}
		dmu_buf_rele_array(dbp, numbufs, FTAG);
	}
	dnode_rele(dn, FTAG);
	return (err);
}

void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;

	if (size == 0)
		return;

	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		bcopy(buf, (char *)db->db_data + bufoff, tocpy);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
		buf = (char *)buf + tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}
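/*
 * Illustrative sketch (hypothetical caller): dmu_write() assumes the
 * caller has already reserved space in an assigned transaction.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, offset, size, buf, tx);
 *	dmu_tx_commit(tx);
 */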
void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;

	if (size == 0)
		return;

	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		dmu_buf_t *db = dbp[i];

		dmu_buf_will_not_fill(db, tx);
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

/*
 * DMU support for xuio
 */
kstat_t *xuio_ksp = NULL;

int
dmu_xuio_init(xuio_t *xuio, int nblk)
{
	dmu_xuio_t *priv;
	uio_t *uio = &xuio->xu_uio;

	uio->uio_iovcnt = nblk;
	uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP);

	priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP);
	priv->cnt = nblk;
	priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP);
	priv->iovp = uio->uio_iov;
	XUIO_XUZC_PRIV(xuio) = priv;

	if (XUIO_XUZC_RW(xuio) == UIO_READ)
		XUIOSTAT_INCR(xuiostat_onloan_rbuf, nblk);
	else
		XUIOSTAT_INCR(xuiostat_onloan_wbuf, nblk);

	return (0);
}

void
dmu_xuio_fini(xuio_t *xuio)
{
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
	int nblk = priv->cnt;

	kmem_free(priv->iovp, nblk * sizeof (iovec_t));
	kmem_free(priv->bufs, nblk * sizeof (arc_buf_t *));
	kmem_free(priv, sizeof (dmu_xuio_t));

	if (XUIO_XUZC_RW(xuio) == UIO_READ)
		XUIOSTAT_INCR(xuiostat_onloan_rbuf, -nblk);
	else
		XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk);
}
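/*
 * Illustrative lifecycle sketch (hypothetical caller): an xuio is set
 * up for nblk loaned buffers, populated one arc_buf per iovec via
 * dmu_xuio_add(), and torn down with dmu_xuio_fini() once the loaned
 * buffers have been returned or assigned.
 *
 *	(void) dmu_xuio_init(xuio, nblk);
 *	... for each buffer: dmu_xuio_add(xuio, abuf, off, n); ...
 *	dmu_xuio_fini(xuio);
 */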
/*
 * Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf }
 * and increase priv->next by 1.
 */
int
dmu_xuio_add(xuio_t *xuio, arc_buf_t *abuf, offset_t off, size_t n)
{
	struct iovec *iov;
	uio_t *uio = &xuio->xu_uio;
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
	int i = priv->next++;

	ASSERT(i < priv->cnt);
	ASSERT(off + n <= arc_buf_size(abuf));
	iov = uio->uio_iov + i;
	iov->iov_base = (char *)abuf->b_data + off;
	iov->iov_len = n;
	priv->bufs[i] = abuf;
	return (0);
}

int
dmu_xuio_cnt(xuio_t *xuio)
{
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
	return (priv->cnt);
}

arc_buf_t *
dmu_xuio_arcbuf(xuio_t *xuio, int i)
{
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);

	ASSERT(i < priv->cnt);
	return (priv->bufs[i]);
}

void
dmu_xuio_clear(xuio_t *xuio, int i)
{
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);

	ASSERT(i < priv->cnt);
	priv->bufs[i] = NULL;
}

static void
xuio_stat_init(void)
{
	xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (xuio_ksp != NULL) {
		xuio_ksp->ks_data = &xuio_stats;
		kstat_install(xuio_ksp);
	}
}

static void
xuio_stat_fini(void)
{
	if (xuio_ksp != NULL) {
		kstat_delete(xuio_ksp);
		xuio_ksp = NULL;
	}
}

void
xuio_stat_wbuf_copied()
{
	XUIOSTAT_BUMP(xuiostat_wbuf_copied);
}

void
xuio_stat_wbuf_nocopy()
{
	XUIOSTAT_BUMP(xuiostat_wbuf_nocopy);
}

#ifdef _KERNEL
int
dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
{
	dmu_buf_t **dbp;
	int numbufs, i, err;
	xuio_t *xuio = NULL;

	/*
	 * NB: we could do this block-at-a-time, but it's nice
	 * to be reading in parallel.
	 */
	err = dmu_buf_hold_array(os, object, uio->uio_loffset, size, TRUE, FTAG,
	    &numbufs, &dbp);
	if (err)
		return (err);

	if (uio->uio_extflg == UIO_XUIO)
		xuio = (xuio_t *)uio;

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = uio->uio_loffset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		if (xuio) {
			dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
			arc_buf_t *dbuf_abuf = dbi->db_buf;
			arc_buf_t *abuf = dbuf_loan_arcbuf(dbi);
			err = dmu_xuio_add(xuio, abuf, bufoff, tocpy);
			if (!err) {
				uio->uio_resid -= tocpy;
				uio->uio_loffset += tocpy;
			}

			if (abuf == dbuf_abuf)
				XUIOSTAT_BUMP(xuiostat_rbuf_nocopy);
			else
				XUIOSTAT_BUMP(xuiostat_rbuf_copied);
		} else {
			err = uiomove((char *)db->db_data + bufoff, tocpy,
			    UIO_READ, uio);
		}
		if (err)
			break;

		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);

	return (err);
}
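/*
 * Illustrative sketch (hypothetical caller, in the style of zfs_read):
 * a VFS read loop hands the uio to dmu_read_uio() in chunks and lets
 * uiomove() advance uio_loffset and uio_resid.  The chunk size is the
 * caller's choice.
 *
 *	while (n > 0) {
 *		ssize_t nbytes = MIN(n, chunk_size);
 *		error = dmu_read_uio(os, object, uio, nbytes);
 *		if (error)
 *			break;
 *		n -= nbytes;
 *	}
 */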
static int
dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;
	int err = 0;
	int i;

	err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = uio->uio_loffset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		/*
		 * XXX uiomove could block forever (eg. nfs-backed
		 * pages).  There needs to be a uiolockdown() function
		 * to lock the pages in memory, so that uiomove won't
		 * block.
		 */
		err = uiomove((char *)db->db_data + bufoff, tocpy,
		    UIO_WRITE, uio);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		if (err)
			break;

		size -= tocpy;
	}

	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}

int
dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	if (size == 0)
		return (0);

	return (dmu_write_uio_dnode(((dmu_buf_impl_t *)zdb)->db_dnode,
	    uio, size, tx));
}

int
dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_write_uio_dnode(dn, uio, size, tx);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    page_t *pp, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;
	int err;

	if (size == 0)
		return (0);

	err = dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		int tocpy, copied, thiscpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];
		caddr_t va;

		ASSERT(size > 0);
		ASSERT3U(db->db_size, >=, PAGESIZE);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		for (copied = 0; copied < tocpy; copied += PAGESIZE) {
			ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
			thiscpy = MIN(PAGESIZE, tocpy - copied);
			va = zfs_map_page(pp, S_READ);
			bcopy(va, (char *)db->db_data + bufoff, thiscpy);
			zfs_unmap_page(pp, va);
			pp = pp->p_next;
			bufoff += PAGESIZE;
		}

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}
#endif

/*
 * Allocate a loaned anonymous arc buffer.
 */
arc_buf_t *
dmu_request_arcbuf(dmu_buf_t *handle, int size)
{
	dnode_t *dn = ((dmu_buf_impl_t *)handle)->db_dnode;

	return (arc_loan_buf(dn->dn_objset->os_spa, size));
}

/*
 * Free a loaned arc buffer.
 */
void
dmu_return_arcbuf(arc_buf_t *buf)
{
	arc_return_buf(buf, FTAG);
	VERIFY(arc_buf_remove_ref(buf, FTAG) == 1);
}
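/*
 * Illustrative sketch (hypothetical caller): the zero-copy write path
 * borrows an anonymous buffer against a held dbuf of the target object,
 * fills it, and hands it back via dmu_assign_arcbuf() below, which
 * falls back to a copying dmu_write() when direct assignment is not
 * possible.
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(db, blksz);
 *	... fill abuf->b_data with blksz bytes ...
 *	dmu_assign_arcbuf(db, offset, abuf, tx);
 */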
/*
 * When possible, directly assign the passed loaned arc buffer to a dbuf.
 * If this is not possible, copy the contents of the passed arc buf via
 * dmu_write().
 */
void
dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
    dmu_tx_t *tx)
{
	dnode_t *dn = ((dmu_buf_impl_t *)handle)->db_dnode;
	dmu_buf_impl_t *db;
	uint32_t blksz = (uint32_t)arc_buf_size(buf);
	uint64_t blkid;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, offset);
	VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
	rw_exit(&dn->dn_struct_rwlock);

	if (offset == db->db.db_offset && blksz == db->db.db_size) {
		dbuf_assign_arcbuf(db, buf, tx);
		dbuf_rele(db, FTAG);
	} else {
		dbuf_rele(db, FTAG);
		dmu_write(dn->dn_objset, dn->dn_object, offset, blksz,
		    buf->b_data, tx);
		dmu_return_arcbuf(buf);
		XUIOSTAT_BUMP(xuiostat_wbuf_copied);
	}
}

typedef struct {
	dbuf_dirty_record_t	*dsa_dr;
	dmu_sync_cb_t		*dsa_done;
	zgd_t			*dsa_zgd;
	dmu_tx_t		*dsa_tx;
} dmu_sync_arg_t;

/* ARGSUSED */
static void
dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
{
	dmu_sync_arg_t *dsa = varg;
	dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
	blkptr_t *bp = zio->io_bp;

	if (zio->io_error == 0) {
		if (BP_IS_HOLE(bp)) {
			/*
			 * A block of zeros may compress to a hole, but the
			 * block size still needs to be known for replay.
			 */
			BP_SET_LSIZE(bp, db->db_size);
		} else {
			ASSERT(BP_GET_TYPE(bp) == dn->dn_type);
			ASSERT(BP_GET_LEVEL(bp) == 0);
			bp->blk_fill = 1;
		}
	}
}

static void
dmu_sync_late_arrival_ready(zio_t *zio)
{
	dmu_sync_ready(zio, NULL, zio->io_private);
}

/* ARGSUSED */
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
	dmu_sync_arg_t *dsa = varg;
	dbuf_dirty_record_t *dr = dsa->dsa_dr;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	mutex_enter(&db->db_mtx);
	ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
	if (zio->io_error == 0) {
		dr->dt.dl.dr_overridden_by = *zio->io_bp;
		dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
		dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
		if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by))
			BP_ZERO(&dr->dt.dl.dr_overridden_by);
	} else {
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	}
	cv_broadcast(&db->db_changed);
	mutex_exit(&db->db_mtx);

	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

	kmem_free(dsa, sizeof (*dsa));
}

static void
dmu_sync_late_arrival_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	dmu_sync_arg_t *dsa = zio->io_private;

	if (zio->io_error == 0 && !BP_IS_HOLE(bp)) {
		ASSERT(zio->io_bp->blk_birth == zio->io_txg);
		ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
		zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
	}

	dmu_tx_commit(dsa->dsa_tx);

	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

	kmem_free(dsa, sizeof (*dsa));
}
static int
dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
    zio_prop_t *zp, zbookmark_t *zb)
{
	dmu_sync_arg_t *dsa;
	dmu_tx_t *tx;

	tx = dmu_tx_create(os);
	dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
		dmu_tx_abort(tx);
		return (EIO);	/* Make zl_get_data do txg_wait_synced() */
	}

	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	dsa->dsa_dr = NULL;
	dsa->dsa_done = done;
	dsa->dsa_zgd = zgd;
	dsa->dsa_tx = tx;

	zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
	    zgd->zgd_db->db_data, zgd->zgd_db->db_size, zp,
	    dmu_sync_late_arrival_ready, dmu_sync_late_arrival_done, dsa,
	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));

	return (0);
}
/*
 * Intent log support: sync the block associated with db to disk.
 * N.B. and XXX: the caller is responsible for making sure that the
 * data isn't changing while dmu_sync() is writing it.
 *
 * Return values:
 *
 *	EEXIST: this txg has already been synced, so there's nothing to do.
 *		The caller should not log the write.
 *
 *	ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
 *		The caller should not log the write.
 *
 *	EALREADY: this block is already in the process of being synced.
 *		The caller should track its progress (somehow).
 *
 *	EIO: could not do the I/O.
 *		The caller should do a txg_wait_synced().
 *
 *	0: the I/O has been initiated.
 *		The caller should log this blkptr in the done callback.
 *		It is possible that the I/O will fail, in which case
 *		the error will be reported to the done callback and
 *		propagated to pio from zio_done().
 */
int
dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
{
	blkptr_t *bp = zgd->zgd_bp;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
	objset_t *os = db->db_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	dbuf_dirty_record_t *dr;
	dmu_sync_arg_t *dsa;
	zbookmark_t zb;
	zio_prop_t zp;

	ASSERT(pio != NULL);
	ASSERT(BP_IS_HOLE(bp));
	ASSERT(txg != 0);

	SET_BOOKMARK(&zb, ds->ds_object,
	    db->db.db_object, db->db_level, db->db_blkid);

	dmu_write_policy(os, db->db_dnode, db->db_level, WP_DMU_SYNC, &zp);

	/*
	 * If we're frozen (running ziltest), we always need to generate a bp.
	 */
	if (txg > spa_freeze_txg(os->os_spa))
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));

	/*
	 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
	 * and us.  If we determine that this txg is not yet syncing,
	 * but it begins to sync a moment later, that's OK because the
	 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
	 */
	mutex_enter(&db->db_mtx);

	if (txg <= spa_last_synced_txg(os->os_spa)) {
		/*
		 * This txg has already synced.  There's nothing to do.
		 */
		mutex_exit(&db->db_mtx);
		return (EEXIST);
	}

	if (txg <= spa_syncing_txg(os->os_spa)) {
		/*
		 * This txg is currently syncing, so we can't mess with
		 * the dirty record anymore; just write a new log block.
		 */
		mutex_exit(&db->db_mtx);
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
	}

	dr = db->db_last_dirty;
	while (dr && dr->dr_txg != txg)
		dr = dr->dr_next;

	if (dr == NULL) {
		/*
		 * There's no dr for this dbuf, so it must have been freed.
		 * There's no need to log writes to freed blocks, so we're
		 * done.
		 */
		mutex_exit(&db->db_mtx);
		return (ENOENT);
	}

	ASSERT(dr->dr_txg == txg);
	if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * We have already issued a sync write for this buffer,
		 * or this buffer has already been synced.  It could not
		 * have been dirtied since, or we would have cleared the state.
		 */
		mutex_exit(&db->db_mtx);
		return (EALREADY);
	}

	ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
	dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
	mutex_exit(&db->db_mtx);

	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	dsa->dsa_dr = dr;
	dsa->dsa_done = done;
	dsa->dsa_zgd = zgd;
	dsa->dsa_tx = NULL;

	zio_nowait(arc_write(pio, os->os_spa, txg,
	    bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db), &zp,
	    dmu_sync_ready, dmu_sync_done, dsa,
	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));

	return (0);
}
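/*
 * Illustrative sketch of the caller-side contract (hypothetical ZIL
 * get-data callback).  Only 0 and EIO require action; the other return
 * values all mean "do not log the write".
 *
 *	error = dmu_sync(zio, lr->lr_common.lrc_txg, done_cb, zgd);
 *	if (error == EIO)
 *		txg_wait_synced(dmu_objset_pool(os), 0);
 *	... on 0, done_cb() will see the final blkptr and log it ...
 */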
int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	err = dnode_set_blksz(dn, size, ibs, tx);
	dnode_rele(dn, FTAG);
	return (err);
}

void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/* XXX assumes dnode_hold will not get an i/o error */
	(void) dnode_hold(os, object, FTAG, &dn);
	ASSERT(checksum < ZIO_CHECKSUM_FUNCTIONS);
	dn->dn_checksum = checksum;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}

void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/* XXX assumes dnode_hold will not get an i/o error */
	(void) dnode_hold(os, object, FTAG, &dn);
	ASSERT(compress < ZIO_COMPRESS_FUNCTIONS);
	dn->dn_compress = compress;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}

int zfs_mdcomp_disable = 0;

void
dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
{
	dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
	boolean_t ismd = (level > 0 || dmu_ot[type].ot_metadata ||
	    (wp & WP_SPILL));
	enum zio_checksum checksum = os->os_checksum;
	enum zio_compress compress = os->os_compress;
	enum zio_checksum dedup_checksum = os->os_dedup_checksum;
	boolean_t dedup;
	boolean_t dedup_verify = os->os_dedup_verify;
	int copies = os->os_copies;

	/*
	 * Determine checksum setting.
	 */
	if (ismd) {
		/*
		 * Metadata always gets checksummed.  If the data
		 * checksum is multi-bit correctable, and it's not a
		 * ZBT-style checksum, then it's suitable for metadata
		 * as well.  Otherwise, the metadata checksum defaults
		 * to fletcher4.
		 */
		if (zio_checksum_table[checksum].ci_correctable < 1 ||
		    zio_checksum_table[checksum].ci_eck)
			checksum = ZIO_CHECKSUM_FLETCHER_4;
	} else {
		checksum = zio_checksum_select(dn->dn_checksum, checksum);
	}

	/*
	 * Determine compression setting.
	 */
	if (ismd) {
		/*
		 * XXX -- we should design a compression algorithm
		 * that specializes in arrays of bps.
		 */
		compress = zfs_mdcomp_disable ? ZIO_COMPRESS_EMPTY :
		    ZIO_COMPRESS_LZJB;
	} else {
		compress = zio_compress_select(dn->dn_compress, compress);
	}

	/*
	 * Determine dedup setting.  If we are in dmu_sync(), we won't
	 * actually dedup now because that's all done in syncing context;
	 * but we do want to use the dedup checksum.  If the checksum is
	 * not strong enough to ensure unique signatures, force dedup_verify.
	 */
	dedup = (!ismd && dedup_checksum != ZIO_CHECKSUM_OFF);
	if (dedup) {
		checksum = dedup_checksum;
		if (!zio_checksum_table[checksum].ci_dedup)
			dedup_verify = 1;
	}

	if (wp & WP_DMU_SYNC)
		dedup = 0;

	if (wp & WP_NOFILL) {
		ASSERT(!ismd && level == 0);
		checksum = ZIO_CHECKSUM_OFF;
		compress = ZIO_COMPRESS_OFF;
		dedup = B_FALSE;
	}

	zp->zp_checksum = checksum;
	zp->zp_compress = compress;
	zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
	zp->zp_level = level;
	zp->zp_copies = MIN(copies + ismd, spa_max_replication(os->os_spa));
	zp->zp_dedup = dedup;
	zp->zp_dedup_verify = dedup && dedup_verify;
}

int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
	dnode_t *dn;
	int i, err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	/*
	 * Sync any current changes before
	 * we go trundling through the block pointers.
	 */
	for (i = 0; i < TXG_SIZE; i++) {
		if (list_link_active(&dn->dn_dirty_link[i]))
			break;
	}
	if (i != TXG_SIZE) {
		dnode_rele(dn, FTAG);
		txg_wait_synced(dmu_objset_pool(os), 0);
		err = dnode_hold(os, object, FTAG, &dn);
		if (err)
			return (err);
	}

	err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
	dnode_rele(dn, FTAG);

	return (err);
}

void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
	dnode_phys_t *dnp;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	mutex_enter(&dn->dn_mtx);

	dnp = dn->dn_phys;

	doi->doi_data_block_size = dn->dn_datablksz;
	doi->doi_metadata_block_size = dn->dn_indblkshift ?
	    1ULL << dn->dn_indblkshift : 0;
	doi->doi_type = dn->dn_type;
	doi->doi_bonus_type = dn->dn_bonustype;
	doi->doi_bonus_size = dn->dn_bonuslen;
	doi->doi_indirection = dn->dn_nlevels;
	doi->doi_checksum = dn->dn_checksum;
	doi->doi_compress = dn->dn_compress;
	doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
	doi->doi_max_offset = (dnp->dn_maxblkid + 1) * dn->dn_datablksz;
	doi->doi_fill_count = 0;
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		doi->doi_fill_count += dnp->dn_blkptr[i].blk_fill;

	mutex_exit(&dn->dn_mtx);
	rw_exit(&dn->dn_struct_rwlock);
}

/*
 * Get information on a DMU object.
 * If doi is NULL, just indicates whether the object exists.
 */
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);

	if (err)
		return (err);

	if (doi != NULL)
		dmu_object_info_from_dnode(dn, doi);

	dnode_rele(dn, FTAG);
	return (0);
}
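/*
 * Illustrative sketch (hypothetical SEEK_HOLE-style caller) for
 * dmu_offset_next() above:
 *
 *	uint64_t off = start;
 *	error = dmu_offset_next(os, object, B_TRUE, &off);
 *	if (error == 0)
 *		... off now points at the next hole at or after start ...
 *	else if (error == ESRCH)
 *		... no hole before end of file ...
 */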
/*
 * As above, but faster; can be used when you have a held dbuf in hand.
 */
void
dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi)
{
	dmu_object_info_from_dnode(((dmu_buf_impl_t *)db)->db_dnode, doi);
}

/*
 * Faster still when you only care about the size.
 * This is specifically optimized for zfs_getattr().
 */
void
dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize, u_longlong_t *nblk512)
{
	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;

	*blksize = dn->dn_datablksz;
	/* add 1 for dnode space */
	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
	    SPA_MINBLOCKSHIFT) + 1;
}

void
byteswap_uint64_array(void *vbuf, size_t size)
{
	uint64_t *buf = vbuf;
	size_t count = size >> 3;
	int i;

	ASSERT((size & 7) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_64(buf[i]);
}

void
byteswap_uint32_array(void *vbuf, size_t size)
{
	uint32_t *buf = vbuf;
	size_t count = size >> 2;
	int i;

	ASSERT((size & 3) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_32(buf[i]);
}

void
byteswap_uint16_array(void *vbuf, size_t size)
{
	uint16_t *buf = vbuf;
	size_t count = size >> 1;
	int i;

	ASSERT((size & 1) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_16(buf[i]);
}

/* ARGSUSED */
void
byteswap_uint8_array(void *vbuf, size_t size)
{
}

void
dmu_init(void)
{
	zfs_dbgmsg_init();
	dbuf_init();
	dnode_init();
	zfetch_init();
	arc_init();
	l2arc_init();
	xuio_stat_init();
	sa_cache_init();
}

void
dmu_fini(void)
{
	arc_fini();
	zfetch_fini();
	dnode_fini();
	dbuf_fini();
	l2arc_fini();
	xuio_stat_fini();
	sa_cache_fini();
	zfs_dbgmsg_fini();
}