/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#endif

const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{	byteswap_uint8_array,	TRUE,	"unallocated"		},
	{	zap_byteswap,		TRUE,	"object directory"	},
	{	byteswap_uint64_array,	TRUE,	"object array"		},
	{	byteswap_uint8_array,	TRUE,	"packed nvlist"		},
	{	byteswap_uint64_array,	TRUE,	"packed nvlist size"	},
	{	byteswap_uint64_array,	TRUE,	"bplist"		},
	{	byteswap_uint64_array,	TRUE,	"bplist header"		},
	{	byteswap_uint64_array,	TRUE,	"SPA space map header"	},
	{	byteswap_uint64_array,	TRUE,	"SPA space map"		},
	{	byteswap_uint64_array,	TRUE,	"ZIL intent log"	},
	{	dnode_buf_byteswap,	TRUE,	"DMU dnode"		},
	{	dmu_objset_byteswap,	TRUE,	"DMU objset"		},
	{	byteswap_uint64_array,	TRUE,	"DSL directory"		},
	{	zap_byteswap,		TRUE,	"DSL directory child map"},
	{	zap_byteswap,		TRUE,	"DSL dataset snap map"	},
	{	zap_byteswap,		TRUE,	"DSL props"		},
	{	byteswap_uint64_array,	TRUE,	"DSL dataset"		},
	{	zfs_znode_byteswap,	TRUE,	"ZFS znode"		},
	{	zfs_acl_byteswap,	TRUE,	"ZFS ACL"		},
	{	byteswap_uint8_array,	FALSE,	"ZFS plain file"	},
	{	zap_byteswap,		TRUE,	"ZFS directory"		},
	{	zap_byteswap,		TRUE,	"ZFS master node"	},
	{	zap_byteswap,		TRUE,	"ZFS delete queue"	},
	{	byteswap_uint8_array,	FALSE,	"zvol object"		},
	{	zap_byteswap,		TRUE,	"zvol prop"		},
	{	byteswap_uint8_array,	FALSE,	"other uint8[]"		},
	{	byteswap_uint64_array,	FALSE,	"other uint64[]"	},
	{	zap_byteswap,		TRUE,	"other ZAP"		},
	{	zap_byteswap,		TRUE,	"persistent error log"	},
	{	byteswap_uint8_array,	TRUE,	"SPA history"		},
	{	byteswap_uint64_array,	TRUE,	"SPA history offsets"	},
};

int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;

	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);
	blkid = dbuf_whichblock(dn, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL) {
		err = EIO;
	} else {
		err = dbuf_read(db, NULL, DB_RF_CANFAIL);
		if (err) {
			dbuf_rele(db, tag);
			db = NULL;
		}
	}

	dnode_rele(dn, FTAG);
	*dbp = &db->db;	/* db is NULL on error; &db->db is then NULL too */
	return (err);
}

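/*
 * Example (a minimal sketch): a consumer pairs every successful
 * dmu_buf_hold() with a dmu_buf_rele() on the same tag; "my_tag" is
 * any unique hold tag (hypothetical here).
 *
 *	dmu_buf_t *db;
 *	int err = dmu_buf_hold(os, object, offset, my_tag, &db);
 *	if (err == 0) {
 *		... use db->db_data, bounded by db->db_size ...
 *		dmu_buf_rele(db, my_tag);
 *	}
 */
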
int
dmu_bonus_max(void)
{
	return (DN_MAX_BONUSLEN);
}

/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	int err, count;
	dmu_buf_impl_t *db;

	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		rw_exit(&dn->dn_struct_rwlock);
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dn->dn_bonus = dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;
	rw_exit(&dn->dn_struct_rwlock);
	mutex_enter(&db->db_mtx);
	count = refcount_add(&db->db_holds, tag);
	mutex_exit(&db->db_mtx);
	if (count == 1)
		dnode_add_ref(dn, db);
	dnode_rele(dn, FTAG);

	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED));

	*dbp = &db->db;
	return (0);
}

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dmu_buf_t **dbp;
	uint64_t blkid, nblks, i;
	uint32_t flags;
	int err;
	zio_t *zio;

	ASSERT(length <= DMU_MAX_ACCESS);

	flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT;
	if (length > zfetch_array_rd_sz)
		flags |= DB_RF_NOPREFETCH;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
		    P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
	} else {
		ASSERT3U(offset + length, <=, dn->dn_datablksz);
		nblks = 1;
	}
	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

	zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, TRUE);
	blkid = dbuf_whichblock(dn, offset);
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = dbuf_hold(dn, blkid+i, tag);
		if (db == NULL) {
			rw_exit(&dn->dn_struct_rwlock);
			dmu_buf_rele_array(dbp, nblks, tag);
			zio_nowait(zio);
			return (EIO);
		}
		/* initiate async i/o */
		if (read) {
			rw_exit(&dn->dn_struct_rwlock);
			(void) dbuf_read(db, zio, flags);
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
		}
		dbp[i] = &db->db;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* wait for async i/o */
	err = zio_wait(zio);
	if (err) {
		dmu_buf_rele_array(dbp, nblks, tag);
		return (err);
	}

	/* wait for other io to complete */
	if (read) {
		for (i = 0; i < nblks; i++) {
			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
			mutex_enter(&db->db_mtx);
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL)
				cv_wait(&db->db_changed, &db->db_mtx);
			if (db->db_state == DB_UNCACHED)
				err = EIO;
			mutex_exit(&db->db_mtx);
			if (err) {
				dmu_buf_rele_array(dbp, nblks, tag);
				return (err);
			}
		}
	}

	*numbufsp = nblks;
	*dbpp = dbp;
	return (0);
}

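/*
 * Example of the array interface (a minimal sketch): hold a whole range,
 * let the reads proceed in parallel, then drop every hold in one call.
 * The hold and dmu_buf_rele_array() must use the same tag; "my_tag" and
 * "bonus_db" are hypothetical.
 *
 *	dmu_buf_t **dbp;
 *	int numbufs, i;
 *	err = dmu_buf_hold_array_by_bonus(bonus_db, offset, length,
 *	    TRUE, my_tag, &numbufs, &dbp);
 *	if (err == 0) {
 *		for (i = 0; i < numbufs; i++)
 *			... consume dbp[i]->db_data ...
 *		dmu_buf_rele_array(dbp, numbufs, my_tag);
 *	}
 */
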
static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
	int err;

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp);

	return (err);
}

void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
	int i;
	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

	if (numbufs == 0)
		return;

	for (i = 0; i < numbufs; i++) {
		if (dbp[i])
			dbuf_rele(dbp[i], tag);
	}

	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}

void
dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len)
{
	dnode_t *dn;
	uint64_t blkid;
	int nblks, i, err;

	if (zfs_prefetch_disable)
		return;

	if (len == 0) {		/* they're interested in the bonus buffer */
		dn = os->os->os_meta_dnode;

		if (object == 0 || object >= DN_MAX_OBJECT)
			return;

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		blkid = dbuf_whichblock(dn, object * sizeof (dnode_phys_t));
		dbuf_prefetch(dn, blkid);
		rw_exit(&dn->dn_struct_rwlock);
		return;
	}

	/*
	 * XXX - Note, if the dnode for the requested object is not
	 * already cached, we will do a *synchronous* read in the
	 * dnode_hold() call.  The same is true for any indirects.
	 */
	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err != 0)
		return;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset+len, 1<<blkshift) -
		    P2ALIGN(offset, 1<<blkshift)) >> blkshift;
	} else {
		nblks = (offset < dn->dn_datablksz);
	}

	if (nblks != 0) {
		blkid = dbuf_whichblock(dn, offset);
		for (i = 0; i < nblks; i++)
			dbuf_prefetch(dn, blkid+i);
	}

	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
}

int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);
	ASSERT(offset < UINT64_MAX);
	ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
	dnode_free_range(dn, offset, size, tx);
	dnode_rele(dn, FTAG);
	return (0);
}

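/*
 * Example (a minimal sketch): a streaming reader can prime the cache so
 * that a later dmu_read() of the same region mostly finds its blocks
 * cached.  Prefetch is purely advisory and is a no-op when
 * zfs_prefetch_disable is set.
 *
 *	dmu_prefetch(os, object, offset, length);
 *	...
 *	err = dmu_read(os, object, offset, length, buf);
 */
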
int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf)
{
	dnode_t *dn;
	dmu_buf_t **dbp;
	int numbufs, i, err;

	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);

	/*
	 * Deal with odd block sizes, where there can't be data past the first
	 * block.  If we ever do the tail block optimization, we will need to
	 * handle that here as well.
	 */
	if (dn->dn_datablkshift == 0) {
		int newsz = offset > dn->dn_datablksz ? 0 :
		    MIN(size, dn->dn_datablksz - offset);
		bzero((char *)buf + newsz, size - newsz);
		size = newsz;
	}

	while (size > 0) {
		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);

		/*
		 * NB: we could do this block-at-a-time, but it's nice
		 * to be reading in parallel.
		 */
		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
		    TRUE, FTAG, &numbufs, &dbp);
		if (err)
			break;	/* don't leak the dnode hold on error */

		for (i = 0; i < numbufs; i++) {
			int tocpy;
			int bufoff;
			dmu_buf_t *db = dbp[i];

			ASSERT(size > 0);

			bufoff = offset - db->db_offset;
			tocpy = (int)MIN(db->db_size - bufoff, size);

			bcopy((char *)db->db_data + bufoff, buf, tocpy);

			offset += tocpy;
			size -= tocpy;
			buf = (char *)buf + tocpy;
		}
		dmu_buf_rele_array(dbp, numbufs, FTAG);
	}
	dnode_rele(dn, FTAG);
	return (err);
}

void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;

	if (size == 0)
		return;

	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		bcopy(buf, (char *)db->db_data + bufoff, tocpy);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
		buf = (char *)buf + tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

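/*
 * Example (a minimal sketch of the usual dmu_tx protocol; see dmu_tx.c):
 * dmu_write() must be covered by an assigned transaction that holds the
 * written range.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, offset, size, buf, tx);
 *	dmu_tx_commit(tx);
 */
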
#ifdef _KERNEL
int
dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
{
	dmu_buf_t **dbp;
	int numbufs, i, err;

	/*
	 * NB: we could do this block-at-a-time, but it's nice
	 * to be reading in parallel.
	 */
	err = dmu_buf_hold_array(os, object, uio->uio_loffset, size, TRUE, FTAG,
	    &numbufs, &dbp);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = uio->uio_loffset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		err = uiomove((char *)db->db_data + bufoff, tocpy,
		    UIO_READ, uio);
		if (err)
			break;

		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);

	return (err);
}

int
dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;
	int err = 0;

	if (size == 0)
		return (0);

	err = dmu_buf_hold_array(os, object, uio->uio_loffset, size,
	    FALSE, FTAG, &numbufs, &dbp);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = uio->uio_loffset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		/*
		 * XXX uiomove could block forever (e.g. nfs-backed
		 * pages).  There needs to be a uiolockdown() function
		 * to lock the pages in memory, so that uiomove won't
		 * block.
		 */
		err = uiomove((char *)db->db_data + bufoff, tocpy,
		    UIO_WRITE, uio);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		if (err)
			break;

		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}

int
dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    page_t *pp, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;
	int err;

	if (size == 0)
		return (0);

	err = dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		int tocpy, copied, thiscpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];
		caddr_t va;

		ASSERT(size > 0);
		ASSERT3U(db->db_size, >=, PAGESIZE);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		for (copied = 0; copied < tocpy; copied += PAGESIZE) {
			ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
			thiscpy = MIN(PAGESIZE, tocpy - copied);
			va = ppmapin(pp, PROT_READ, (caddr_t)-1);
			bcopy(va, (char *)db->db_data + bufoff, thiscpy);
			ppmapout(va);
			pp = pp->p_next;
			bufoff += PAGESIZE;
		}

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		if (err)
			break;

		offset += tocpy;
		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}
#endif

typedef struct {
	dbuf_dirty_record_t	*dr;
	dmu_sync_cb_t		*done;
	void			*arg;
} dmu_sync_arg_t;

/* ARGSUSED */
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
	dmu_sync_arg_t *in = varg;
	dbuf_dirty_record_t *dr = in->dr;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dmu_sync_cb_t *done = in->done;

	if (!BP_IS_HOLE(zio->io_bp)) {
		zio->io_bp->blk_fill = 1;
		BP_SET_TYPE(zio->io_bp, db->db_dnode->dn_type);
		BP_SET_LEVEL(zio->io_bp, 0);
	}

	mutex_enter(&db->db_mtx);
	ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
	dr->dt.dl.dr_overridden_by = *zio->io_bp; /* structure assignment */
	dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
	cv_broadcast(&db->db_changed);
	mutex_exit(&db->db_mtx);

	if (done)
		done(&(db->db), in->arg);

	kmem_free(in, sizeof (dmu_sync_arg_t));
}

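/*
 * Note on the override state machine above: dmu_sync() moves the dirty
 * record to DR_IN_DMU_SYNC before issuing the write; dmu_sync_done()
 * records the resulting blkptr in dr_overridden_by, advances the state
 * to DR_OVERRIDDEN, and broadcasts db_changed so that waiters (and any
 * later dmu_sync() call for the same txg) observe the transition.
 */
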
/*
 * Intent log support: sync the block associated with db to disk.
 * N.B. and XXX: the caller is responsible for making sure that the
 * data isn't changing while dmu_sync() is writing it.
 *
 * Return values:
 *
 *	EEXIST: this txg has already been synced, so there's nothing to do.
 *		The caller should not log the write.
 *
 *	ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
 *		The caller should not log the write.
 *
 *	EALREADY: this block is already in the process of being synced.
 *		The caller should track its progress (somehow).
 *
 *	EINPROGRESS: the IO has been initiated.
 *		The caller should log this blkptr in the callback.
 *
 *	0: completed.  Sets *bp to the blkptr just written.
 *		The caller should log this blkptr immediately.
 */
int
dmu_sync(zio_t *pio, dmu_buf_t *db_fake,
    blkptr_t *bp, uint64_t txg, dmu_sync_cb_t *done, void *arg)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	objset_impl_t *os = db->db_objset;
	dsl_pool_t *dp = os->os_dsl_dataset->ds_dir->dd_pool;
	tx_state_t *tx = &dp->dp_tx;
	dbuf_dirty_record_t *dr;
	dmu_sync_arg_t *in;
	zbookmark_t zb;
	zio_t *zio;
	int err;

	ASSERT(BP_IS_HOLE(bp));
	ASSERT(txg != 0);

	dprintf("dmu_sync txg=%llu, s,o,q %llu %llu %llu\n",
	    txg, tx->tx_synced_txg, tx->tx_open_txg, tx->tx_quiesced_txg);

	/*
	 * XXX - would be nice if we could do this without suspending...
	 */
	txg_suspend(dp);

	/*
	 * If this txg already synced, there's nothing to do.
	 */
	if (txg <= tx->tx_synced_txg) {
		txg_resume(dp);
		/*
		 * If we're running ziltest, we need the blkptr regardless.
		 */
		if (txg > spa_freeze_txg(dp->dp_spa)) {
			/* if db_blkptr == NULL, this was an empty write */
			if (db->db_blkptr)
				*bp = *db->db_blkptr; /* structure assignment */
			return (0);
		}
		return (EEXIST);
	}

	mutex_enter(&db->db_mtx);

	if (txg == tx->tx_syncing_txg) {
		while (db->db_data_pending) {
			/*
			 * IO is in-progress.  Wait for it to finish.
			 * XXX - would be nice to be able to somehow "attach"
			 * this zio to the parent zio passed in.
			 */
			cv_wait(&db->db_changed, &db->db_mtx);
			if (!db->db_data_pending &&
			    db->db_blkptr && BP_IS_HOLE(db->db_blkptr)) {
				/*
				 * IO was compressed away
				 */
				*bp = *db->db_blkptr; /* structure assignment */
				mutex_exit(&db->db_mtx);
				txg_resume(dp);
				return (0);
			}
			ASSERT(db->db_data_pending ||
			    (db->db_blkptr && db->db_blkptr->blk_birth == txg));
		}

		if (db->db_blkptr && db->db_blkptr->blk_birth == txg) {
			/*
			 * IO is already completed.
			 */
			*bp = *db->db_blkptr; /* structure assignment */
			mutex_exit(&db->db_mtx);
			txg_resume(dp);
			return (0);
		}
	}

	dr = db->db_last_dirty;
	while (dr && dr->dr_txg > txg)
		dr = dr->dr_next;
	if (dr == NULL || dr->dr_txg < txg) {
		/*
		 * This dbuf isn't dirty, must have been free_range'd.
		 * There's no need to log writes to freed blocks, so we're done.
		 */
		mutex_exit(&db->db_mtx);
		txg_resume(dp);
		return (ENOENT);
	}

	ASSERT(dr->dr_txg == txg);
	if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		/*
		 * We have already issued a sync write for this buffer.
		 */
		mutex_exit(&db->db_mtx);
		txg_resume(dp);
		return (EALREADY);
	} else if (dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * This buffer has already been synced.  It could not
		 * have been dirtied since, or we would have cleared the state.
		 */
		*bp = dr->dt.dl.dr_overridden_by; /* structure assignment */
		mutex_exit(&db->db_mtx);
		txg_resume(dp);
		return (0);
	}

	dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
	in = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	in->dr = dr;
	in->done = done;
	in->arg = arg;
	mutex_exit(&db->db_mtx);
	txg_resume(dp);

	zb.zb_objset = os->os_dsl_dataset->ds_object;
	zb.zb_object = db->db.db_object;
	zb.zb_level = db->db_level;
	zb.zb_blkid = db->db_blkid;
	zio = arc_write(pio, os->os_spa,
	    zio_checksum_select(db->db_dnode->dn_checksum, os->os_checksum),
	    zio_compress_select(db->db_dnode->dn_compress, os->os_compress),
	    dmu_get_replication_level(os->os_spa, &zb, db->db_dnode->dn_type),
	    txg, bp, dr->dt.dl.dr_data, NULL, dmu_sync_done, in,
	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);

	if (pio) {
		zio_nowait(zio);
		err = EINPROGRESS;
	} else {
		err = zio_wait(zio);
		ASSERT(err == 0);
	}
	return (err);
}

int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);
	err = dnode_set_blksz(dn, size, ibs, tx);
	dnode_rele(dn, FTAG);
	return (err);
}

void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/* XXX assumes dnode_hold will not get an i/o error */
	(void) dnode_hold(os->os, object, FTAG, &dn);
	ASSERT(checksum < ZIO_CHECKSUM_FUNCTIONS);
	dn->dn_checksum = checksum;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}

void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/* XXX assumes dnode_hold will not get an i/o error */
	(void) dnode_hold(os->os, object, FTAG, &dn);
	ASSERT(compress < ZIO_COMPRESS_FUNCTIONS);
	dn->dn_compress = compress;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}

/*
 * XXX - eventually, this should take into account per-dataset (or
 * even per-object?) user requests for higher levels of replication.
 */
int
dmu_get_replication_level(spa_t *spa, zbookmark_t *zb, dmu_object_type_t ot)
{
	int ncopies = 1;

	if (dmu_ot[ot].ot_metadata)
		ncopies++;
	if (zb->zb_level != 0)
		ncopies++;
	if (zb->zb_objset == 0 && zb->zb_object == 0)
		ncopies++;
	return (MIN(ncopies, spa_max_replication(spa)));
}

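/*
 * Worked example of the counting above: an indirect block (zb_level != 0)
 * of a metadata object type starts at ncopies = 1, then gains +1 for
 * metadata and +1 for being an indirect, for 3 copies total -- unless
 * spa_max_replication() caps it lower.
 */
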
int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
	dnode_t *dn;
	int i, err;

	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);
	/*
	 * Sync any current changes before
	 * we go trundling through the block pointers.
	 */
	for (i = 0; i < TXG_SIZE; i++) {
		if (list_link_active(&dn->dn_dirty_link[i]))
			break;
	}
	if (i != TXG_SIZE) {
		dnode_rele(dn, FTAG);
		txg_wait_synced(dmu_objset_pool(os), 0);
		err = dnode_hold(os->os, object, FTAG, &dn);
		if (err)
			return (err);
	}

	err = dnode_next_offset(dn, hole, off, 1, 1, 0);
	dnode_rele(dn, FTAG);

	return (err);
}

void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	mutex_enter(&dn->dn_mtx);

	doi->doi_data_block_size = dn->dn_datablksz;
	doi->doi_metadata_block_size = dn->dn_indblkshift ?
	    1ULL << dn->dn_indblkshift : 0;
	doi->doi_indirection = dn->dn_nlevels;
	doi->doi_checksum = dn->dn_checksum;
	doi->doi_compress = dn->dn_compress;
	doi->doi_physical_blks = (DN_USED_BYTES(dn->dn_phys) +
	    SPA_MINBLOCKSIZE/2) >> SPA_MINBLOCKSHIFT;
	doi->doi_max_block_offset = dn->dn_phys->dn_maxblkid;
	doi->doi_type = dn->dn_type;
	doi->doi_bonus_size = dn->dn_bonuslen;
	doi->doi_bonus_type = dn->dn_bonustype;

	mutex_exit(&dn->dn_mtx);
	rw_exit(&dn->dn_struct_rwlock);
}

/*
 * Get information on a DMU object.
 * If doi is NULL, just indicates whether the object exists.
 */
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
	dnode_t *dn;
	int err = dnode_hold(os->os, object, FTAG, &dn);

	if (err)
		return (err);

	if (doi != NULL)
		dmu_object_info_from_dnode(dn, doi);

	dnode_rele(dn, FTAG);
	return (0);
}

/*
 * As above, but faster; can be used when you have a held dbuf in hand.
 */
void
dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi)
{
	dmu_object_info_from_dnode(((dmu_buf_impl_t *)db)->db_dnode, doi);
}

/*
 * Faster still when you only care about the size.
 * This is specifically optimized for zfs_getattr().
 */
void
dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize, u_longlong_t *nblk512)
{
	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;

	*blksize = dn->dn_datablksz;
	/* add 1 for dnode space */
	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
	    SPA_MINBLOCKSHIFT) + 1;
}

void
byteswap_uint64_array(void *vbuf, size_t size)
{
	uint64_t *buf = vbuf;
	size_t count = size >> 3;
	int i;

	ASSERT((size & 7) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_64(buf[i]);
}

void
byteswap_uint32_array(void *vbuf, size_t size)
{
	uint32_t *buf = vbuf;
	size_t count = size >> 2;
	int i;

	ASSERT((size & 3) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_32(buf[i]);
}

void
byteswap_uint16_array(void *vbuf, size_t size)
{
	uint16_t *buf = vbuf;
	size_t count = size >> 1;
	int i;

	ASSERT((size & 1) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_16(buf[i]);
}

/* ARGSUSED */
void
byteswap_uint8_array(void *vbuf, size_t size)
{
}

void
dmu_init(void)
{
	dbuf_init();
	dnode_init();
	arc_init();
}

void
dmu_fini(void)
{
	arc_fini();
	dnode_fini();
	dbuf_fini();
}