/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#endif

const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
        { byteswap_uint8_array,  TRUE,  "unallocated" },
        { zap_byteswap,          TRUE,  "object directory" },
        { byteswap_uint64_array, TRUE,  "object array" },
        { byteswap_uint8_array,  TRUE,  "packed nvlist" },
        { byteswap_uint64_array, TRUE,  "packed nvlist size" },
        { byteswap_uint64_array, TRUE,  "bplist" },
        { byteswap_uint64_array, TRUE,  "bplist header" },
        { byteswap_uint64_array, TRUE,  "SPA space map header" },
        { byteswap_uint64_array, TRUE,  "SPA space map" },
        { byteswap_uint64_array, TRUE,  "ZIL intent log" },
        { dnode_buf_byteswap,    TRUE,  "DMU dnode" },
        { dmu_objset_byteswap,   TRUE,  "DMU objset" },
        { byteswap_uint64_array, TRUE,  "DSL directory" },
        { zap_byteswap,          TRUE,  "DSL directory child map" },
        { zap_byteswap,          TRUE,  "DSL dataset snap map" },
        { zap_byteswap,          TRUE,  "DSL props" },
        { byteswap_uint64_array, TRUE,  "DSL dataset" },
        { zfs_znode_byteswap,    TRUE,  "ZFS znode" },
        { zfs_acl_byteswap,      TRUE,  "ZFS ACL" },
        { byteswap_uint8_array,  FALSE, "ZFS plain file" },
        { zap_byteswap,          TRUE,  "ZFS directory" },
        { zap_byteswap,          TRUE,  "ZFS master node" },
        { zap_byteswap,          TRUE,  "ZFS delete queue" },
        { byteswap_uint8_array,  FALSE, "zvol object" },
        { zap_byteswap,          TRUE,  "zvol prop" },
        { byteswap_uint8_array,  FALSE, "other uint8[]" },
        { byteswap_uint64_array, FALSE, "other uint64[]" },
        { zap_byteswap,          TRUE,  "other ZAP" },
        { zap_byteswap,          TRUE,  "persistent error log" },
        { byteswap_uint8_array,  TRUE,  "SPA history" },
        { byteswap_uint64_array, TRUE,  "SPA history offsets" },
        { zap_byteswap,          TRUE,  "Pool properties" },
        { zap_byteswap,          TRUE,  "DSL permissions" }
};

int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
        dnode_t *dn;
        uint64_t blkid;
        dmu_buf_impl_t *db;
        int err;

        err = dnode_hold(os->os, object, FTAG, &dn);
        if (err)
                return (err);
        blkid = dbuf_whichblock(dn, offset);
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        db = dbuf_hold(dn, blkid, tag);
        rw_exit(&dn->dn_struct_rwlock);
        if (db == NULL) {
                err = EIO;
        } else {
                err = dbuf_read(db, NULL, DB_RF_CANFAIL);
                if (err) {
                        dbuf_rele(db, tag);
                        db = NULL;
                }
        }

        dnode_rele(dn, FTAG);
        *dbp = &db->db;
        return (err);
}

int
dmu_bonus_max(void)
{
        return (DN_MAX_BONUSLEN);
}

/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
        dnode_t *dn;
        int err, count;
        dmu_buf_impl_t *db;

        err = dnode_hold(os->os, object, FTAG, &dn);
        if (err)
                return (err);

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        if (dn->dn_bonus == NULL) {
                rw_exit(&dn->dn_struct_rwlock);
                rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
                if (dn->dn_bonus == NULL)
                        dn->dn_bonus = dbuf_create_bonus(dn);
        }
        db = dn->dn_bonus;
        rw_exit(&dn->dn_struct_rwlock);
        mutex_enter(&db->db_mtx);
        count = refcount_add(&db->db_holds, tag);
        mutex_exit(&db->db_mtx);
        if (count == 1)
                dnode_add_ref(dn, db);
        dnode_rele(dn, FTAG);

        VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED));

        *dbp = &db->db;
        return (0);
}

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
        dmu_buf_t **dbp;
        uint64_t blkid, nblks, i;
        uint32_t flags;
        int err;
        zio_t *zio;

        ASSERT(length <= DMU_MAX_ACCESS);

        flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT;
        if (length > zfetch_array_rd_sz)
                flags |= DB_RF_NOPREFETCH;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        if (dn->dn_datablkshift) {
                int blkshift = dn->dn_datablkshift;
                nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
                    P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
        } else {
                if (offset + length > dn->dn_datablksz) {
                        zfs_panic_recover("zfs: accessing past end of object "
                            "%llx/%llx (size=%u access=%llu+%llu)",
                            (longlong_t)dn->dn_objset->
                            os_dsl_dataset->ds_object,
                            (longlong_t)dn->dn_object, dn->dn_datablksz,
                            (longlong_t)offset, (longlong_t)length);
                        return (EIO);
                }
                nblks = 1;
        }
        dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

        zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, TRUE);
        blkid = dbuf_whichblock(dn, offset);
        for (i = 0; i < nblks; i++) {
                dmu_buf_impl_t *db = dbuf_hold(dn, blkid+i, tag);
                if (db == NULL) {
                        rw_exit(&dn->dn_struct_rwlock);
                        dmu_buf_rele_array(dbp, nblks, tag);
                        zio_nowait(zio);
                        return (EIO);
                }
                /* initiate async i/o */
                if (read) {
                        rw_exit(&dn->dn_struct_rwlock);
                        (void) dbuf_read(db, zio, flags);
                        rw_enter(&dn->dn_struct_rwlock, RW_READER);
                }
                dbp[i] = &db->db;
        }
        rw_exit(&dn->dn_struct_rwlock);

        /* wait for async i/o */
        err = zio_wait(zio);
        if (err) {
                dmu_buf_rele_array(dbp, nblks, tag);
                return (err);
        }

        /* wait for other io to complete */
        if (read) {
                for (i = 0; i < nblks; i++) {
                        dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
                        mutex_enter(&db->db_mtx);
                        while (db->db_state == DB_READ ||
                            db->db_state == DB_FILL)
                                cv_wait(&db->db_changed, &db->db_mtx);
                        if (db->db_state == DB_UNCACHED)
                                err = EIO;
                        mutex_exit(&db->db_mtx);
                        if (err) {
                                dmu_buf_rele_array(dbp, nblks, tag);
                                return (err);
                        }
                }
        }

        *numbufsp = nblks;
        *dbpp = dbp;
        return (0);
}

static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
        dnode_t *dn;
        int err;

        err = dnode_hold(os->os, object, FTAG, &dn);
        if (err)
                return (err);

        err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
            numbufsp, dbpp);

        dnode_rele(dn, FTAG);

        return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
        dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
        int err;

        err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
            numbufsp, dbpp);

        return (err);
}

void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
        int i;
        dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

        if (numbufs == 0)
                return;

        for (i = 0; i < numbufs; i++) {
                if (dbp[i])
                        dbuf_rele(dbp[i], tag);
        }

        kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}

void
dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len)
{
        dnode_t *dn;
        uint64_t blkid;
        int nblks, i, err;

        if (zfs_prefetch_disable)
                return;

        if (len == 0) { /* they're interested in the bonus buffer */
                dn = os->os->os_meta_dnode;

                if (object == 0 || object >= DN_MAX_OBJECT)
                        return;

                rw_enter(&dn->dn_struct_rwlock, RW_READER);
                blkid = dbuf_whichblock(dn, object * sizeof (dnode_phys_t));
                dbuf_prefetch(dn, blkid);
                rw_exit(&dn->dn_struct_rwlock);
                return;
        }

        /*
         * XXX - Note, if the dnode for the requested object is not
         * already cached, we will do a *synchronous* read in the
         * dnode_hold() call.  The same is true for any indirects.
         */
        err = dnode_hold(os->os, object, FTAG, &dn);
        if (err != 0)
                return;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        if (dn->dn_datablkshift) {
                int blkshift = dn->dn_datablkshift;
                nblks = (P2ROUNDUP(offset+len, 1<<blkshift) -
                    P2ALIGN(offset, 1<<blkshift)) >> blkshift;
        } else {
                nblks = (offset < dn->dn_datablksz);
        }

        if (nblks != 0) {
                blkid = dbuf_whichblock(dn, offset);
                for (i = 0; i < nblks; i++)
                        dbuf_prefetch(dn, blkid+i);
        }

        rw_exit(&dn->dn_struct_rwlock);

        dnode_rele(dn, FTAG);
}

int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
        dnode_t *dn;
        int err = dnode_hold(os->os, object, FTAG, &dn);
        if (err)
                return (err);
        ASSERT(offset < UINT64_MAX);
        ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
        dnode_free_range(dn, offset, size, tx);
        dnode_rele(dn, FTAG);
        return (0);
}

int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf)
{
        dnode_t *dn;
        dmu_buf_t **dbp;
        int numbufs, i, err;

        err = dnode_hold(os->os, object, FTAG, &dn);
        if (err)
                return (err);

        /*
         * Deal with odd block sizes, where there can't be data past the first
         * block.  If we ever do the tail block optimization, we will need to
         * handle that here as well.
         */
        if (dn->dn_datablkshift == 0) {
                int newsz = offset > dn->dn_datablksz ? 0 :
                    MIN(size, dn->dn_datablksz - offset);
                bzero((char *)buf + newsz, size - newsz);
                size = newsz;
        }

        while (size > 0) {
                uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
                int err;

                /*
                 * NB: we could do this block-at-a-time, but it's nice
                 * to be reading in parallel.
                 */
                err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
                    TRUE, FTAG, &numbufs, &dbp);
                if (err)
                        return (err);

                for (i = 0; i < numbufs; i++) {
                        int tocpy;
                        int bufoff;
                        dmu_buf_t *db = dbp[i];

                        ASSERT(size > 0);

                        bufoff = offset - db->db_offset;
                        tocpy = (int)MIN(db->db_size - bufoff, size);

                        bcopy((char *)db->db_data + bufoff, buf, tocpy);

                        offset += tocpy;
                        size -= tocpy;
                        buf = (char *)buf + tocpy;
                }
                dmu_buf_rele_array(dbp, numbufs, FTAG);
        }
        dnode_rele(dn, FTAG);
        return (0);
}

void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
        dmu_buf_t **dbp;
        int numbufs, i;

        if (size == 0)
                return;

        VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
            FALSE, FTAG, &numbufs, &dbp));

        for (i = 0; i < numbufs; i++) {
                int tocpy;
                int bufoff;
                dmu_buf_t *db = dbp[i];

                ASSERT(size > 0);

                bufoff = offset - db->db_offset;
                tocpy = (int)MIN(db->db_size - bufoff, size);

                ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

                if (tocpy == db->db_size)
                        dmu_buf_will_fill(db, tx);
                else
                        dmu_buf_will_dirty(db, tx);

                bcopy(buf, (char *)db->db_data + bufoff, tocpy);

                if (tocpy == db->db_size)
                        dmu_buf_fill_done(db, tx);

                offset += tocpy;
                size -= tocpy;
                buf = (char *)buf + tocpy;
        }
        dmu_buf_rele_array(dbp, numbufs, FTAG);
}

#ifdef _KERNEL
int
dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
{
        dmu_buf_t **dbp;
        int numbufs, i, err;

        /*
         * NB: we could do this block-at-a-time, but it's nice
         * to be reading in parallel.
         */
        err = dmu_buf_hold_array(os, object, uio->uio_loffset, size, TRUE, FTAG,
            &numbufs, &dbp);
        if (err)
                return (err);

        for (i = 0; i < numbufs; i++) {
                int tocpy;
                int bufoff;
                dmu_buf_t *db = dbp[i];

                ASSERT(size > 0);

                bufoff = uio->uio_loffset - db->db_offset;
                tocpy = (int)MIN(db->db_size - bufoff, size);

                err = uiomove((char *)db->db_data + bufoff, tocpy,
                    UIO_READ, uio);
                if (err)
                        break;

                size -= tocpy;
        }
        dmu_buf_rele_array(dbp, numbufs, FTAG);

        return (err);
}

int
dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
        dmu_buf_t **dbp;
        int numbufs, i;
        int err = 0;

        if (size == 0)
                return (0);

        err = dmu_buf_hold_array(os, object, uio->uio_loffset, size,
            FALSE, FTAG, &numbufs, &dbp);
        if (err)
                return (err);

        for (i = 0; i < numbufs; i++) {
                int tocpy;
                int bufoff;
                dmu_buf_t *db = dbp[i];

                ASSERT(size > 0);

                bufoff = uio->uio_loffset - db->db_offset;
                tocpy = (int)MIN(db->db_size - bufoff, size);

                ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

                if (tocpy == db->db_size)
                        dmu_buf_will_fill(db, tx);
                else
                        dmu_buf_will_dirty(db, tx);

                /*
                 * XXX uiomove could block forever (eg. nfs-backed
                 * pages).  There needs to be a uiolockdown() function
                 * to lock the pages in memory, so that uiomove won't
                 * block.
                 */
                err = uiomove((char *)db->db_data + bufoff, tocpy,
                    UIO_WRITE, uio);

                if (tocpy == db->db_size)
                        dmu_buf_fill_done(db, tx);

                if (err)
                        break;

                size -= tocpy;
        }
        dmu_buf_rele_array(dbp, numbufs, FTAG);
        return (err);
}

int
dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    page_t *pp, dmu_tx_t *tx)
{
        dmu_buf_t **dbp;
        int numbufs, i;
        int err;

        if (size == 0)
                return (0);

        err = dmu_buf_hold_array(os, object, offset, size,
            FALSE, FTAG, &numbufs, &dbp);
        if (err)
                return (err);

        for (i = 0; i < numbufs; i++) {
                int tocpy, copied, thiscpy;
                int bufoff;
                dmu_buf_t *db = dbp[i];
                caddr_t va;

                ASSERT(size > 0);
                ASSERT3U(db->db_size, >=, PAGESIZE);

                bufoff = offset - db->db_offset;
                tocpy = (int)MIN(db->db_size - bufoff, size);

                ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

                if (tocpy == db->db_size)
                        dmu_buf_will_fill(db, tx);
                else
                        dmu_buf_will_dirty(db, tx);

                for (copied = 0; copied < tocpy; copied += PAGESIZE) {
                        ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
                        thiscpy = MIN(PAGESIZE, tocpy - copied);
                        va = ppmapin(pp, PROT_READ, (caddr_t)-1);
                        bcopy(va, (char *)db->db_data + bufoff, thiscpy);
                        ppmapout(va);
                        pp = pp->p_next;
                        bufoff += PAGESIZE;
                }

                if (tocpy == db->db_size)
                        dmu_buf_fill_done(db, tx);

                if (err)
                        break;

                offset += tocpy;
                size -= tocpy;
        }
        dmu_buf_rele_array(dbp, numbufs, FTAG);
        return (err);
}
#endif

typedef struct {
        dbuf_dirty_record_t     *dr;
        dmu_sync_cb_t           *done;
        void                    *arg;
} dmu_sync_arg_t;

/* ARGSUSED */
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
        dmu_sync_arg_t *in = varg;
        dbuf_dirty_record_t *dr = in->dr;
        dmu_buf_impl_t *db = dr->dr_dbuf;
        dmu_sync_cb_t *done = in->done;

        if (!BP_IS_HOLE(zio->io_bp)) {
                zio->io_bp->blk_fill = 1;
                BP_SET_TYPE(zio->io_bp, db->db_dnode->dn_type);
                BP_SET_LEVEL(zio->io_bp, 0);
        }

        mutex_enter(&db->db_mtx);
        ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
        dr->dt.dl.dr_overridden_by = *zio->io_bp; /* structure assignment */
        dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
        cv_broadcast(&db->db_changed);
        mutex_exit(&db->db_mtx);

        if (done)
                done(&(db->db), in->arg);

        kmem_free(in, sizeof (dmu_sync_arg_t));
}

/*
 * Intent log support: sync the block associated with db to disk.
 * N.B. and XXX: the caller is responsible for making sure that the
 * data isn't changing while dmu_sync() is writing it.
 *
 * Return values:
 *
 *      EEXIST: this txg has already been synced, so there's nothing to do.
 *              The caller should not log the write.
 *
 *      ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
 *              The caller should not log the write.
 *
 *      EALREADY: this block is already in the process of being synced.
 *              The caller should track its progress (somehow).
 *
 *      EINPROGRESS: the IO has been initiated.
 *              The caller should log this blkptr in the callback.
 *
 *      0: completed.  Sets *bp to the blkptr just written.
 *              The caller should log this blkptr immediately.
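 *
 * Illustrative sketch only (an editorial addition, not an interface of
 * this file): one way a caller on the intent-log path might dispatch on
 * these return values.  The names my_log_blkptr(), my_track_pending(),
 * my_done_cb() and my_arg are hypothetical placeholders.
 *
 *      err = dmu_sync(pio, db, &bp, txg, my_done_cb, my_arg);
 *      if (err == 0)
 *              my_log_blkptr(&bp);             -- log the blkptr immediately
 *      else if (err == EINPROGRESS)
 *              ;                               -- my_done_cb() will log it
 *      else if (err == EEXIST || err == ENOENT)
 *              ;                               -- nothing to log
 *      else if (err == EALREADY)
 *              my_track_pending(db);           -- track the in-flight sync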
 */
int
dmu_sync(zio_t *pio, dmu_buf_t *db_fake,
    blkptr_t *bp, uint64_t txg, dmu_sync_cb_t *done, void *arg)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
        objset_impl_t *os = db->db_objset;
        dsl_pool_t *dp = os->os_dsl_dataset->ds_dir->dd_pool;
        tx_state_t *tx = &dp->dp_tx;
        dbuf_dirty_record_t *dr;
        dmu_sync_arg_t *in;
        zbookmark_t zb;
        zio_t *zio;
        int zio_flags;
        int err;

        ASSERT(BP_IS_HOLE(bp));
        ASSERT(txg != 0);

        dprintf("dmu_sync txg=%llu, s,o,q %llu %llu %llu\n",
            txg, tx->tx_synced_txg, tx->tx_open_txg, tx->tx_quiesced_txg);

        /*
         * XXX - would be nice if we could do this without suspending...
         */
        txg_suspend(dp);

        /*
         * If this txg already synced, there's nothing to do.
         */
        if (txg <= tx->tx_synced_txg) {
                txg_resume(dp);
                /*
                 * If we're running ziltest, we need the blkptr regardless.
                 */
                if (txg > spa_freeze_txg(dp->dp_spa)) {
                        /* if db_blkptr == NULL, this was an empty write */
                        if (db->db_blkptr)
                                *bp = *db->db_blkptr; /* structure assignment */
                        return (0);
                }
                return (EEXIST);
        }

        mutex_enter(&db->db_mtx);

        if (txg == tx->tx_syncing_txg) {
                while (db->db_data_pending) {
                        /*
                         * IO is in-progress.  Wait for it to finish.
                         * XXX - would be nice to be able to somehow "attach"
                         * this zio to the parent zio passed in.
                         */
                        cv_wait(&db->db_changed, &db->db_mtx);
                        if (!db->db_data_pending &&
                            db->db_blkptr && BP_IS_HOLE(db->db_blkptr)) {
                                /*
                                 * IO was compressed away
                                 */
                                *bp = *db->db_blkptr; /* structure assignment */
                                mutex_exit(&db->db_mtx);
                                txg_resume(dp);
                                return (0);
                        }
                        ASSERT(db->db_data_pending ||
                            (db->db_blkptr && db->db_blkptr->blk_birth == txg));
                }

                if (db->db_blkptr && db->db_blkptr->blk_birth == txg) {
                        /*
                         * IO is already completed.
                         */
                        *bp = *db->db_blkptr; /* structure assignment */
                        mutex_exit(&db->db_mtx);
                        txg_resume(dp);
                        return (0);
                }
        }

        dr = db->db_last_dirty;
        while (dr && dr->dr_txg > txg)
                dr = dr->dr_next;
        if (dr == NULL || dr->dr_txg < txg) {
                /*
                 * This dbuf isn't dirty, must have been free_range'd.
                 * There's no need to log writes to freed blocks, so we're done.
                 */
                mutex_exit(&db->db_mtx);
                txg_resume(dp);
                return (ENOENT);
        }

        ASSERT(dr->dr_txg == txg);
        if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
                /*
                 * We have already issued a sync write for this buffer.
                 */
                mutex_exit(&db->db_mtx);
                txg_resume(dp);
                return (EALREADY);
        } else if (dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
                /*
                 * This buffer has already been synced.  It could not
                 * have been dirtied since, or we would have cleared the state.
                 */
                *bp = dr->dt.dl.dr_overridden_by; /* structure assignment */
                mutex_exit(&db->db_mtx);
                txg_resume(dp);
                return (0);
        }

        dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
        in = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
        in->dr = dr;
        in->done = done;
        in->arg = arg;
        mutex_exit(&db->db_mtx);
        txg_resume(dp);

        zb.zb_objset = os->os_dsl_dataset->ds_object;
        zb.zb_object = db->db.db_object;
        zb.zb_level = db->db_level;
        zb.zb_blkid = db->db_blkid;
        zio_flags = ZIO_FLAG_MUSTSUCCEED;
        if (dmu_ot[db->db_dnode->dn_type].ot_metadata || zb.zb_level != 0)
                zio_flags |= ZIO_FLAG_METADATA;
        zio = arc_write(pio, os->os_spa,
            zio_checksum_select(db->db_dnode->dn_checksum, os->os_checksum),
            zio_compress_select(db->db_dnode->dn_compress, os->os_compress),
            dmu_get_replication_level(os, &zb, db->db_dnode->dn_type),
            txg, bp, dr->dt.dl.dr_data, NULL, dmu_sync_done, in,
            ZIO_PRIORITY_SYNC_WRITE, zio_flags, &zb);

        if (pio) {
                zio_nowait(zio);
                err = EINPROGRESS;
        } else {
                err = zio_wait(zio);
                ASSERT(err == 0);
        }
        return (err);
}

int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
    dmu_tx_t *tx)
{
        dnode_t *dn;
        int err;

        err = dnode_hold(os->os, object, FTAG, &dn);
        if (err)
                return (err);
        err = dnode_set_blksz(dn, size, ibs, tx);
        dnode_rele(dn, FTAG);
        return (err);
}

void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx)
{
        dnode_t *dn;

        /* XXX assumes dnode_hold will not get an i/o error */
        (void) dnode_hold(os->os, object, FTAG, &dn);
        ASSERT(checksum < ZIO_CHECKSUM_FUNCTIONS);
        dn->dn_checksum = checksum;
        dnode_setdirty(dn, tx);
        dnode_rele(dn, FTAG);
}

void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx)
{
        dnode_t *dn;

        /* XXX assumes dnode_hold will not get an i/o error */
        (void) dnode_hold(os->os, object, FTAG, &dn);
        ASSERT(compress < ZIO_COMPRESS_FUNCTIONS);
        dn->dn_compress = compress;
        dnode_setdirty(dn, tx);
        dnode_rele(dn, FTAG);
}

int
dmu_get_replication_level(objset_impl_t *os,
    zbookmark_t *zb, dmu_object_type_t ot)
{
        int ncopies = os->os_copies;

        /* If it's the mos, it should have max copies set. */
        ASSERT(zb->zb_objset != 0 ||
            ncopies == spa_max_replication(os->os_spa));

        if (dmu_ot[ot].ot_metadata || zb->zb_level != 0)
                ncopies++;
        return (MIN(ncopies, spa_max_replication(os->os_spa)));
}

int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
        dnode_t *dn;
        int i, err;

        err = dnode_hold(os->os, object, FTAG, &dn);
        if (err)
                return (err);
        /*
         * Sync any current changes before
         * we go trundling through the block pointers.
         */
        for (i = 0; i < TXG_SIZE; i++) {
                if (list_link_active(&dn->dn_dirty_link[i]))
                        break;
        }
        if (i != TXG_SIZE) {
                dnode_rele(dn, FTAG);
                txg_wait_synced(dmu_objset_pool(os), 0);
                err = dnode_hold(os->os, object, FTAG, &dn);
                if (err)
                        return (err);
        }

        err = dnode_next_offset(dn, hole, off, 1, 1, 0);
        dnode_rele(dn, FTAG);

        return (err);
}

void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        mutex_enter(&dn->dn_mtx);

        doi->doi_data_block_size = dn->dn_datablksz;
        doi->doi_metadata_block_size = dn->dn_indblkshift ?
            1ULL << dn->dn_indblkshift : 0;
        doi->doi_indirection = dn->dn_nlevels;
        doi->doi_checksum = dn->dn_checksum;
        doi->doi_compress = dn->dn_compress;
        doi->doi_physical_blks = (DN_USED_BYTES(dn->dn_phys) +
            SPA_MINBLOCKSIZE/2) >> SPA_MINBLOCKSHIFT;
        doi->doi_max_block_offset = dn->dn_phys->dn_maxblkid;
        doi->doi_type = dn->dn_type;
        doi->doi_bonus_size = dn->dn_bonuslen;
        doi->doi_bonus_type = dn->dn_bonustype;

        mutex_exit(&dn->dn_mtx);
        rw_exit(&dn->dn_struct_rwlock);
}

/*
 * Get information on a DMU object.
 * If doi is NULL, just indicates whether the object exists.
 */
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
        dnode_t *dn;
        int err = dnode_hold(os->os, object, FTAG, &dn);

        if (err)
                return (err);

        if (doi != NULL)
                dmu_object_info_from_dnode(dn, doi);

        dnode_rele(dn, FTAG);
        return (0);
}

/*
 * As above, but faster; can be used when you have a held dbuf in hand.
 */
void
dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi)
{
        dmu_object_info_from_dnode(((dmu_buf_impl_t *)db)->db_dnode, doi);
}

/*
 * Faster still when you only care about the size.
 * This is specifically optimized for zfs_getattr().
 */
void
dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize,
    u_longlong_t *nblk512)
{
        dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;

        *blksize = dn->dn_datablksz;
        /* add 1 for dnode space */
        *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
            SPA_MINBLOCKSHIFT) + 1;
}

void
byteswap_uint64_array(void *vbuf, size_t size)
{
        uint64_t *buf = vbuf;
        size_t count = size >> 3;
        int i;

        ASSERT((size & 7) == 0);

        for (i = 0; i < count; i++)
                buf[i] = BSWAP_64(buf[i]);
}

void
byteswap_uint32_array(void *vbuf, size_t size)
{
        uint32_t *buf = vbuf;
        size_t count = size >> 2;
        int i;

        ASSERT((size & 3) == 0);

        for (i = 0; i < count; i++)
                buf[i] = BSWAP_32(buf[i]);
}

void
byteswap_uint16_array(void *vbuf, size_t size)
{
        uint16_t *buf = vbuf;
        size_t count = size >> 1;
        int i;

        ASSERT((size & 1) == 0);

        for (i = 0; i < count; i++)
                buf[i] = BSWAP_16(buf[i]);
}

/* ARGSUSED */
void
byteswap_uint8_array(void *vbuf, size_t size)
{
}

void
dmu_init(void)
{
        dbuf_init();
        dnode_init();
        arc_init();
}

void
dmu_fini(void)
{
        arc_fini();
        dnode_fini();
        dbuf_fini();
}
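
/*
 * Illustrative usage sketch (an editorial addition, not part of the DMU
 * itself): a typical consumer wraps dmu_write() in a transaction, holding
 * the write range, assigning the tx before dirtying, and committing
 * afterwards.  "object", "off", "len" and "data" stand in for hypothetical
 * caller state.
 *
 *      dmu_tx_t *tx = dmu_tx_create(os);
 *      dmu_tx_hold_write(tx, object, off, len);
 *      error = dmu_tx_assign(tx, TXG_WAIT);
 *      if (error) {
 *              dmu_tx_abort(tx);
 *              return (error);
 *      }
 *      dmu_write(os, object, off, len, data, tx);
 *      dmu_tx_commit(tx);
 */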