/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#endif

const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
        { byteswap_uint8_array, TRUE, "unallocated" },
        { zap_byteswap, TRUE, "object directory" },
        { byteswap_uint64_array, TRUE, "object array" },
        { byteswap_uint8_array, TRUE, "packed nvlist" },
        { byteswap_uint64_array, TRUE, "packed nvlist size" },
        { byteswap_uint64_array, TRUE, "bplist" },
        { byteswap_uint64_array, TRUE, "bplist header" },
        { byteswap_uint64_array, TRUE, "SPA space map header" },
        { byteswap_uint64_array, TRUE, "SPA space map" },
        { byteswap_uint64_array, TRUE, "ZIL intent log" },
        { dnode_buf_byteswap, TRUE, "DMU dnode" },
        { dmu_objset_byteswap, TRUE, "DMU objset" },
        { byteswap_uint64_array, TRUE, "DSL directory" },
        { zap_byteswap, TRUE, "DSL directory child map" },
        { zap_byteswap, TRUE, "DSL dataset snap map" },
        { zap_byteswap, TRUE, "DSL props" },
        { byteswap_uint64_array, TRUE, "DSL dataset" },
        { zfs_znode_byteswap, TRUE, "ZFS znode" },
        { zfs_acl_byteswap, TRUE, "ZFS ACL" },
        { byteswap_uint8_array, FALSE, "ZFS plain file" },
        { zap_byteswap, TRUE, "ZFS directory" },
        { zap_byteswap, TRUE, "ZFS master node" },
        { zap_byteswap, TRUE, "ZFS delete queue" },
        { byteswap_uint8_array, FALSE, "zvol object" },
        { zap_byteswap, TRUE, "zvol prop" },
        { byteswap_uint8_array, FALSE, "other uint8[]" },
        { byteswap_uint64_array, FALSE, "other uint64[]" },
        { zap_byteswap, TRUE, "other ZAP" },
        { zap_byteswap, TRUE, "persistent error log" },
        { byteswap_uint8_array, TRUE, "SPA history" },
        { byteswap_uint64_array, TRUE, "SPA history offsets" },
        { zap_byteswap, TRUE, "Pool properties" },
        { zap_byteswap, TRUE, "DSL permissions" }
};

int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
        dnode_t *dn;
        uint64_t blkid;
        dmu_buf_impl_t *db;
        int err;

        err = dnode_hold(os->os, object, FTAG, &dn);
        if (err)
                return (err);
        blkid = dbuf_whichblock(dn, offset);
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        db = dbuf_hold(dn, blkid, tag);
        rw_exit(&dn->dn_struct_rwlock);
        if (db == NULL) {
                err = EIO;
        } else {
                err = dbuf_read(db, NULL, DB_RF_CANFAIL);
                if (err) {
                        dbuf_rele(db, tag);
                        db = NULL;
                }
        }

        dnode_rele(dn, FTAG);
        *dbp = &db->db;
        return (err);
}
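
/*
 * Example (illustrative sketch, not part of the original file): the usual
 * consumer pattern for dmu_buf_hold() is hold, copy out of db_data, then
 * drop the hold with dmu_buf_rele().  "local_buf" and "local_len" are
 * hypothetical caller-side names:
 *
 *        dmu_buf_t *db;
 *        int err = dmu_buf_hold(os, object, offset, FTAG, &db);
 *        if (err == 0) {
 *                bcopy(db->db_data, local_buf, MIN(local_len, db->db_size));
 *                dmu_buf_rele(db, FTAG);
 *        }
 */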

int
dmu_bonus_max(void)
{
        return (DN_MAX_BONUSLEN);
}

int
dmu_set_bonus(dmu_buf_t *db, int newsize, dmu_tx_t *tx)
{
        dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;

        if (dn->dn_bonus != (dmu_buf_impl_t *)db)
                return (EINVAL);
        if (newsize < 0 || newsize > db->db_size)
                return (EINVAL);
        dnode_setbonuslen(dn, newsize, tx);
        return (0);
}

/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
        dnode_t *dn;
        dmu_buf_impl_t *db;
        int error;

        error = dnode_hold(os->os, object, FTAG, &dn);
        if (error)
                return (error);

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        if (dn->dn_bonus == NULL) {
                rw_exit(&dn->dn_struct_rwlock);
                rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
                if (dn->dn_bonus == NULL)
                        dbuf_create_bonus(dn);
        }
        db = dn->dn_bonus;
        rw_exit(&dn->dn_struct_rwlock);

        /* as long as the bonus buf is held, the dnode will be held */
        if (refcount_add(&db->db_holds, tag) == 1)
                VERIFY(dnode_add_ref(dn, db));

        dnode_rele(dn, FTAG);

        VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED));

        *dbp = &db->db;
        return (0);
}
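
/*
 * Example (illustrative sketch): a bonus-buffer consumer (the ZPL does
 * something similar for znodes) holds the bonus buffer, interprets db_data
 * as its private bonus payload, and releases the hold.  "my_phys_t" is a
 * hypothetical type:
 *
 *        dmu_buf_t *db;
 *        if (dmu_bonus_hold(os, object, FTAG, &db) == 0) {
 *                my_phys_t *phys = db->db_data;
 *                ... read fields of *phys ...
 *                dmu_buf_rele(db, FTAG);
 *        }
 */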

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
        dmu_buf_t **dbp;
        uint64_t blkid, nblks, i;
        uint32_t flags;
        int err;
        zio_t *zio;

        ASSERT(length <= DMU_MAX_ACCESS);

        flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT;
        if (length > zfetch_array_rd_sz)
                flags |= DB_RF_NOPREFETCH;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        if (dn->dn_datablkshift) {
                int blkshift = dn->dn_datablkshift;
                nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
                    P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
        } else {
                if (offset + length > dn->dn_datablksz) {
                        zfs_panic_recover("zfs: accessing past end of object "
                            "%llx/%llx (size=%u access=%llu+%llu)",
                            (longlong_t)dn->dn_objset->
                            os_dsl_dataset->ds_object,
                            (longlong_t)dn->dn_object, dn->dn_datablksz,
                            (longlong_t)offset, (longlong_t)length);
                        return (EIO);
                }
                nblks = 1;
        }
        dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

        zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, TRUE);
        blkid = dbuf_whichblock(dn, offset);
        for (i = 0; i < nblks; i++) {
                dmu_buf_impl_t *db = dbuf_hold(dn, blkid+i, tag);
                if (db == NULL) {
                        rw_exit(&dn->dn_struct_rwlock);
                        dmu_buf_rele_array(dbp, nblks, tag);
                        zio_nowait(zio);
                        return (EIO);
                }
                /* initiate async i/o */
                if (read) {
                        rw_exit(&dn->dn_struct_rwlock);
                        (void) dbuf_read(db, zio, flags);
                        rw_enter(&dn->dn_struct_rwlock, RW_READER);
                }
                dbp[i] = &db->db;
        }
        rw_exit(&dn->dn_struct_rwlock);

        /* wait for async i/o */
        err = zio_wait(zio);
        if (err) {
                dmu_buf_rele_array(dbp, nblks, tag);
                return (err);
        }

        /* wait for other io to complete */
        if (read) {
                for (i = 0; i < nblks; i++) {
                        dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
                        mutex_enter(&db->db_mtx);
                        while (db->db_state == DB_READ ||
                            db->db_state == DB_FILL)
                                cv_wait(&db->db_changed, &db->db_mtx);
                        if (db->db_state == DB_UNCACHED)
                                err = EIO;
                        mutex_exit(&db->db_mtx);
                        if (err) {
                                dmu_buf_rele_array(dbp, nblks, tag);
                                return (err);
                        }
                }
        }

        *numbufsp = nblks;
        *dbpp = dbp;
        return (0);
}

static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
        dnode_t *dn;
        int err;

        err = dnode_hold(os->os, object, FTAG, &dn);
        if (err)
                return (err);

        err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
            numbufsp, dbpp);

        dnode_rele(dn, FTAG);

        return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
        dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
        int err;

        err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
            numbufsp, dbpp);

        return (err);
}

void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
        int i;
        dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

        if (numbufs == 0)
                return;

        for (i = 0; i < numbufs; i++) {
                if (dbp[i])
                        dbuf_rele(dbp[i], tag);
        }

        kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}

void
dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len)
{
        dnode_t *dn;
        uint64_t blkid;
        int nblks, i, err;

        if (zfs_prefetch_disable)
                return;

        if (len == 0) { /* they're interested in the bonus buffer */
                dn = os->os->os_meta_dnode;

                if (object == 0 || object >= DN_MAX_OBJECT)
                        return;

                rw_enter(&dn->dn_struct_rwlock, RW_READER);
                blkid = dbuf_whichblock(dn, object * sizeof (dnode_phys_t));
                dbuf_prefetch(dn, blkid);
                rw_exit(&dn->dn_struct_rwlock);
                return;
        }

        /*
         * XXX - Note, if the dnode for the requested object is not
         * already cached, we will do a *synchronous* read in the
         * dnode_hold() call.  The same is true for any indirects.
         */
        err = dnode_hold(os->os, object, FTAG, &dn);
        if (err != 0)
                return;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        if (dn->dn_datablkshift) {
                int blkshift = dn->dn_datablkshift;
                nblks = (P2ROUNDUP(offset+len, 1<<blkshift) -
                    P2ALIGN(offset, 1<<blkshift)) >> blkshift;
        } else {
                nblks = (offset < dn->dn_datablksz);
        }

        if (nblks != 0) {
                blkid = dbuf_whichblock(dn, offset);
                for (i = 0; i < nblks; i++)
                        dbuf_prefetch(dn, blkid+i);
        }

        rw_exit(&dn->dn_struct_rwlock);

        dnode_rele(dn, FTAG);
}

int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
        dnode_t *dn;
        int err = dnode_hold(os->os, object, FTAG, &dn);
        if (err)
                return (err);
        ASSERT(offset < UINT64_MAX);
        ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
        dnode_free_range(dn, offset, size, tx);
        dnode_rele(dn, FTAG);
        return (0);
}

int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf)
{
        dnode_t *dn;
        dmu_buf_t **dbp;
        int numbufs, i, err;

        err = dnode_hold(os->os, object, FTAG, &dn);
        if (err)
                return (err);

        /*
         * Deal with odd block sizes, where there can't be data past the first
         * block.  If we ever do the tail block optimization, we will need to
         * handle that here as well.
         */
        if (dn->dn_datablkshift == 0) {
                int newsz = offset > dn->dn_datablksz ? 0 :
                    MIN(size, dn->dn_datablksz - offset);
                bzero((char *)buf + newsz, size - newsz);
                size = newsz;
        }

        while (size > 0) {
                uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);

                /*
                 * NB: we could do this block-at-a-time, but it's nice
                 * to be reading in parallel.
                 */
                err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
                    TRUE, FTAG, &numbufs, &dbp);
                if (err)
                        break;

                for (i = 0; i < numbufs; i++) {
                        int tocpy;
                        int bufoff;
                        dmu_buf_t *db = dbp[i];

                        ASSERT(size > 0);

                        bufoff = offset - db->db_offset;
                        tocpy = (int)MIN(db->db_size - bufoff, size);

                        bcopy((char *)db->db_data + bufoff, buf, tocpy);

                        offset += tocpy;
                        size -= tocpy;
                        buf = (char *)buf + tocpy;
                }
                dmu_buf_rele_array(dbp, numbufs, FTAG);
        }
        dnode_rele(dn, FTAG);
        return (err);
}

void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
        dmu_buf_t **dbp;
        int numbufs, i;

        if (size == 0)
                return;

        VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
            FALSE, FTAG, &numbufs, &dbp));

        for (i = 0; i < numbufs; i++) {
                int tocpy;
                int bufoff;
                dmu_buf_t *db = dbp[i];

                ASSERT(size > 0);

                bufoff = offset - db->db_offset;
                tocpy = (int)MIN(db->db_size - bufoff, size);

                ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

                if (tocpy == db->db_size)
                        dmu_buf_will_fill(db, tx);
                else
                        dmu_buf_will_dirty(db, tx);

                bcopy(buf, (char *)db->db_data + bufoff, tocpy);

                if (tocpy == db->db_size)
                        dmu_buf_fill_done(db, tx);

                offset += tocpy;
                size -= tocpy;
                buf = (char *)buf + tocpy;
        }
        dmu_buf_rele_array(dbp, numbufs, FTAG);
}
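
/*
 * Example (illustrative sketch): dmu_write() must be covered by an assigned
 * transaction that holds the range being written.  A minimal caller, where
 * "object", "off", "len" and "data" are hypothetical caller-supplied values:
 *
 *        dmu_tx_t *tx = dmu_tx_create(os);
 *        dmu_tx_hold_write(tx, object, off, len);
 *        if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
 *                dmu_tx_abort(tx);
 *        } else {
 *                dmu_write(os, object, off, len, data, tx);
 *                dmu_tx_commit(tx);
 *        }
 */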

#ifdef _KERNEL
int
dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
{
        dmu_buf_t **dbp;
        int numbufs, i, err;

        /*
         * NB: we could do this block-at-a-time, but it's nice
         * to be reading in parallel.
         */
        err = dmu_buf_hold_array(os, object, uio->uio_loffset, size, TRUE, FTAG,
            &numbufs, &dbp);
        if (err)
                return (err);

        for (i = 0; i < numbufs; i++) {
                int tocpy;
                int bufoff;
                dmu_buf_t *db = dbp[i];

                ASSERT(size > 0);

                bufoff = uio->uio_loffset - db->db_offset;
                tocpy = (int)MIN(db->db_size - bufoff, size);

                err = uiomove((char *)db->db_data + bufoff, tocpy,
                    UIO_READ, uio);
                if (err)
                        break;

                size -= tocpy;
        }
        dmu_buf_rele_array(dbp, numbufs, FTAG);

        return (err);
}

int
dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
        dmu_buf_t **dbp;
        int numbufs, i;
        int err = 0;

        if (size == 0)
                return (0);

        err = dmu_buf_hold_array(os, object, uio->uio_loffset, size,
            FALSE, FTAG, &numbufs, &dbp);
        if (err)
                return (err);

        for (i = 0; i < numbufs; i++) {
                int tocpy;
                int bufoff;
                dmu_buf_t *db = dbp[i];

                ASSERT(size > 0);

                bufoff = uio->uio_loffset - db->db_offset;
                tocpy = (int)MIN(db->db_size - bufoff, size);

                ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

                if (tocpy == db->db_size)
                        dmu_buf_will_fill(db, tx);
                else
                        dmu_buf_will_dirty(db, tx);

                /*
                 * XXX uiomove could block forever (eg. nfs-backed
                 * pages).  There needs to be a uiolockdown() function
                 * to lock the pages in memory, so that uiomove won't
                 * block.
                 */
                err = uiomove((char *)db->db_data + bufoff, tocpy,
                    UIO_WRITE, uio);

                if (tocpy == db->db_size)
                        dmu_buf_fill_done(db, tx);

                if (err)
                        break;

                size -= tocpy;
        }
        dmu_buf_rele_array(dbp, numbufs, FTAG);
        return (err);
}

int
dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    page_t *pp, dmu_tx_t *tx)
{
        dmu_buf_t **dbp;
        int numbufs, i;
        int err;

        if (size == 0)
                return (0);

        err = dmu_buf_hold_array(os, object, offset, size,
            FALSE, FTAG, &numbufs, &dbp);
        if (err)
                return (err);

        for (i = 0; i < numbufs; i++) {
                int tocpy, copied, thiscpy;
                int bufoff;
                dmu_buf_t *db = dbp[i];
                caddr_t va;

                ASSERT(size > 0);
                ASSERT3U(db->db_size, >=, PAGESIZE);

                bufoff = offset - db->db_offset;
                tocpy = (int)MIN(db->db_size - bufoff, size);

                ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

                if (tocpy == db->db_size)
                        dmu_buf_will_fill(db, tx);
                else
                        dmu_buf_will_dirty(db, tx);

                for (copied = 0; copied < tocpy; copied += PAGESIZE) {
                        ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
                        thiscpy = MIN(PAGESIZE, tocpy - copied);
                        va = ppmapin(pp, PROT_READ, (caddr_t)-1);
                        bcopy(va, (char *)db->db_data + bufoff, thiscpy);
                        ppmapout(va);
                        pp = pp->p_next;
                        bufoff += PAGESIZE;
                }

                if (tocpy == db->db_size)
                        dmu_buf_fill_done(db, tx);

                if (err)
                        break;

                offset += tocpy;
                size -= tocpy;
        }
        dmu_buf_rele_array(dbp, numbufs, FTAG);
        return (err);
}
#endif

typedef struct {
        dbuf_dirty_record_t *dr;
        dmu_sync_cb_t *done;
        void *arg;
} dmu_sync_arg_t;

/* ARGSUSED */
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
        dmu_sync_arg_t *in = varg;
        dbuf_dirty_record_t *dr = in->dr;
        dmu_buf_impl_t *db = dr->dr_dbuf;
        dmu_sync_cb_t *done = in->done;

        if (!BP_IS_HOLE(zio->io_bp)) {
                zio->io_bp->blk_fill = 1;
                BP_SET_TYPE(zio->io_bp, db->db_dnode->dn_type);
                BP_SET_LEVEL(zio->io_bp, 0);
        }

        mutex_enter(&db->db_mtx);
        ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
        dr->dt.dl.dr_overridden_by = *zio->io_bp; /* structure assignment */
        dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
        cv_broadcast(&db->db_changed);
        mutex_exit(&db->db_mtx);

        if (done)
                done(&(db->db), in->arg);

        kmem_free(in, sizeof (dmu_sync_arg_t));
}

/*
 * Intent log support: sync the block associated with db to disk.
 * N.B. and XXX: the caller is responsible for making sure that the
 * data isn't changing while dmu_sync() is writing it.
 *
 * Return values:
 *
 * EEXIST: this txg has already been synced, so there's nothing to do.
 * The caller should not log the write.
 *
 * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
 * The caller should not log the write.
 *
 * EALREADY: this block is already in the process of being synced.
 * The caller should track its progress (somehow).
 *
 * EINPROGRESS: the IO has been initiated.
 * The caller should log this blkptr in the callback.
 *
 * 0: completed.  Sets *bp to the blkptr just written.
 * The caller should log this blkptr immediately.
 */
int
dmu_sync(zio_t *pio, dmu_buf_t *db_fake,
    blkptr_t *bp, uint64_t txg, dmu_sync_cb_t *done, void *arg)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
        objset_impl_t *os = db->db_objset;
        dsl_pool_t *dp = os->os_dsl_dataset->ds_dir->dd_pool;
        tx_state_t *tx = &dp->dp_tx;
        dbuf_dirty_record_t *dr;
        dmu_sync_arg_t *in;
        zbookmark_t zb;
        zio_t *zio;
        int zio_flags;
        int err;

        ASSERT(BP_IS_HOLE(bp));
        ASSERT(txg != 0);

        dprintf("dmu_sync txg=%llu, s,o,q %llu %llu %llu\n",
            txg, tx->tx_synced_txg, tx->tx_open_txg, tx->tx_quiesced_txg);

        /*
         * XXX - would be nice if we could do this without suspending...
         */
        txg_suspend(dp);

        /*
         * If this txg already synced, there's nothing to do.
         */
        if (txg <= tx->tx_synced_txg) {
                txg_resume(dp);
                /*
                 * If we're running ziltest, we need the blkptr regardless.
                 */
                if (txg > spa_freeze_txg(dp->dp_spa)) {
                        /* if db_blkptr == NULL, this was an empty write */
                        if (db->db_blkptr)
                                *bp = *db->db_blkptr; /* structure assignment */
                        return (0);
                }
                return (EEXIST);
        }

        mutex_enter(&db->db_mtx);

        if (txg == tx->tx_syncing_txg) {
                while (db->db_data_pending) {
                        /*
                         * IO is in-progress.  Wait for it to finish.
                         * XXX - would be nice to be able to somehow "attach"
                         * this zio to the parent zio passed in.
                         */
                        cv_wait(&db->db_changed, &db->db_mtx);
                        if (!db->db_data_pending &&
                            db->db_blkptr && BP_IS_HOLE(db->db_blkptr)) {
                                /*
                                 * IO was compressed away
                                 */
                                *bp = *db->db_blkptr; /* structure assignment */
                                mutex_exit(&db->db_mtx);
                                txg_resume(dp);
                                return (0);
                        }
                        ASSERT(db->db_data_pending ||
                            (db->db_blkptr && db->db_blkptr->blk_birth == txg));
                }

                if (db->db_blkptr && db->db_blkptr->blk_birth == txg) {
                        /*
                         * IO is already completed.
                         */
                        *bp = *db->db_blkptr; /* structure assignment */
                        mutex_exit(&db->db_mtx);
                        txg_resume(dp);
                        return (0);
                }
        }

        dr = db->db_last_dirty;
        while (dr && dr->dr_txg > txg)
                dr = dr->dr_next;
        if (dr == NULL || dr->dr_txg < txg) {
                /*
                 * This dbuf isn't dirty, must have been free_range'd.
                 * There's no need to log writes to freed blocks, so we're done.
                 */
                mutex_exit(&db->db_mtx);
                txg_resume(dp);
                return (ENOENT);
        }

        ASSERT(dr->dr_txg == txg);
        if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
                /*
                 * We have already issued a sync write for this buffer.
                 */
                mutex_exit(&db->db_mtx);
                txg_resume(dp);
                return (EALREADY);
        } else if (dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
                /*
                 * This buffer has already been synced.  It could not
                 * have been dirtied since, or we would have cleared the state.
                 */
                *bp = dr->dt.dl.dr_overridden_by; /* structure assignment */
                mutex_exit(&db->db_mtx);
                txg_resume(dp);
                return (0);
        }

        dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
        in = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
        in->dr = dr;
        in->done = done;
        in->arg = arg;
        mutex_exit(&db->db_mtx);
        txg_resume(dp);

        zb.zb_objset = os->os_dsl_dataset->ds_object;
        zb.zb_object = db->db.db_object;
        zb.zb_level = db->db_level;
        zb.zb_blkid = db->db_blkid;
        zio_flags = ZIO_FLAG_MUSTSUCCEED;
        if (dmu_ot[db->db_dnode->dn_type].ot_metadata || zb.zb_level != 0)
                zio_flags |= ZIO_FLAG_METADATA;
        zio = arc_write(pio, os->os_spa,
            zio_checksum_select(db->db_dnode->dn_checksum, os->os_checksum),
            zio_compress_select(db->db_dnode->dn_compress, os->os_compress),
            dmu_get_replication_level(os, &zb, db->db_dnode->dn_type),
            txg, bp, dr->dt.dl.dr_data, NULL, dmu_sync_done, in,
            ZIO_PRIORITY_SYNC_WRITE, zio_flags, &zb);

        if (pio) {
                zio_nowait(zio);
                err = EINPROGRESS;
        } else {
                err = zio_wait(zio);
                ASSERT(err == 0);
        }
        return (err);
}
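
/*
 * Example (illustrative sketch, not the actual ZIL code): a caller logging
 * a write typically dispatches on dmu_sync()'s return value as documented
 * above.  "lwb", "lwb_blk" and my_sync_done() are hypothetical names:
 *
 *        error = dmu_sync(zio, db, &lwb->lwb_blk, txg, my_sync_done, lwb);
 *        switch (error) {
 *        case 0:            log lwb->lwb_blk immediately
 *        case EINPROGRESS:  log the blkptr from my_sync_done()
 *        case EEXIST:
 *        case ENOENT:       do not log the write
 *        case EALREADY:     track the in-flight sync before logging
 *        }
 */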

int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
    dmu_tx_t *tx)
{
        dnode_t *dn;
        int err;

        err = dnode_hold(os->os, object, FTAG, &dn);
        if (err)
                return (err);
        err = dnode_set_blksz(dn, size, ibs, tx);
        dnode_rele(dn, FTAG);
        return (err);
}

void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx)
{
        dnode_t *dn;

        /* XXX assumes dnode_hold will not get an i/o error */
        (void) dnode_hold(os->os, object, FTAG, &dn);
        ASSERT(checksum < ZIO_CHECKSUM_FUNCTIONS);
        dn->dn_checksum = checksum;
        dnode_setdirty(dn, tx);
        dnode_rele(dn, FTAG);
}

void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx)
{
        dnode_t *dn;

        /* XXX assumes dnode_hold will not get an i/o error */
        (void) dnode_hold(os->os, object, FTAG, &dn);
        ASSERT(compress < ZIO_COMPRESS_FUNCTIONS);
        dn->dn_compress = compress;
        dnode_setdirty(dn, tx);
        dnode_rele(dn, FTAG);
}

int
dmu_get_replication_level(objset_impl_t *os,
    zbookmark_t *zb, dmu_object_type_t ot)
{
        int ncopies = os->os_copies;

        /* If it's the mos, it should have max copies set. */
        ASSERT(zb->zb_objset != 0 ||
            ncopies == spa_max_replication(os->os_spa));

        if (dmu_ot[ot].ot_metadata || zb->zb_level != 0)
                ncopies++;
        return (MIN(ncopies, spa_max_replication(os->os_spa)));
}

int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
        dnode_t *dn;
        int i, err;

        err = dnode_hold(os->os, object, FTAG, &dn);
        if (err)
                return (err);
        /*
         * Sync any current changes before
         * we go trundling through the block pointers.
         */
        for (i = 0; i < TXG_SIZE; i++) {
                if (list_link_active(&dn->dn_dirty_link[i]))
                        break;
        }
        if (i != TXG_SIZE) {
                dnode_rele(dn, FTAG);
                txg_wait_synced(dmu_objset_pool(os), 0);
                err = dnode_hold(os->os, object, FTAG, &dn);
                if (err)
                        return (err);
        }

        err = dnode_next_offset(dn, hole, off, 1, 1, 0);
        dnode_rele(dn, FTAG);

        return (err);
}
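
/*
 * Example (illustrative sketch): dmu_offset_next() is the primitive behind
 * SEEK_HOLE/SEEK_DATA-style lookups.  A hypothetical caller searching for
 * the next hole at or after "start":
 *
 *        uint64_t off = start;
 *        error = dmu_offset_next(os, object, B_TRUE, &off);
 *        if (error == 0)
 *                ... "off" is the offset of the next hole ...
 *        else
 *                ... no matching offset was found, or an i/o error occurred ...
 */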

void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        mutex_enter(&dn->dn_mtx);

        doi->doi_data_block_size = dn->dn_datablksz;
        doi->doi_metadata_block_size = dn->dn_indblkshift ?
            1ULL << dn->dn_indblkshift : 0;
        doi->doi_indirection = dn->dn_nlevels;
        doi->doi_checksum = dn->dn_checksum;
        doi->doi_compress = dn->dn_compress;
        doi->doi_physical_blks = (DN_USED_BYTES(dn->dn_phys) +
            SPA_MINBLOCKSIZE/2) >> SPA_MINBLOCKSHIFT;
        doi->doi_max_block_offset = dn->dn_phys->dn_maxblkid;
        doi->doi_type = dn->dn_type;
        doi->doi_bonus_size = dn->dn_bonuslen;
        doi->doi_bonus_type = dn->dn_bonustype;

        mutex_exit(&dn->dn_mtx);
        rw_exit(&dn->dn_struct_rwlock);
}

/*
 * Get information on a DMU object.
 * If doi is NULL, just indicates whether the object exists.
 */
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
        dnode_t *dn;
        int err = dnode_hold(os->os, object, FTAG, &dn);

        if (err)
                return (err);

        if (doi != NULL)
                dmu_object_info_from_dnode(dn, doi);

        dnode_rele(dn, FTAG);
        return (0);
}

/*
 * As above, but faster; can be used when you have a held dbuf in hand.
 */
void
dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi)
{
        dmu_object_info_from_dnode(((dmu_buf_impl_t *)db)->db_dnode, doi);
}

/*
 * Faster still when you only care about the size.
 * This is specifically optimized for zfs_getattr().
 */
void
dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize, u_longlong_t *nblk512)
{
        dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;

        *blksize = dn->dn_datablksz;
        /* add 1 for dnode space */
        *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
            SPA_MINBLOCKSHIFT) + 1;
}

void
byteswap_uint64_array(void *vbuf, size_t size)
{
        uint64_t *buf = vbuf;
        size_t count = size >> 3;
        int i;

        ASSERT((size & 7) == 0);

        for (i = 0; i < count; i++)
                buf[i] = BSWAP_64(buf[i]);
}

void
byteswap_uint32_array(void *vbuf, size_t size)
{
        uint32_t *buf = vbuf;
        size_t count = size >> 2;
        int i;

        ASSERT((size & 3) == 0);

        for (i = 0; i < count; i++)
                buf[i] = BSWAP_32(buf[i]);
}

void
byteswap_uint16_array(void *vbuf, size_t size)
{
        uint16_t *buf = vbuf;
        size_t count = size >> 1;
        int i;

        ASSERT((size & 1) == 0);

        for (i = 0; i < count; i++)
                buf[i] = BSWAP_16(buf[i]);
}

/* ARGSUSED */
void
byteswap_uint8_array(void *vbuf, size_t size)
{
}

void
dmu_init(void)
{
        dbuf_init();
        dnode_init();
        arc_init();
}

void
dmu_fini(void)
{
        arc_fini();
        dnode_fini();
        dbuf_fini();
}