/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/zfs_context.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);


dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}
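/*
 * Usage sketch (editor's example, not code from this file): open-context
 * callers build a tx with dmu_tx_create(), declare holds, and assign it;
 * syncing-context code instead uses dmu_tx_create_assigned() with the txg
 * currently being synced, roughly in the style of dsl_pool_sync():
 *
 *	tx = dmu_tx_create_assigned(dp, txg);
 *	... perform already-accounted-for syncing-context changes ...
 *	dmu_tx_commit(tx);
 */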
static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}
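/*
 * Sketch of an assumed caller, along the lines of dmu_object_alloc():
 * after allocating the new dnode, register its object number with the tx
 * so the debug checks in dmu_tx_dirty_buf() accept dirties of it:
 *
 *	dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, tx);
 *	dmu_tx_add_new_object(tx, os, object);
 */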
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_indirects(dmu_tx_hold_t *txh, dmu_buf_impl_t *db,
    boolean_t freeable, dmu_buf_impl_t **history)
{
	int i = db->db_level + 1;
	dnode_t *dn = db->db_dnode;

	if (i >= dn->dn_nlevels)
		return;

	db = db->db_parent;
	if (db == NULL) {
		uint64_t lvls = dn->dn_nlevels - i;

		txh->txh_space_towrite += lvls << dn->dn_indblkshift;
		return;
	}

	if (db != history[i]) {
		dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
		uint64_t space = 1ULL << dn->dn_indblkshift;

		freeable = (db->db_blkptr && (freeable ||
		    dsl_dataset_block_freeable(ds, db->db_blkptr->blk_birth)));
		if (freeable)
			txh->txh_space_tooverwrite += space;
		else
			txh->txh_space_towrite += space;
		if (db->db_blkptr)
			txh->txh_space_tounref += space;
		history[i] = db;
		dmu_tx_count_indirects(txh, db, freeable, history);
	}
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		dmu_buf_impl_t *last[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1
		 * blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
			min_ibs = max_ibs = dn->dn_indblkshift;
		} else if (dn->dn_indblkshift > max_ibs) {
			/*
			 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
			 * the code will still work correctly on older pools.
			 */
			min_ibs = max_ibs = dn->dn_indblkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid)
			bzero(last, sizeof (dmu_buf_impl_t *) * DN_MAX_LEVELS);
		while (start <= dn->dn_maxblkid) {
			spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
			dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			db = dbuf_hold_level(dn, 0, start, FTAG);
			rw_exit(&dn->dn_struct_rwlock);
			if (db->db_blkptr && dsl_dataset_block_freeable(ds,
			    db->db_blkptr->blk_birth)) {
				dprintf_bp(db->db_blkptr, "can free old%s", "");
				txh->txh_space_tooverwrite += dn->dn_datablksz;
				txh->txh_space_tounref += dn->dn_datablksz;
				dmu_tx_count_indirects(txh, db, TRUE, last);
			} else {
				txh->txh_space_towrite += dn->dn_datablksz;
				if (db->db_blkptr)
					txh->txh_space_tounref +=
					    bp_get_dasize(spa, db->db_blkptr);
				dmu_tx_count_indirects(txh, db, FALSE, last);
			}
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = EFBIG;

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = txh->txh_tx->tx_objset->os_meta_dnode;
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}
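/*
 * Worked example (editor's sketch, assuming 2009-era constants:
 * SPA_MINBLOCKSHIFT = 9, SPA_MAXBLOCKSHIFT = 17, DN_MIN_INDBLKSHIFT = 10,
 * DN_MAX_INDBLKSHIFT = 14): a dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 1MB)
 * has no dnode yet, so dmu_tx_count_write() must assume worst-case
 * geometry: 512-byte data blocks with 1K indirects mapping 8 blkptrs each
 * (epbs = 3), while charging each indirect at the maximum 16K size.
 * The data charge is the 1MB itself; the indirect loop then runs 19 times
 * (bits = 55, 52, ..., 1), charging 256, 32, 4, and then 1 block per
 * remaining level: (256 + 32 + 4 + 16) * 16K ~= 4.8MB.  Once the object
 * exists with a fixed block size, min_bs == max_bs and the same write is
 * charged far more precisely.
 */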
static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_level() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;
	}
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dasize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nblks = 0;
	}

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels.
	 */
	{
		uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
		int level = (dn->dn_nlevels > 1) ? 2 : 1;

		while (level++ < DN_MAX_LEVELS) {
			txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
		ASSERT(blkcnt <= dn->dn_nblkptr);
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		dbuf = dbuf_hold_level(dn, 1, blkid >> epbs, FTAG);

		txh->txh_memory_tohold += dbuf->db.db_size;
		if (txh->txh_memory_tohold > DMU_MAX_ACCESS) {
			txh->txh_tx->tx_err = E2BIG;
			dbuf_rele(dbuf, FTAG);
			break;
		}
		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dasize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}
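/*
 * Editor's sketch of the worst-case indirect accounting in
 * dmu_tx_count_free(), assuming 128K data blocks and 16K indirects
 * (epbs = 14 - 7 = 7): freeing 1GB covers nblks = 8192 level-0 blocks,
 * so blkcnt starts at 1 + (8192 >> 7 >> 7) = 1 and the loop charges one
 * 16K indirect for each remaining level up to DN_MAX_LEVELS.  The
 * level-1 blocks themselves are charged against txh_memory_tohold as
 * they are visited in the main while loop.
 */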
void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_dnode(txh);
	dmu_tx_count_free(txh, off, len);
}
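/*
 * Usage sketch (editor's example, not code from this file): freeing an
 * entire object, in the style of dmu_object_free() callers:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	err = dmu_object_free(os, object, tx);
 *	dmu_tx_commit(tx);
 */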
void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);

	if (dn->dn_maxblkid == 0 && !add) {
		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    dn->dn_phys->dn_blkptr[0].blk_birth)) {
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		} else {
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		}
		if (dn->dn_phys->dn_blkptr[0].blk_birth)
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}
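/*
 * Editor's sketch of a typical multi-hold transaction, loosely modeled on
 * ZPL file creation (zfs_create()-style code; identifiers assumed):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_zap(tx, dir_obj, TRUE, name);	-- new directory entry
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);		-- new znode/dnode
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	...
 */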
#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn = db->db_dnode;

	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj)
		return;

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT)
		return;

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * buffer so that we don't need to hold it
				 * when creating a new object.
				 */
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset)
			return;
	}
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif
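/*
 * NB (assumed wiring, see dmu_tx.h): on debug builds the verifier above
 * is reached from dbuf_dirty() via the DMU_TX_DIRTY_BUF() macro, which
 * compiles away on non-debug builds.  Any dirty that was not declared by
 * a matching dmu_tx_hold_*() call panics here rather than silently
 * invalidating the space accounting.
 */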
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT3U(tx->tx_txg, ==, 0);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly.
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
		return (ERESTART);

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
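/*
 * Canonical caller pattern under TXG_NOWAIT (a sketch in the style of
 * the ZPL; the "top:" label and error paths are assumed caller code):
 *
 * top:
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_*(tx, ...);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (err) {
 *		if (err == ERESTART) {
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	... make the declared changes ...
 *	dmu_tx_commit(tx);
 */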
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}
void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}