/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to an fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 *     - ZIL header
 *     - ZIL blocks
 *     - ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. The sketch below shows the
 * resulting ZIL structure.
 */
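/*
 * A rough sketch of the chain described above (illustrative only; the
 * number and placement of blocks vary, since ZIL blocks live anywhere
 * in the pool):
 *
 *     zil_header_t          ZIL block              ZIL block
 *     +----------+      +---------------+      +---------------+
 *     |  zh_log  |----->| log records   |      | log records   |
 *     +----------+      | ...           |      | ...           |
 *                       | trailer:      |----->| trailer:      |-----> ...
 *                       |  next blkptr  |      |  next blkptr  |
 *                       +---------------+      +---------------+
 */
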
/*
 * These global ZIL switches affect all pools
 */
int zil_disable = 0;	/* disable intent logging */
int zil_always = 0;	/* make every transaction synchronous */
int zil_purge = 0;	/* at pool open, just throw everything away */
int zil_noflush = 0;	/* don't flush write cache buffers on disks */

static kmem_cache_t *zil_lwb_cache;

static int
zil_dva_compare(const void *x1, const void *x2)
{
    const dva_t *dva1 = x1;
    const dva_t *dva2 = x2;

    if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
        return (-1);
    if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
        return (1);

    if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
        return (-1);
    if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
        return (1);

    return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
    avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
        offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
    zil_dva_node_t *zn;
    void *cookie = NULL;

    while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
        kmem_free(zn, sizeof (zil_dva_node_t));

    avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
    zil_dva_node_t *zn;
    avl_index_t where;

    if (avl_find(t, dva, &where) != NULL)
        return (EEXIST);

    zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
    zn->zn_dva = *dva;
    avl_insert(t, zn, where);

    return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
    return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
    zio_cksum_t *zc = &bp->blk_cksum;

    zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
    zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
    zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
    zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
{
    blkptr_t blk = *bp;
    zbookmark_t zb;
    int error;

    zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
    zb.zb_object = 0;
    zb.zb_level = -1;
    zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];

    *abufpp = NULL;

    error = arc_read(NULL, zilog->zl_spa, &blk, byteswap_uint64_array,
        arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
        ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, ARC_WAIT, &zb);

    if (error == 0) {
        char *data = (*abufpp)->b_data;
        uint64_t blksz = BP_GET_LSIZE(bp);
        zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
        zio_cksum_t cksum = bp->blk_cksum;

        /*
         * Sequence numbers should be... sequential. The checksum
         * verifier for the next block should be bp's checksum plus 1.
         */
        cksum.zc_word[ZIL_ZC_SEQ]++;

        if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum, sizeof (cksum)))
            error = ESTALE;
        else if (BP_IS_HOLE(&ztp->zit_next_blk))
            error = ENOENT;
        else if (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))
            error = EOVERFLOW;

        if (error) {
            VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
            *abufpp = NULL;
        }
    }

    dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);

    return (error);
}
/*
 * Parse the intent log, and call parse_func for each valid record within.
 * Return the highest sequence number.
 */
uint64_t
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
    const zil_header_t *zh = zilog->zl_header;
    uint64_t claim_seq = zh->zh_claim_seq;
    uint64_t seq = 0;
    uint64_t max_seq = 0;
    blkptr_t blk = zh->zh_log;
    arc_buf_t *abuf;
    char *lrbuf, *lrp;
    zil_trailer_t *ztp;
    int reclen, error;

    if (BP_IS_HOLE(&blk))
        return (max_seq);

    /*
     * Starting at the block pointed to by zh_log we read the log chain.
     * For each block in the chain we strongly check that block to
     * ensure its validity. We stop when an invalid block is found.
     * For each block pointer in the chain we call parse_blk_func().
     * For each record in each valid block we call parse_lr_func().
     * If the log has been claimed, stop if we encounter a sequence
     * number greater than the highest claimed sequence number.
     */
    zil_dva_tree_init(&zilog->zl_dva_tree);
    for (;;) {
        seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

        if (claim_seq != 0 && seq > claim_seq)
            break;

        ASSERT(max_seq < seq);
        max_seq = seq;

        error = zil_read_log_block(zilog, &blk, &abuf);

        if (parse_blk_func != NULL)
            parse_blk_func(zilog, &blk, arg, txg);

        if (error)
            break;

        lrbuf = abuf->b_data;
        ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
        blk = ztp->zit_next_blk;

        if (parse_lr_func == NULL) {
            VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
            continue;
        }

        for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
            lr_t *lr = (lr_t *)lrp;
            reclen = lr->lrc_reclen;
            ASSERT3U(reclen, >=, sizeof (lr_t));
            parse_lr_func(zilog, lr, arg, txg);
        }
        VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
    }
    zil_dva_tree_fini(&zilog->zl_dva_tree);

    return (max_seq);
}

/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
    spa_t *spa = zilog->zl_spa;
    int err;

    /*
     * Claim log block if not already committed and not already claimed.
     */
    if (bp->blk_birth >= first_txg &&
        zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
        err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL));
        ASSERT(err == 0);
    }
}
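/*
 * For orientation, a minimal sketch of how a parse callback pair is used
 * (mirroring the claim path in zil_claim() below; the callback names are
 * the real ones from this file, the surrounding call is only illustrative):
 *
 *     claim_seq = zil_parse(zilog, zil_claim_log_block,
 *         zil_claim_log_record, tx, first_txg);
 *
 * zil_parse() hands every block pointer to the blk callback and every
 * record in every valid block to the lr callback, and returns the highest
 * sequence number it saw.
 */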
static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
    if (lrc->lrc_txtype == TX_WRITE) {
        lr_write_t *lr = (lr_write_t *)lrc;
        zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
    }
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
    zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
    /*
     * If we previously claimed it, we need to free it.
     */
    if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
        lr_write_t *lr = (lr_write_t *)lrc;
        blkptr_t *bp = &lr->lr_blkptr;
        if (bp->blk_birth >= claim_txg &&
            !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
            (void) arc_free(NULL, zilog->zl_spa,
                dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
        }
    }
}

/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
    const zil_header_t *zh = zilog->zl_header;
    lwb_t *lwb;
    uint64_t txg = 0;
    dmu_tx_t *tx = NULL;
    blkptr_t blk;
    int error = 0;

    /*
     * Wait for any previous destroy to complete.
     */
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

    ASSERT(zh->zh_claim_txg == 0);
    ASSERT(zh->zh_replay_seq == 0);

    blk = zh->zh_log;

    /*
     * If we don't already have an initial log block, allocate one now.
     */
    if (BP_IS_HOLE(&blk)) {
        tx = dmu_tx_create(zilog->zl_os);
        (void) dmu_tx_assign(tx, TXG_WAIT);
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
        txg = dmu_tx_get_txg(tx);

        error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk, txg);

        if (error == 0)
            zil_init_log_chain(zilog, &blk);
    }

    /*
     * Allocate a log write buffer (lwb) for the first log block.
     */
    if (error == 0) {
        lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
        lwb->lwb_zilog = zilog;
        lwb->lwb_blk = blk;
        lwb->lwb_nused = 0;
        lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
        lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
        lwb->lwb_max_txg = txg;
        lwb->lwb_seq = 0;
        lwb->lwb_state = UNWRITTEN;
        lwb->lwb_zio = NULL;

        mutex_enter(&zilog->zl_lock);
        list_insert_tail(&zilog->zl_lwb_list, lwb);
        mutex_exit(&zilog->zl_lock);
    }

    /*
     * If we just allocated the first log block, commit our transaction
     * and wait for zil_sync() to stuff the block pointer into zh_log.
     * (zh is part of the MOS, so we cannot modify it in open context.)
     */
    if (tx != NULL) {
        dmu_tx_commit(tx);
        txg_wait_synced(zilog->zl_dmu_pool, txg);
    }

    ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}
/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
    const zil_header_t *zh = zilog->zl_header;
    lwb_t *lwb;
    dmu_tx_t *tx;
    uint64_t txg;

    /*
     * Wait for any previous destroy to complete.
     */
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

    if (BP_IS_HOLE(&zh->zh_log))
        return;

    tx = dmu_tx_create(zilog->zl_os);
    (void) dmu_tx_assign(tx, TXG_WAIT);
    dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
    txg = dmu_tx_get_txg(tx);

    mutex_enter(&zilog->zl_lock);

    ASSERT3U(zilog->zl_destroy_txg, <, txg);
    zilog->zl_destroy_txg = txg;
    zilog->zl_keep_first = keep_first;

    if (!list_is_empty(&zilog->zl_lwb_list)) {
        ASSERT(zh->zh_claim_txg == 0);
        ASSERT(!keep_first);
        while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
            list_remove(&zilog->zl_lwb_list, lwb);
            if (lwb->lwb_buf != NULL)
                zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
            zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
            kmem_cache_free(zil_lwb_cache, lwb);
        }
        mutex_exit(&zilog->zl_lock);
    } else {
        mutex_exit(&zilog->zl_lock);
        if (!keep_first) {
            (void) zil_parse(zilog, zil_free_log_block,
                zil_free_log_record, tx, zh->zh_claim_txg);
        }
    }

    dmu_tx_commit(tx);

    if (keep_first)		/* no need to wait in this case */
        return;

    txg_wait_synced(zilog->zl_dmu_pool, txg);
    ASSERT(BP_IS_HOLE(&zh->zh_log));
}
int
zil_claim(char *osname, void *txarg)
{
    dmu_tx_t *tx = txarg;
    uint64_t first_txg = dmu_tx_get_txg(tx);
    zilog_t *zilog;
    zil_header_t *zh;
    objset_t *os;
    int error;

    error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_STANDARD, &os);
    if (error) {
        cmn_err(CE_WARN, "can't process intent log for %s", osname);
        return (0);
    }

    zilog = dmu_objset_zil(os);
    zh = zil_header_in_syncing_context(zilog);

    /*
     * Claim all log blocks if we haven't already done so, and remember
     * the highest claimed sequence number. This ensures that if we can
     * read only part of the log now (e.g. due to a missing device),
     * but we can read the entire log later, we will not try to replay
     * or destroy beyond the last block we successfully claimed.
     */
    ASSERT3U(zh->zh_claim_txg, <=, first_txg);
    if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
        zh->zh_claim_txg = first_txg;
        zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
            zil_claim_log_record, tx, first_txg);
        dsl_dataset_dirty(dmu_objset_ds(os), tx);
    }

    ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
    dmu_objset_close(os);
    return (0);
}

void
zil_add_vdev(zilog_t *zilog, uint64_t vdev, uint64_t seq)
{
    zil_vdev_t *zv;

    if (zil_noflush)
        return;

    ASSERT(MUTEX_HELD(&zilog->zl_lock));
    zv = kmem_alloc(sizeof (zil_vdev_t), KM_SLEEP);
    zv->vdev = vdev;
    zv->seq = seq;
    list_insert_tail(&zilog->zl_vdev_list, zv);
}

void
zil_flush_vdevs(zilog_t *zilog, uint64_t seq)
{
    vdev_t *vd;
    zil_vdev_t *zv, *zv2;
    zio_t *zio;
    spa_t *spa;
    uint64_t vdev;

    if (zil_noflush)
        return;

    ASSERT(MUTEX_HELD(&zilog->zl_lock));

    spa = zilog->zl_spa;
    zio = NULL;

    while ((zv = list_head(&zilog->zl_vdev_list)) != NULL &&
        zv->seq <= seq) {
        vdev = zv->vdev;
        list_remove(&zilog->zl_vdev_list, zv);
        kmem_free(zv, sizeof (zil_vdev_t));

        /*
         * remove all chained entries <= seq with same vdev
         */
        zv = list_head(&zilog->zl_vdev_list);
        while (zv && zv->seq <= seq) {
            zv2 = list_next(&zilog->zl_vdev_list, zv);
            if (zv->vdev == vdev) {
                list_remove(&zilog->zl_vdev_list, zv);
                kmem_free(zv, sizeof (zil_vdev_t));
            }
            zv = zv2;
        }

        /* flush the write cache for this vdev */
        mutex_exit(&zilog->zl_lock);
        if (zio == NULL)
            zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
        vd = vdev_lookup_top(spa, vdev);
        ASSERT(vd);
        (void) zio_nowait(zio_ioctl(zio, spa, vd, DKIOCFLUSHWRITECACHE,
            NULL, NULL, ZIO_PRIORITY_NOW,
            ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY));
        mutex_enter(&zilog->zl_lock);
    }

    /*
     * Wait for all the flushes to complete. Not all devices actually
     * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
     */
    if (zio != NULL) {
        mutex_exit(&zilog->zl_lock);
        (void) zio_wait(zio);
        mutex_enter(&zilog->zl_lock);
    }
}
/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
    lwb_t *prev;
    lwb_t *lwb = zio->io_private;
    zilog_t *zilog = lwb->lwb_zilog;
    uint64_t max_seq;

    /*
     * Now that we've written this log block, we have a stable pointer
     * to the next block in the chain, so it's OK to let the txg in
     * which we allocated the next block sync.
     */
    txg_rele_to_sync(&lwb->lwb_txgh);

    zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
    mutex_enter(&zilog->zl_lock);
    lwb->lwb_buf = NULL;
    if (zio->io_error) {
        zilog->zl_log_error = B_TRUE;
        mutex_exit(&zilog->zl_lock);
        cv_broadcast(&zilog->zl_cv_seq);
        return;
    }

    prev = list_prev(&zilog->zl_lwb_list, lwb);
    if (prev && prev->lwb_state != SEQ_COMPLETE) {
        /* There's an unwritten buffer in the chain before this one */
        lwb->lwb_state = SEQ_INCOMPLETE;
        mutex_exit(&zilog->zl_lock);
        return;
    }

    max_seq = lwb->lwb_seq;
    lwb->lwb_state = SEQ_COMPLETE;
    /*
     * We must also follow up the chain for already written buffers
     * to see if we can set zl_ss_seq even higher.
     */
    while ((lwb = list_next(&zilog->zl_lwb_list, lwb)) != NULL) {
        if (lwb->lwb_state != SEQ_INCOMPLETE)
            break;
        lwb->lwb_state = SEQ_COMPLETE;
        /* lwb_seq will be zero if we've written an empty buffer */
        if (lwb->lwb_seq) {
            ASSERT3U(max_seq, <, lwb->lwb_seq);
            max_seq = lwb->lwb_seq;
        }
    }
    zilog->zl_ss_seq = MAX(max_seq, zilog->zl_ss_seq);
    mutex_exit(&zilog->zl_lock);
    cv_broadcast(&zilog->zl_cv_seq);
}

/*
 * Initialize the io for a log block.
 *
 * Note, we should not initialize the IO until we are about
 * to use it, since zio_rewrite() does a spa_config_enter().
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
    zbookmark_t zb;

    zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
    zb.zb_object = 0;
    zb.zb_level = -1;
    zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

    ASSERT(lwb->lwb_zio == NULL);
    lwb->lwb_zio = zio_rewrite(NULL, zilog->zl_spa,
        ZIO_CHECKSUM_ZILOG, 0, &lwb->lwb_blk, lwb->lwb_buf,
        lwb->lwb_sz, zil_lwb_write_done, lwb,
        ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
}
/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
    lwb_t *nlwb;
    zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
    spa_t *spa = zilog->zl_spa;
    blkptr_t *bp = &ztp->zit_next_blk;
    uint64_t txg;
    uint64_t zil_blksz;
    int error;

    ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

    /*
     * Allocate the next block and save its address in this block
     * before writing it in order to establish the log chain.
     * Note that if the allocation of nlwb synced before we wrote
     * the block that points at it (lwb), we'd leak it if we crashed.
     * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
     */
    txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
    txg_rele_to_quiesce(&lwb->lwb_txgh);

    /*
     * Pick a ZIL blocksize. We request a size that is the
     * maximum of the previous used size, the current used size and
     * the amount waiting in the queue.
     */
    zil_blksz = MAX(zilog->zl_prev_used,
        zilog->zl_cur_used + sizeof (*ztp));
    zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
    zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
    if (zil_blksz > ZIL_MAX_BLKSZ)
        zil_blksz = ZIL_MAX_BLKSZ;

    error = zio_alloc_blk(spa, zil_blksz, bp, txg);
    if (error) {
        /*
         * Reinitialise the lwb.
         * By returning NULL the caller will call txg_wait_synced().
         */
        mutex_enter(&zilog->zl_lock);
        ASSERT(lwb->lwb_state == UNWRITTEN);
        lwb->lwb_nused = 0;
        lwb->lwb_seq = 0;
        mutex_exit(&zilog->zl_lock);
        txg_rele_to_sync(&lwb->lwb_txgh);
        return (NULL);
    }

    ASSERT3U(bp->blk_birth, ==, txg);
    ztp->zit_pad = 0;
    ztp->zit_nused = lwb->lwb_nused;
    ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
    bp->blk_cksum = lwb->lwb_blk.blk_cksum;
    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

    /*
     * Allocate a new log write buffer (lwb).
     */
    nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

    nlwb->lwb_zilog = zilog;
    nlwb->lwb_blk = *bp;
    nlwb->lwb_nused = 0;
    nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
    nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
    nlwb->lwb_max_txg = txg;
    nlwb->lwb_seq = 0;
    nlwb->lwb_state = UNWRITTEN;
    nlwb->lwb_zio = NULL;

    /*
     * Put new lwb at the end of the log chain,
     * and record the vdev for later flushing
     */
    mutex_enter(&zilog->zl_lock);
    list_insert_tail(&zilog->zl_lwb_list, nlwb);
    zil_add_vdev(zilog, DVA_GET_VDEV(BP_IDENTITY(&(lwb->lwb_blk))),
        lwb->lwb_seq);
    mutex_exit(&zilog->zl_lock);

    /*
     * kick off the write for the old log block
     */
    dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
    if (lwb->lwb_zio == NULL) {
        /*
         * This can only happen if there are no log records in this
         * block (i.e. the first record to go in was too big to fit).
         * XXX - would be nice if we could avoid this IO
         */
        ASSERT(lwb->lwb_nused == 0);
        zil_lwb_write_init(zilog, lwb);
    }
    zio_nowait(lwb->lwb_zio);

    return (nlwb);
}
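/*
 * Worked example of the blocksize heuristic above (the numbers are made
 * up, and assume for illustration that ZIL_MIN_BLKSZ is 4K and
 * ZIL_MAX_BLKSZ is 128K):
 *
 *     zl_prev_used = 10K, zl_cur_used = 3K, zl_itx_list_sz = 17K
 *     zil_blksz = MAX(10K, 3K + trailer)  = 10K
 *     zil_blksz = MAX(10K, 17K + trailer) = 17K + trailer
 *     zil_blksz = P2ROUNDUP(..., 4K)      = 20K, which is under the cap,
 *     so the next log block is allocated at 20K.
 */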
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
    lr_t *lrc = &itx->itx_lr;		/* common log record */
    lr_write_t *lr = (lr_write_t *)lrc;
    uint64_t seq = lrc->lrc_seq;
    uint64_t txg = lrc->lrc_txg;
    uint64_t reclen = lrc->lrc_reclen;
    uint64_t dlen;

    if (lwb == NULL)
        return (NULL);
    ASSERT(lwb->lwb_buf != NULL);

    if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
        dlen = P2ROUNDUP_TYPED(
            lr->lr_length, sizeof (uint64_t), uint64_t);
    else
        dlen = 0;

    zilog->zl_cur_used += (reclen + dlen);

    /*
     * If this record won't fit in the current log block, start a new one.
     */
    if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
        lwb = zil_lwb_write_start(zilog, lwb);
        if (lwb == NULL)
            return (NULL);
        ASSERT(lwb->lwb_nused == 0);
        if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
            txg_wait_synced(zilog->zl_dmu_pool, txg);
            mutex_enter(&zilog->zl_lock);
            zilog->zl_ss_seq = MAX(seq, zilog->zl_ss_seq);
            mutex_exit(&zilog->zl_lock);
            return (lwb);
        }
    }

    if (lwb->lwb_zio == NULL)
        zil_lwb_write_init(zilog, lwb);

    bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);

    /*
     * If it's a write, fetch the data or get its blkptr as appropriate.
     */
    if (lrc->lrc_txtype == TX_WRITE) {
        if (txg > spa_freeze_txg(zilog->zl_spa))
            txg_wait_synced(zilog->zl_dmu_pool, txg);
        if (itx->itx_wr_state != WR_COPIED) {
            char *dbuf;
            int error;

            /* alignment is guaranteed */
            lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
            if (dlen) {
                ASSERT(itx->itx_wr_state == WR_NEED_COPY);
                dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
                lr->lr_common.lrc_reclen += dlen;
            } else {
                ASSERT(itx->itx_wr_state == WR_INDIRECT);
                dbuf = NULL;
            }
            error = zilog->zl_get_data(
                itx->itx_private, lr, dbuf, lwb->lwb_zio);
            if (error) {
                ASSERT(error == ENOENT || error == EEXIST ||
                    error == EALREADY);
                return (lwb);
            }
        }
    }

    mutex_enter(&zilog->zl_lock);
    ASSERT(seq > zilog->zl_wait_seq);
    zilog->zl_wait_seq = seq;
    mutex_exit(&zilog->zl_lock);
    lwb->lwb_nused += reclen + dlen;
    lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
    ASSERT3U(lwb->lwb_seq, <, seq);
    lwb->lwb_seq = seq;
    ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
    ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

    return (lwb);
}
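/*
 * A sketch of the three TX_WRITE record shapes handled above (illustrative
 * only; the exact field layout is defined by lr_write_t):
 *
 *     WR_COPIED:    [ lr_write_t | data already copied into the itx ]
 *                   the whole record, data included, is bcopy'd as-is.
 *     WR_NEED_COPY: [ lr_write_t ][ data fetched into the lwb at dbuf ]
 *                   lrc_reclen grows by dlen (rounded to 8 bytes).
 *     WR_INDIRECT:  [ lr_write_t ] only; zl_get_data() fills lr_blkptr
 *                   so the log references the data block instead of
 *                   storing the bytes.
 */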
itx_t *
zil_itx_create(int txtype, size_t lrsize)
{
    itx_t *itx;

    lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

    itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
    itx->itx_lr.lrc_txtype = txtype;
    itx->itx_lr.lrc_reclen = lrsize;
    itx->itx_lr.lrc_seq = 0;		/* defensive */

    return (itx);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
    uint64_t seq;

    ASSERT(itx->itx_lr.lrc_seq == 0);

    mutex_enter(&zilog->zl_lock);
    list_insert_tail(&zilog->zl_itx_list, itx);
    zilog->zl_itx_list_sz += itx->itx_lr.lrc_reclen;
    itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
    itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
    mutex_exit(&zilog->zl_lock);

    return (seq);
}

/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
    uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
    uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
    uint64_t max_seq = 0;
    itx_t *itx;

    mutex_enter(&zilog->zl_lock);
    while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
        itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
        list_remove(&zilog->zl_itx_list, itx);
        zilog->zl_itx_list_sz -= itx->itx_lr.lrc_reclen;
        ASSERT3U(max_seq, <, itx->itx_lr.lrc_seq);
        max_seq = itx->itx_lr.lrc_seq;
        kmem_free(itx, offsetof(itx_t, itx_lr)
            + itx->itx_lr.lrc_reclen);
    }
    if (max_seq > zilog->zl_ss_seq) {
        zilog->zl_ss_seq = max_seq;
        cv_broadcast(&zilog->zl_cv_seq);
    }
    mutex_exit(&zilog->zl_lock);
}

void
zil_clean(zilog_t *zilog)
{
    /*
     * Check for any log blocks that can be freed.
     * Log blocks are only freed when the log block allocation and
     * log records contained within are both known to be committed.
     */
    mutex_enter(&zilog->zl_lock);
    if (list_head(&zilog->zl_itx_list) != NULL)
        (void) taskq_dispatch(zilog->zl_clean_taskq,
            (void (*)(void *))zil_itx_clean, zilog, TQ_NOSLEEP);
    mutex_exit(&zilog->zl_lock);
}
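/*
 * For reference, the intent-log transaction lifecycle driven by the
 * functions above and zil_commit() below looks roughly like this from a
 * caller's point of view (a sketch of the ordering only, not real call
 * sites; the caller-side logging code lives outside this file):
 *
 *     itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t) + ...);
 *     ... fill in itx->itx_lr fields for the operation ...
 *     seq = zil_itx_assign(zilog, itx, tx);	(inside the DMU tx)
 *     ...
 *     zil_commit(zilog, seq, FSYNC);		(on fsync/O_DSYNC)
 */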
/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, int ioflag)
{
    uint64_t txg;
    uint64_t max_seq;
    uint64_t reclen;
    itx_t *itx;
    lwb_t *lwb;
    spa_t *spa;

    if (zilog == NULL || seq == 0 ||
        ((ioflag & (FSYNC | FDSYNC | FRSYNC)) == 0 && !zil_always))
        return;

    spa = zilog->zl_spa;
    mutex_enter(&zilog->zl_lock);

    seq = MIN(seq, zilog->zl_itx_seq);	/* cap seq at largest itx seq */

    for (;;) {
        if (zilog->zl_ss_seq >= seq) {	/* already on stable storage */
            mutex_exit(&zilog->zl_lock);
            return;
        }

        if (zilog->zl_writer == B_FALSE)	/* no one writing, do it */
            break;

        cv_wait(&zilog->zl_cv_write, &zilog->zl_lock);
    }

    zilog->zl_writer = B_TRUE;
    max_seq = 0;

    if (zilog->zl_suspend) {
        lwb = NULL;
    } else {
        lwb = list_tail(&zilog->zl_lwb_list);
        if (lwb == NULL) {
            mutex_exit(&zilog->zl_lock);
            zil_create(zilog);
            mutex_enter(&zilog->zl_lock);
            lwb = list_tail(&zilog->zl_lwb_list);
        }
    }

    /*
     * Loop through in-memory log transactions filling log blocks,
     * until we reach the given sequence number and there's no more
     * room in the write buffer.
     */
    for (;;) {
        itx = list_head(&zilog->zl_itx_list);
        if (itx == NULL)
            break;

        reclen = itx->itx_lr.lrc_reclen;
        if ((itx->itx_lr.lrc_seq > seq) &&
            ((lwb == NULL) || (lwb->lwb_nused + reclen >
            ZIL_BLK_DATA_SZ(lwb))))
            break;

        list_remove(&zilog->zl_itx_list, itx);
        txg = itx->itx_lr.lrc_txg;
        ASSERT(txg);

        mutex_exit(&zilog->zl_lock);
        if (txg > spa_last_synced_txg(spa) ||
            txg > spa_freeze_txg(spa))
            lwb = zil_lwb_commit(zilog, itx, lwb);
        else
            max_seq = itx->itx_lr.lrc_seq;
        kmem_free(itx, offsetof(itx_t, itx_lr)
            + itx->itx_lr.lrc_reclen);
        mutex_enter(&zilog->zl_lock);
        zilog->zl_itx_list_sz -= reclen;
    }

    mutex_exit(&zilog->zl_lock);

    /* write the last block out */
    if (lwb != NULL && lwb->lwb_nused != 0)
        lwb = zil_lwb_write_start(zilog, lwb);

    zilog->zl_prev_used = zilog->zl_cur_used;
    zilog->zl_cur_used = 0;

    mutex_enter(&zilog->zl_lock);
    if (max_seq > zilog->zl_ss_seq) {
        zilog->zl_ss_seq = max_seq;
        cv_broadcast(&zilog->zl_cv_seq);
    }
    /*
     * Wait if necessary for our seq to be committed.
     */
    if (lwb && zilog->zl_wait_seq) {
        while (zilog->zl_ss_seq < zilog->zl_wait_seq &&
            zilog->zl_log_error == 0)
            cv_wait(&zilog->zl_cv_seq, &zilog->zl_lock);
        zil_flush_vdevs(zilog, seq);
    }

    if (zilog->zl_log_error || lwb == NULL) {
        zilog->zl_log_error = 0;
        max_seq = zilog->zl_itx_seq;
        mutex_exit(&zilog->zl_lock);
        txg_wait_synced(zilog->zl_dmu_pool, 0);
        mutex_enter(&zilog->zl_lock);
        zilog->zl_ss_seq = MAX(max_seq, zilog->zl_ss_seq);
        cv_broadcast(&zilog->zl_cv_seq);
    }
    /* wake up others waiting to start a write */
    zilog->zl_wait_seq = 0;
    zilog->zl_writer = B_FALSE;
    mutex_exit(&zilog->zl_lock);
    cv_broadcast(&zilog->zl_cv_write);
}
/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
    zil_header_t *zh = zil_header_in_syncing_context(zilog);
    uint64_t txg = dmu_tx_get_txg(tx);
    spa_t *spa = zilog->zl_spa;
    lwb_t *lwb;

    mutex_enter(&zilog->zl_lock);

    ASSERT(zilog->zl_stop_sync == 0);

    zh->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];

    if (zilog->zl_destroy_txg == txg) {
        blkptr_t blk = zh->zh_log;

        ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
        ASSERT(spa_sync_pass(spa) == 1);

        bzero(zh, sizeof (zil_header_t));
        bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));

        if (zilog->zl_keep_first) {
            /*
             * If this block was part of a log chain that couldn't
             * be claimed because a device was missing during
             * zil_claim(), but that device later returns,
             * then this block could erroneously appear valid.
             * To guard against this, assign a new GUID to the new
             * log chain so it doesn't matter what blk points to.
             */
            zil_init_log_chain(zilog, &blk);
            zh->zh_log = blk;
        }
    }

    for (;;) {
        lwb = list_head(&zilog->zl_lwb_list);
        if (lwb == NULL) {
            mutex_exit(&zilog->zl_lock);
            return;
        }
        if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
            break;
        list_remove(&zilog->zl_lwb_list, lwb);
        zio_free_blk(spa, &lwb->lwb_blk, txg);
        kmem_cache_free(zil_lwb_cache, lwb);
    }
    zh->zh_log = lwb->lwb_blk;
    mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
    zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
        sizeof (struct lwb), NULL, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
    kmem_cache_destroy(zil_lwb_cache);
}
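/*
 * The per-objset setup and teardown entry points below are used in pairs;
 * the expected ordering, as these interfaces appear intended to be called
 * (only a sketch, the real call sites live outside this file), is:
 *
 *     zilog = zil_alloc(os, zh_phys);	allocate in-memory ZIL state
 *     zilog = zil_open(os, get_data);	attach the data-fetch callback
 *     ...				log, commit and clean activity
 *     zil_close(zilog);		quiesce and stop the clean taskq
 *     zil_free(zilog);			tear down the in-memory state
 */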
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
    zilog_t *zilog;

    zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

    zilog->zl_header = zh_phys;
    zilog->zl_os = os;
    zilog->zl_spa = dmu_objset_spa(os);
    zilog->zl_dmu_pool = dmu_objset_pool(os);
    zilog->zl_destroy_txg = TXG_INITIAL - 1;

    list_create(&zilog->zl_itx_list, sizeof (itx_t),
        offsetof(itx_t, itx_node));

    list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
        offsetof(lwb_t, lwb_node));

    list_create(&zilog->zl_vdev_list, sizeof (zil_vdev_t),
        offsetof(zil_vdev_t, vdev_seq_node));

    return (zilog);
}

void
zil_free(zilog_t *zilog)
{
    lwb_t *lwb;
    zil_vdev_t *zv;

    zilog->zl_stop_sync = 1;

    while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
        list_remove(&zilog->zl_lwb_list, lwb);
        if (lwb->lwb_buf != NULL)
            zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
        kmem_cache_free(zil_lwb_cache, lwb);
    }
    list_destroy(&zilog->zl_lwb_list);

    while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
        list_remove(&zilog->zl_vdev_list, zv);
        kmem_free(zv, sizeof (zil_vdev_t));
    }
    list_destroy(&zilog->zl_vdev_list);

    ASSERT(list_head(&zilog->zl_itx_list) == NULL);
    list_destroy(&zilog->zl_itx_list);

    kmem_free(zilog, sizeof (zilog_t));
}

/*
 * return true if the initial log block is not valid
 */
static int
zil_empty(zilog_t *zilog)
{
    const zil_header_t *zh = zilog->zl_header;
    arc_buf_t *abuf = NULL;

    if (BP_IS_HOLE(&zh->zh_log))
        return (1);

    if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
        return (1);

    VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
    return (0);
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
    zilog_t *zilog = dmu_objset_zil(os);

    zilog->zl_get_data = get_data;
    zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
        2, 2, TASKQ_PREPOPULATE);

    return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
    /*
     * If the log isn't already committed, mark the objset dirty
     * (so zil_sync() will be called) and wait for that txg to sync.
     */
    if (!zil_is_committed(zilog)) {
        uint64_t txg;
        dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
        (void) dmu_tx_assign(tx, TXG_WAIT);
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
        txg = dmu_tx_get_txg(tx);
        dmu_tx_commit(tx);
        txg_wait_synced(zilog->zl_dmu_pool, txg);
    }

    taskq_destroy(zilog->zl_clean_taskq);
    zilog->zl_clean_taskq = NULL;
    zilog->zl_get_data = NULL;

    zil_itx_clean(zilog);
    ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}
/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
    const zil_header_t *zh = zilog->zl_header;
    lwb_t *lwb;

    mutex_enter(&zilog->zl_lock);
    if (zh->zh_claim_txg != 0) {		/* unplayed log */
        mutex_exit(&zilog->zl_lock);
        return (EBUSY);
    }
    if (zilog->zl_suspend++ != 0) {
        /*
         * Someone else already began a suspend.
         * Just wait for them to finish.
         */
        while (zilog->zl_suspending)
            cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
        ASSERT(BP_IS_HOLE(&zh->zh_log));
        mutex_exit(&zilog->zl_lock);
        return (0);
    }
    zilog->zl_suspending = B_TRUE;
    mutex_exit(&zilog->zl_lock);

    zil_commit(zilog, UINT64_MAX, FSYNC);

    mutex_enter(&zilog->zl_lock);
    for (;;) {
        /*
         * Wait for any in-flight log writes to complete.
         */
        for (lwb = list_head(&zilog->zl_lwb_list); lwb != NULL;
            lwb = list_next(&zilog->zl_lwb_list, lwb))
            if (lwb->lwb_seq != 0 && lwb->lwb_state != SEQ_COMPLETE)
                break;

        if (lwb == NULL)
            break;

        cv_wait(&zilog->zl_cv_seq, &zilog->zl_lock);
    }

    mutex_exit(&zilog->zl_lock);

    zil_destroy(zilog, B_FALSE);

    mutex_enter(&zilog->zl_lock);
    ASSERT(BP_IS_HOLE(&zh->zh_log));
    zilog->zl_suspending = B_FALSE;
    cv_broadcast(&zilog->zl_cv_suspend);
    mutex_exit(&zilog->zl_lock);

    return (0);
}

void
zil_resume(zilog_t *zilog)
{
    mutex_enter(&zilog->zl_lock);
    ASSERT(zilog->zl_suspend != 0);
    zilog->zl_suspend--;
    mutex_exit(&zilog->zl_lock);
}
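/*
 * zil_suspend() and zil_resume() above are meant to bracket an operation
 * that needs an empty, quiesced log. A snapshot-taking caller would do
 * something along these lines (a sketch only; the real snapshot code
 * lives outside this file):
 *
 *     if ((error = zil_suspend(zilog)) != 0)
 *         return (error);
 *     ... take the snapshot while the intent log is empty ...
 *     zil_resume(zilog);
 */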
typedef struct zil_replay_arg {
    objset_t	*zr_os;
    zil_replay_func_t **zr_replay;
    void	*zr_arg;
    void	(*zr_rm_sync)(void *arg);
    uint64_t	*zr_txgp;
    boolean_t	zr_byteswap;
    char	*zr_lrbuf;
} zil_replay_arg_t;

static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
    zil_replay_arg_t *zr = zra;
    const zil_header_t *zh = zilog->zl_header;
    uint64_t reclen = lr->lrc_reclen;
    uint64_t txtype = lr->lrc_txtype;
    int pass, error;

    if (zilog->zl_stop_replay)
        return;

    if (lr->lrc_txg < claim_txg)		/* already committed */
        return;

    if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
        return;

    /*
     * Make a copy of the data so we can revise and extend it.
     */
    bcopy(lr, zr->zr_lrbuf, reclen);

    /*
     * The log block containing this lr may have been byteswapped
     * so that we can easily examine common fields like lrc_txtype.
     * However, the log is a mix of different data types, and only the
     * replay vectors know how to byteswap their records. Therefore, if
     * the lr was byteswapped, undo it before invoking the replay vector.
     */
    if (zr->zr_byteswap)
        byteswap_uint64_array(zr->zr_lrbuf, reclen);

    /*
     * If this is a TX_WRITE with a blkptr, suck in the data.
     */
    if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
        lr_write_t *lrw = (lr_write_t *)lr;
        blkptr_t *wbp = &lrw->lr_blkptr;
        uint64_t wlen = lrw->lr_length;
        char *wbuf = zr->zr_lrbuf + reclen;

        if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
            bzero(wbuf, wlen);
        } else {
            /*
             * A subsequent write may have overwritten this block,
             * in which case wbp may have been freed and
             * reallocated, and our read of wbp may fail with a
             * checksum error. We can safely ignore this because
             * the later write will provide the correct data.
             */
            zbookmark_t zb;

            zb.zb_objset = dmu_objset_id(zilog->zl_os);
            zb.zb_object = lrw->lr_foid;
            zb.zb_level = -1;
            zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);

            (void) zio_wait(zio_read(NULL, zilog->zl_spa,
                wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
                ZIO_PRIORITY_SYNC_READ,
                ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
            (void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
        }
    }

    /*
     * We must now do two things atomically: replay this log record,
     * and update the log header to reflect the fact that we did so.
     * We use the DMU's ability to assign into a specific txg to do this.
     */
    for (pass = 1; /* CONSTANTCONDITION */; pass++) {
        uint64_t replay_txg;
        dmu_tx_t *replay_tx;

        replay_tx = dmu_tx_create(zr->zr_os);
        error = dmu_tx_assign(replay_tx, TXG_WAIT);
        if (error) {
            dmu_tx_abort(replay_tx);
            break;
        }

        replay_txg = dmu_tx_get_txg(replay_tx);

        if (txtype == 0 || txtype >= TX_MAX_TYPE) {
            error = EINVAL;
        } else {
            /*
             * On the first pass, arrange for the replay vector
             * to fail its dmu_tx_assign(). That's the only way
             * to ensure that those code paths remain well tested.
             */
            *zr->zr_txgp = replay_txg - (pass == 1);
            error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
                zr->zr_byteswap);
            *zr->zr_txgp = TXG_NOWAIT;
        }

        if (error == 0) {
            dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
            zilog->zl_replay_seq[replay_txg & TXG_MASK] =
                lr->lrc_seq;
        }

        dmu_tx_commit(replay_tx);

        if (error != ERESTART)
            break;

        if (pass != 1)
            txg_wait_open(spa_get_dsl(zilog->zl_spa),
                replay_txg + 1);

        dprintf("pass %d, retrying\n", pass);
    }

    if (error) {
        char *name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
        dmu_objset_name(zr->zr_os, name);
        cmn_err(CE_WARN, "ZFS replay transaction error %d, "
            "dataset %s, seq 0x%llx, txtype %llu\n",
            error, name,
            (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype);
        zilog->zl_stop_replay = 1;
        kmem_free(name, MAXNAMELEN);
    }

    /*
     * The DMU's dnode layer doesn't see removes until the txg commits,
     * so a subsequent claim can spuriously fail with EEXIST.
     * To prevent this, if we might have removed an object,
     * wait for the delete thread to delete it, and then
     * wait for the transaction group to sync.
     */
    if (txtype == TX_REMOVE || txtype == TX_RMDIR || txtype == TX_RENAME) {
        if (zr->zr_rm_sync != NULL)
            zr->zr_rm_sync(zr->zr_arg);
        txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
    }
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, uint64_t *txgp,
    zil_replay_func_t *replay_func[TX_MAX_TYPE], void (*rm_sync)(void *arg))
{
    zilog_t *zilog = dmu_objset_zil(os);
    const zil_header_t *zh = zilog->zl_header;
    zil_replay_arg_t zr;

    if (zil_empty(zilog)) {
        zil_destroy(zilog, B_TRUE);
        return;
    }

    zr.zr_os = os;
    zr.zr_replay = replay_func;
    zr.zr_arg = arg;
    zr.zr_rm_sync = rm_sync;
    zr.zr_txgp = txgp;
    zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
    zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

    /*
     * Wait for in-progress removes to sync before starting replay.
     */
    if (rm_sync != NULL)
        rm_sync(arg);
    txg_wait_synced(zilog->zl_dmu_pool, 0);

    zilog->zl_stop_replay = 0;
    (void) zil_parse(zilog, NULL, zil_replay_log_record, &zr,
        zh->zh_claim_txg);
    kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

    zil_destroy(zilog, B_FALSE);
}

/*
 * Report whether all transactions are committed
 */
int
zil_is_committed(zilog_t *zilog)
{
    lwb_t *lwb;

    if (!list_is_empty(&zilog->zl_itx_list))
        return (B_FALSE);

    /*
     * A log write buffer at the head of the list that is not UNWRITTEN
     * means there's an lwb yet to be freed after a txg commit.
     */
    lwb = list_head(&zilog->zl_lwb_list);
    if (lwb && lwb->lwb_state != UNWRITTEN)
        return (B_FALSE);
    ASSERT(zil_empty(zilog));
    return (B_TRUE);
}