/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to an fsync, O_DSYNC, or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 *	- ZIL header
 *	- ZIL blocks
 *	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. Figure X shows the ZIL structure:
 */
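
/*
 * A rough sketch of the structure described above:
 *
 *	ZIL header           ZIL block            ZIL block
 *	+-----------+        +------------+       +------------+
 *	| zh_log ---+------> | records... |  +--> | records... |  +--> ...
 *	+-----------+        | trailer ---+--+    | trailer ---+--+
 *	                     +------------+       +------------+
 *
 * Each block's trailer (zil_trailer_t) embeds the blkptr_t of the next
 * block in the chain (zit_next_blk); the header holds the blkptr_t of
 * the first block.
 */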

/*
 * These global ZIL switches affect all pools
 */
int zil_disable = 0;	/* disable intent logging */
int zil_always = 0;	/* make every transaction synchronous */
int zil_purge = 0;	/* at pool open, just throw everything away */
int zil_noflush = 0;	/* don't flush write cache buffers on disks */

static kmem_cache_t *zil_lwb_cache;

static int
zil_dva_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = x1;
	const dva_t *dva2 = x2;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
	    offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
	zil_dva_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_dva_node_t));

	avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
	zil_dva_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
{
	blkptr_t blk = *bp;
	zbookmark_t zb;
	int error;

	zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];

	*abufpp = NULL;

	error = arc_read(NULL, zilog->zl_spa, &blk, byteswap_uint64_array,
	    arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, ARC_WAIT, &zb);

	if (error == 0) {
		char *data = (*abufpp)->b_data;
		uint64_t blksz = BP_GET_LSIZE(bp);
		zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 */
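		/*
		 * The checks below distinguish three cases for callers:
		 * ESTALE means the trailer's embedded next-block checksum
		 * doesn't chain from this block's (the block contents are
		 * stale), ENOENT means the next block pointer is a hole,
		 * and EOVERFLOW means the trailer claims more used bytes
		 * than the block can hold. Callers treat any error as the
		 * end of the usable chain.
		 */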
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum, sizeof (cksum)))
			error = ESTALE;
		else if (BP_IS_HOLE(&ztp->zit_next_blk))
			error = ENOENT;
		else if (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))
			error = EOVERFLOW;

		if (error) {
			VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
			*abufpp = NULL;
		}
	}

	dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 * Return the highest sequence number.
 */
uint64_t
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	uint64_t claim_seq = zh->zh_claim_seq;
	uint64_t seq = 0;
	uint64_t max_seq = 0;
	blkptr_t blk = zh->zh_log;
	arc_buf_t *abuf;
	char *lrbuf, *lrp;
	zil_trailer_t *ztp;
	int reclen, error;

	if (BP_IS_HOLE(&blk))
		return (max_seq);

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	zil_dva_tree_init(&zilog->zl_dva_tree);
	for (;;) {
		seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

		if (claim_seq != 0 && seq > claim_seq)
			break;

		ASSERT(max_seq < seq);
		max_seq = seq;

		error = zil_read_log_block(zilog, &blk, &abuf);

		if (parse_blk_func != NULL)
			parse_blk_func(zilog, &blk, arg, txg);

		if (error)
			break;

		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;

		if (parse_lr_func == NULL) {
			VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
			continue;
		}

		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			parse_lr_func(zilog, lr, arg, txg);
		}
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	zil_dva_tree_fini(&zilog->zl_dva_tree);

	return (max_seq);
}

/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	spa_t *spa = zilog->zl_spa;
	int err;

	/*
	 * Claim log block if not already committed and not already claimed.
	 */
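	/*
	 * The zl_dva_tree remembers DVAs that have already been claimed,
	 * so a block referenced both as part of the block chain and by a
	 * TX_WRITE record is only claimed once during the claim pass.
	 */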
	if (bp->blk_birth >= first_txg &&
	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL));
		ASSERT(err == 0);
	}
}

static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
	}
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		if (bp->blk_birth >= claim_txg &&
		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
			(void) arc_free(NULL, zilog->zl_spa,
			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
		}
	}
}

/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * If we don't already have an initial log block, allocate one now.
	 */
	if (BP_IS_HOLE(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk, txg);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0) {
		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
		lwb->lwb_zilog = zilog;
		lwb->lwb_blk = blk;
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
		lwb->lwb_max_txg = txg;
		lwb->lwb_seq = 0;
		lwb->lwb_state = UNWRITTEN;
		mutex_enter(&zilog->zl_lock);
		list_insert_tail(&zilog->zl_lwb_list, lwb);
		mutex_exit(&zilog->zl_lock);
	}

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		ASSERT(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
		mutex_exit(&zilog->zl_lock);
	} else {
		mutex_exit(&zilog->zl_lock);
		if (!keep_first) {
			(void) zil_parse(zilog, zil_free_log_block,
			    zil_free_log_record, tx, zh->zh_claim_txg);
		}
	}

	dmu_tx_commit(tx);

	if (keep_first)			/* no need to wait in this case */
		return;

	txg_wait_synced(zilog->zl_dmu_pool, txg);
	ASSERT(BP_IS_HOLE(&zh->zh_log));
}

int
zil_claim(char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_STANDARD, &os);
	if (error) {
		cmn_err(CE_WARN, "can't process intent log for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_close(os);
	return (0);
}

void
zil_add_vdev(zilog_t *zilog, uint64_t vdev, uint64_t seq)
{
	zil_vdev_t *zv;

	if (zil_noflush)
		return;

	ASSERT(MUTEX_HELD(&zilog->zl_lock));
	zv = kmem_alloc(sizeof (zil_vdev_t), KM_SLEEP);
	zv->vdev = vdev;
	zv->seq = seq;
	list_insert_tail(&zilog->zl_vdev_list, zv);
}

void
zil_flush_vdevs(zilog_t *zilog, uint64_t seq)
{
	vdev_t *vd;
	zil_vdev_t *zv, *zv2;
	zio_t *zio;
	spa_t *spa;
	uint64_t vdev;

	if (zil_noflush)
		return;

	ASSERT(MUTEX_HELD(&zilog->zl_lock));

	spa = zilog->zl_spa;
	zio = NULL;

	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL &&
	    zv->seq <= seq) {
		vdev = zv->vdev;
		list_remove(&zilog->zl_vdev_list, zv);
		kmem_free(zv, sizeof (zil_vdev_t));

		/*
		 * remove all chained entries <= seq with same vdev
		 */
		zv = list_head(&zilog->zl_vdev_list);
		while (zv && zv->seq <= seq) {
			zv2 = list_next(&zilog->zl_vdev_list, zv);
			if (zv->vdev == vdev) {
				list_remove(&zilog->zl_vdev_list, zv);
				kmem_free(zv, sizeof (zil_vdev_t));
			}
			zv = zv2;
		}

		/* flush the write cache for this vdev */
		mutex_exit(&zilog->zl_lock);
		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		vd = vdev_lookup_top(spa, vdev);
		ASSERT(vd);
		(void) zio_nowait(zio_ioctl(zio, spa, vd, DKIOCFLUSHWRITECACHE,
		    NULL, NULL, ZIO_PRIORITY_NOW,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY));
		mutex_enter(&zilog->zl_lock);
	}

	/*
	 * Wait for all the flushes to complete. Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	if (zio != NULL) {
		mutex_exit(&zilog->zl_lock);
		(void) zio_wait(zio);
		mutex_enter(&zilog->zl_lock);
	}
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *prev;
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	uint64_t max_seq;

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	txg_rele_to_sync(&lwb->lwb_txgh);

	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	if (zio->io_error) {
		zilog->zl_log_error = B_TRUE;
		mutex_exit(&zilog->zl_lock);
		cv_broadcast(&zilog->zl_cv_seq);
		return;
	}

	prev = list_prev(&zilog->zl_lwb_list, lwb);
	if (prev && prev->lwb_state != SEQ_COMPLETE) {
		/* There's an unwritten buffer in the chain before this one */
		lwb->lwb_state = SEQ_INCOMPLETE;
		mutex_exit(&zilog->zl_lock);
		return;
	}

	max_seq = lwb->lwb_seq;
	lwb->lwb_state = SEQ_COMPLETE;
	/*
	 * We must also follow up the chain for already written buffers
	 * to see if we can set zl_ss_seq even higher.
	 */
	while (lwb = list_next(&zilog->zl_lwb_list, lwb)) {
		if (lwb->lwb_state != SEQ_INCOMPLETE)
			break;
		lwb->lwb_state = SEQ_COMPLETE;
		/* lwb_seq will be zero if we've written an empty buffer */
		if (lwb->lwb_seq) {
			ASSERT3U(max_seq, <, lwb->lwb_seq);
			max_seq = lwb->lwb_seq;
		}
	}
	zilog->zl_ss_seq = MAX(max_seq, zilog->zl_ss_seq);
	mutex_exit(&zilog->zl_lock);
	cv_broadcast(&zilog->zl_cv_seq);
}
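
/*
 * Roughly, each lwb moves through three states: UNWRITTEN while it is
 * being filled, SEQ_INCOMPLETE once its write has completed but an
 * earlier lwb in the list is still outstanding, and SEQ_COMPLETE once
 * it and everything before it are on stable storage. zl_ss_seq only
 * advances over a prefix of SEQ_COMPLETE buffers, which is what the
 * walk above and the wait in zil_commit() rely on.
 */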

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb;
	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp = &ztp->zit_next_blk;
	uint64_t txg;
	uint64_t zil_blksz;
	zbookmark_t zb;
	int error;

	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
	 */
	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
	txg_rele_to_quiesce(&lwb->lwb_txgh);

	/*
	 * Pick a ZIL blocksize. We request a size that is the
	 * maximum of the previous used size, the current used size and
	 * the amount waiting in the queue.
	 */
	zil_blksz = MAX(zilog->zl_cur_used, zilog->zl_prev_used);
	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
	zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
	if (zil_blksz > ZIL_MAX_BLKSZ)
		zil_blksz = ZIL_MAX_BLKSZ;

	error = zio_alloc_blk(spa, zil_blksz, bp, txg);
	if (error) {
		/*
		 * Reinitialise the lwb.
		 * By returning NULL the caller will call txg_wait_synced()
		 */
		mutex_enter(&zilog->zl_lock);
		ASSERT(lwb->lwb_state == UNWRITTEN);
		lwb->lwb_nused = 0;
		lwb->lwb_seq = 0;
		mutex_exit(&zilog->zl_lock);
		txg_rele_to_sync(&lwb->lwb_txgh);
		return (NULL);
	}

	ASSERT3U(bp->blk_birth, ==, txg);
	ztp->zit_pad = 0;
	ztp->zit_nused = lwb->lwb_nused;
	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

	/*
	 * Allocate a new log write buffer (lwb).
	 */
	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

	nlwb->lwb_zilog = zilog;
	nlwb->lwb_blk = *bp;
	nlwb->lwb_nused = 0;
	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
	nlwb->lwb_max_txg = txg;
	nlwb->lwb_seq = 0;
	nlwb->lwb_state = UNWRITTEN;

	/*
	 * Put new lwb at the end of the log chain,
	 * and record the vdev for later flushing
	 */
	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, nlwb);
	zil_add_vdev(zilog, DVA_GET_VDEV(BP_IDENTITY(&(lwb->lwb_blk))),
	    lwb->lwb_seq);
	mutex_exit(&zilog->zl_lock);

	/*
	 * write the old log block
	 */
	zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

	zio_nowait(zio_rewrite(NULL, spa, ZIO_CHECKSUM_ZILOG, 0,
	    &lwb->lwb_blk, lwb->lwb_buf, lwb->lwb_sz, zil_lwb_write_done, lwb,
	    ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb));

	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lr;
	char *dbuf;
	uint64_t seq = lrc->lrc_seq;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen = 0;
	int error;

	if (lwb == NULL)
		return (NULL);
	ASSERT(lwb->lwb_buf != NULL);

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		lr = (lr_write_t *)lrc;
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			if (itx->itx_wr_state == WR_NEED_COPY) {
				dlen = P2ROUNDUP_TYPED(lr->lr_length,
				    sizeof (uint64_t), uint64_t);
				ASSERT(dlen);
				dbuf = kmem_alloc(dlen, KM_NOSLEEP);
				/* on memory shortage use dmu_sync */
				if (dbuf == NULL) {
					itx->itx_wr_state = WR_INDIRECT;
					dlen = 0;
				}
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(itx->itx_private, lr, dbuf);
			if (error) {
				if (dlen)
					kmem_free(dbuf, dlen);
				if (error != ENOENT && error != EALREADY) {
					txg_wait_synced(zilog->zl_dmu_pool,
					    txg);
					mutex_enter(&zilog->zl_lock);
					zilog->zl_ss_seq =
					    MAX(seq, zilog->zl_ss_seq);
					mutex_exit(&zilog->zl_lock);
					return (lwb);
				}
				mutex_enter(&zilog->zl_lock);
				zil_add_vdev(zilog, DVA_GET_VDEV(BP_IDENTITY(
				    &(lr->lr_blkptr))), seq);
				mutex_exit(&zilog->zl_lock);
				return (lwb);
			}
		}
	}

	zilog->zl_cur_used += (reclen + dlen);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
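	/*
	 * Note that if the record does not fit even in the freshly started
	 * (empty) block, the code below gives up on the log for this entry:
	 * it waits for the txg to sync and advances zl_ss_seq directly.
	 */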
	if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL) {
			if (dlen)
				kmem_free(dbuf, dlen);
			return (NULL);
		}
		ASSERT(lwb->lwb_nused == 0);
		if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			mutex_enter(&zilog->zl_lock);
			zilog->zl_ss_seq = MAX(seq, zilog->zl_ss_seq);
			mutex_exit(&zilog->zl_lock);
			if (dlen)
				kmem_free(dbuf, dlen);
			return (lwb);
		}
	}

	lrc->lrc_reclen += dlen;
	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);
	lwb->lwb_nused += reclen;
	if (dlen) {
		bcopy(dbuf, lwb->lwb_buf + lwb->lwb_nused, dlen);
		lwb->lwb_nused += dlen;
		kmem_free(dbuf, dlen);
		lrc->lrc_reclen -= dlen; /* for kmem_free of itx */
	}
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_seq, <, seq);
	lwb->lwb_seq = seq;
	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

itx_t *
zil_itx_create(int txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_lr.lrc_seq = 0;	/* defensive */

	return (itx);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_lr.lrc_reclen;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}

/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	uint64_t max_seq = 0;
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_lr.lrc_reclen;
		ASSERT3U(max_seq, <, itx->itx_lr.lrc_seq);
		max_seq = itx->itx_lr.lrc_seq;
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
	}
	if (max_seq > zilog->zl_ss_seq) {
		zilog->zl_ss_seq = max_seq;
		cv_broadcast(&zilog->zl_cv_seq);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_clean(zilog_t *zilog)
{
	/*
	 * Check for any log blocks that can be freed.
	 * Log blocks are only freed when the log block allocation and
	 * log records contained within are both known to be committed.
	 */
	mutex_enter(&zilog->zl_lock);
	if (list_head(&zilog->zl_itx_list) != NULL)
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (void (*)(void *))zil_itx_clean, zilog, TQ_NOSLEEP);
	mutex_exit(&zilog->zl_lock);
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 */
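/*
 * Only one thread at a time acts as the writer (zl_writer); others wait
 * on zl_cv_write. The writer pulls itxs off zl_itx_list into log write
 * buffers, kicks off the final block write, and then waits on zl_cv_seq
 * until zl_ss_seq covers the requested sequence number. If a log write
 * fails (or there is no log), it falls back to txg_wait_synced().
 */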
void
zil_commit(zilog_t *zilog, uint64_t seq, int ioflag)
{
	uint64_t txg;
	uint64_t max_seq;
	uint64_t reclen;
	itx_t *itx;
	lwb_t *lwb;
	spa_t *spa;

	if (zilog == NULL || seq == 0 ||
	    ((ioflag & (FSYNC | FDSYNC | FRSYNC)) == 0 && !zil_always))
		return;

	spa = zilog->zl_spa;
	mutex_enter(&zilog->zl_lock);

	seq = MIN(seq, zilog->zl_itx_seq);	/* cap seq at largest itx seq */

	for (;;) {
		if (zilog->zl_ss_seq >= seq) {	/* already on stable storage */
			mutex_exit(&zilog->zl_lock);
			return;
		}

		if (zilog->zl_writer == B_FALSE) /* no one writing, do it */
			break;

		cv_wait(&zilog->zl_cv_write, &zilog->zl_lock);
	}

	zilog->zl_writer = B_TRUE;
	max_seq = 0;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			mutex_exit(&zilog->zl_lock);
			zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
			lwb = list_tail(&zilog->zl_lwb_list);
		}
	}

	/*
	 * Loop through in-memory log transactions filling log blocks,
	 * until we reach the given sequence number and there's no more
	 * room in the write buffer.
	 */
	for (;;) {
		itx = list_head(&zilog->zl_itx_list);
		if (itx == NULL)
			break;

		reclen = itx->itx_lr.lrc_reclen;
		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (lwb->lwb_nused + reclen >
		    ZIL_BLK_DATA_SZ(lwb))))
			break;

		list_remove(&zilog->zl_itx_list, itx);
		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		mutex_exit(&zilog->zl_lock);
		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);
		else
			max_seq = itx->itx_lr.lrc_seq;
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
		mutex_enter(&zilog->zl_lock);
		zilog->zl_itx_list_sz -= reclen;
	}

	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_nused != 0)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	mutex_enter(&zilog->zl_lock);
	if (max_seq > zilog->zl_ss_seq) {
		zilog->zl_ss_seq = max_seq;
		cv_broadcast(&zilog->zl_cv_seq);
	}
	/*
	 * Wait if necessary for our seq to be committed.
	 */
	if (lwb) {
		while (zilog->zl_ss_seq < seq && zilog->zl_log_error == 0)
			cv_wait(&zilog->zl_cv_seq, &zilog->zl_lock);
		zil_flush_vdevs(zilog, seq);
	}

	if (zilog->zl_log_error || lwb == NULL) {
		zilog->zl_log_error = 0;
		max_seq = zilog->zl_itx_seq;
		mutex_exit(&zilog->zl_lock);
		txg_wait_synced(zilog->zl_dmu_pool, 0);
		mutex_enter(&zilog->zl_lock);
		zilog->zl_ss_seq = MAX(max_seq, zilog->zl_ss_seq);
		cv_broadcast(&zilog->zl_cv_seq);
	}
	/* wake up others waiting to start a write */
	zilog->zl_writer = B_FALSE;
	mutex_exit(&zilog->zl_lock);
	cv_broadcast(&zilog->zl_cv_write);
}

/*
 * Called in syncing context to free committed log blocks and update log header.
 */
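/*
 * An lwb is only freed here once its buffer has been written out
 * (lwb_buf == NULL) and its lwb_max_txg is no newer than the syncing
 * txg; zh_log is then advanced to the oldest remaining block so the
 * on-disk chain never references freed blocks.
 */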
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	lwb_t *lwb;

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	zh->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;

		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
		ASSERT(spa_sync_pass(spa) == 1);

		bzero(zh, sizeof (zil_header_t));
		bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of a log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		}
	}

	for (;;) {
		lwb = list_head(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_blk(spa, &lwb->lwb_blk, txg);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	zh->zh_log = lwb->lwb_blk;
	mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), NULL, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;

	list_create(&zilog->zl_itx_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	list_create(&zilog->zl_vdev_list, sizeof (zil_vdev_t),
	    offsetof(zil_vdev_t, vdev_seq_node));

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	lwb_t *lwb;
	zil_vdev_t *zv;

	zilog->zl_stop_sync = 1;

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		list_remove(&zilog->zl_lwb_list, lwb);
		if (lwb->lwb_buf != NULL)
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	list_destroy(&zilog->zl_lwb_list);

	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
		list_remove(&zilog->zl_vdev_list, zv);
		kmem_free(zv, sizeof (zil_vdev_t));
	}
	list_destroy(&zilog->zl_vdev_list);

	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
	list_destroy(&zilog->zl_itx_list);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * return true if the initial log block is not valid
 */
static int
zil_empty(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	arc_buf_t *abuf = NULL;

	if (BP_IS_HOLE(&zh->zh_log))
		return (1);

	if (zil_read_log_block(zilog, &zh->zh_log,
	    &abuf) != 0)
		return (1);

	VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	return (0);
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);

	zilog->zl_get_data = get_data;
	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
	    2, 2, TASKQ_PREPOPULATE);

	return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	/*
	 * If the log isn't already committed, mark the objset dirty
	 * (so zil_sync() will be called) and wait for that txg to sync.
	 */
	if (!zil_is_committed(zilog)) {
		uint64_t txg;
		dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	taskq_destroy(zilog->zl_clean_taskq);
	zilog->zl_clean_taskq = NULL;
	zilog->zl_get_data = NULL;

	zil_itx_clean(zilog);
	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}

/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;

	mutex_enter(&zilog->zl_lock);
	if (zh->zh_claim_txg != 0) {		/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	if (zilog->zl_suspend++ != 0) {
		/*
		 * Someone else already began a suspend.
		 * Just wait for them to finish.
		 */
		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		ASSERT(BP_IS_HOLE(&zh->zh_log));
		mutex_exit(&zilog->zl_lock);
		return (0);
	}
	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, UINT64_MAX, FSYNC);

	mutex_enter(&zilog->zl_lock);
	for (;;) {
		/*
		 * Wait for any in-flight log writes to complete.
		 */
		for (lwb = list_head(&zilog->zl_lwb_list); lwb != NULL;
		    lwb = list_next(&zilog->zl_lwb_list, lwb))
			if (lwb->lwb_seq != 0 && lwb->lwb_state != SEQ_COMPLETE)
				break;

		if (lwb == NULL)
			break;

		cv_wait(&zilog->zl_cv_seq, &zilog->zl_lock);
	}

	mutex_exit(&zilog->zl_lock);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	ASSERT(BP_IS_HOLE(&zh->zh_log));
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	return (0);
}

void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}

typedef struct zil_replay_arg {
	objset_t	*zr_os;
	zil_replay_func_t **zr_replay;
	void		*zr_arg;
	void		(*zr_rm_sync)(void *arg);
	uint64_t	*zr_txgp;
	boolean_t	zr_byteswap;
	char		*zr_lrbuf;
} zil_replay_arg_t;

static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	int pass, error;

	if (zilog->zl_stop_replay)
		return;

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return;

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lrbuf, reclen);

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different data types, and only the
	 * replay vectors know how to byteswap their records. Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lrbuf, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		lr_write_t *lrw = (lr_write_t *)lr;
		blkptr_t *wbp = &lrw->lr_blkptr;
		uint64_t wlen = lrw->lr_length;
		char *wbuf = zr->zr_lrbuf + reclen;

		if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
			bzero(wbuf, wlen);
		} else {
			/*
			 * A subsequent write may have overwritten this block,
			 * in which case wbp may have been freed and
			 * reallocated, and our read of wbp may fail with a
			 * checksum error. We can safely ignore this because
			 * the later write will provide the correct data.
			 */
			zbookmark_t zb;

			zb.zb_objset = dmu_objset_id(zilog->zl_os);
			zb.zb_object = lrw->lr_foid;
			zb.zb_level = -1;
			zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);

			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
			    ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
		}
	}

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header to reflect the fact that we did so.
	 * We use the DMU's ability to assign into a specific txg to do this.
	 */
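	/*
	 * *zr->zr_txgp tells the replay vector which txg its own
	 * dmu_tx_assign() should target (see the pass-1 trick below);
	 * it is reset to TXG_NOWAIT as soon as the vector returns.
	 */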
	for (pass = 1; /* CONSTANTCONDITION */; pass++) {
		uint64_t replay_txg;
		dmu_tx_t *replay_tx;

		replay_tx = dmu_tx_create(zr->zr_os);
		error = dmu_tx_assign(replay_tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(replay_tx);
			break;
		}

		replay_txg = dmu_tx_get_txg(replay_tx);

		if (txtype == 0 || txtype >= TX_MAX_TYPE) {
			error = EINVAL;
		} else {
			/*
			 * On the first pass, arrange for the replay vector
			 * to fail its dmu_tx_assign(). That's the only way
			 * to ensure that those code paths remain well tested.
			 */
			*zr->zr_txgp = replay_txg - (pass == 1);
			error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
			    zr->zr_byteswap);
			*zr->zr_txgp = TXG_NOWAIT;
		}

		if (error == 0) {
			dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
			zilog->zl_replay_seq[replay_txg & TXG_MASK] =
			    lr->lrc_seq;
		}

		dmu_tx_commit(replay_tx);

		if (error != ERESTART)
			break;

		if (pass != 1)
			txg_wait_open(spa_get_dsl(zilog->zl_spa),
			    replay_txg + 1);

		dprintf("pass %d, retrying\n", pass);
	}

	if (error) {
		char *name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
		dmu_objset_name(zr->zr_os, name);
		cmn_err(CE_WARN, "ZFS replay transaction error %d, "
		    "dataset %s, seq 0x%llx, txtype %llu\n",
		    error, name,
		    (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype);
		zilog->zl_stop_replay = 1;
		kmem_free(name, MAXNAMELEN);
	}

	/*
	 * The DMU's dnode layer doesn't see removes until the txg commits,
	 * so a subsequent claim can spuriously fail with EEXIST.
	 * To prevent this, if we might have removed an object,
	 * wait for the delete thread to delete it, and then
	 * wait for the transaction group to sync.
	 */
	if (txtype == TX_REMOVE || txtype == TX_RMDIR || txtype == TX_RENAME) {
		if (zr->zr_rm_sync != NULL)
			zr->zr_rm_sync(zr->zr_arg);
		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
	}
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, uint64_t *txgp,
    zil_replay_func_t *replay_func[TX_MAX_TYPE], void (*rm_sync)(void *arg))
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if (zil_empty(zilog)) {
		zil_destroy(zilog, B_TRUE);
		return;
	}

	zr.zr_os = os;
	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_rm_sync = rm_sync;
	zr.zr_txgp = txgp;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	if (rm_sync != NULL)
		rm_sync(arg);
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_stop_replay = 0;
	(void) zil_parse(zilog, NULL, zil_replay_log_record, &zr,
	    zh->zh_claim_txg);
	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
}

/*
 * Report whether all transactions are committed
 */
int
zil_is_committed(zilog_t *zilog)
{
	lwb_t *lwb;

	if (!list_is_empty(&zilog->zl_itx_list))
		return (B_FALSE);

	/*
	 * A log write buffer at the head of the list that is not UNWRITTEN
	 * means there's an lwb yet to be freed after a txg commit
	 */
	lwb = list_head(&zilog->zl_lwb_list);
	if (lwb && lwb->lwb_state != UNWRITTEN)
		return (B_FALSE);
	ASSERT(zil_empty(zilog));
	return (B_TRUE);
}