/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 *	- ZIL header
 *	- ZIL blocks
 *	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. Figure X shows the ZIL structure:
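 *
 * Roughly (the figure referred to above is not reproduced here):
 *
 *	+------------+     +-----------+     +-----------+
 *	| ZIL header | --> | ZIL block | --> | ZIL block | --> ...
 *	+------------+     | records   |     | records   |
 *	                   | blkptr    |     | blkptr    |
 *	                   +-----------+     +-----------+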
 */

/*
 * These global ZIL switches affect all pools
 */
int zil_disable = 0;    /* disable intent logging */
int zil_always = 0;     /* make every transaction synchronous */
int zil_purge = 0;      /* at pool open, just throw everything away */
int zil_noflush = 0;    /* don't flush write cache buffers on disks */

static kmem_cache_t *zil_lwb_cache;

static int
zil_dva_compare(const void *x1, const void *x2)
{
    const dva_t *dva1 = x1;
    const dva_t *dva2 = x2;

    if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
        return (-1);
    if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
        return (1);

    if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
        return (-1);
    if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
        return (1);

    return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
    avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
        offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
    zil_dva_node_t *zn;
    void *cookie = NULL;

    while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
        kmem_free(zn, sizeof (zil_dva_node_t));

    avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
    zil_dva_node_t *zn;
    avl_index_t where;

    if (avl_find(t, dva, &where) != NULL)
        return (EEXIST);

    zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
    zn->zn_dva = *dva;
    avl_insert(t, zn, where);

    return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
    return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
    zio_cksum_t *zc = &bp->blk_cksum;

    zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
    zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
    zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
    zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
{
    blkptr_t blk = *bp;
    zbookmark_t zb;
    int error;

    zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
    zb.zb_object = 0;
    zb.zb_level = -1;
    zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];

    *abufpp = NULL;

    error = arc_read(NULL, zilog->zl_spa, &blk, byteswap_uint64_array,
        arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
        ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, ARC_WAIT, &zb);

    if (error == 0) {
        char *data = (*abufpp)->b_data;
        uint64_t blksz = BP_GET_LSIZE(bp);
        zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
        zio_cksum_t cksum = bp->blk_cksum;

        /*
         * Sequence numbers should be... sequential. The checksum
         * verifier for the next block should be bp's checksum plus 1.
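         *
         * The checks below also detect the end of the chain: a trailer
         * whose embedded checksum does not match marks the end of the
         * chain (ESTALE), a hole for the next block means there is no
         * next block (ENOENT), and a zit_nused larger than the block
         * can hold indicates a damaged block (EOVERFLOW).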
         */
        cksum.zc_word[ZIL_ZC_SEQ]++;

        if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum, sizeof (cksum)))
            error = ESTALE;
        else if (BP_IS_HOLE(&ztp->zit_next_blk))
            error = ENOENT;
        else if (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))
            error = EOVERFLOW;

        if (error) {
            VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
            *abufpp = NULL;
        }
    }

    dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);

    return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 * Return the highest sequence number.
 */
uint64_t
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
    const zil_header_t *zh = zilog->zl_header;
    uint64_t claim_seq = zh->zh_claim_seq;
    uint64_t seq = 0;
    uint64_t max_seq = 0;
    blkptr_t blk = zh->zh_log;
    arc_buf_t *abuf;
    char *lrbuf, *lrp;
    zil_trailer_t *ztp;
    int reclen, error;

    if (BP_IS_HOLE(&blk))
        return (max_seq);

    /*
     * Starting at the block pointed to by zh_log we read the log chain.
     * For each block in the chain we strongly check that block to
     * ensure its validity. We stop when an invalid block is found.
     * For each block pointer in the chain we call parse_blk_func().
     * For each record in each valid block we call parse_lr_func().
     * If the log has been claimed, stop if we encounter a sequence
     * number greater than the highest claimed sequence number.
     */
    zil_dva_tree_init(&zilog->zl_dva_tree);
    for (;;) {
        seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

        if (claim_seq != 0 && seq > claim_seq)
            break;

        ASSERT(max_seq < seq);
        max_seq = seq;

        error = zil_read_log_block(zilog, &blk, &abuf);

        if (parse_blk_func != NULL)
            parse_blk_func(zilog, &blk, arg, txg);

        if (error)
            break;

        lrbuf = abuf->b_data;
        ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
        blk = ztp->zit_next_blk;

        if (parse_lr_func == NULL) {
            VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
            continue;
        }

        for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
            lr_t *lr = (lr_t *)lrp;
            reclen = lr->lrc_reclen;
            ASSERT3U(reclen, >=, sizeof (lr_t));
            parse_lr_func(zilog, lr, arg, txg);
        }
        VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
    }
    zil_dva_tree_fini(&zilog->zl_dva_tree);

    return (max_seq);
}

/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
    spa_t *spa = zilog->zl_spa;
    int err;

    /*
     * Claim log block if not already committed and not already claimed.
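     * A birth txg below first_txg means the block was written in a txg
     * that has already synced, so it is stable without a claim; the
     * per-log DVA tree keeps us from claiming the same block twice
     * (zil_dva_tree_add() returns EEXIST for a duplicate).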
     */
    if (bp->blk_birth >= first_txg &&
        zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
        err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL));
        ASSERT(err == 0);
    }
}

static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
    if (lrc->lrc_txtype == TX_WRITE) {
        lr_write_t *lr = (lr_write_t *)lrc;
        zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
    }
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
    zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
    /*
     * If we previously claimed it, we need to free it.
     */
    if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
        lr_write_t *lr = (lr_write_t *)lrc;
        blkptr_t *bp = &lr->lr_blkptr;
        if (bp->blk_birth >= claim_txg &&
            !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
            (void) arc_free(NULL, zilog->zl_spa,
                dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
        }
    }
}

/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
    const zil_header_t *zh = zilog->zl_header;
    lwb_t *lwb;
    uint64_t txg = 0;
    dmu_tx_t *tx = NULL;
    blkptr_t blk;
    int error = 0;

    /*
     * Wait for any previous destroy to complete.
     */
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

    ASSERT(zh->zh_claim_txg == 0);
    ASSERT(zh->zh_replay_seq == 0);

    blk = zh->zh_log;

    /*
     * If we don't already have an initial log block, allocate one now.
     */
    if (BP_IS_HOLE(&blk)) {
        tx = dmu_tx_create(zilog->zl_os);
        (void) dmu_tx_assign(tx, TXG_WAIT);
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
        txg = dmu_tx_get_txg(tx);

        error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk, txg);

        if (error == 0)
            zil_init_log_chain(zilog, &blk);
    }

    /*
     * Allocate a log write buffer (lwb) for the first log block.
     */
    if (error == 0) {
        lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
        lwb->lwb_zilog = zilog;
        lwb->lwb_blk = blk;
        lwb->lwb_nused = 0;
        lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
        lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
        lwb->lwb_max_txg = txg;
        lwb->lwb_seq = 0;
        lwb->lwb_state = UNWRITTEN;
        mutex_enter(&zilog->zl_lock);
        list_insert_tail(&zilog->zl_lwb_list, lwb);
        mutex_exit(&zilog->zl_lock);
    }

    /*
     * If we just allocated the first log block, commit our transaction
     * and wait for zil_sync() to stuff the block pointer into zh_log.
     * (zh is part of the MOS, so we cannot modify it in open context.)
     */
    if (tx != NULL) {
        dmu_tx_commit(tx);
        txg_wait_synced(zilog->zl_dmu_pool, txg);
    }

    ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
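 *
 * Two cases are handled below: if in-memory log write buffers (lwbs)
 * exist, they are torn down and their blocks freed directly; otherwise
 * the on-disk chain is walked with zil_parse(), freeing each block and
 * each claimed TX_WRITE block along the way.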
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
    const zil_header_t *zh = zilog->zl_header;
    lwb_t *lwb;
    dmu_tx_t *tx;
    uint64_t txg;

    /*
     * Wait for any previous destroy to complete.
     */
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

    if (BP_IS_HOLE(&zh->zh_log))
        return;

    tx = dmu_tx_create(zilog->zl_os);
    (void) dmu_tx_assign(tx, TXG_WAIT);
    dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
    txg = dmu_tx_get_txg(tx);

    mutex_enter(&zilog->zl_lock);

    ASSERT3U(zilog->zl_destroy_txg, <, txg);
    zilog->zl_destroy_txg = txg;
    zilog->zl_keep_first = keep_first;

    if (!list_is_empty(&zilog->zl_lwb_list)) {
        ASSERT(zh->zh_claim_txg == 0);
        ASSERT(!keep_first);
        while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
            list_remove(&zilog->zl_lwb_list, lwb);
            if (lwb->lwb_buf != NULL)
                zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
            zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
            kmem_cache_free(zil_lwb_cache, lwb);
        }
        mutex_exit(&zilog->zl_lock);
    } else {
        mutex_exit(&zilog->zl_lock);
        if (!keep_first) {
            (void) zil_parse(zilog, zil_free_log_block,
                zil_free_log_record, tx, zh->zh_claim_txg);
        }
    }

    dmu_tx_commit(tx);

    if (keep_first)     /* no need to wait in this case */
        return;

    txg_wait_synced(zilog->zl_dmu_pool, txg);
    ASSERT(BP_IS_HOLE(&zh->zh_log));
}

void
zil_claim(char *osname, void *txarg)
{
    dmu_tx_t *tx = txarg;
    uint64_t first_txg = dmu_tx_get_txg(tx);
    zilog_t *zilog;
    zil_header_t *zh;
    objset_t *os;
    int error;

    error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_STANDARD, &os);
    if (error) {
        cmn_err(CE_WARN, "can't process intent log for %s", osname);
        return;
    }

    zilog = dmu_objset_zil(os);
    zh = zil_header_in_syncing_context(zilog);

    /*
     * Claim all log blocks if we haven't already done so, and remember
     * the highest claimed sequence number. This ensures that if we can
     * read only part of the log now (e.g. due to a missing device),
     * but we can read the entire log later, we will not try to replay
     * or destroy beyond the last block we successfully claimed.
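     *
     * Claiming runs while the pool is being opened, so first_txg is the
     * txg immediately following the last synced txg (see the ASSERT3U
     * below); zh_claim_txg and zh_claim_seq are recorded in the header
     * so later opens can skip this work.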
     */
    ASSERT3U(zh->zh_claim_txg, <=, first_txg);
    if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
        zh->zh_claim_txg = first_txg;
        zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
            zil_claim_log_record, tx, first_txg);
        dsl_dataset_dirty(dmu_objset_ds(os), tx);
    }

    ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
    dmu_objset_close(os);
}

void
zil_add_vdev(zilog_t *zilog, uint64_t vdev, uint64_t seq)
{
    zil_vdev_t *zv;

    if (zil_noflush)
        return;

    ASSERT(MUTEX_HELD(&zilog->zl_lock));
    zv = kmem_alloc(sizeof (zil_vdev_t), KM_SLEEP);
    zv->vdev = vdev;
    zv->seq = seq;
    list_insert_tail(&zilog->zl_vdev_list, zv);
}

void
zil_flush_vdevs(zilog_t *zilog, uint64_t seq)
{
    vdev_t *vd;
    zil_vdev_t *zv, *zv2;
    zio_t *zio;
    spa_t *spa;
    uint64_t vdev;

    if (zil_noflush)
        return;

    ASSERT(MUTEX_HELD(&zilog->zl_lock));

    spa = zilog->zl_spa;
    zio = NULL;

    while ((zv = list_head(&zilog->zl_vdev_list)) != NULL &&
        zv->seq <= seq) {
        vdev = zv->vdev;
        list_remove(&zilog->zl_vdev_list, zv);
        kmem_free(zv, sizeof (zil_vdev_t));

        /*
         * remove all chained entries <= seq with same vdev
         */
        zv = list_head(&zilog->zl_vdev_list);
        while (zv && zv->seq <= seq) {
            zv2 = list_next(&zilog->zl_vdev_list, zv);
            if (zv->vdev == vdev) {
                list_remove(&zilog->zl_vdev_list, zv);
                kmem_free(zv, sizeof (zil_vdev_t));
            }
            zv = zv2;
        }

        /* flush the write cache for this vdev */
        mutex_exit(&zilog->zl_lock);
        if (zio == NULL)
            zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
        vd = vdev_lookup_top(spa, vdev);
        ASSERT(vd);
        (void) zio_nowait(zio_ioctl(zio, spa, vd, DKIOCFLUSHWRITECACHE,
            NULL, NULL, ZIO_PRIORITY_NOW,
            ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY));
        mutex_enter(&zilog->zl_lock);
    }

    /*
     * Wait for all the flushes to complete. Not all devices actually
     * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
     */
    if (zio != NULL) {
        mutex_exit(&zilog->zl_lock);
        (void) zio_wait(zio);
        mutex_enter(&zilog->zl_lock);
    }
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
    lwb_t *prev;
    lwb_t *lwb = zio->io_private;
    zilog_t *zilog = lwb->lwb_zilog;
    uint64_t max_seq;

    /*
     * Now that we've written this log block, we have a stable pointer
     * to the next block in the chain, so it's OK to let the txg in
     * which we allocated the next block sync.
     */
    txg_rele_to_sync(&lwb->lwb_txgh);

    zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
    mutex_enter(&zilog->zl_lock);
    lwb->lwb_buf = NULL;
    if (zio->io_error) {
        zilog->zl_log_error = B_TRUE;
        mutex_exit(&zilog->zl_lock);
        cv_broadcast(&zilog->zl_cv_seq);
        return;
    }

    prev = list_prev(&zilog->zl_lwb_list, lwb);
    if (prev && prev->lwb_state != SEQ_COMPLETE) {
        /* There's an unwritten buffer in the chain before this one */
        lwb->lwb_state = SEQ_INCOMPLETE;
        mutex_exit(&zilog->zl_lock);
        return;
    }

    max_seq = lwb->lwb_seq;
    lwb->lwb_state = SEQ_COMPLETE;
    /*
     * We must also follow up the chain for already written buffers
     * to see if we can set zl_ss_seq even higher.
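     *
     * Log block writes can complete out of order, so zl_ss_seq (the
     * highest sequence number known to be on stable storage) only
     * advances once every earlier lwb in the chain has completed;
     * later lwbs that finished first sit in SEQ_INCOMPLETE until this
     * walk promotes them.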
     */
    while (lwb = list_next(&zilog->zl_lwb_list, lwb)) {
        if (lwb->lwb_state != SEQ_INCOMPLETE)
            break;
        lwb->lwb_state = SEQ_COMPLETE;
        /* lwb_seq will be zero if we've written an empty buffer */
        if (lwb->lwb_seq) {
            ASSERT3U(max_seq, <, lwb->lwb_seq);
            max_seq = lwb->lwb_seq;
        }
    }
    zilog->zl_ss_seq = MAX(max_seq, zilog->zl_ss_seq);
    mutex_exit(&zilog->zl_lock);
    cv_broadcast(&zilog->zl_cv_seq);
}

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
    lwb_t *nlwb;
    zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
    spa_t *spa = zilog->zl_spa;
    blkptr_t *bp = &ztp->zit_next_blk;
    uint64_t txg;
    uint64_t zil_blksz;
    zbookmark_t zb;
    int error;

    ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

    /*
     * Allocate the next block and save its address in this block
     * before writing it in order to establish the log chain.
     * Note that if the allocation of nlwb synced before we wrote
     * the block that points at it (lwb), we'd leak it if we crashed.
     * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
     */
    txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
    txg_rele_to_quiesce(&lwb->lwb_txgh);

    /*
     * Pick a ZIL blocksize. We request a size that is the
     * maximum of the previous used size, the current used size and
     * the amount waiting in the queue.
     */
    zil_blksz = MAX(zilog->zl_cur_used, zilog->zl_prev_used);
    zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
    zil_blksz = P2ROUNDUP(zil_blksz, ZIL_MIN_BLKSZ);
    if (zil_blksz > ZIL_MAX_BLKSZ)
        zil_blksz = ZIL_MAX_BLKSZ;

    error = zio_alloc_blk(spa, zil_blksz, bp, txg);
    if (error) {
        /*
         * Reinitialise the lwb.
         * By returning NULL the caller will call tx_wait_synced()
         */
        mutex_enter(&zilog->zl_lock);
        ASSERT(lwb->lwb_state == UNWRITTEN);
        lwb->lwb_nused = 0;
        lwb->lwb_seq = 0;
        mutex_exit(&zilog->zl_lock);
        txg_rele_to_sync(&lwb->lwb_txgh);
        return (NULL);
    }

    ASSERT3U(bp->blk_birth, ==, txg);
    ztp->zit_pad = 0;
    ztp->zit_nused = lwb->lwb_nused;
    ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
    bp->blk_cksum = lwb->lwb_blk.blk_cksum;
    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

    /*
     * Allocate a new log write buffer (lwb).
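     * It describes the next block just allocated above, whose address
     * is now recorded in this block's trailer; writing this block
     * (below) is what commits that linkage to the chain.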
     */
    nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

    nlwb->lwb_zilog = zilog;
    nlwb->lwb_blk = *bp;
    nlwb->lwb_nused = 0;
    nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
    nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
    nlwb->lwb_max_txg = txg;
    nlwb->lwb_seq = 0;
    nlwb->lwb_state = UNWRITTEN;

    /*
     * Put new lwb at the end of the log chain,
     * and record the vdev for later flushing
     */
    mutex_enter(&zilog->zl_lock);
    list_insert_tail(&zilog->zl_lwb_list, nlwb);
    zil_add_vdev(zilog, DVA_GET_VDEV(BP_IDENTITY(&(lwb->lwb_blk))),
        lwb->lwb_seq);
    mutex_exit(&zilog->zl_lock);

    /*
     * write the old log block
     */
    zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
    zb.zb_object = 0;
    zb.zb_level = -1;
    zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

    zio_nowait(zio_rewrite(NULL, spa, ZIO_CHECKSUM_ZILOG, 0,
        &lwb->lwb_blk, lwb->lwb_buf, lwb->lwb_sz, zil_lwb_write_done, lwb,
        ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb));

    return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
    lr_t *lrc = &itx->itx_lr; /* common log record */
    lr_write_t *lr;
    char *dbuf;
    uint64_t seq = lrc->lrc_seq;
    uint64_t txg = lrc->lrc_txg;
    uint64_t reclen = lrc->lrc_reclen;
    uint64_t dlen = 0;
    int error;

    if (lwb == NULL)
        return (NULL);
    ASSERT(lwb->lwb_buf != NULL);

    /*
     * If it's a write, fetch the data or get its blkptr as appropriate.
     */
    if (lrc->lrc_txtype == TX_WRITE) {
        lr = (lr_write_t *)lrc;
        if (txg > spa_freeze_txg(zilog->zl_spa))
            txg_wait_synced(zilog->zl_dmu_pool, txg);
        if (itx->itx_wr_state != WR_COPIED) {
            if (itx->itx_wr_state == WR_NEED_COPY) {
                dlen = P2ROUNDUP(lr->lr_length,
                    sizeof (uint64_t));
                ASSERT(dlen);
                dbuf = kmem_alloc(dlen, KM_NOSLEEP);
                /* on memory shortage use dmu_sync */
                if (dbuf == NULL) {
                    itx->itx_wr_state = WR_INDIRECT;
                    dlen = 0;
                }
            } else {
                ASSERT(itx->itx_wr_state == WR_INDIRECT);
                dbuf = NULL;
            }
            error = zilog->zl_get_data(itx->itx_private, lr, dbuf);
            if (error) {
                if (dlen)
                    kmem_free(dbuf, dlen);
                if (error != ENOENT && error != EALREADY) {
                    txg_wait_synced(zilog->zl_dmu_pool,
                        txg);
                    mutex_enter(&zilog->zl_lock);
                    zilog->zl_ss_seq =
                        MAX(seq, zilog->zl_ss_seq);
                    mutex_exit(&zilog->zl_lock);
                    return (lwb);
                }
                mutex_enter(&zilog->zl_lock);
                zil_add_vdev(zilog, DVA_GET_VDEV(BP_IDENTITY(
                    &(lr->lr_blkptr))), seq);
                mutex_exit(&zilog->zl_lock);
                return (lwb);
            }
        }
    }

    zilog->zl_cur_used += (reclen + dlen);

    /*
     * If this record won't fit in the current log block, start a new one.
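     * If even a freshly started block cannot hold the record plus its
     * data, fall back to txg_wait_synced() below and count the record
     * as stable via zl_ss_seq rather than logging it.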
     */
    if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
        lwb = zil_lwb_write_start(zilog, lwb);
        if (lwb == NULL) {
            if (dlen)
                kmem_free(dbuf, dlen);
            return (NULL);
        }
        ASSERT(lwb->lwb_nused == 0);
        if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
            txg_wait_synced(zilog->zl_dmu_pool, txg);
            mutex_enter(&zilog->zl_lock);
            zilog->zl_ss_seq = MAX(seq, zilog->zl_ss_seq);
            mutex_exit(&zilog->zl_lock);
            if (dlen)
                kmem_free(dbuf, dlen);
            return (lwb);
        }
    }

    lrc->lrc_reclen += dlen;
    bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);
    lwb->lwb_nused += reclen;
    if (dlen) {
        bcopy(dbuf, lwb->lwb_buf + lwb->lwb_nused, dlen);
        lwb->lwb_nused += dlen;
        kmem_free(dbuf, dlen);
        lrc->lrc_reclen -= dlen; /* for kmem_free of itx */
    }
    lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
    ASSERT3U(lwb->lwb_seq, <, seq);
    lwb->lwb_seq = seq;
    ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
    ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

    return (lwb);
}

itx_t *
zil_itx_create(int txtype, size_t lrsize)
{
    itx_t *itx;

    lrsize = P2ROUNDUP(lrsize, sizeof (uint64_t));

    itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
    itx->itx_lr.lrc_txtype = txtype;
    itx->itx_lr.lrc_reclen = lrsize;
    itx->itx_lr.lrc_seq = 0;    /* defensive */

    return (itx);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
    uint64_t seq;

    ASSERT(itx->itx_lr.lrc_seq == 0);

    mutex_enter(&zilog->zl_lock);
    list_insert_tail(&zilog->zl_itx_list, itx);
    zilog->zl_itx_list_sz += itx->itx_lr.lrc_reclen;
    itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
    itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
    mutex_exit(&zilog->zl_lock);

    return (seq);
}

/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
    uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
    uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
    uint64_t max_seq = 0;
    itx_t *itx;

    mutex_enter(&zilog->zl_lock);
    while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
        itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
        list_remove(&zilog->zl_itx_list, itx);
        zilog->zl_itx_list_sz -= itx->itx_lr.lrc_reclen;
        ASSERT3U(max_seq, <, itx->itx_lr.lrc_seq);
        max_seq = itx->itx_lr.lrc_seq;
        kmem_free(itx, offsetof(itx_t, itx_lr)
            + itx->itx_lr.lrc_reclen);
    }
    if (max_seq > zilog->zl_ss_seq) {
        zilog->zl_ss_seq = max_seq;
        cv_broadcast(&zilog->zl_cv_seq);
    }
    mutex_exit(&zilog->zl_lock);
}

void
zil_clean(zilog_t *zilog)
{
    /*
     * Check for any log blocks that can be freed.
     * Log blocks are only freed when the log block allocation and
     * log records contained within are both known to be committed.
     */
    mutex_enter(&zilog->zl_lock);
    if (list_head(&zilog->zl_itx_list) != NULL)
        (void) taskq_dispatch(zilog->zl_clean_taskq,
            (void (*)(void *))zil_itx_clean, zilog, TQ_NOSLEEP);
    mutex_exit(&zilog->zl_lock);
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
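 *
 * Only one thread writes the log at a time (zl_writer); others wait on
 * zl_cv_write. The writer walks the in-memory itx list, copying records
 * into log write buffers, writes out the final partial block, waits for
 * zl_ss_seq to reach the requested sequence number, and then flushes the
 * write caches of the vdevs that were written.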
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, int ioflag)
{
    uint64_t txg;
    uint64_t max_seq;
    uint64_t reclen;
    itx_t *itx;
    lwb_t *lwb;
    spa_t *spa;

    if (zilog == NULL || seq == 0 ||
        ((ioflag & (FSYNC | FDSYNC | FRSYNC)) == 0 && !zil_always))
        return;

    spa = zilog->zl_spa;
    mutex_enter(&zilog->zl_lock);

    seq = MIN(seq, zilog->zl_itx_seq);  /* cap seq at largest itx seq */

    for (;;) {
        if (zilog->zl_ss_seq >= seq) {  /* already on stable storage */
            mutex_exit(&zilog->zl_lock);
            return;
        }

        if (zilog->zl_writer == B_FALSE) /* no one writing, do it */
            break;

        cv_wait(&zilog->zl_cv_write, &zilog->zl_lock);
    }

    zilog->zl_writer = B_TRUE;
    max_seq = 0;

    if (zilog->zl_suspend) {
        lwb = NULL;
    } else {
        lwb = list_tail(&zilog->zl_lwb_list);
        if (lwb == NULL) {
            mutex_exit(&zilog->zl_lock);
            zil_create(zilog);
            mutex_enter(&zilog->zl_lock);
            lwb = list_tail(&zilog->zl_lwb_list);
        }
    }

    /*
     * Loop through in-memory log transactions filling log blocks,
     * until we reach the given sequence number and there's no more
     * room in the write buffer.
     */
    for (;;) {
        itx = list_head(&zilog->zl_itx_list);
        if (itx == NULL)
            break;

        reclen = itx->itx_lr.lrc_reclen;
        if ((itx->itx_lr.lrc_seq > seq) &&
            ((lwb == NULL) || (lwb->lwb_nused + reclen >
            ZIL_BLK_DATA_SZ(lwb))))
            break;

        list_remove(&zilog->zl_itx_list, itx);
        txg = itx->itx_lr.lrc_txg;
        ASSERT(txg);

        mutex_exit(&zilog->zl_lock);
        if (txg > spa_last_synced_txg(spa) ||
            txg > spa_freeze_txg(spa))
            lwb = zil_lwb_commit(zilog, itx, lwb);
        else
            max_seq = itx->itx_lr.lrc_seq;
        kmem_free(itx, offsetof(itx_t, itx_lr)
            + itx->itx_lr.lrc_reclen);
        mutex_enter(&zilog->zl_lock);
        zilog->zl_itx_list_sz -= reclen;
    }

    mutex_exit(&zilog->zl_lock);

    /* write the last block out */
    if (lwb != NULL && lwb->lwb_nused != 0)
        lwb = zil_lwb_write_start(zilog, lwb);

    zilog->zl_prev_used = zilog->zl_cur_used;
    zilog->zl_cur_used = 0;

    mutex_enter(&zilog->zl_lock);
    if (max_seq > zilog->zl_ss_seq) {
        zilog->zl_ss_seq = max_seq;
        cv_broadcast(&zilog->zl_cv_seq);
    }
    /*
     * Wait if necessary for our seq to be committed.
     */
    if (lwb) {
        while (zilog->zl_ss_seq < seq && zilog->zl_log_error == 0)
            cv_wait(&zilog->zl_cv_seq, &zilog->zl_lock);
        zil_flush_vdevs(zilog, seq);
    }

    if (zilog->zl_log_error || lwb == NULL) {
        zilog->zl_log_error = 0;
        max_seq = zilog->zl_itx_seq;
        mutex_exit(&zilog->zl_lock);
        txg_wait_synced(zilog->zl_dmu_pool, 0);
        mutex_enter(&zilog->zl_lock);
        zilog->zl_ss_seq = MAX(max_seq, zilog->zl_ss_seq);
        cv_broadcast(&zilog->zl_cv_seq);
    }
    /* wake up others waiting to start a write */
    zilog->zl_writer = B_FALSE;
    mutex_exit(&zilog->zl_lock);
    cv_broadcast(&zilog->zl_cv_write);
}

/*
 * Called in syncing context to free committed log blocks and update log header.
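 *
 * Blocks are freed only after their write has completed (lwb_buf is
 * NULL) and every txg they cover has synced (lwb_max_txg <= txg); the
 * header's zh_log is then advanced to the oldest remaining block.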
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
    zil_header_t *zh = zil_header_in_syncing_context(zilog);
    uint64_t txg = dmu_tx_get_txg(tx);
    spa_t *spa = zilog->zl_spa;
    lwb_t *lwb;

    mutex_enter(&zilog->zl_lock);

    ASSERT(zilog->zl_stop_sync == 0);

    zh->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];

    if (zilog->zl_destroy_txg == txg) {
        blkptr_t blk = zh->zh_log;

        ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
        ASSERT(spa_sync_pass(spa) == 1);

        bzero(zh, sizeof (zil_header_t));
        bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));

        if (zilog->zl_keep_first) {
            /*
             * If this block was part of log chain that couldn't
             * be claimed because a device was missing during
             * zil_claim(), but that device later returns,
             * then this block could erroneously appear valid.
             * To guard against this, assign a new GUID to the new
             * log chain so it doesn't matter what blk points to.
             */
            zil_init_log_chain(zilog, &blk);
            zh->zh_log = blk;
        }
    }

    for (;;) {
        lwb = list_head(&zilog->zl_lwb_list);
        if (lwb == NULL) {
            mutex_exit(&zilog->zl_lock);
            return;
        }
        if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
            break;
        list_remove(&zilog->zl_lwb_list, lwb);
        zio_free_blk(spa, &lwb->lwb_blk, txg);
        kmem_cache_free(zil_lwb_cache, lwb);
    }
    zh->zh_log = lwb->lwb_blk;
    mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
    zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
        sizeof (struct lwb), NULL, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
    kmem_cache_destroy(zil_lwb_cache);
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
    zilog_t *zilog;

    zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

    zilog->zl_header = zh_phys;
    zilog->zl_os = os;
    zilog->zl_spa = dmu_objset_spa(os);
    zilog->zl_dmu_pool = dmu_objset_pool(os);
    zilog->zl_destroy_txg = TXG_INITIAL - 1;

    list_create(&zilog->zl_itx_list, sizeof (itx_t),
        offsetof(itx_t, itx_node));

    list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
        offsetof(lwb_t, lwb_node));

    list_create(&zilog->zl_vdev_list, sizeof (zil_vdev_t),
        offsetof(zil_vdev_t, vdev_seq_node));

    return (zilog);
}

void
zil_free(zilog_t *zilog)
{
    lwb_t *lwb;
    zil_vdev_t *zv;

    zilog->zl_stop_sync = 1;

    while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
        list_remove(&zilog->zl_lwb_list, lwb);
        if (lwb->lwb_buf != NULL)
            zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
        kmem_cache_free(zil_lwb_cache, lwb);
    }
    list_destroy(&zilog->zl_lwb_list);

    while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
        list_remove(&zilog->zl_vdev_list, zv);
        kmem_free(zv, sizeof (zil_vdev_t));
    }
    list_destroy(&zilog->zl_vdev_list);

    ASSERT(list_head(&zilog->zl_itx_list) == NULL);
    list_destroy(&zilog->zl_itx_list);

    kmem_free(zilog, sizeof (zilog_t));
}
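/*
 * Note that zil_free() tears down only the in-memory state (lwbs, the
 * vdev flush list and the itx list); any on-disk log blocks are freed
 * separately by zil_destroy() or zil_sync().
 */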
/*
 * return true if the initial log block is not valid
 */
static int
zil_empty(zilog_t *zilog)
{
    const zil_header_t *zh = zilog->zl_header;
    arc_buf_t *abuf = NULL;

    if (BP_IS_HOLE(&zh->zh_log))
        return (1);

    if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
        return (1);

    VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
    return (0);
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
    zilog_t *zilog = dmu_objset_zil(os);

    zilog->zl_get_data = get_data;
    zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
        2, 2, TASKQ_PREPOPULATE);

    return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
    /*
     * If the log isn't already committed, mark the objset dirty
     * (so zil_sync() will be called) and wait for that txg to sync.
     */
    if (!zil_is_committed(zilog)) {
        uint64_t txg;
        dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
        (void) dmu_tx_assign(tx, TXG_WAIT);
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
        txg = dmu_tx_get_txg(tx);
        dmu_tx_commit(tx);
        txg_wait_synced(zilog->zl_dmu_pool, txg);
    }

    taskq_destroy(zilog->zl_clean_taskq);
    zilog->zl_clean_taskq = NULL;
    zilog->zl_get_data = NULL;

    zil_itx_clean(zilog);
    ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}

/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
    const zil_header_t *zh = zilog->zl_header;
    lwb_t *lwb;

    mutex_enter(&zilog->zl_lock);
    if (zh->zh_claim_txg != 0) {        /* unplayed log */
        mutex_exit(&zilog->zl_lock);
        return (EBUSY);
    }
    if (zilog->zl_suspend++ != 0) {
        /*
         * Someone else already began a suspend.
         * Just wait for them to finish.
         */
        while (zilog->zl_suspending)
            cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
        ASSERT(BP_IS_HOLE(&zh->zh_log));
        mutex_exit(&zilog->zl_lock);
        return (0);
    }
    zilog->zl_suspending = B_TRUE;
    mutex_exit(&zilog->zl_lock);

    zil_commit(zilog, UINT64_MAX, FSYNC);

    mutex_enter(&zilog->zl_lock);
    for (;;) {
        /*
         * Wait for any in-flight log writes to complete.
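         * Any lwb with a nonzero lwb_seq that has not reached
         * SEQ_COMPLETE is still in flight; sleep on zl_cv_seq and
         * re-scan until none remain, then destroy the log below so
         * that zh_log becomes a hole.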
         */
        for (lwb = list_head(&zilog->zl_lwb_list); lwb != NULL;
            lwb = list_next(&zilog->zl_lwb_list, lwb))
            if (lwb->lwb_seq != 0 && lwb->lwb_state != SEQ_COMPLETE)
                break;

        if (lwb == NULL)
            break;

        cv_wait(&zilog->zl_cv_seq, &zilog->zl_lock);
    }

    mutex_exit(&zilog->zl_lock);

    zil_destroy(zilog, B_FALSE);

    mutex_enter(&zilog->zl_lock);
    ASSERT(BP_IS_HOLE(&zh->zh_log));
    zilog->zl_suspending = B_FALSE;
    cv_broadcast(&zilog->zl_cv_suspend);
    mutex_exit(&zilog->zl_lock);

    return (0);
}

void
zil_resume(zilog_t *zilog)
{
    mutex_enter(&zilog->zl_lock);
    ASSERT(zilog->zl_suspend != 0);
    zilog->zl_suspend--;
    mutex_exit(&zilog->zl_lock);
}

typedef struct zil_replay_arg {
    objset_t *zr_os;
    zil_replay_func_t **zr_replay;
    void *zr_arg;
    void (*zr_rm_sync)(void *arg);
    uint64_t *zr_txgp;
    boolean_t zr_byteswap;
    char *zr_lrbuf;
} zil_replay_arg_t;

static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
    zil_replay_arg_t *zr = zra;
    const zil_header_t *zh = zilog->zl_header;
    uint64_t reclen = lr->lrc_reclen;
    uint64_t txtype = lr->lrc_txtype;
    int pass, error;

    if (zilog->zl_stop_replay)
        return;

    if (lr->lrc_txg < claim_txg)        /* already committed */
        return;

    if (lr->lrc_seq <= zh->zh_replay_seq)   /* already replayed */
        return;

    /*
     * Make a copy of the data so we can revise and extend it.
     */
    bcopy(lr, zr->zr_lrbuf, reclen);

    /*
     * The log block containing this lr may have been byteswapped
     * so that we can easily examine common fields like lrc_txtype.
     * However, the log is a mix of different data types, and only the
     * replay vectors know how to byteswap their records. Therefore, if
     * the lr was byteswapped, undo it before invoking the replay vector.
     */
    if (zr->zr_byteswap)
        byteswap_uint64_array(zr->zr_lrbuf, reclen);

    /*
     * If this is a TX_WRITE with a blkptr, suck in the data.
     */
    if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
        lr_write_t *lrw = (lr_write_t *)lr;
        blkptr_t *wbp = &lrw->lr_blkptr;
        uint64_t wlen = lrw->lr_length;
        char *wbuf = zr->zr_lrbuf + reclen;

        if (BP_IS_HOLE(wbp)) {  /* compressed to a hole */
            bzero(wbuf, wlen);
        } else {
            /*
             * A subsequent write may have overwritten this block,
             * in which case wbp may have been freed and
             * reallocated, and our read of wbp may fail with a
             * checksum error. We can safely ignore this because
             * the later write will provide the correct data.
             */
            zbookmark_t zb;

            zb.zb_objset = dmu_objset_id(zilog->zl_os);
            zb.zb_object = lrw->lr_foid;
            zb.zb_level = -1;
            zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);

            (void) zio_wait(zio_read(NULL, zilog->zl_spa,
                wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
                ZIO_PRIORITY_SYNC_READ,
                ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
            (void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
        }
    }

    /*
     * We must now do two things atomically: replay this log record,
     * and update the log header to reflect the fact that we did so.
     * We use the DMU's ability to assign into a specific txg to do this.
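     *
     * The loop below points *zr_txgp at the txg of replay_tx so the
     * replay vector's own dmu_tx_assign() lands in the same txg; on the
     * first pass it is deliberately set one txg back so that assign
     * fails (ERESTART) and the retry path stays exercised. On success,
     * the record's sequence number is stored in zl_replay_seq[] so that
     * zil_sync() publishes it as zh_replay_seq for that txg.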
     */
    for (pass = 1; /* CONSTANTCONDITION */; pass++) {
        uint64_t replay_txg;
        dmu_tx_t *replay_tx;

        replay_tx = dmu_tx_create(zr->zr_os);
        error = dmu_tx_assign(replay_tx, TXG_WAIT);
        if (error) {
            dmu_tx_abort(replay_tx);
            break;
        }

        replay_txg = dmu_tx_get_txg(replay_tx);

        if (txtype == 0 || txtype >= TX_MAX_TYPE) {
            error = EINVAL;
        } else {
            /*
             * On the first pass, arrange for the replay vector
             * to fail its dmu_tx_assign(). That's the only way
             * to ensure that those code paths remain well tested.
             */
            *zr->zr_txgp = replay_txg - (pass == 1);
            error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
                zr->zr_byteswap);
            *zr->zr_txgp = TXG_NOWAIT;
        }

        if (error == 0) {
            dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
            zilog->zl_replay_seq[replay_txg & TXG_MASK] =
                lr->lrc_seq;
        }

        dmu_tx_commit(replay_tx);

        if (error != ERESTART)
            break;

        if (pass != 1)
            txg_wait_open(spa_get_dsl(zilog->zl_spa),
                replay_txg + 1);

        dprintf("pass %d, retrying\n", pass);
    }

    if (error) {
        char *name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
        dmu_objset_name(zr->zr_os, name);
        cmn_err(CE_WARN, "ZFS replay transaction error %d, "
            "dataset %s, seq 0x%llx, txtype %llu\n",
            error, name,
            (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype);
        zilog->zl_stop_replay = 1;
        kmem_free(name, MAXNAMELEN);
    }

    /*
     * The DMU's dnode layer doesn't see removes until the txg commits,
     * so a subsequent claim can spuriously fail with EEXIST.
     * To prevent this, if we might have removed an object,
     * wait for the delete thread to delete it, and then
     * wait for the transaction group to sync.
     */
    if (txtype == TX_REMOVE || txtype == TX_RMDIR || txtype == TX_RENAME) {
        if (zr->zr_rm_sync != NULL)
            zr->zr_rm_sync(zr->zr_arg);
        txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
    }
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, uint64_t *txgp,
    zil_replay_func_t *replay_func[TX_MAX_TYPE], void (*rm_sync)(void *arg))
{
    zilog_t *zilog = dmu_objset_zil(os);
    const zil_header_t *zh = zilog->zl_header;
    zil_replay_arg_t zr;

    if (zil_empty(zilog)) {
        zil_destroy(zilog, B_TRUE);
        return;
    }

    zr.zr_os = os;
    zr.zr_replay = replay_func;
    zr.zr_arg = arg;
    zr.zr_rm_sync = rm_sync;
    zr.zr_txgp = txgp;
    zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
    zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

    /*
     * Wait for in-progress removes to sync before starting replay.
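     *
     * The 2 * SPA_MAXBLOCKSIZE scratch buffer allocated above holds
     * each record copy followed by up to one block of TX_WRITE data
     * read in by zil_replay_log_record(). Once replay finishes, the
     * log is destroyed below.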
     */
    if (rm_sync != NULL)
        rm_sync(arg);
    txg_wait_synced(zilog->zl_dmu_pool, 0);

    zilog->zl_stop_replay = 0;
    (void) zil_parse(zilog, NULL, zil_replay_log_record, &zr,
        zh->zh_claim_txg);
    kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

    zil_destroy(zilog, B_FALSE);
}

/*
 * Report whether all transactions are committed
 */
int
zil_is_committed(zilog_t *zilog)
{
    lwb_t *lwb;

    if (!list_is_empty(&zilog->zl_itx_list))
        return (B_FALSE);

    /*
     * A log write buffer at the head of the list that is not UNWRITTEN
     * means there's a lwb yet to be freed after a txg commit
     */
    lwb = list_head(&zilog->zl_lwb_list);
    if (lwb && lwb->lwb_state != UNWRITTEN)
        return (B_FALSE);
    ASSERT(zil_empty(zilog));
    return (B_TRUE);
}