/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them.  These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
 * requirement.  In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system.  Its on-disk (pool) format consists
 * of 3 parts:
 *
 *    - ZIL header
 *    - ZIL blocks
 *    - ZIL records
 *
 * A log record holds a system call transaction.  Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain.  The ZIL header points to the first
 * block in the chain.  Note there is not a fixed place in the pool
 * to hold blocks.  They are dynamically allocated and freed as
 * needed from the blocks available.
 */

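/*
 * Schematically (each block's trailer holds the block pointer of the
 * next block in the chain):
 *
 *      zil_header
 *          |
 *          v
 *      +-------------+     +-------------+     +-------------+
 *      |  ZIL block  |---->|  ZIL block  |---->|  ZIL block  |----> ...
 *      | log records |     | log records |     | log records |
 *      +-------------+     +-------------+     +-------------+
 */
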
/*
 * This global ZIL switch affects all pools
 */
int zil_disable = 0;    /* disable intent logging */

/*
 * Tunable parameter for debugging or performance analysis.  Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;

static kmem_cache_t *zil_lwb_cache;

static int
zil_dva_compare(const void *x1, const void *x2)
{
        const dva_t *dva1 = x1;
        const dva_t *dva2 = x2;

        if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
                return (-1);
        if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
                return (1);

        if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
                return (-1);
        if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
                return (1);

        return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
        avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
            offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
        zil_dva_node_t *zn;
        void *cookie = NULL;

        while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
                kmem_free(zn, sizeof (zil_dva_node_t));

        avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
        zil_dva_node_t *zn;
        avl_index_t where;

        if (avl_find(t, dva, &where) != NULL)
                return (EEXIST);

        zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
        zn->zn_dva = *dva;
        avl_insert(t, zn, where);

        return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
        return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
        zio_cksum_t *zc = &bp->blk_cksum;

        zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
        zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
        zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
        zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
{
        blkptr_t blk = *bp;
        zbookmark_t zb;
        uint32_t aflags = ARC_WAIT;
        int error;

        zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
        zb.zb_object = 0;
        zb.zb_level = -1;
        zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];

        *abufpp = NULL;

        error = arc_read(NULL, zilog->zl_spa, &blk, byteswap_uint64_array,
            arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
            ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);

        if (error == 0) {
                char *data = (*abufpp)->b_data;
                uint64_t blksz = BP_GET_LSIZE(bp);
                zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
                zio_cksum_t cksum = bp->blk_cksum;

                /*
                 * Sequence numbers should be... sequential.  The checksum
                 * verifier for the next block should be bp's checksum plus 1.
                 */
                cksum.zc_word[ZIL_ZC_SEQ]++;

                if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum, sizeof (cksum)))
                        error = ESTALE;
                else if (BP_IS_HOLE(&ztp->zit_next_blk))
                        error = ENOENT;
                else if (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))
                        error = EOVERFLOW;

                if (error) {
                        VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
                        *abufpp = NULL;
                }
        }

        dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);

        return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 * Return the highest sequence number.
 */
uint64_t
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
        const zil_header_t *zh = zilog->zl_header;
        uint64_t claim_seq = zh->zh_claim_seq;
        uint64_t seq = 0;
        uint64_t max_seq = 0;
        blkptr_t blk = zh->zh_log;
        arc_buf_t *abuf;
        char *lrbuf, *lrp;
        zil_trailer_t *ztp;
        int reclen, error;

        if (BP_IS_HOLE(&blk))
                return (max_seq);

        /*
         * Starting at the block pointed to by zh_log we read the log chain.
         * For each block in the chain we strongly check that block to
         * ensure its validity.  We stop when an invalid block is found.
         * For each block pointer in the chain we call parse_blk_func().
         * For each record in each valid block we call parse_lr_func().
         * If the log has been claimed, stop if we encounter a sequence
         * number greater than the highest claimed sequence number.
         */
        zil_dva_tree_init(&zilog->zl_dva_tree);
        for (;;) {
                seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

                if (claim_seq != 0 && seq > claim_seq)
                        break;

                ASSERT(max_seq < seq);
                max_seq = seq;

                error = zil_read_log_block(zilog, &blk, &abuf);

                if (parse_blk_func != NULL)
                        parse_blk_func(zilog, &blk, arg, txg);

                if (error)
                        break;

                lrbuf = abuf->b_data;
                ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
                blk = ztp->zit_next_blk;

                if (parse_lr_func == NULL) {
                        VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
                        continue;
                }

                for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
                        lr_t *lr = (lr_t *)lrp;
                        reclen = lr->lrc_reclen;
                        ASSERT3U(reclen, >=, sizeof (lr_t));
                        parse_lr_func(zilog, lr, arg, txg);
                }
                VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
        }
        zil_dva_tree_fini(&zilog->zl_dva_tree);

        return (max_seq);
}

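/*
 * The claim, free and replay operations below are all built on zil_parse():
 * zil_claim() passes zil_claim_log_block()/zil_claim_log_record(),
 * zil_destroy() passes zil_free_log_block()/zil_free_log_record(), and
 * zil_replay() passes zil_replay_log_record() as the callbacks.
 */
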
/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
        spa_t *spa = zilog->zl_spa;
        int err;

        /*
         * Claim log block if not already committed and not already claimed.
         */
        if (bp->blk_birth >= first_txg &&
            zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
                err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL));
                ASSERT(err == 0);
        }
}

static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
        if (lrc->lrc_txtype == TX_WRITE) {
                lr_write_t *lr = (lr_write_t *)lrc;
                zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
        }
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
        zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
        /*
         * If we previously claimed it, we need to free it.
         */
        if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
                lr_write_t *lr = (lr_write_t *)lrc;
                blkptr_t *bp = &lr->lr_blkptr;
                if (bp->blk_birth >= claim_txg &&
                    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
                        (void) arc_free(NULL, zilog->zl_spa,
                            dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
                }
        }
}

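/*
 * Note that the record callbacks above only care about TX_WRITE: an
 * indirect write (see WR_INDIRECT in zil_lwb_commit()) stores its data in
 * a pool block referenced by lr_blkptr rather than in the log block itself,
 * so those data blocks must be claimed and freed along with the log blocks
 * that point at them.
 */
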
/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
        const zil_header_t *zh = zilog->zl_header;
        lwb_t *lwb;
        uint64_t txg = 0;
        dmu_tx_t *tx = NULL;
        blkptr_t blk;
        int error = 0;

        /*
         * Wait for any previous destroy to complete.
         */
        txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

        ASSERT(zh->zh_claim_txg == 0);
        ASSERT(zh->zh_replay_seq == 0);

        blk = zh->zh_log;

        /*
         * If we don't already have an initial log block, allocate one now.
         */
        if (BP_IS_HOLE(&blk)) {
                tx = dmu_tx_create(zilog->zl_os);
                (void) dmu_tx_assign(tx, TXG_WAIT);
                dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
                txg = dmu_tx_get_txg(tx);

                error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk, txg);

                if (error == 0)
                        zil_init_log_chain(zilog, &blk);
        }

        /*
         * Allocate a log write buffer (lwb) for the first log block.
         */
        if (error == 0) {
                lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
                lwb->lwb_zilog = zilog;
                lwb->lwb_blk = blk;
                lwb->lwb_nused = 0;
                lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
                lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
                lwb->lwb_max_txg = txg;
                lwb->lwb_zio = NULL;

                mutex_enter(&zilog->zl_lock);
                list_insert_tail(&zilog->zl_lwb_list, lwb);
                mutex_exit(&zilog->zl_lock);
        }

        /*
         * If we just allocated the first log block, commit our transaction
         * and wait for zil_sync() to stuff the block pointer into zh_log.
         * (zh is part of the MOS, so we cannot modify it in open context.)
         */
        if (tx != NULL) {
                dmu_tx_commit(tx);
                txg_wait_synced(zilog->zl_dmu_pool, txg);
        }

        ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create().  We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
        const zil_header_t *zh = zilog->zl_header;
        lwb_t *lwb;
        dmu_tx_t *tx;
        uint64_t txg;

        /*
         * Wait for any previous destroy to complete.
         */
        txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

        if (BP_IS_HOLE(&zh->zh_log))
                return;

        tx = dmu_tx_create(zilog->zl_os);
        (void) dmu_tx_assign(tx, TXG_WAIT);
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
        txg = dmu_tx_get_txg(tx);

        mutex_enter(&zilog->zl_lock);

        ASSERT3U(zilog->zl_destroy_txg, <, txg);
        zilog->zl_destroy_txg = txg;
        zilog->zl_keep_first = keep_first;

        if (!list_is_empty(&zilog->zl_lwb_list)) {
                ASSERT(zh->zh_claim_txg == 0);
                ASSERT(!keep_first);
                while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
                        list_remove(&zilog->zl_lwb_list, lwb);
                        if (lwb->lwb_buf != NULL)
                                zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
                        zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
                        kmem_cache_free(zil_lwb_cache, lwb);
                }
        } else {
                if (!keep_first) {
                        (void) zil_parse(zilog, zil_free_log_block,
                            zil_free_log_record, tx, zh->zh_claim_txg);
                }
        }
        mutex_exit(&zilog->zl_lock);

        dmu_tx_commit(tx);

        if (keep_first)                 /* no need to wait in this case */
                return;

        txg_wait_synced(zilog->zl_dmu_pool, txg);
        ASSERT(BP_IS_HOLE(&zh->zh_log));
}

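/*
 * zil_claim() is expected to be called once per dataset while the pool is
 * being opened, before normal txg processing resumes, so that every log
 * block (and every block an indirect write points at) is accounted for
 * before new allocations could reuse it.  It deliberately returns 0 even
 * when the objset cannot be opened, so that one damaged dataset does not
 * prevent the remaining intent logs from being claimed.
 */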
int
zil_claim(char *osname, void *txarg)
{
        dmu_tx_t *tx = txarg;
        uint64_t first_txg = dmu_tx_get_txg(tx);
        zilog_t *zilog;
        zil_header_t *zh;
        objset_t *os;
        int error;

        error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_STANDARD, &os);
        if (error) {
                cmn_err(CE_WARN, "can't process intent log for %s", osname);
                return (0);
        }

        zilog = dmu_objset_zil(os);
        zh = zil_header_in_syncing_context(zilog);

        /*
         * Claim all log blocks if we haven't already done so, and remember
         * the highest claimed sequence number.  This ensures that if we can
         * read only part of the log now (e.g. due to a missing device),
         * but we can read the entire log later, we will not try to replay
         * or destroy beyond the last block we successfully claimed.
         */
        ASSERT3U(zh->zh_claim_txg, <=, first_txg);
        if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
                zh->zh_claim_txg = first_txg;
                zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
                    zil_claim_log_record, tx, first_txg);
                dsl_dataset_dirty(dmu_objset_ds(os), tx);
        }

        ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
        dmu_objset_close(os);
        return (0);
}

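/*
 * Write-cache flushing: as each log block write is issued,
 * zil_lwb_write_start() calls zil_add_vdev() (under zl_lock) to record the
 * top-level vdev the block landed on.  Once zil_commit_writer() has waited
 * for the log writes to complete, it calls zil_flush_vdevs() to issue a
 * DKIOCFLUSHWRITECACHE to each recorded vdev, so the data is truly on
 * stable storage before zil_commit() returns.
 */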
void
zil_add_vdev(zilog_t *zilog, uint64_t vdev)
{
        zil_vdev_t *zv;

        if (zfs_nocacheflush)
                return;

        ASSERT(MUTEX_HELD(&zilog->zl_lock));
        zv = kmem_alloc(sizeof (zil_vdev_t), KM_SLEEP);
        zv->vdev = vdev;
        list_insert_tail(&zilog->zl_vdev_list, zv);
}

void
zil_flush_vdevs(zilog_t *zilog)
{
        vdev_t *vd;
        zil_vdev_t *zv, *zv2;
        zio_t *zio;
        spa_t *spa;
        uint64_t vdev;

        if (zfs_nocacheflush)
                return;

        ASSERT(MUTEX_HELD(&zilog->zl_lock));

        spa = zilog->zl_spa;
        zio = NULL;

        while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
                vdev = zv->vdev;
                list_remove(&zilog->zl_vdev_list, zv);
                kmem_free(zv, sizeof (zil_vdev_t));

                /*
                 * remove all chained entries with the same vdev
                 */
                zv = list_head(&zilog->zl_vdev_list);
                while (zv) {
                        zv2 = list_next(&zilog->zl_vdev_list, zv);
                        if (zv->vdev == vdev) {
                                list_remove(&zilog->zl_vdev_list, zv);
                                kmem_free(zv, sizeof (zil_vdev_t));
                        }
                        zv = zv2;
                }

                /* flush the write cache for this vdev */
                mutex_exit(&zilog->zl_lock);
                if (zio == NULL)
                        zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
                vd = vdev_lookup_top(spa, vdev);
                ASSERT(vd);
                (void) zio_nowait(zio_ioctl(zio, spa, vd, DKIOCFLUSHWRITECACHE,
                    NULL, NULL, ZIO_PRIORITY_NOW,
                    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY));
                mutex_enter(&zilog->zl_lock);
        }

        /*
         * Wait for all the flushes to complete.  Not all devices actually
         * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
         */
        if (zio != NULL) {
                mutex_exit(&zilog->zl_lock);
                (void) zio_wait(zio);
                mutex_enter(&zilog->zl_lock);
        }
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
        lwb_t *lwb = zio->io_private;
        zilog_t *zilog = lwb->lwb_zilog;

        /*
         * Now that we've written this log block, we have a stable pointer
         * to the next block in the chain, so it's OK to let the txg in
         * which we allocated the next block sync.
         */
        txg_rele_to_sync(&lwb->lwb_txgh);

        zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
        mutex_enter(&zilog->zl_lock);
        lwb->lwb_buf = NULL;
        if (zio->io_error) {
                zilog->zl_log_error = B_TRUE;
                mutex_exit(&zilog->zl_lock);
                return;
        }
        mutex_exit(&zilog->zl_lock);
}

/*
 * Initialize the io for a log block.
 *
 * Note, we should not initialize the IO until we are about
 * to use it, since zio_rewrite() does a spa_config_enter().
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
        zbookmark_t zb;

        zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
        zb.zb_object = 0;
        zb.zb_level = -1;
        zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

        if (zilog->zl_root_zio == NULL) {
                zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
                    ZIO_FLAG_CANFAIL);
        }
        lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
            ZIO_CHECKSUM_ZILOG, 0, &lwb->lwb_blk, lwb->lwb_buf,
            lwb->lwb_sz, zil_lwb_write_done, lwb,
            ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
}

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
        lwb_t *nlwb;
        zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
        spa_t *spa = zilog->zl_spa;
        blkptr_t *bp = &ztp->zit_next_blk;
        uint64_t txg;
        uint64_t zil_blksz;
        int error;

        ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

        /*
         * Allocate the next block and save its address in this block
         * before writing it in order to establish the log chain.
         * Note that if the allocation of nlwb synced before we wrote
         * the block that points at it (lwb), we'd leak it if we crashed.
         * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
         */
        txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
        txg_rele_to_quiesce(&lwb->lwb_txgh);

        /*
         * Pick a ZIL blocksize.  We request a size that is the
         * maximum of the previous used size, the current used size and
         * the amount waiting in the queue.
         */
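        /*
         * For example, if 16K has been used so far in this commit pass and
         * 60K of itxs are still queued, the queue size dominates: we would
         * request 60K plus room for the trailer, rounded up to a multiple
         * of ZIL_MIN_BLKSZ and capped at ZIL_MAX_BLKSZ.
         */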
        zil_blksz = MAX(zilog->zl_prev_used,
            zilog->zl_cur_used + sizeof (*ztp));
        zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
        zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
        if (zil_blksz > ZIL_MAX_BLKSZ)
                zil_blksz = ZIL_MAX_BLKSZ;

        error = zio_alloc_blk(spa, zil_blksz, bp, txg);
        if (error) {
                /*
                 * Reinitialize the lwb.
                 * By returning NULL the caller will call txg_wait_synced().
                 */
                mutex_enter(&zilog->zl_lock);
                lwb->lwb_nused = 0;
                mutex_exit(&zilog->zl_lock);
                txg_rele_to_sync(&lwb->lwb_txgh);
                return (NULL);
        }

        ASSERT3U(bp->blk_birth, ==, txg);
        ztp->zit_pad = 0;
        ztp->zit_nused = lwb->lwb_nused;
        ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
        bp->blk_cksum = lwb->lwb_blk.blk_cksum;
        bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

        /*
         * Allocate a new log write buffer (lwb).
         */
        nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

        nlwb->lwb_zilog = zilog;
        nlwb->lwb_blk = *bp;
        nlwb->lwb_nused = 0;
        nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
        nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
        nlwb->lwb_max_txg = txg;
        nlwb->lwb_zio = NULL;

        /*
         * Put new lwb at the end of the log chain,
         * and record the vdev for later flushing
         */
        mutex_enter(&zilog->zl_lock);
        list_insert_tail(&zilog->zl_lwb_list, nlwb);
        zil_add_vdev(zilog, DVA_GET_VDEV(BP_IDENTITY(&(lwb->lwb_blk))));
        mutex_exit(&zilog->zl_lock);

        /*
         * kick off the write for the old log block
         */
        dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
        if (lwb->lwb_zio == NULL)
                zil_lwb_write_init(zilog, lwb);
        zio_nowait(lwb->lwb_zio);

        return (nlwb);
}

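/*
 * zil_lwb_commit() copies one itx into the current lwb.  TX_WRITE itxs
 * come in three flavors (itx_wr_state):
 *
 *    WR_COPIED     the write's data was already copied into the record
 *                  when the itx was created;
 *    WR_NEED_COPY  the data is fetched via zl_get_data() and copied into
 *                  the log block immediately after the record;
 *    WR_INDIRECT   the data is not put in the log block at all; instead
 *                  zl_get_data() supplies a block pointer (lr_blkptr) to
 *                  the data in the pool.
 */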
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
        lr_t *lrc = &itx->itx_lr; /* common log record */
        lr_write_t *lr = (lr_write_t *)lrc;
        uint64_t txg = lrc->lrc_txg;
        uint64_t reclen = lrc->lrc_reclen;
        uint64_t dlen;

        if (lwb == NULL)
                return (NULL);
        ASSERT(lwb->lwb_buf != NULL);

        if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
                dlen = P2ROUNDUP_TYPED(
                    lr->lr_length, sizeof (uint64_t), uint64_t);
        else
                dlen = 0;

        zilog->zl_cur_used += (reclen + dlen);

        /*
         * If this record won't fit in the current log block, start a new one.
         */
        if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
                lwb = zil_lwb_write_start(zilog, lwb);
                if (lwb == NULL)
                        return (NULL);
                ASSERT(lwb->lwb_nused == 0);
                if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
                        txg_wait_synced(zilog->zl_dmu_pool, txg);
                        return (lwb);
                }
        }

        /*
         * Update lrc_seq to be the log record sequence number.  See zil.h.
         * Then copy the record to the log buffer.
         */
        lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
        bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);

        /*
         * If it's a write, fetch the data or get its blkptr as appropriate.
         */
        if (lrc->lrc_txtype == TX_WRITE) {
                if (txg > spa_freeze_txg(zilog->zl_spa))
                        txg_wait_synced(zilog->zl_dmu_pool, txg);
                if (itx->itx_wr_state != WR_COPIED) {
                        char *dbuf;
                        int error;

                        /* alignment is guaranteed */
                        lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
                        if (dlen) {
                                ASSERT(itx->itx_wr_state == WR_NEED_COPY);
                                dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
                                lr->lr_common.lrc_reclen += dlen;
                        } else {
                                ASSERT(itx->itx_wr_state == WR_INDIRECT);
                                dbuf = NULL;
                        }
                        error = zilog->zl_get_data(
                            itx->itx_private, lr, dbuf, lwb->lwb_zio);
                        if (error) {
                                ASSERT(error == ENOENT || error == EEXIST ||
                                    error == EALREADY);
                                return (lwb);
                        }
                }
        }

        lwb->lwb_nused += reclen + dlen;
        lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
        ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
        ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

        return (lwb);
}

itx_t *
zil_itx_create(int txtype, size_t lrsize)
{
        itx_t *itx;

        lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

        itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
        itx->itx_lr.lrc_txtype = txtype;
        itx->itx_lr.lrc_reclen = lrsize;
        itx->itx_lr.lrc_seq = 0;        /* defensive */

        return (itx);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
        uint64_t seq;

        ASSERT(itx->itx_lr.lrc_seq == 0);

        mutex_enter(&zilog->zl_lock);
        list_insert_tail(&zilog->zl_itx_list, itx);
        zilog->zl_itx_list_sz += itx->itx_lr.lrc_reclen;
        itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
        itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
        mutex_exit(&zilog->zl_lock);

        return (seq);
}

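/*
 * Typical itx life cycle (a sketch only; each operation fills in its own
 * record type):
 *
 *    itx = zil_itx_create(txtype, sizeof (record) + extra);
 *    ... fill in the operation-specific fields of itx->itx_lr ...
 *    seq = zil_itx_assign(zilog, itx, tx);    (same tx as the DMU change)
 *    ...
 *    zil_commit(zilog, seq, foid);            (later, on fsync or O_DSYNC)
 */
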
/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
        uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
        uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
        itx_t *itx;

        mutex_enter(&zilog->zl_lock);
        /* wait for a log writer to finish walking list */
        while (zilog->zl_writer) {
                cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
        }
        /* no need to set zl_writer as we never drop zl_lock */
        while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
            itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
                list_remove(&zilog->zl_itx_list, itx);
                zilog->zl_itx_list_sz -= itx->itx_lr.lrc_reclen;
                kmem_free(itx, offsetof(itx_t, itx_lr)
                    + itx->itx_lr.lrc_reclen);
        }
        mutex_exit(&zilog->zl_lock);
}

/*
 * If there are in-memory intent log transactions then
 * start up a taskq to free up any that have now been synced.
 */
void
zil_clean(zilog_t *zilog)
{
        mutex_enter(&zilog->zl_lock);
        if (list_head(&zilog->zl_itx_list) != NULL)
                (void) taskq_dispatch(zilog->zl_clean_taskq,
                    (void (*)(void *))zil_itx_clean, zilog, TQ_NOSLEEP);
        mutex_exit(&zilog->zl_lock);
}

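/*
 * Write out queued itxs up to the given sequence number for the given
 * file (foid).  Called with zl_lock held and zl_writer clear; sets
 * zl_writer for the duration of the push, drops and reacquires zl_lock
 * around the actual I/O, and returns with zl_writer clear and zl_lock
 * dropped.
 */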
void
zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
        uint64_t txg;
        uint64_t reclen;
        itx_t *itx, *itx_next = (itx_t *)-1;
        lwb_t *lwb;
        spa_t *spa;

        zilog->zl_writer = B_TRUE;
        zilog->zl_root_zio = NULL;
        spa = zilog->zl_spa;

        if (zilog->zl_suspend) {
                lwb = NULL;
        } else {
                lwb = list_tail(&zilog->zl_lwb_list);
                if (lwb == NULL) {
                        /*
                         * Return if there's nothing to flush before we
                         * dirty the fs by calling zil_create()
                         */
                        if (list_is_empty(&zilog->zl_itx_list)) {
                                /* wake up others waiting to start a write */
                                zilog->zl_writer = B_FALSE;
                                cv_broadcast(&zilog->zl_cv_writer);
                                mutex_exit(&zilog->zl_lock);
                                return;
                        }

                        mutex_exit(&zilog->zl_lock);
                        zil_create(zilog);
                        mutex_enter(&zilog->zl_lock);
                        lwb = list_tail(&zilog->zl_lwb_list);
                }
        }

        /*
         * Loop through in-memory log transactions filling log blocks,
         * until we reach the given sequence number and there's no more
         * room in the write buffer.
         */
        DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
        for (;;) {
                /*
                 * Find the next itx to push:
                 * Push all transactions related to specified foid and all
                 * other transactions except TX_WRITE, TX_TRUNCATE,
                 * TX_SETATTR and TX_ACL for all other files.
                 */
                if (itx_next != (itx_t *)-1)
                        itx = itx_next;
                else
                        itx = list_head(&zilog->zl_itx_list);
                for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
                        if (foid == 0) /* push all foids? */
                                break;
                        switch (itx->itx_lr.lrc_txtype) {
                        case TX_SETATTR:
                        case TX_WRITE:
                        case TX_TRUNCATE:
                        case TX_ACL:
                                /* lr_foid is at the same offset for these */
                                if (((lr_write_t *)&itx->itx_lr)->lr_foid
                                    != foid) {
                                        continue; /* skip this record */
                                }
                        }
                        break;
                }
                if (itx == NULL)
                        break;

                reclen = itx->itx_lr.lrc_reclen;
                if ((itx->itx_lr.lrc_seq > seq) &&
                    ((lwb == NULL) || (lwb->lwb_nused == 0) ||
                    (lwb->lwb_nused + reclen > ZIL_BLK_DATA_SZ(lwb))))
                        break;

                /*
                 * Save the next pointer.  Even though we soon drop
                 * zl_lock all threads that may change the list
                 * (another writer or zil_itx_clean) can't do so until
                 * they have zl_writer.
                 */
                itx_next = list_next(&zilog->zl_itx_list, itx);
                list_remove(&zilog->zl_itx_list, itx);
                txg = itx->itx_lr.lrc_txg;
                ASSERT(txg);

                mutex_exit(&zilog->zl_lock);
                if (txg > spa_last_synced_txg(spa) ||
                    txg > spa_freeze_txg(spa))
                        lwb = zil_lwb_commit(zilog, itx, lwb);
                kmem_free(itx, offsetof(itx_t, itx_lr)
                    + itx->itx_lr.lrc_reclen);
                mutex_enter(&zilog->zl_lock);
                zilog->zl_itx_list_sz -= reclen;
        }
        DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
        mutex_exit(&zilog->zl_lock);

        /* write the last block out */
        if (lwb != NULL && lwb->lwb_nused != 0)
                lwb = zil_lwb_write_start(zilog, lwb);

        zilog->zl_prev_used = zilog->zl_cur_used;
        zilog->zl_cur_used = 0;

        /*
         * Wait if necessary for the log blocks to be on stable storage.
         */
        mutex_enter(&zilog->zl_lock);
        if (zilog->zl_root_zio) {
                mutex_exit(&zilog->zl_lock);
                DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
                (void) zio_wait(zilog->zl_root_zio);
                DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
                mutex_enter(&zilog->zl_lock);
                zil_flush_vdevs(zilog);
        }

        if (zilog->zl_log_error || lwb == NULL) {
                zilog->zl_log_error = 0;
                mutex_exit(&zilog->zl_lock);
                txg_wait_synced(zilog->zl_dmu_pool, 0);
                mutex_enter(&zilog->zl_lock);
        }
        /* wake up others waiting to start a write */
        zilog->zl_writer = B_FALSE;
        cv_broadcast(&zilog->zl_cv_writer);
        mutex_exit(&zilog->zl_lock);
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 * If foid is 0 push out all transactions, otherwise push only those
 * for that file or that might have been used to create that file.
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
        if (zilog == NULL || seq == 0)
                return;

        mutex_enter(&zilog->zl_lock);

        seq = MIN(seq, zilog->zl_itx_seq);      /* cap seq at largest itx seq */

        while (zilog->zl_writer)
                cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
        zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
}

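/*
 * Typically a caller such as the fsync handler passes the sequence number
 * that zil_itx_assign() returned for the last itx it logged against the
 * file being synced; zil_suspend() passes UINT64_MAX and a foid of 0 to
 * push everything.
 */
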
/*
 * Called in syncing context to free committed log blocks and update the
 * log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
        zil_header_t *zh = zil_header_in_syncing_context(zilog);
        uint64_t txg = dmu_tx_get_txg(tx);
        spa_t *spa = zilog->zl_spa;
        lwb_t *lwb;

        mutex_enter(&zilog->zl_lock);

        ASSERT(zilog->zl_stop_sync == 0);

        zh->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];

        if (zilog->zl_destroy_txg == txg) {
                blkptr_t blk = zh->zh_log;

                ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
                ASSERT(spa_sync_pass(spa) == 1);

                bzero(zh, sizeof (zil_header_t));
                bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));

                if (zilog->zl_keep_first) {
                        /*
                         * If this block was part of a log chain that couldn't
                         * be claimed because a device was missing during
                         * zil_claim(), but that device later returns,
                         * then this block could erroneously appear valid.
                         * To guard against this, assign a new GUID to the new
                         * log chain so it doesn't matter what blk points to.
                         */
                        zil_init_log_chain(zilog, &blk);
                        zh->zh_log = blk;
                }
        }

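        /*
         * Free any lwbs whose block writes have completed (lwb_buf has been
         * freed by zil_lwb_write_done()) and whose itxs have all synced
         * (lwb_max_txg <= txg), updating zh_log to point at the head of the
         * remaining chain as we go.  Stop at the first lwb that is still
         * needed.
         */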
        for (;;) {
                lwb = list_head(&zilog->zl_lwb_list);
                if (lwb == NULL) {
                        mutex_exit(&zilog->zl_lock);
                        return;
                }
                zh->zh_log = lwb->lwb_blk;
                if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
                        break;
                list_remove(&zilog->zl_lwb_list, lwb);
                zio_free_blk(spa, &lwb->lwb_blk, txg);
                kmem_cache_free(zil_lwb_cache, lwb);
        }
        mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
        zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
            sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
        kmem_cache_destroy(zil_lwb_cache);
}

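/*
 * Allocate and free the in-core log state (zilog_t) for an objset.
 * These routines only manage in-core structures; they never touch the
 * on-disk log chain itself.
 */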
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
        zilog_t *zilog;

        zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

        zilog->zl_header = zh_phys;
        zilog->zl_os = os;
        zilog->zl_spa = dmu_objset_spa(os);
        zilog->zl_dmu_pool = dmu_objset_pool(os);
        zilog->zl_destroy_txg = TXG_INITIAL - 1;

        mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);

        list_create(&zilog->zl_itx_list, sizeof (itx_t),
            offsetof(itx_t, itx_node));

        list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
            offsetof(lwb_t, lwb_node));

        list_create(&zilog->zl_vdev_list, sizeof (zil_vdev_t),
            offsetof(zil_vdev_t, vdev_seq_node));

        return (zilog);
}

void
zil_free(zilog_t *zilog)
{
        lwb_t *lwb;
        zil_vdev_t *zv;

        zilog->zl_stop_sync = 1;

        while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
                list_remove(&zilog->zl_lwb_list, lwb);
                if (lwb->lwb_buf != NULL)
                        zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
                kmem_cache_free(zil_lwb_cache, lwb);
        }
        list_destroy(&zilog->zl_lwb_list);

        while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
                list_remove(&zilog->zl_vdev_list, zv);
                kmem_free(zv, sizeof (zil_vdev_t));
        }
        list_destroy(&zilog->zl_vdev_list);

        ASSERT(list_head(&zilog->zl_itx_list) == NULL);
        list_destroy(&zilog->zl_itx_list);
        mutex_destroy(&zilog->zl_lock);

        kmem_free(zilog, sizeof (zilog_t));
}

/*
 * return true if the initial log block is not valid
 */
static int
zil_empty(zilog_t *zilog)
{
        const zil_header_t *zh = zilog->zl_header;
        arc_buf_t *abuf = NULL;

        if (BP_IS_HOLE(&zh->zh_log))
                return (1);

        if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
                return (1);

        VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
        return (0);
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
        zilog_t *zilog = dmu_objset_zil(os);

        zilog->zl_get_data = get_data;
        zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
            2, 2, TASKQ_PREPOPULATE);

        return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
        /*
         * If the log isn't already committed, mark the objset dirty
         * (so zil_sync() will be called) and wait for that txg to sync.
         */
        if (!zil_is_committed(zilog)) {
                uint64_t txg;
                dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
                (void) dmu_tx_assign(tx, TXG_WAIT);
                dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
                txg = dmu_tx_get_txg(tx);
                dmu_tx_commit(tx);
                txg_wait_synced(zilog->zl_dmu_pool, txg);
        }

        taskq_destroy(zilog->zl_clean_taskq);
        zilog->zl_clean_taskq = NULL;
        zilog->zl_get_data = NULL;

        zil_itx_clean(zilog);
        ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}

/*
 * Suspend an intent log.  While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
        const zil_header_t *zh = zilog->zl_header;

        mutex_enter(&zilog->zl_lock);
        if (zh->zh_claim_txg != 0) {            /* unplayed log */
                mutex_exit(&zilog->zl_lock);
                return (EBUSY);
        }
        if (zilog->zl_suspend++ != 0) {
                /*
                 * Someone else already began a suspend.
                 * Just wait for them to finish.
                 */
                while (zilog->zl_suspending)
                        cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
                ASSERT(BP_IS_HOLE(&zh->zh_log));
                mutex_exit(&zilog->zl_lock);
                return (0);
        }
        zilog->zl_suspending = B_TRUE;
        mutex_exit(&zilog->zl_lock);

        zil_commit(zilog, UINT64_MAX, 0);

        /*
         * Wait for any in-flight log writes to complete.
         */
        mutex_enter(&zilog->zl_lock);
        while (zilog->zl_writer)
                cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
        mutex_exit(&zilog->zl_lock);

        zil_destroy(zilog, B_FALSE);

        mutex_enter(&zilog->zl_lock);
        ASSERT(BP_IS_HOLE(&zh->zh_log));
        zilog->zl_suspending = B_FALSE;
        cv_broadcast(&zilog->zl_cv_suspend);
        mutex_exit(&zilog->zl_lock);

        return (0);
}

void
zil_resume(zilog_t *zilog)
{
        mutex_enter(&zilog->zl_lock);
        ASSERT(zilog->zl_suspend != 0);
        zilog->zl_suspend--;
        mutex_exit(&zilog->zl_lock);
}

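/*
 * A sketch of the expected suspend/resume pairing (e.g. around taking
 * a snapshot):
 *
 *    error = zil_suspend(zilog);
 *    if (error == 0) {
 *            ... operate on the now-empty, quiesced log ...
 *            zil_resume(zilog);
 *    }
 */
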
typedef struct zil_replay_arg {
        objset_t *zr_os;
        zil_replay_func_t **zr_replay;
        void *zr_arg;
        void (*zr_rm_sync)(void *arg);
        uint64_t *zr_txgp;
        boolean_t zr_byteswap;
        char *zr_lrbuf;
} zil_replay_arg_t;

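/*
 * Replay a single log record: copy it aside, restore its original byte
 * order if the log was byteswapped when read, read in any TX_WRITE data
 * the record refers to, then call the appropriate replay vector entry in
 * its own txg.  The record's sequence number is noted in zl_replay_seq so
 * that zil_sync() can update zh_replay_seq in the same txg.
 */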
static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
        zil_replay_arg_t *zr = zra;
        const zil_header_t *zh = zilog->zl_header;
        uint64_t reclen = lr->lrc_reclen;
        uint64_t txtype = lr->lrc_txtype;
        int pass, error;

        if (zilog->zl_stop_replay)
                return;

        if (lr->lrc_txg < claim_txg)            /* already committed */
                return;

        if (lr->lrc_seq <= zh->zh_replay_seq)   /* already replayed */
                return;

        /*
         * Make a copy of the data so we can revise and extend it.
         */
        bcopy(lr, zr->zr_lrbuf, reclen);

        /*
         * The log block containing this lr may have been byteswapped
         * so that we can easily examine common fields like lrc_txtype.
         * However, the log is a mix of different data types, and only the
         * replay vectors know how to byteswap their records.  Therefore, if
         * the lr was byteswapped, undo it before invoking the replay vector.
         */
        if (zr->zr_byteswap)
                byteswap_uint64_array(zr->zr_lrbuf, reclen);

        /*
         * If this is a TX_WRITE with a blkptr, suck in the data.
         */
        if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
                lr_write_t *lrw = (lr_write_t *)lr;
                blkptr_t *wbp = &lrw->lr_blkptr;
                uint64_t wlen = lrw->lr_length;
                char *wbuf = zr->zr_lrbuf + reclen;

                if (BP_IS_HOLE(wbp)) {  /* compressed to a hole */
                        bzero(wbuf, wlen);
                } else {
                        /*
                         * A subsequent write may have overwritten this block,
                         * in which case wbp may have been freed and
                         * reallocated, and our read of wbp may fail with a
                         * checksum error.  We can safely ignore this because
                         * the later write will provide the correct data.
                         */
                        zbookmark_t zb;

                        zb.zb_objset = dmu_objset_id(zilog->zl_os);
                        zb.zb_object = lrw->lr_foid;
                        zb.zb_level = -1;
                        zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);

                        (void) zio_wait(zio_read(NULL, zilog->zl_spa,
                            wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
                            ZIO_PRIORITY_SYNC_READ,
                            ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
                        (void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
                }
        }

        /*
         * We must now do two things atomically: replay this log record,
         * and update the log header to reflect the fact that we did so.
         * We use the DMU's ability to assign into a specific txg to do this.
         */
        for (pass = 1; /* CONSTANTCONDITION */; pass++) {
                uint64_t replay_txg;
                dmu_tx_t *replay_tx;

                replay_tx = dmu_tx_create(zr->zr_os);
                error = dmu_tx_assign(replay_tx, TXG_WAIT);
                if (error) {
                        dmu_tx_abort(replay_tx);
                        break;
                }

                replay_txg = dmu_tx_get_txg(replay_tx);

                if (txtype == 0 || txtype >= TX_MAX_TYPE) {
                        error = EINVAL;
                } else {
                        /*
                         * On the first pass, arrange for the replay vector
                         * to fail its dmu_tx_assign().  That's the only way
                         * to ensure that those code paths remain well tested.
                         */
                        *zr->zr_txgp = replay_txg - (pass == 1);
                        error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
                            zr->zr_byteswap);
                        *zr->zr_txgp = TXG_NOWAIT;
                }

                if (error == 0) {
                        dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
                        zilog->zl_replay_seq[replay_txg & TXG_MASK] =
                            lr->lrc_seq;
                }

                dmu_tx_commit(replay_tx);

                if (error != ERESTART)
                        break;

                if (pass != 1)
                        txg_wait_open(spa_get_dsl(zilog->zl_spa),
                            replay_txg + 1);

                dprintf("pass %d, retrying\n", pass);
        }

        if (error) {
                char *name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
                dmu_objset_name(zr->zr_os, name);
                cmn_err(CE_WARN, "ZFS replay transaction error %d, "
                    "dataset %s, seq 0x%llx, txtype %llu\n",
                    error, name,
                    (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype);
                zilog->zl_stop_replay = 1;
                kmem_free(name, MAXNAMELEN);
        }

        /*
         * The DMU's dnode layer doesn't see removes until the txg commits,
         * so a subsequent claim can spuriously fail with EEXIST.
         * To prevent this, if we might have removed an object,
         * wait for the delete thread to delete it, and then
         * wait for the transaction group to sync.
         */
        if (txtype == TX_REMOVE || txtype == TX_RMDIR || txtype == TX_RENAME) {
                if (zr->zr_rm_sync != NULL)
                        zr->zr_rm_sync(zr->zr_arg);
                txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
        }
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, uint64_t *txgp,
    zil_replay_func_t *replay_func[TX_MAX_TYPE], void (*rm_sync)(void *arg))
{
        zilog_t *zilog = dmu_objset_zil(os);
        const zil_header_t *zh = zilog->zl_header;
        zil_replay_arg_t zr;

        if (zil_empty(zilog)) {
                zil_destroy(zilog, B_TRUE);
                return;
        }

        zr.zr_os = os;
        zr.zr_replay = replay_func;
        zr.zr_arg = arg;
        zr.zr_rm_sync = rm_sync;
        zr.zr_txgp = txgp;
        zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
        zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

        /*
         * Wait for in-progress removes to sync before starting replay.
         */
        if (rm_sync != NULL)
                rm_sync(arg);
        txg_wait_synced(zilog->zl_dmu_pool, 0);

        zilog->zl_stop_replay = 0;
        (void) zil_parse(zilog, NULL, zil_replay_log_record, &zr,
            zh->zh_claim_txg);
        kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

        zil_destroy(zilog, B_FALSE);
}

/*
 * Report whether all transactions are committed
 */
int
zil_is_committed(zilog_t *zilog)
{
        lwb_t *lwb;
        int ret;

        mutex_enter(&zilog->zl_lock);
        while (zilog->zl_writer)
                cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);

        /* recent unpushed intent log transactions? */
        if (!list_is_empty(&zilog->zl_itx_list)) {
                ret = B_FALSE;
                goto out;
        }

        /* intent log never used? */
        lwb = list_head(&zilog->zl_lwb_list);
        if (lwb == NULL) {
                ret = B_TRUE;
                goto out;
        }

        /*
         * more than 1 log buffer means zil_sync() hasn't yet freed
         * entries after a txg has committed
         */
        if (list_next(&zilog->zl_lwb_list, lwb)) {
                ret = B_FALSE;
                goto out;
        }

        ASSERT(zil_empty(zilog));
        ret = B_TRUE;
out:
        cv_broadcast(&zilog->zl_cv_writer);
        mutex_exit(&zilog->zl_lock);
        return (ret);
}