/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>
#include <sys/dmu_tx.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 *	- ZIL header
 *	- ZIL blocks
 *	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. Figure X shows the ZIL structure:
 */

/*
 * This global ZIL switch affects all pools.
 */
int zil_disable = 0;	/* disable intent logging */

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
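 *
 * It works by suppressing the device write cache flushes that
 * zil_flush_vdevs() would otherwise issue after committing log blocks.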
 */
boolean_t zfs_nocacheflush = B_FALSE;

static kmem_cache_t *zil_lwb_cache;

static int
zil_dva_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = x1;
	const dva_t *dva2 = x2;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
	    offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
	zil_dva_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_dva_node_t));

	avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
	zil_dva_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
{
	blkptr_t blk = *bp;
	zbookmark_t zb;
	uint32_t aflags = ARC_WAIT;
	int error;

	zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];

	*abufpp = NULL;

	error = arc_read(NULL, zilog->zl_spa, &blk, byteswap_uint64_array,
	    arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);

	if (error == 0) {
		char *data = (*abufpp)->b_data;
		uint64_t blksz = BP_GET_LSIZE(bp);
		zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum, sizeof (cksum)))
			error = ESTALE;
		else if (BP_IS_HOLE(&ztp->zit_next_blk))
			error = ENOENT;
		else if (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))
			error = EOVERFLOW;

		if (error) {
			VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
			*abufpp = NULL;
		}
	}

	dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 * Return the highest sequence number.
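 *
 * Note that parse_blk_func is also called for the first block that fails
 * validation, whereas parse_lr_func only sees records from blocks that
 * were read and validated successfully.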
 */
uint64_t
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	uint64_t claim_seq = zh->zh_claim_seq;
	uint64_t seq = 0;
	uint64_t max_seq = 0;
	blkptr_t blk = zh->zh_log;
	arc_buf_t *abuf;
	char *lrbuf, *lrp;
	zil_trailer_t *ztp;
	int reclen, error;

	if (BP_IS_HOLE(&blk))
		return (max_seq);

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	zil_dva_tree_init(&zilog->zl_dva_tree);
	for (;;) {
		seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

		if (claim_seq != 0 && seq > claim_seq)
			break;

		ASSERT(max_seq < seq);
		max_seq = seq;

		error = zil_read_log_block(zilog, &blk, &abuf);

		if (parse_blk_func != NULL)
			parse_blk_func(zilog, &blk, arg, txg);

		if (error)
			break;

		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;

		if (parse_lr_func == NULL) {
			VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
			continue;
		}

		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			parse_lr_func(zilog, lr, arg, txg);
		}
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	zil_dva_tree_fini(&zilog->zl_dva_tree);

	return (max_seq);
}

/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	spa_t *spa = zilog->zl_spa;
	int err;

	/*
	 * Claim log block if not already committed and not already claimed.
	 */
	if (bp->blk_birth >= first_txg &&
	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL));
		ASSERT(err == 0);
	}
}

static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
	}
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		if (bp->blk_birth >= claim_txg &&
		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
			(void) arc_free(NULL, zilog->zl_spa,
			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
		}
	}
}

/*
 * Create an on-disk intent log.
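 *
 * If the ZIL header does not yet point at a log block, the first block
 * is allocated here and we wait for zil_sync() to record its block
 * pointer in zh_log (the header lives in the MOS, so it can only be
 * updated in syncing context).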
 */
static void
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * If we don't already have an initial log block, allocate one now.
	 */
	if (BP_IS_HOLE(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
		    NULL, txg);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0) {
		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
		lwb->lwb_zilog = zilog;
		lwb->lwb_blk = blk;
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
		lwb->lwb_max_txg = txg;
		lwb->lwb_zio = NULL;

		mutex_enter(&zilog->zl_lock);
		list_insert_tail(&zilog->zl_lwb_list, lwb);
		mutex_exit(&zilog->zl_lock);
	}

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		ASSERT(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else {
		if (!keep_first) {
			(void) zil_parse(zilog, zil_free_log_block,
			    zil_free_log_record, tx, zh->zh_claim_txg);
		}
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);

	if (keep_first)			/* no need to wait in this case */
		return;

	txg_wait_synced(zilog->zl_dmu_pool, txg);
	ASSERT(BP_IS_HOLE(&zh->zh_log));
}

/*
 * zil_rollback_destroy() is only called by the rollback code.
 * We already have a syncing tx. Rollback has exclusive access to the
 * dataset, so we don't have to worry about concurrent zil access.
 * The actual freeing of any log blocks occurs in zil_sync() later in
 * this txg syncing phase.
 */
void
zil_rollback_destroy(zilog_t *zilog, dmu_tx_t *tx)
{
	const zil_header_t *zh = zilog->zl_header;
	uint64_t txg;

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	txg = dmu_tx_get_txg(tx);
	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = B_FALSE;

	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	(void) zil_parse(zilog, zil_free_log_block, zil_free_log_record,
	    tx, zh->zh_claim_txg);
}

int
zil_claim(char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_STANDARD, &os);
	if (error) {
		cmn_err(CE_WARN, "can't process intent log for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_close(os);
	return (0);
}

void
zil_add_vdev(zilog_t *zilog, uint64_t vdev)
{
	zil_vdev_t *zv, *new;
	uint64_t bmap_sz = sizeof (zilog->zl_vdev_bmap) << 3;
	uchar_t *cp;

	if (zfs_nocacheflush)
		return;

	if (vdev < bmap_sz) {
		cp = zilog->zl_vdev_bmap + (vdev / 8);
		atomic_or_8(cp, 1 << (vdev % 8));
	} else {
		/*
		 * insert into ordered list
		 */
		mutex_enter(&zilog->zl_lock);
		for (zv = list_head(&zilog->zl_vdev_list); zv != NULL;
		    zv = list_next(&zilog->zl_vdev_list, zv)) {
			if (zv->vdev == vdev) {
				/* duplicate found - just return */
				mutex_exit(&zilog->zl_lock);
				return;
			}
			if (zv->vdev > vdev) {
				/* insert before this entry */
				new = kmem_alloc(sizeof (zil_vdev_t),
				    KM_SLEEP);
				new->vdev = vdev;
				list_insert_before(&zilog->zl_vdev_list,
				    zv, new);
				mutex_exit(&zilog->zl_lock);
				return;
			}
		}
		/* ran off end of list, insert at the end */
		ASSERT(zv == NULL);
		new = kmem_alloc(sizeof (zil_vdev_t), KM_SLEEP);
		new->vdev = vdev;
		list_insert_tail(&zilog->zl_vdev_list, new);
		mutex_exit(&zilog->zl_lock);
	}
}

void
zil_flush_vdevs(zilog_t *zilog)
{
	zil_vdev_t *zv;
	zio_t *zio = NULL;
	spa_t *spa = zilog->zl_spa;
	uint64_t vdev;
	uint8_t b;
	int i, j;

	ASSERT(zilog->zl_writer);

	for (i = 0; i < sizeof (zilog->zl_vdev_bmap); i++) {
		b = zilog->zl_vdev_bmap[i];
		if (b == 0)
			continue;
		for (j = 0; j < 8; j++) {
			if (b & (1 << j)) {
				vdev = (i << 3) + j;
				zio_flush_vdev(spa, vdev, &zio);
			}
		}
		zilog->zl_vdev_bmap[i] = 0;
	}

	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
		zio_flush_vdev(spa, zv->vdev, &zio);
		list_remove(&zilog->zl_vdev_list, zv);
		kmem_free(zv, sizeof (zil_vdev_t));
	}
	/*
	 * Wait for all the flushes to complete. Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	if (zio)
		(void) zio_wait(zio);
}

/*
 * Function called when a log block write completes.
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	txg_rele_to_sync(&lwb->lwb_txgh);

	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	if (zio->io_error)
		zilog->zl_log_error = B_TRUE;
	mutex_exit(&zilog->zl_lock);
}

/*
 * Initialize the io for a log block.
 *
 * Note, we should not initialize the IO until we are about
 * to use it, since zio_rewrite() does a spa_config_enter().
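 *
 * Both the zilog's root zio and the lwb's rewrite zio are created
 * lazily here and only once, so calling this again for the same lwb
 * is a no-op.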
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_t zb;

	zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    ZIO_CHECKSUM_ZILOG, 0, &lwb->lwb_blk, lwb->lwb_buf,
		    lwb->lwb_sz, zil_lwb_write_done, lwb,
		    ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_CANFAIL, &zb);
	}
}

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb;
	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp = &ztp->zit_next_blk;
	uint64_t txg;
	uint64_t zil_blksz;
	int error;

	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
	 */
	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
	txg_rele_to_quiesce(&lwb->lwb_txgh);

	/*
	 * Pick a ZIL blocksize. We request a size that is the
	 * maximum of the previous used size, the current used size and
	 * the amount waiting in the queue.
	 */
	zil_blksz = MAX(zilog->zl_prev_used,
	    zilog->zl_cur_used + sizeof (*ztp));
	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
	zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
	if (zil_blksz > ZIL_MAX_BLKSZ)
		zil_blksz = ZIL_MAX_BLKSZ;

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg);
	if (error) {
		dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);

		/*
		 * We dirty the dataset to ensure that zil_sync() will
		 * be called to remove this lwb from our zl_lwb_list.
		 * Failing to do so, may leave an lwb with a NULL lwb_buf
		 * hanging around on the zl_lwb_list.
		 */
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		dmu_tx_commit(tx);

		/*
		 * We've just experienced an allocation failure, so we
		 * terminate the current lwb and send it on its way.
		 */
		ztp->zit_pad = 0;
		ztp->zit_nused = lwb->lwb_nused;
		ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
		zio_nowait(lwb->lwb_zio);

		/*
		 * By returning NULL the caller will call txg_wait_synced().
		 */
		return (NULL);
	}

	ASSERT3U(bp->blk_birth, ==, txg);
	ztp->zit_pad = 0;
	ztp->zit_nused = lwb->lwb_nused;
	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

	/*
	 * Allocate a new log write buffer (lwb).
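	 * The new lwb takes over the block pointer we just allocated; its
	 * write zio is created later, by zil_lwb_write_init(), when the
	 * first record is committed to it.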
	 */
	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

	nlwb->lwb_zilog = zilog;
	nlwb->lwb_blk = *bp;
	nlwb->lwb_nused = 0;
	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
	nlwb->lwb_max_txg = txg;
	nlwb->lwb_zio = NULL;

	/*
	 * Put new lwb at the end of the log chain
	 */
	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, nlwb);
	mutex_exit(&zilog->zl_lock);

	/* Record the vdev for later flushing */
	zil_add_vdev(zilog, DVA_GET_VDEV(BP_IDENTITY(&(lwb->lwb_blk))));

	/*
	 * kick off the write for the old log block
	 */
	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
	ASSERT(lwb->lwb_zio);
	zio_nowait(lwb->lwb_zio);

	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lr = (lr_write_t *)lrc;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen;

	if (lwb == NULL)
		return (NULL);
	ASSERT(lwb->lwb_buf != NULL);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lr->lr_length, sizeof (uint64_t), uint64_t);
	else
		dlen = 0;

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(lwb->lwb_nused == 0);
		if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	/*
	 * Update lrc_seq to be the log record sequence number (see zil.h),
	 * then copy the record to the log buffer.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
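	 * WR_COPIED records already carry their data. For WR_NEED_COPY the
	 * zl_get_data callback copies the data into the log buffer just
	 * after the record; for WR_INDIRECT nothing is copied here and the
	 * callback is expected to fill in the record's block pointer instead.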
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			/* alignment is guaranteed */
			lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
				lr->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lr, dbuf, lwb->lwb_zio);
			if (error) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

itx_t *
zil_itx_create(int txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_lr.lrc_seq = 0;	/* defensive */

	return (itx);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_lr.lrc_reclen;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}

/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	list_t clean_list;
	itx_t *itx;

	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	mutex_enter(&zilog->zl_lock);
	/* wait for a log writer to finish walking list */
	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	}

	/*
	 * Move the sync'd log transactions to a separate list so we can call
	 * kmem_free without holding the zl_lock.
	 *
	 * There is no need to set zl_writer as we don't drop zl_lock here
	 */
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_lr.lrc_reclen;
		list_insert_tail(&clean_list, itx);
	}
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);

	/* destroy sync'd log transactions */
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
	}
	list_destroy(&clean_list);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
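 *
 * The work itself is done by zil_itx_clean(); it is dispatched to
 * zl_clean_taskq so the caller does not have to wait for an active
 * writer or for the frees themselves.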
 */
void
zil_clean(zilog_t *zilog)
{
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	itx = list_head(&zilog->zl_itx_list);
	if ((itx != NULL) &&
	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (void (*)(void *))zil_itx_clean, zilog, TQ_NOSLEEP);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	uint64_t txg;
	uint64_t reclen;
	uint64_t commit_seq = 0;
	itx_t *itx, *itx_next = (itx_t *)-1;
	lwb_t *lwb;
	spa_t *spa;

	zilog->zl_writer = B_TRUE;
	zilog->zl_root_zio = NULL;
	spa = zilog->zl_spa;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			/*
			 * Return if there's nothing to flush before we
			 * dirty the fs by calling zil_create()
			 */
			if (list_is_empty(&zilog->zl_itx_list)) {
				zilog->zl_writer = B_FALSE;
				return;
			}
			mutex_exit(&zilog->zl_lock);
			zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
			lwb = list_tail(&zilog->zl_lwb_list);
		}
	}

	/* Loop through in-memory log transactions filling log blocks. */
	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
	for (;;) {
		/*
		 * Find the next itx to push:
		 * Push all transactions related to specified foid and all
		 * other transactions except TX_WRITE, TX_TRUNCATE,
		 * TX_SETATTR and TX_ACL for all other files.
		 */
		if (itx_next != (itx_t *)-1)
			itx = itx_next;
		else
			itx = list_head(&zilog->zl_itx_list);
		for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
			if (foid == 0) /* push all foids? */
				break;
			if (itx->itx_sync) /* push all O_[D]SYNC */
				break;
			switch (itx->itx_lr.lrc_txtype) {
			case TX_SETATTR:
			case TX_WRITE:
			case TX_TRUNCATE:
			case TX_ACL:
				/* lr_foid is same offset for these records */
				if (((lr_write_t *)&itx->itx_lr)->lr_foid
				    != foid) {
					continue; /* skip this record */
				}
			}
			break;
		}
		if (itx == NULL)
			break;

		reclen = itx->itx_lr.lrc_reclen;
		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (lwb->lwb_nused == 0) ||
		    (lwb->lwb_nused + reclen > ZIL_BLK_DATA_SZ(lwb)))) {
			break;
		}

		/*
		 * Save the next pointer. Even though we soon drop
		 * zl_lock all threads that may change the list
		 * (another writer or zil_itx_clean) can't do so until
		 * they have zl_writer.
		 */
		itx_next = list_next(&zilog->zl_itx_list, itx);
		list_remove(&zilog->zl_itx_list, itx);
		mutex_exit(&zilog->zl_lock);
		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
		mutex_enter(&zilog->zl_lock);
		zilog->zl_itx_list_sz -= reclen;
	}
	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
	/* determine commit sequence number */
	itx = list_head(&zilog->zl_itx_list);
	if (itx)
		commit_seq = itx->itx_lr.lrc_seq;
	else
		commit_seq = zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_zio != NULL)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	/*
	 * Wait if necessary for the log blocks to be on stable storage.
	 */
	if (zilog->zl_root_zio) {
		DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
		(void) zio_wait(zilog->zl_root_zio);
		DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
		if (!zfs_nocacheflush)
			zil_flush_vdevs(zilog);
	}

	if (zilog->zl_log_error || lwb == NULL) {
		zilog->zl_log_error = 0;
		txg_wait_synced(zilog->zl_dmu_pool, 0);
	}

	mutex_enter(&zilog->zl_lock);
	zilog->zl_writer = B_FALSE;

	ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
	zilog->zl_commit_seq = commit_seq;
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 * If foid is 0 push out all transactions, otherwise push only those
 * for that file or those that might have been used to create that file.
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	if (zilog == NULL || seq == 0)
		return;

	mutex_enter(&zilog->zl_lock);

	seq = MIN(seq, zilog->zl_itx_seq);	/* cap seq at largest itx seq */

	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
		if (seq < zilog->zl_commit_seq) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
	}
	zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
	/* wake up others waiting on the commit */
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
}

/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	lwb_t *lwb;

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	zh->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;

		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
		ASSERT(spa_sync_pass(spa) == 1);

		bzero(zh, sizeof (zil_header_t));
		bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of a log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		}
	}

	for (;;) {
		lwb = list_head(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_blk(spa, &lwb->lwb_blk, txg);
		kmem_cache_free(zil_lwb_cache, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_head(&zilog->zl_lwb_list) == NULL)
			BP_ZERO(&zh->zh_log);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;

	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);

	list_create(&zilog->zl_itx_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	list_create(&zilog->zl_vdev_list, sizeof (zil_vdev_t),
	    offsetof(zil_vdev_t, vdev_seq_node));

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	lwb_t *lwb;
	zil_vdev_t *zv;

	zilog->zl_stop_sync = 1;

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		list_remove(&zilog->zl_lwb_list, lwb);
		if (lwb->lwb_buf != NULL)
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	list_destroy(&zilog->zl_lwb_list);

	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
		list_remove(&zilog->zl_vdev_list, zv);
		kmem_free(zv, sizeof (zil_vdev_t));
	}
	list_destroy(&zilog->zl_vdev_list);

	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
	list_destroy(&zilog->zl_itx_list);
	mutex_destroy(&zilog->zl_lock);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * Return true if the initial log block is not valid.
 */
static int
zil_empty(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	arc_buf_t *abuf = NULL;

	if (BP_IS_HOLE(&zh->zh_log))
		return (1);

	if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
		return (1);

	VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	return (0);
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);

	zilog->zl_get_data = get_data;
	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
	    2, 2, TASKQ_PREPOPULATE);

	return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	/*
	 * If the log isn't already committed, mark the objset dirty
	 * (so zil_sync() will be called) and wait for that txg to sync.
	 */
	if (!zil_is_committed(zilog)) {
		uint64_t txg;
		dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	taskq_destroy(zilog->zl_clean_taskq);
	zilog->zl_clean_taskq = NULL;
	zilog->zl_get_data = NULL;

	zil_itx_clean(zilog);
	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}

/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;

	mutex_enter(&zilog->zl_lock);
	if (zh->zh_claim_txg != 0) {		/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	if (zilog->zl_suspend++ != 0) {
		/*
		 * Someone else already began a suspend.
		 * Just wait for them to finish.
		 */
		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		ASSERT(BP_IS_HOLE(&zh->zh_log));
		mutex_exit(&zilog->zl_lock);
		return (0);
	}
	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, UINT64_MAX, 0);

	/*
	 * Wait for any in-flight log writes to complete.
	 */
	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	mutex_exit(&zilog->zl_lock);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	ASSERT(BP_IS_HOLE(&zh->zh_log));
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	return (0);
}

void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}

typedef struct zil_replay_arg {
	objset_t	*zr_os;
	zil_replay_func_t **zr_replay;
	void		*zr_arg;
	uint64_t	*zr_txgp;
	boolean_t	zr_byteswap;
	char		*zr_lrbuf;
} zil_replay_arg_t;

static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	char *name;
	int pass, error, sunk;

	if (zilog->zl_stop_replay)
		return;

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return;

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lrbuf, reclen);

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different data types, and only the
	 * replay vectors know how to byteswap their records.
	 * Therefore, if the lr was byteswapped, undo it before invoking
	 * the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lrbuf, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		lr_write_t *lrw = (lr_write_t *)lr;
		blkptr_t *wbp = &lrw->lr_blkptr;
		uint64_t wlen = lrw->lr_length;
		char *wbuf = zr->zr_lrbuf + reclen;

		if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
			bzero(wbuf, wlen);
		} else {
			/*
			 * A subsequent write may have overwritten this block,
			 * in which case wbp may have been freed and
			 * reallocated, and our read of wbp may fail with a
			 * checksum error. We can safely ignore this because
			 * the later write will provide the correct data.
			 */
			zbookmark_t zb;

			zb.zb_objset = dmu_objset_id(zilog->zl_os);
			zb.zb_object = lrw->lr_foid;
			zb.zb_level = -1;
			zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);

			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
			    ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
		}
	}

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header to reflect the fact that we did so.
	 * We use the DMU's ability to assign into a specific txg to do this.
	 */
	for (pass = 1, sunk = B_FALSE; /* CONSTANTCONDITION */; pass++) {
		uint64_t replay_txg;
		dmu_tx_t *replay_tx;

		replay_tx = dmu_tx_create(zr->zr_os);
		error = dmu_tx_assign(replay_tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(replay_tx);
			break;
		}

		replay_txg = dmu_tx_get_txg(replay_tx);

		if (txtype == 0 || txtype >= TX_MAX_TYPE) {
			error = EINVAL;
		} else {
			/*
			 * On the first pass, arrange for the replay vector
			 * to fail its dmu_tx_assign(). That's the only way
			 * to ensure that those code paths remain well tested.
			 */
			*zr->zr_txgp = replay_txg - (pass == 1);
			error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
			    zr->zr_byteswap);
			*zr->zr_txgp = TXG_NOWAIT;
		}

		if (error == 0) {
			dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
			zilog->zl_replay_seq[replay_txg & TXG_MASK] =
			    lr->lrc_seq;
		}

		dmu_tx_commit(replay_tx);

		if (!error)
			return;

		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST. So if we receive any error other than ERESTART
		 * we try syncing out any removes and then retry the
		 * transaction.
		 */
		if (error != ERESTART && !sunk) {
			txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
			sunk = B_TRUE;
			continue; /* retry */
		}

		if (error != ERESTART)
			break;

		if (pass != 1)
			txg_wait_open(spa_get_dsl(zilog->zl_spa),
			    replay_txg + 1);

		dprintf("pass %d, retrying\n", pass);
	}

	ASSERT(error && error != ERESTART);
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	dmu_objset_name(zr->zr_os, name);
	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu\n",
	    error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype);
	zilog->zl_stop_replay = 1;
	kmem_free(name, MAXNAMELEN);
}

/* ARGSUSED */
static void
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zilog->zl_replay_blks++;
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, uint64_t *txgp,
    zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if (zil_empty(zilog)) {
		zil_destroy(zilog, B_TRUE);
		return;
	}

	zr.zr_os = os;
	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_txgp = txgp;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_stop_replay = 0;
	zilog->zl_replay_time = lbolt;
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg);
	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
}

/*
 * Report whether all transactions are committed.
 */
int
zil_is_committed(zilog_t *zilog)
{
	lwb_t *lwb;
	int ret;

	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);

	/* recent unpushed intent log transactions? */
	if (!list_is_empty(&zilog->zl_itx_list)) {
		ret = B_FALSE;
		goto out;
	}

	/* intent log never used? */
	lwb = list_head(&zilog->zl_lwb_list);
	if (lwb == NULL) {
		ret = B_TRUE;
		goto out;
	}

	/*
	 * more than 1 log buffer means zil_sync() hasn't yet freed
	 * entries after a txg has committed
	 */
	if (list_next(&zilog->zl_lwb_list, lwb)) {
		ret = B_FALSE;
		goto out;
	}

	ASSERT(zil_empty(zilog));
	ret = B_TRUE;
out:
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
	return (ret);
}