1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2017 by Delphix. All rights reserved. 24 * Copyright (c) 2014 Integros [integros.com] 25 */ 26 27 /* Portions Copyright 2010 Robert Milkowski */ 28 29 #include <sys/zfs_context.h> 30 #include <sys/spa.h> 31 #include <sys/dmu.h> 32 #include <sys/zap.h> 33 #include <sys/arc.h> 34 #include <sys/stat.h> 35 #include <sys/resource.h> 36 #include <sys/zil.h> 37 #include <sys/zil_impl.h> 38 #include <sys/dsl_dataset.h> 39 #include <sys/vdev_impl.h> 40 #include <sys/dmu_tx.h> 41 #include <sys/dsl_pool.h> 42 43 /* 44 * The zfs intent log (ZIL) saves transaction records of system calls 45 * that change the file system in memory with enough information 46 * to be able to replay them. These are stored in memory until 47 * either the DMU transaction group (txg) commits them to the stable pool 48 * and they can be discarded, or they are flushed to the stable log 49 * (also in the pool) due to a fsync, O_DSYNC or other synchronous 50 * requirement. In the event of a panic or power fail then those log 51 * records (transactions) are replayed. 52 * 53 * There is one ZIL per file system. Its on-disk (pool) format consists 54 * of 3 parts: 55 * 56 * - ZIL header 57 * - ZIL blocks 58 * - ZIL records 59 * 60 * A log record holds a system call transaction. Log blocks can 61 * hold many log records and the blocks are chained together. 62 * Each ZIL block contains a block pointer (blkptr_t) to the next 63 * ZIL block in the chain. The ZIL header points to the first 64 * block in the chain. Note there is not a fixed place in the pool 65 * to hold blocks. They are dynamically allocated and freed as 66 * needed from the blocks available. Figure X shows the ZIL structure: 67 */ 68 69 /* 70 * Disable intent logging replay. This global ZIL switch affects all pools. 71 */ 72 int zil_replay_disable = 0; 73 74 /* 75 * Tunable parameter for debugging or performance analysis. Setting 76 * zfs_nocacheflush will cause corruption on power loss if a volatile 77 * out-of-order write cache is enabled. 78 */ 79 boolean_t zfs_nocacheflush = B_FALSE; 80 81 static kmem_cache_t *zil_lwb_cache; 82 83 static void zil_async_to_sync(zilog_t *zilog, uint64_t foid); 84 85 #define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \ 86 sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused)) 87 88 static int 89 zil_bp_compare(const void *x1, const void *x2) 90 { 91 const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva; 92 const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva; 93 94 if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2)) 95 return (-1); 96 if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2)) 97 return (1); 98 99 if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2)) 100 return (-1); 101 if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2)) 102 return (1); 103 104 return (0); 105 } 106 107 static void 108 zil_bp_tree_init(zilog_t *zilog) 109 { 110 avl_create(&zilog->zl_bp_tree, zil_bp_compare, 111 sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node)); 112 } 113 114 static void 115 zil_bp_tree_fini(zilog_t *zilog) 116 { 117 avl_tree_t *t = &zilog->zl_bp_tree; 118 zil_bp_node_t *zn; 119 void *cookie = NULL; 120 121 while ((zn = avl_destroy_nodes(t, &cookie)) != NULL) 122 kmem_free(zn, sizeof (zil_bp_node_t)); 123 124 avl_destroy(t); 125 } 126 127 int 128 zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp) 129 { 130 avl_tree_t *t = &zilog->zl_bp_tree; 131 const dva_t *dva; 132 zil_bp_node_t *zn; 133 avl_index_t where; 134 135 if (BP_IS_EMBEDDED(bp)) 136 return (0); 137 138 dva = BP_IDENTITY(bp); 139 140 if (avl_find(t, dva, &where) != NULL) 141 return (SET_ERROR(EEXIST)); 142 143 zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP); 144 zn->zn_dva = *dva; 145 avl_insert(t, zn, where); 146 147 return (0); 148 } 149 150 static zil_header_t * 151 zil_header_in_syncing_context(zilog_t *zilog) 152 { 153 return ((zil_header_t *)zilog->zl_header); 154 } 155 156 static void 157 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp) 158 { 159 zio_cksum_t *zc = &bp->blk_cksum; 160 161 zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL); 162 zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL); 163 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os); 164 zc->zc_word[ZIL_ZC_SEQ] = 1ULL; 165 } 166 167 /* 168 * Read a log block and make sure it's valid. 169 */ 170 static int 171 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst, 172 char **end) 173 { 174 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 175 arc_flags_t aflags = ARC_FLAG_WAIT; 176 arc_buf_t *abuf = NULL; 177 zbookmark_phys_t zb; 178 int error; 179 180 if (zilog->zl_header->zh_claim_txg == 0) 181 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 182 183 if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 184 zio_flags |= ZIO_FLAG_SPECULATIVE; 185 186 SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET], 187 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]); 188 189 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 190 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 191 192 if (error == 0) { 193 zio_cksum_t cksum = bp->blk_cksum; 194 195 /* 196 * Validate the checksummed log block. 197 * 198 * Sequence numbers should be... sequential. The checksum 199 * verifier for the next block should be bp's checksum plus 1. 200 * 201 * Also check the log chain linkage and size used. 202 */ 203 cksum.zc_word[ZIL_ZC_SEQ]++; 204 205 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 206 zil_chain_t *zilc = abuf->b_data; 207 char *lr = (char *)(zilc + 1); 208 uint64_t len = zilc->zc_nused - sizeof (zil_chain_t); 209 210 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 211 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) { 212 error = SET_ERROR(ECKSUM); 213 } else { 214 ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE); 215 bcopy(lr, dst, len); 216 *end = (char *)dst + len; 217 *nbp = zilc->zc_next_blk; 218 } 219 } else { 220 char *lr = abuf->b_data; 221 uint64_t size = BP_GET_LSIZE(bp); 222 zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1; 223 224 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 225 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) || 226 (zilc->zc_nused > (size - sizeof (*zilc)))) { 227 error = SET_ERROR(ECKSUM); 228 } else { 229 ASSERT3U(zilc->zc_nused, <=, 230 SPA_OLD_MAXBLOCKSIZE); 231 bcopy(lr, dst, zilc->zc_nused); 232 *end = (char *)dst + zilc->zc_nused; 233 *nbp = zilc->zc_next_blk; 234 } 235 } 236 237 arc_buf_destroy(abuf, &abuf); 238 } 239 240 return (error); 241 } 242 243 /* 244 * Read a TX_WRITE log data block. 245 */ 246 static int 247 zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf) 248 { 249 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 250 const blkptr_t *bp = &lr->lr_blkptr; 251 arc_flags_t aflags = ARC_FLAG_WAIT; 252 arc_buf_t *abuf = NULL; 253 zbookmark_phys_t zb; 254 int error; 255 256 if (BP_IS_HOLE(bp)) { 257 if (wbuf != NULL) 258 bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length)); 259 return (0); 260 } 261 262 if (zilog->zl_header->zh_claim_txg == 0) 263 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 264 265 SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid, 266 ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp)); 267 268 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 269 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 270 271 if (error == 0) { 272 if (wbuf != NULL) 273 bcopy(abuf->b_data, wbuf, arc_buf_size(abuf)); 274 arc_buf_destroy(abuf, &abuf); 275 } 276 277 return (error); 278 } 279 280 /* 281 * Parse the intent log, and call parse_func for each valid record within. 282 */ 283 int 284 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, 285 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg) 286 { 287 const zil_header_t *zh = zilog->zl_header; 288 boolean_t claimed = !!zh->zh_claim_txg; 289 uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX; 290 uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX; 291 uint64_t max_blk_seq = 0; 292 uint64_t max_lr_seq = 0; 293 uint64_t blk_count = 0; 294 uint64_t lr_count = 0; 295 blkptr_t blk, next_blk; 296 char *lrbuf, *lrp; 297 int error = 0; 298 299 /* 300 * Old logs didn't record the maximum zh_claim_lr_seq. 301 */ 302 if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 303 claim_lr_seq = UINT64_MAX; 304 305 /* 306 * Starting at the block pointed to by zh_log we read the log chain. 307 * For each block in the chain we strongly check that block to 308 * ensure its validity. We stop when an invalid block is found. 309 * For each block pointer in the chain we call parse_blk_func(). 310 * For each record in each valid block we call parse_lr_func(). 311 * If the log has been claimed, stop if we encounter a sequence 312 * number greater than the highest claimed sequence number. 313 */ 314 lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE); 315 zil_bp_tree_init(zilog); 316 317 for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) { 318 uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ]; 319 int reclen; 320 char *end; 321 322 if (blk_seq > claim_blk_seq) 323 break; 324 if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0) 325 break; 326 ASSERT3U(max_blk_seq, <, blk_seq); 327 max_blk_seq = blk_seq; 328 blk_count++; 329 330 if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq) 331 break; 332 333 error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end); 334 if (error != 0) 335 break; 336 337 for (lrp = lrbuf; lrp < end; lrp += reclen) { 338 lr_t *lr = (lr_t *)lrp; 339 reclen = lr->lrc_reclen; 340 ASSERT3U(reclen, >=, sizeof (lr_t)); 341 if (lr->lrc_seq > claim_lr_seq) 342 goto done; 343 if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0) 344 goto done; 345 ASSERT3U(max_lr_seq, <, lr->lrc_seq); 346 max_lr_seq = lr->lrc_seq; 347 lr_count++; 348 } 349 } 350 done: 351 zilog->zl_parse_error = error; 352 zilog->zl_parse_blk_seq = max_blk_seq; 353 zilog->zl_parse_lr_seq = max_lr_seq; 354 zilog->zl_parse_blk_count = blk_count; 355 zilog->zl_parse_lr_count = lr_count; 356 357 ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) || 358 (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq)); 359 360 zil_bp_tree_fini(zilog); 361 zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE); 362 363 return (error); 364 } 365 366 static int 367 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg) 368 { 369 /* 370 * Claim log block if not already committed and not already claimed. 371 * If tx == NULL, just verify that the block is claimable. 372 */ 373 if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg || 374 zil_bp_tree_add(zilog, bp) != 0) 375 return (0); 376 377 return (zio_wait(zio_claim(NULL, zilog->zl_spa, 378 tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL, 379 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB))); 380 } 381 382 static int 383 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg) 384 { 385 lr_write_t *lr = (lr_write_t *)lrc; 386 int error; 387 388 if (lrc->lrc_txtype != TX_WRITE) 389 return (0); 390 391 /* 392 * If the block is not readable, don't claim it. This can happen 393 * in normal operation when a log block is written to disk before 394 * some of the dmu_sync() blocks it points to. In this case, the 395 * transaction cannot have been committed to anyone (we would have 396 * waited for all writes to be stable first), so it is semantically 397 * correct to declare this the end of the log. 398 */ 399 if (lr->lr_blkptr.blk_birth >= first_txg && 400 (error = zil_read_log_data(zilog, lr, NULL)) != 0) 401 return (error); 402 return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg)); 403 } 404 405 /* ARGSUSED */ 406 static int 407 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg) 408 { 409 zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 410 411 return (0); 412 } 413 414 static int 415 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg) 416 { 417 lr_write_t *lr = (lr_write_t *)lrc; 418 blkptr_t *bp = &lr->lr_blkptr; 419 420 /* 421 * If we previously claimed it, we need to free it. 422 */ 423 if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE && 424 bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 && 425 !BP_IS_HOLE(bp)) 426 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 427 428 return (0); 429 } 430 431 static lwb_t * 432 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg) 433 { 434 lwb_t *lwb; 435 436 lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP); 437 lwb->lwb_zilog = zilog; 438 lwb->lwb_blk = *bp; 439 lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp)); 440 lwb->lwb_max_txg = txg; 441 lwb->lwb_zio = NULL; 442 lwb->lwb_tx = NULL; 443 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 444 lwb->lwb_nused = sizeof (zil_chain_t); 445 lwb->lwb_sz = BP_GET_LSIZE(bp); 446 } else { 447 lwb->lwb_nused = 0; 448 lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t); 449 } 450 451 mutex_enter(&zilog->zl_lock); 452 list_insert_tail(&zilog->zl_lwb_list, lwb); 453 mutex_exit(&zilog->zl_lock); 454 455 return (lwb); 456 } 457 458 /* 459 * Called when we create in-memory log transactions so that we know 460 * to cleanup the itxs at the end of spa_sync(). 461 */ 462 void 463 zilog_dirty(zilog_t *zilog, uint64_t txg) 464 { 465 dsl_pool_t *dp = zilog->zl_dmu_pool; 466 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os); 467 468 if (ds->ds_is_snapshot) 469 panic("dirtying snapshot!"); 470 471 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) { 472 /* up the hold count until we can be written out */ 473 dmu_buf_add_ref(ds->ds_dbuf, zilog); 474 } 475 } 476 477 /* 478 * Determine if the zil is dirty in the specified txg. Callers wanting to 479 * ensure that the dirty state does not change must hold the itxg_lock for 480 * the specified txg. Holding the lock will ensure that the zil cannot be 481 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current 482 * state. 483 */ 484 boolean_t 485 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg) 486 { 487 dsl_pool_t *dp = zilog->zl_dmu_pool; 488 489 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK)) 490 return (B_TRUE); 491 return (B_FALSE); 492 } 493 494 /* 495 * Determine if the zil is dirty. The zil is considered dirty if it has 496 * any pending itx records that have not been cleaned by zil_clean(). 497 */ 498 boolean_t 499 zilog_is_dirty(zilog_t *zilog) 500 { 501 dsl_pool_t *dp = zilog->zl_dmu_pool; 502 503 for (int t = 0; t < TXG_SIZE; t++) { 504 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t)) 505 return (B_TRUE); 506 } 507 return (B_FALSE); 508 } 509 510 /* 511 * Create an on-disk intent log. 512 */ 513 static lwb_t * 514 zil_create(zilog_t *zilog) 515 { 516 const zil_header_t *zh = zilog->zl_header; 517 lwb_t *lwb = NULL; 518 uint64_t txg = 0; 519 dmu_tx_t *tx = NULL; 520 blkptr_t blk; 521 int error = 0; 522 523 /* 524 * Wait for any previous destroy to complete. 525 */ 526 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 527 528 ASSERT(zh->zh_claim_txg == 0); 529 ASSERT(zh->zh_replay_seq == 0); 530 531 blk = zh->zh_log; 532 533 /* 534 * Allocate an initial log block if: 535 * - there isn't one already 536 * - the existing block is the wrong endianess 537 */ 538 if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) { 539 tx = dmu_tx_create(zilog->zl_os); 540 VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0); 541 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 542 txg = dmu_tx_get_txg(tx); 543 544 if (!BP_IS_HOLE(&blk)) { 545 zio_free_zil(zilog->zl_spa, txg, &blk); 546 BP_ZERO(&blk); 547 } 548 549 error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL, 550 ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY); 551 552 if (error == 0) 553 zil_init_log_chain(zilog, &blk); 554 } 555 556 /* 557 * Allocate a log write buffer (lwb) for the first log block. 558 */ 559 if (error == 0) 560 lwb = zil_alloc_lwb(zilog, &blk, txg); 561 562 /* 563 * If we just allocated the first log block, commit our transaction 564 * and wait for zil_sync() to stuff the block poiner into zh_log. 565 * (zh is part of the MOS, so we cannot modify it in open context.) 566 */ 567 if (tx != NULL) { 568 dmu_tx_commit(tx); 569 txg_wait_synced(zilog->zl_dmu_pool, txg); 570 } 571 572 ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0); 573 574 return (lwb); 575 } 576 577 /* 578 * In one tx, free all log blocks and clear the log header. 579 * If keep_first is set, then we're replaying a log with no content. 580 * We want to keep the first block, however, so that the first 581 * synchronous transaction doesn't require a txg_wait_synced() 582 * in zil_create(). We don't need to txg_wait_synced() here either 583 * when keep_first is set, because both zil_create() and zil_destroy() 584 * will wait for any in-progress destroys to complete. 585 */ 586 void 587 zil_destroy(zilog_t *zilog, boolean_t keep_first) 588 { 589 const zil_header_t *zh = zilog->zl_header; 590 lwb_t *lwb; 591 dmu_tx_t *tx; 592 uint64_t txg; 593 594 /* 595 * Wait for any previous destroy to complete. 596 */ 597 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 598 599 zilog->zl_old_header = *zh; /* debugging aid */ 600 601 if (BP_IS_HOLE(&zh->zh_log)) 602 return; 603 604 tx = dmu_tx_create(zilog->zl_os); 605 VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0); 606 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 607 txg = dmu_tx_get_txg(tx); 608 609 mutex_enter(&zilog->zl_lock); 610 611 ASSERT3U(zilog->zl_destroy_txg, <, txg); 612 zilog->zl_destroy_txg = txg; 613 zilog->zl_keep_first = keep_first; 614 615 if (!list_is_empty(&zilog->zl_lwb_list)) { 616 ASSERT(zh->zh_claim_txg == 0); 617 VERIFY(!keep_first); 618 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 619 list_remove(&zilog->zl_lwb_list, lwb); 620 if (lwb->lwb_buf != NULL) 621 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 622 zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk); 623 kmem_cache_free(zil_lwb_cache, lwb); 624 } 625 } else if (!keep_first) { 626 zil_destroy_sync(zilog, tx); 627 } 628 mutex_exit(&zilog->zl_lock); 629 630 dmu_tx_commit(tx); 631 } 632 633 void 634 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx) 635 { 636 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 637 (void) zil_parse(zilog, zil_free_log_block, 638 zil_free_log_record, tx, zilog->zl_header->zh_claim_txg); 639 } 640 641 int 642 zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg) 643 { 644 dmu_tx_t *tx = txarg; 645 uint64_t first_txg = dmu_tx_get_txg(tx); 646 zilog_t *zilog; 647 zil_header_t *zh; 648 objset_t *os; 649 int error; 650 651 error = dmu_objset_own_obj(dp, ds->ds_object, 652 DMU_OST_ANY, B_FALSE, FTAG, &os); 653 if (error != 0) { 654 /* 655 * EBUSY indicates that the objset is inconsistent, in which 656 * case it can not have a ZIL. 657 */ 658 if (error != EBUSY) { 659 cmn_err(CE_WARN, "can't open objset for %llu, error %u", 660 (unsigned long long)ds->ds_object, error); 661 } 662 return (0); 663 } 664 665 zilog = dmu_objset_zil(os); 666 zh = zil_header_in_syncing_context(zilog); 667 668 if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) { 669 if (!BP_IS_HOLE(&zh->zh_log)) 670 zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log); 671 BP_ZERO(&zh->zh_log); 672 dsl_dataset_dirty(dmu_objset_ds(os), tx); 673 dmu_objset_disown(os, FTAG); 674 return (0); 675 } 676 677 /* 678 * Claim all log blocks if we haven't already done so, and remember 679 * the highest claimed sequence number. This ensures that if we can 680 * read only part of the log now (e.g. due to a missing device), 681 * but we can read the entire log later, we will not try to replay 682 * or destroy beyond the last block we successfully claimed. 683 */ 684 ASSERT3U(zh->zh_claim_txg, <=, first_txg); 685 if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) { 686 (void) zil_parse(zilog, zil_claim_log_block, 687 zil_claim_log_record, tx, first_txg); 688 zh->zh_claim_txg = first_txg; 689 zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq; 690 zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq; 691 if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1) 692 zh->zh_flags |= ZIL_REPLAY_NEEDED; 693 zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID; 694 dsl_dataset_dirty(dmu_objset_ds(os), tx); 695 } 696 697 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1)); 698 dmu_objset_disown(os, FTAG); 699 return (0); 700 } 701 702 /* 703 * Check the log by walking the log chain. 704 * Checksum errors are ok as they indicate the end of the chain. 705 * Any other error (no device or read failure) returns an error. 706 */ 707 /* ARGSUSED */ 708 int 709 zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx) 710 { 711 zilog_t *zilog; 712 objset_t *os; 713 blkptr_t *bp; 714 int error; 715 716 ASSERT(tx == NULL); 717 718 error = dmu_objset_from_ds(ds, &os); 719 if (error != 0) { 720 cmn_err(CE_WARN, "can't open objset %llu, error %d", 721 (unsigned long long)ds->ds_object, error); 722 return (0); 723 } 724 725 zilog = dmu_objset_zil(os); 726 bp = (blkptr_t *)&zilog->zl_header->zh_log; 727 728 /* 729 * Check the first block and determine if it's on a log device 730 * which may have been removed or faulted prior to loading this 731 * pool. If so, there's no point in checking the rest of the log 732 * as its content should have already been synced to the pool. 733 */ 734 if (!BP_IS_HOLE(bp)) { 735 vdev_t *vd; 736 boolean_t valid = B_TRUE; 737 738 spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER); 739 vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0])); 740 if (vd->vdev_islog && vdev_is_dead(vd)) 741 valid = vdev_log_state_valid(vd); 742 spa_config_exit(os->os_spa, SCL_STATE, FTAG); 743 744 if (!valid) 745 return (0); 746 } 747 748 /* 749 * Because tx == NULL, zil_claim_log_block() will not actually claim 750 * any blocks, but just determine whether it is possible to do so. 751 * In addition to checking the log chain, zil_claim_log_block() 752 * will invoke zio_claim() with a done func of spa_claim_notify(), 753 * which will update spa_max_claim_txg. See spa_load() for details. 754 */ 755 error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx, 756 zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa)); 757 758 return ((error == ECKSUM || error == ENOENT) ? 0 : error); 759 } 760 761 static int 762 zil_vdev_compare(const void *x1, const void *x2) 763 { 764 const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev; 765 const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev; 766 767 if (v1 < v2) 768 return (-1); 769 if (v1 > v2) 770 return (1); 771 772 return (0); 773 } 774 775 void 776 zil_add_block(zilog_t *zilog, const blkptr_t *bp) 777 { 778 avl_tree_t *t = &zilog->zl_vdev_tree; 779 avl_index_t where; 780 zil_vdev_node_t *zv, zvsearch; 781 int ndvas = BP_GET_NDVAS(bp); 782 int i; 783 784 if (zfs_nocacheflush) 785 return; 786 787 ASSERT(zilog->zl_writer); 788 789 /* 790 * Even though we're zl_writer, we still need a lock because the 791 * zl_get_data() callbacks may have dmu_sync() done callbacks 792 * that will run concurrently. 793 */ 794 mutex_enter(&zilog->zl_vdev_lock); 795 for (i = 0; i < ndvas; i++) { 796 zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 797 if (avl_find(t, &zvsearch, &where) == NULL) { 798 zv = kmem_alloc(sizeof (*zv), KM_SLEEP); 799 zv->zv_vdev = zvsearch.zv_vdev; 800 avl_insert(t, zv, where); 801 } 802 } 803 mutex_exit(&zilog->zl_vdev_lock); 804 } 805 806 static void 807 zil_flush_vdevs(zilog_t *zilog) 808 { 809 spa_t *spa = zilog->zl_spa; 810 avl_tree_t *t = &zilog->zl_vdev_tree; 811 void *cookie = NULL; 812 zil_vdev_node_t *zv; 813 zio_t *zio; 814 815 ASSERT(zilog->zl_writer); 816 817 /* 818 * We don't need zl_vdev_lock here because we're the zl_writer, 819 * and all zl_get_data() callbacks are done. 820 */ 821 if (avl_numnodes(t) == 0) 822 return; 823 824 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 825 826 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 827 828 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) { 829 vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev); 830 if (vd != NULL) 831 zio_flush(zio, vd); 832 kmem_free(zv, sizeof (*zv)); 833 } 834 835 /* 836 * Wait for all the flushes to complete. Not all devices actually 837 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails. 838 */ 839 (void) zio_wait(zio); 840 841 spa_config_exit(spa, SCL_STATE, FTAG); 842 } 843 844 /* 845 * Function called when a log block write completes 846 */ 847 static void 848 zil_lwb_write_done(zio_t *zio) 849 { 850 lwb_t *lwb = zio->io_private; 851 zilog_t *zilog = lwb->lwb_zilog; 852 dmu_tx_t *tx = lwb->lwb_tx; 853 854 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); 855 ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG); 856 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 857 ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER); 858 ASSERT(!BP_IS_GANG(zio->io_bp)); 859 ASSERT(!BP_IS_HOLE(zio->io_bp)); 860 ASSERT(BP_GET_FILL(zio->io_bp) == 0); 861 862 /* 863 * Ensure the lwb buffer pointer is cleared before releasing 864 * the txg. If we have had an allocation failure and 865 * the txg is waiting to sync then we want want zil_sync() 866 * to remove the lwb so that it's not picked up as the next new 867 * one in zil_commit_writer(). zil_sync() will only remove 868 * the lwb if lwb_buf is null. 869 */ 870 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 871 mutex_enter(&zilog->zl_lock); 872 lwb->lwb_buf = NULL; 873 lwb->lwb_tx = NULL; 874 mutex_exit(&zilog->zl_lock); 875 876 /* 877 * Now that we've written this log block, we have a stable pointer 878 * to the next block in the chain, so it's OK to let the txg in 879 * which we allocated the next block sync. 880 */ 881 dmu_tx_commit(tx); 882 } 883 884 /* 885 * Initialize the io for a log block. 886 */ 887 static void 888 zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb) 889 { 890 zbookmark_phys_t zb; 891 892 SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET], 893 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, 894 lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]); 895 896 if (zilog->zl_root_zio == NULL) { 897 zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL, 898 ZIO_FLAG_CANFAIL); 899 } 900 if (lwb->lwb_zio == NULL) { 901 lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa, 902 0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk), 903 zil_lwb_write_done, lwb, ZIO_PRIORITY_SYNC_WRITE, 904 ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb); 905 } 906 } 907 908 /* 909 * Define a limited set of intent log block sizes. 910 * 911 * These must be a multiple of 4KB. Note only the amount used (again 912 * aligned to 4KB) actually gets written. However, we can't always just 913 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted. 914 */ 915 uint64_t zil_block_buckets[] = { 916 4096, /* non TX_WRITE */ 917 8192+4096, /* data base */ 918 32*1024 + 4096, /* NFS writes */ 919 UINT64_MAX 920 }; 921 922 /* 923 * Use the slog as long as the logbias is 'latency' and the current commit size 924 * is less than the limit or the total list size is less than 2X the limit. 925 * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX. 926 */ 927 uint64_t zil_slog_limit = 1024 * 1024; 928 #define USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \ 929 (((zilog)->zl_cur_used < zil_slog_limit) || \ 930 ((zilog)->zl_itx_list_sz < (zil_slog_limit << 1)))) 931 932 /* 933 * Start a log block write and advance to the next log block. 934 * Calls are serialized. 935 */ 936 static lwb_t * 937 zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb) 938 { 939 lwb_t *nlwb = NULL; 940 zil_chain_t *zilc; 941 spa_t *spa = zilog->zl_spa; 942 blkptr_t *bp; 943 dmu_tx_t *tx; 944 uint64_t txg; 945 uint64_t zil_blksz, wsz; 946 int i, error; 947 948 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 949 zilc = (zil_chain_t *)lwb->lwb_buf; 950 bp = &zilc->zc_next_blk; 951 } else { 952 zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz); 953 bp = &zilc->zc_next_blk; 954 } 955 956 ASSERT(lwb->lwb_nused <= lwb->lwb_sz); 957 958 /* 959 * Allocate the next block and save its address in this block 960 * before writing it in order to establish the log chain. 961 * Note that if the allocation of nlwb synced before we wrote 962 * the block that points at it (lwb), we'd leak it if we crashed. 963 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done(). 964 * We dirty the dataset to ensure that zil_sync() will be called 965 * to clean up in the event of allocation failure or I/O failure. 966 */ 967 tx = dmu_tx_create(zilog->zl_os); 968 VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0); 969 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 970 txg = dmu_tx_get_txg(tx); 971 972 lwb->lwb_tx = tx; 973 974 /* 975 * Log blocks are pre-allocated. Here we select the size of the next 976 * block, based on size used in the last block. 977 * - first find the smallest bucket that will fit the block from a 978 * limited set of block sizes. This is because it's faster to write 979 * blocks allocated from the same metaslab as they are adjacent or 980 * close. 981 * - next find the maximum from the new suggested size and an array of 982 * previous sizes. This lessens a picket fence effect of wrongly 983 * guesssing the size if we have a stream of say 2k, 64k, 2k, 64k 984 * requests. 985 * 986 * Note we only write what is used, but we can't just allocate 987 * the maximum block size because we can exhaust the available 988 * pool log space. 989 */ 990 zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t); 991 for (i = 0; zil_blksz > zil_block_buckets[i]; i++) 992 continue; 993 zil_blksz = zil_block_buckets[i]; 994 if (zil_blksz == UINT64_MAX) 995 zil_blksz = SPA_OLD_MAXBLOCKSIZE; 996 zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz; 997 for (i = 0; i < ZIL_PREV_BLKS; i++) 998 zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]); 999 zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1); 1000 1001 BP_ZERO(bp); 1002 /* pass the old blkptr in order to spread log blocks across devs */ 1003 error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz, 1004 USE_SLOG(zilog)); 1005 if (error == 0) { 1006 ASSERT3U(bp->blk_birth, ==, txg); 1007 bp->blk_cksum = lwb->lwb_blk.blk_cksum; 1008 bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++; 1009 1010 /* 1011 * Allocate a new log write buffer (lwb). 1012 */ 1013 nlwb = zil_alloc_lwb(zilog, bp, txg); 1014 1015 /* Record the block for later vdev flushing */ 1016 zil_add_block(zilog, &lwb->lwb_blk); 1017 } 1018 1019 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 1020 /* For Slim ZIL only write what is used. */ 1021 wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t); 1022 ASSERT3U(wsz, <=, lwb->lwb_sz); 1023 zio_shrink(lwb->lwb_zio, wsz); 1024 1025 } else { 1026 wsz = lwb->lwb_sz; 1027 } 1028 1029 zilc->zc_pad = 0; 1030 zilc->zc_nused = lwb->lwb_nused; 1031 zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum; 1032 1033 /* 1034 * clear unused data for security 1035 */ 1036 bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused); 1037 1038 zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */ 1039 1040 /* 1041 * If there was an allocation failure then nlwb will be null which 1042 * forces a txg_wait_synced(). 1043 */ 1044 return (nlwb); 1045 } 1046 1047 static lwb_t * 1048 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb) 1049 { 1050 lr_t *lrc = &itx->itx_lr; /* common log record */ 1051 lr_write_t *lrw = (lr_write_t *)lrc; 1052 char *lr_buf; 1053 uint64_t txg = lrc->lrc_txg; 1054 uint64_t reclen = lrc->lrc_reclen; 1055 uint64_t dlen = 0; 1056 1057 if (lwb == NULL) 1058 return (NULL); 1059 1060 ASSERT(lwb->lwb_buf != NULL); 1061 1062 if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) 1063 dlen = P2ROUNDUP_TYPED( 1064 lrw->lr_length, sizeof (uint64_t), uint64_t); 1065 1066 zilog->zl_cur_used += (reclen + dlen); 1067 1068 zil_lwb_write_init(zilog, lwb); 1069 1070 /* 1071 * If this record won't fit in the current log block, start a new one. 1072 */ 1073 if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) { 1074 lwb = zil_lwb_write_start(zilog, lwb); 1075 if (lwb == NULL) 1076 return (NULL); 1077 zil_lwb_write_init(zilog, lwb); 1078 ASSERT(LWB_EMPTY(lwb)); 1079 if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) { 1080 txg_wait_synced(zilog->zl_dmu_pool, txg); 1081 return (lwb); 1082 } 1083 } 1084 1085 lr_buf = lwb->lwb_buf + lwb->lwb_nused; 1086 bcopy(lrc, lr_buf, reclen); 1087 lrc = (lr_t *)lr_buf; 1088 lrw = (lr_write_t *)lrc; 1089 1090 /* 1091 * If it's a write, fetch the data or get its blkptr as appropriate. 1092 */ 1093 if (lrc->lrc_txtype == TX_WRITE) { 1094 if (txg > spa_freeze_txg(zilog->zl_spa)) 1095 txg_wait_synced(zilog->zl_dmu_pool, txg); 1096 if (itx->itx_wr_state != WR_COPIED) { 1097 char *dbuf; 1098 int error; 1099 1100 if (dlen) { 1101 ASSERT(itx->itx_wr_state == WR_NEED_COPY); 1102 dbuf = lr_buf + reclen; 1103 lrw->lr_common.lrc_reclen += dlen; 1104 } else { 1105 ASSERT(itx->itx_wr_state == WR_INDIRECT); 1106 dbuf = NULL; 1107 } 1108 error = zilog->zl_get_data( 1109 itx->itx_private, lrw, dbuf, lwb->lwb_zio); 1110 if (error == EIO) { 1111 txg_wait_synced(zilog->zl_dmu_pool, txg); 1112 return (lwb); 1113 } 1114 if (error != 0) { 1115 ASSERT(error == ENOENT || error == EEXIST || 1116 error == EALREADY); 1117 return (lwb); 1118 } 1119 } 1120 } 1121 1122 /* 1123 * We're actually making an entry, so update lrc_seq to be the 1124 * log record sequence number. Note that this is generally not 1125 * equal to the itx sequence number because not all transactions 1126 * are synchronous, and sometimes spa_sync() gets there first. 1127 */ 1128 lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */ 1129 lwb->lwb_nused += reclen + dlen; 1130 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg); 1131 ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz); 1132 ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t))); 1133 1134 return (lwb); 1135 } 1136 1137 itx_t * 1138 zil_itx_create(uint64_t txtype, size_t lrsize) 1139 { 1140 itx_t *itx; 1141 1142 lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t); 1143 1144 itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP); 1145 itx->itx_lr.lrc_txtype = txtype; 1146 itx->itx_lr.lrc_reclen = lrsize; 1147 itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */ 1148 itx->itx_lr.lrc_seq = 0; /* defensive */ 1149 itx->itx_sync = B_TRUE; /* default is synchronous */ 1150 1151 return (itx); 1152 } 1153 1154 void 1155 zil_itx_destroy(itx_t *itx) 1156 { 1157 kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen); 1158 } 1159 1160 /* 1161 * Free up the sync and async itxs. The itxs_t has already been detached 1162 * so no locks are needed. 1163 */ 1164 static void 1165 zil_itxg_clean(itxs_t *itxs) 1166 { 1167 itx_t *itx; 1168 list_t *list; 1169 avl_tree_t *t; 1170 void *cookie; 1171 itx_async_node_t *ian; 1172 1173 list = &itxs->i_sync_list; 1174 while ((itx = list_head(list)) != NULL) { 1175 list_remove(list, itx); 1176 kmem_free(itx, offsetof(itx_t, itx_lr) + 1177 itx->itx_lr.lrc_reclen); 1178 } 1179 1180 cookie = NULL; 1181 t = &itxs->i_async_tree; 1182 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 1183 list = &ian->ia_list; 1184 while ((itx = list_head(list)) != NULL) { 1185 list_remove(list, itx); 1186 kmem_free(itx, offsetof(itx_t, itx_lr) + 1187 itx->itx_lr.lrc_reclen); 1188 } 1189 list_destroy(list); 1190 kmem_free(ian, sizeof (itx_async_node_t)); 1191 } 1192 avl_destroy(t); 1193 1194 kmem_free(itxs, sizeof (itxs_t)); 1195 } 1196 1197 static int 1198 zil_aitx_compare(const void *x1, const void *x2) 1199 { 1200 const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid; 1201 const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid; 1202 1203 if (o1 < o2) 1204 return (-1); 1205 if (o1 > o2) 1206 return (1); 1207 1208 return (0); 1209 } 1210 1211 /* 1212 * Remove all async itx with the given oid. 1213 */ 1214 static void 1215 zil_remove_async(zilog_t *zilog, uint64_t oid) 1216 { 1217 uint64_t otxg, txg; 1218 itx_async_node_t *ian; 1219 avl_tree_t *t; 1220 avl_index_t where; 1221 list_t clean_list; 1222 itx_t *itx; 1223 1224 ASSERT(oid != 0); 1225 list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); 1226 1227 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1228 otxg = ZILTEST_TXG; 1229 else 1230 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1231 1232 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1233 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1234 1235 mutex_enter(&itxg->itxg_lock); 1236 if (itxg->itxg_txg != txg) { 1237 mutex_exit(&itxg->itxg_lock); 1238 continue; 1239 } 1240 1241 /* 1242 * Locate the object node and append its list. 1243 */ 1244 t = &itxg->itxg_itxs->i_async_tree; 1245 ian = avl_find(t, &oid, &where); 1246 if (ian != NULL) 1247 list_move_tail(&clean_list, &ian->ia_list); 1248 mutex_exit(&itxg->itxg_lock); 1249 } 1250 while ((itx = list_head(&clean_list)) != NULL) { 1251 list_remove(&clean_list, itx); 1252 kmem_free(itx, offsetof(itx_t, itx_lr) + 1253 itx->itx_lr.lrc_reclen); 1254 } 1255 list_destroy(&clean_list); 1256 } 1257 1258 void 1259 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) 1260 { 1261 uint64_t txg; 1262 itxg_t *itxg; 1263 itxs_t *itxs, *clean = NULL; 1264 1265 /* 1266 * Object ids can be re-instantiated in the next txg so 1267 * remove any async transactions to avoid future leaks. 1268 * This can happen if a fsync occurs on the re-instantiated 1269 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets 1270 * the new file data and flushes a write record for the old object. 1271 */ 1272 if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE) 1273 zil_remove_async(zilog, itx->itx_oid); 1274 1275 /* 1276 * Ensure the data of a renamed file is committed before the rename. 1277 */ 1278 if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME) 1279 zil_async_to_sync(zilog, itx->itx_oid); 1280 1281 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) 1282 txg = ZILTEST_TXG; 1283 else 1284 txg = dmu_tx_get_txg(tx); 1285 1286 itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1287 mutex_enter(&itxg->itxg_lock); 1288 itxs = itxg->itxg_itxs; 1289 if (itxg->itxg_txg != txg) { 1290 if (itxs != NULL) { 1291 /* 1292 * The zil_clean callback hasn't got around to cleaning 1293 * this itxg. Save the itxs for release below. 1294 * This should be rare. 1295 */ 1296 zfs_dbgmsg("zil_itx_assign: missed itx cleanup for " 1297 "txg %llu", itxg->itxg_txg); 1298 atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod); 1299 itxg->itxg_sod = 0; 1300 clean = itxg->itxg_itxs; 1301 } 1302 ASSERT(itxg->itxg_sod == 0); 1303 itxg->itxg_txg = txg; 1304 itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP); 1305 1306 list_create(&itxs->i_sync_list, sizeof (itx_t), 1307 offsetof(itx_t, itx_node)); 1308 avl_create(&itxs->i_async_tree, zil_aitx_compare, 1309 sizeof (itx_async_node_t), 1310 offsetof(itx_async_node_t, ia_node)); 1311 } 1312 if (itx->itx_sync) { 1313 list_insert_tail(&itxs->i_sync_list, itx); 1314 atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod); 1315 itxg->itxg_sod += itx->itx_sod; 1316 } else { 1317 avl_tree_t *t = &itxs->i_async_tree; 1318 uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid; 1319 itx_async_node_t *ian; 1320 avl_index_t where; 1321 1322 ian = avl_find(t, &foid, &where); 1323 if (ian == NULL) { 1324 ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP); 1325 list_create(&ian->ia_list, sizeof (itx_t), 1326 offsetof(itx_t, itx_node)); 1327 ian->ia_foid = foid; 1328 avl_insert(t, ian, where); 1329 } 1330 list_insert_tail(&ian->ia_list, itx); 1331 } 1332 1333 itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx); 1334 zilog_dirty(zilog, txg); 1335 mutex_exit(&itxg->itxg_lock); 1336 1337 /* Release the old itxs now we've dropped the lock */ 1338 if (clean != NULL) 1339 zil_itxg_clean(clean); 1340 } 1341 1342 /* 1343 * If there are any in-memory intent log transactions which have now been 1344 * synced then start up a taskq to free them. We should only do this after we 1345 * have written out the uberblocks (i.e. txg has been comitted) so that 1346 * don't inadvertently clean out in-memory log records that would be required 1347 * by zil_commit(). 1348 */ 1349 void 1350 zil_clean(zilog_t *zilog, uint64_t synced_txg) 1351 { 1352 itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK]; 1353 itxs_t *clean_me; 1354 1355 mutex_enter(&itxg->itxg_lock); 1356 if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) { 1357 mutex_exit(&itxg->itxg_lock); 1358 return; 1359 } 1360 ASSERT3U(itxg->itxg_txg, <=, synced_txg); 1361 ASSERT(itxg->itxg_txg != 0); 1362 ASSERT(zilog->zl_clean_taskq != NULL); 1363 atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod); 1364 itxg->itxg_sod = 0; 1365 clean_me = itxg->itxg_itxs; 1366 itxg->itxg_itxs = NULL; 1367 itxg->itxg_txg = 0; 1368 mutex_exit(&itxg->itxg_lock); 1369 /* 1370 * Preferably start a task queue to free up the old itxs but 1371 * if taskq_dispatch can't allocate resources to do that then 1372 * free it in-line. This should be rare. Note, using TQ_SLEEP 1373 * created a bad performance problem. 1374 */ 1375 if (taskq_dispatch(zilog->zl_clean_taskq, 1376 (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == NULL) 1377 zil_itxg_clean(clean_me); 1378 } 1379 1380 /* 1381 * Get the list of itxs to commit into zl_itx_commit_list. 1382 */ 1383 static void 1384 zil_get_commit_list(zilog_t *zilog) 1385 { 1386 uint64_t otxg, txg; 1387 list_t *commit_list = &zilog->zl_itx_commit_list; 1388 uint64_t push_sod = 0; 1389 1390 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1391 otxg = ZILTEST_TXG; 1392 else 1393 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1394 1395 /* 1396 * This is inherently racy, since there is nothing to prevent 1397 * the last synced txg from changing. That's okay since we'll 1398 * only commit things in the future. 1399 */ 1400 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1401 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1402 1403 mutex_enter(&itxg->itxg_lock); 1404 if (itxg->itxg_txg != txg) { 1405 mutex_exit(&itxg->itxg_lock); 1406 continue; 1407 } 1408 1409 /* 1410 * If we're adding itx records to the zl_itx_commit_list, 1411 * then the zil better be dirty in this "txg". We can assert 1412 * that here since we're holding the itxg_lock which will 1413 * prevent spa_sync from cleaning it. Once we add the itxs 1414 * to the zl_itx_commit_list we must commit it to disk even 1415 * if it's unnecessary (i.e. the txg was synced). 1416 */ 1417 ASSERT(zilog_is_dirty_in_txg(zilog, txg) || 1418 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX); 1419 list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list); 1420 push_sod += itxg->itxg_sod; 1421 itxg->itxg_sod = 0; 1422 1423 mutex_exit(&itxg->itxg_lock); 1424 } 1425 atomic_add_64(&zilog->zl_itx_list_sz, -push_sod); 1426 } 1427 1428 /* 1429 * Move the async itxs for a specified object to commit into sync lists. 1430 */ 1431 static void 1432 zil_async_to_sync(zilog_t *zilog, uint64_t foid) 1433 { 1434 uint64_t otxg, txg; 1435 itx_async_node_t *ian; 1436 avl_tree_t *t; 1437 avl_index_t where; 1438 1439 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1440 otxg = ZILTEST_TXG; 1441 else 1442 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1443 1444 /* 1445 * This is inherently racy, since there is nothing to prevent 1446 * the last synced txg from changing. 1447 */ 1448 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1449 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1450 1451 mutex_enter(&itxg->itxg_lock); 1452 if (itxg->itxg_txg != txg) { 1453 mutex_exit(&itxg->itxg_lock); 1454 continue; 1455 } 1456 1457 /* 1458 * If a foid is specified then find that node and append its 1459 * list. Otherwise walk the tree appending all the lists 1460 * to the sync list. We add to the end rather than the 1461 * beginning to ensure the create has happened. 1462 */ 1463 t = &itxg->itxg_itxs->i_async_tree; 1464 if (foid != 0) { 1465 ian = avl_find(t, &foid, &where); 1466 if (ian != NULL) { 1467 list_move_tail(&itxg->itxg_itxs->i_sync_list, 1468 &ian->ia_list); 1469 } 1470 } else { 1471 void *cookie = NULL; 1472 1473 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 1474 list_move_tail(&itxg->itxg_itxs->i_sync_list, 1475 &ian->ia_list); 1476 list_destroy(&ian->ia_list); 1477 kmem_free(ian, sizeof (itx_async_node_t)); 1478 } 1479 } 1480 mutex_exit(&itxg->itxg_lock); 1481 } 1482 } 1483 1484 static void 1485 zil_commit_writer(zilog_t *zilog) 1486 { 1487 uint64_t txg; 1488 itx_t *itx; 1489 lwb_t *lwb; 1490 spa_t *spa = zilog->zl_spa; 1491 int error = 0; 1492 1493 ASSERT(zilog->zl_root_zio == NULL); 1494 1495 mutex_exit(&zilog->zl_lock); 1496 1497 zil_get_commit_list(zilog); 1498 1499 /* 1500 * Return if there's nothing to commit before we dirty the fs by 1501 * calling zil_create(). 1502 */ 1503 if (list_head(&zilog->zl_itx_commit_list) == NULL) { 1504 mutex_enter(&zilog->zl_lock); 1505 return; 1506 } 1507 1508 if (zilog->zl_suspend) { 1509 lwb = NULL; 1510 } else { 1511 lwb = list_tail(&zilog->zl_lwb_list); 1512 if (lwb == NULL) 1513 lwb = zil_create(zilog); 1514 } 1515 1516 DTRACE_PROBE1(zil__cw1, zilog_t *, zilog); 1517 while (itx = list_head(&zilog->zl_itx_commit_list)) { 1518 txg = itx->itx_lr.lrc_txg; 1519 ASSERT3U(txg, !=, 0); 1520 1521 /* 1522 * This is inherently racy and may result in us writing 1523 * out a log block for a txg that was just synced. This is 1524 * ok since we'll end cleaning up that log block the next 1525 * time we call zil_sync(). 1526 */ 1527 if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa)) 1528 lwb = zil_lwb_commit(zilog, itx, lwb); 1529 list_remove(&zilog->zl_itx_commit_list, itx); 1530 kmem_free(itx, offsetof(itx_t, itx_lr) 1531 + itx->itx_lr.lrc_reclen); 1532 } 1533 DTRACE_PROBE1(zil__cw2, zilog_t *, zilog); 1534 1535 /* write the last block out */ 1536 if (lwb != NULL && lwb->lwb_zio != NULL) 1537 lwb = zil_lwb_write_start(zilog, lwb); 1538 1539 zilog->zl_cur_used = 0; 1540 1541 /* 1542 * Wait if necessary for the log blocks to be on stable storage. 1543 */ 1544 if (zilog->zl_root_zio) { 1545 error = zio_wait(zilog->zl_root_zio); 1546 zilog->zl_root_zio = NULL; 1547 zil_flush_vdevs(zilog); 1548 } 1549 1550 if (error || lwb == NULL) 1551 txg_wait_synced(zilog->zl_dmu_pool, 0); 1552 1553 mutex_enter(&zilog->zl_lock); 1554 1555 /* 1556 * Remember the highest committed log sequence number for ztest. 1557 * We only update this value when all the log writes succeeded, 1558 * because ztest wants to ASSERT that it got the whole log chain. 1559 */ 1560 if (error == 0 && lwb != NULL) 1561 zilog->zl_commit_lr_seq = zilog->zl_lr_seq; 1562 } 1563 1564 /* 1565 * Commit zfs transactions to stable storage. 1566 * If foid is 0 push out all transactions, otherwise push only those 1567 * for that object or might reference that object. 1568 * 1569 * itxs are committed in batches. In a heavily stressed zil there will be 1570 * a commit writer thread who is writing out a bunch of itxs to the log 1571 * for a set of committing threads (cthreads) in the same batch as the writer. 1572 * Those cthreads are all waiting on the same cv for that batch. 1573 * 1574 * There will also be a different and growing batch of threads that are 1575 * waiting to commit (qthreads). When the committing batch completes 1576 * a transition occurs such that the cthreads exit and the qthreads become 1577 * cthreads. One of the new cthreads becomes the writer thread for the 1578 * batch. Any new threads arriving become new qthreads. 1579 * 1580 * Only 2 condition variables are needed and there's no transition 1581 * between the two cvs needed. They just flip-flop between qthreads 1582 * and cthreads. 1583 * 1584 * Using this scheme we can efficiently wakeup up only those threads 1585 * that have been committed. 1586 */ 1587 void 1588 zil_commit(zilog_t *zilog, uint64_t foid) 1589 { 1590 uint64_t mybatch; 1591 1592 if (zilog->zl_sync == ZFS_SYNC_DISABLED) 1593 return; 1594 1595 /* move the async itxs for the foid to the sync queues */ 1596 zil_async_to_sync(zilog, foid); 1597 1598 mutex_enter(&zilog->zl_lock); 1599 mybatch = zilog->zl_next_batch; 1600 while (zilog->zl_writer) { 1601 cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock); 1602 if (mybatch <= zilog->zl_com_batch) { 1603 mutex_exit(&zilog->zl_lock); 1604 return; 1605 } 1606 } 1607 1608 zilog->zl_next_batch++; 1609 zilog->zl_writer = B_TRUE; 1610 zil_commit_writer(zilog); 1611 zilog->zl_com_batch = mybatch; 1612 zilog->zl_writer = B_FALSE; 1613 mutex_exit(&zilog->zl_lock); 1614 1615 /* wake up one thread to become the next writer */ 1616 cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]); 1617 1618 /* wake up all threads waiting for this batch to be committed */ 1619 cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]); 1620 } 1621 1622 /* 1623 * Called in syncing context to free committed log blocks and update log header. 1624 */ 1625 void 1626 zil_sync(zilog_t *zilog, dmu_tx_t *tx) 1627 { 1628 zil_header_t *zh = zil_header_in_syncing_context(zilog); 1629 uint64_t txg = dmu_tx_get_txg(tx); 1630 spa_t *spa = zilog->zl_spa; 1631 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK]; 1632 lwb_t *lwb; 1633 1634 /* 1635 * We don't zero out zl_destroy_txg, so make sure we don't try 1636 * to destroy it twice. 1637 */ 1638 if (spa_sync_pass(spa) != 1) 1639 return; 1640 1641 mutex_enter(&zilog->zl_lock); 1642 1643 ASSERT(zilog->zl_stop_sync == 0); 1644 1645 if (*replayed_seq != 0) { 1646 ASSERT(zh->zh_replay_seq < *replayed_seq); 1647 zh->zh_replay_seq = *replayed_seq; 1648 *replayed_seq = 0; 1649 } 1650 1651 if (zilog->zl_destroy_txg == txg) { 1652 blkptr_t blk = zh->zh_log; 1653 1654 ASSERT(list_head(&zilog->zl_lwb_list) == NULL); 1655 1656 bzero(zh, sizeof (zil_header_t)); 1657 bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq)); 1658 1659 if (zilog->zl_keep_first) { 1660 /* 1661 * If this block was part of log chain that couldn't 1662 * be claimed because a device was missing during 1663 * zil_claim(), but that device later returns, 1664 * then this block could erroneously appear valid. 1665 * To guard against this, assign a new GUID to the new 1666 * log chain so it doesn't matter what blk points to. 1667 */ 1668 zil_init_log_chain(zilog, &blk); 1669 zh->zh_log = blk; 1670 } 1671 } 1672 1673 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 1674 zh->zh_log = lwb->lwb_blk; 1675 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg) 1676 break; 1677 list_remove(&zilog->zl_lwb_list, lwb); 1678 zio_free_zil(spa, txg, &lwb->lwb_blk); 1679 kmem_cache_free(zil_lwb_cache, lwb); 1680 1681 /* 1682 * If we don't have anything left in the lwb list then 1683 * we've had an allocation failure and we need to zero 1684 * out the zil_header blkptr so that we don't end 1685 * up freeing the same block twice. 1686 */ 1687 if (list_head(&zilog->zl_lwb_list) == NULL) 1688 BP_ZERO(&zh->zh_log); 1689 } 1690 mutex_exit(&zilog->zl_lock); 1691 } 1692 1693 void 1694 zil_init(void) 1695 { 1696 zil_lwb_cache = kmem_cache_create("zil_lwb_cache", 1697 sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0); 1698 } 1699 1700 void 1701 zil_fini(void) 1702 { 1703 kmem_cache_destroy(zil_lwb_cache); 1704 } 1705 1706 void 1707 zil_set_sync(zilog_t *zilog, uint64_t sync) 1708 { 1709 zilog->zl_sync = sync; 1710 } 1711 1712 void 1713 zil_set_logbias(zilog_t *zilog, uint64_t logbias) 1714 { 1715 zilog->zl_logbias = logbias; 1716 } 1717 1718 zilog_t * 1719 zil_alloc(objset_t *os, zil_header_t *zh_phys) 1720 { 1721 zilog_t *zilog; 1722 1723 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); 1724 1725 zilog->zl_header = zh_phys; 1726 zilog->zl_os = os; 1727 zilog->zl_spa = dmu_objset_spa(os); 1728 zilog->zl_dmu_pool = dmu_objset_pool(os); 1729 zilog->zl_destroy_txg = TXG_INITIAL - 1; 1730 zilog->zl_logbias = dmu_objset_logbias(os); 1731 zilog->zl_sync = dmu_objset_syncprop(os); 1732 zilog->zl_next_batch = 1; 1733 1734 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); 1735 1736 for (int i = 0; i < TXG_SIZE; i++) { 1737 mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, 1738 MUTEX_DEFAULT, NULL); 1739 } 1740 1741 list_create(&zilog->zl_lwb_list, sizeof (lwb_t), 1742 offsetof(lwb_t, lwb_node)); 1743 1744 list_create(&zilog->zl_itx_commit_list, sizeof (itx_t), 1745 offsetof(itx_t, itx_node)); 1746 1747 mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 1748 1749 avl_create(&zilog->zl_vdev_tree, zil_vdev_compare, 1750 sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); 1751 1752 cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL); 1753 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); 1754 cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL); 1755 cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL); 1756 1757 return (zilog); 1758 } 1759 1760 void 1761 zil_free(zilog_t *zilog) 1762 { 1763 zilog->zl_stop_sync = 1; 1764 1765 ASSERT0(zilog->zl_suspend); 1766 ASSERT0(zilog->zl_suspending); 1767 1768 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 1769 list_destroy(&zilog->zl_lwb_list); 1770 1771 avl_destroy(&zilog->zl_vdev_tree); 1772 mutex_destroy(&zilog->zl_vdev_lock); 1773 1774 ASSERT(list_is_empty(&zilog->zl_itx_commit_list)); 1775 list_destroy(&zilog->zl_itx_commit_list); 1776 1777 for (int i = 0; i < TXG_SIZE; i++) { 1778 /* 1779 * It's possible for an itx to be generated that doesn't dirty 1780 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean() 1781 * callback to remove the entry. We remove those here. 1782 * 1783 * Also free up the ziltest itxs. 1784 */ 1785 if (zilog->zl_itxg[i].itxg_itxs) 1786 zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs); 1787 mutex_destroy(&zilog->zl_itxg[i].itxg_lock); 1788 } 1789 1790 mutex_destroy(&zilog->zl_lock); 1791 1792 cv_destroy(&zilog->zl_cv_writer); 1793 cv_destroy(&zilog->zl_cv_suspend); 1794 cv_destroy(&zilog->zl_cv_batch[0]); 1795 cv_destroy(&zilog->zl_cv_batch[1]); 1796 1797 kmem_free(zilog, sizeof (zilog_t)); 1798 } 1799 1800 /* 1801 * Open an intent log. 1802 */ 1803 zilog_t * 1804 zil_open(objset_t *os, zil_get_data_t *get_data) 1805 { 1806 zilog_t *zilog = dmu_objset_zil(os); 1807 1808 ASSERT(zilog->zl_clean_taskq == NULL); 1809 ASSERT(zilog->zl_get_data == NULL); 1810 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 1811 1812 zilog->zl_get_data = get_data; 1813 zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri, 1814 2, 2, TASKQ_PREPOPULATE); 1815 1816 return (zilog); 1817 } 1818 1819 /* 1820 * Close an intent log. 1821 */ 1822 void 1823 zil_close(zilog_t *zilog) 1824 { 1825 lwb_t *lwb; 1826 uint64_t txg = 0; 1827 1828 zil_commit(zilog, 0); /* commit all itx */ 1829 1830 /* 1831 * The lwb_max_txg for the stubby lwb will reflect the last activity 1832 * for the zil. After a txg_wait_synced() on the txg we know all the 1833 * callbacks have occurred that may clean the zil. Only then can we 1834 * destroy the zl_clean_taskq. 1835 */ 1836 mutex_enter(&zilog->zl_lock); 1837 lwb = list_tail(&zilog->zl_lwb_list); 1838 if (lwb != NULL) 1839 txg = lwb->lwb_max_txg; 1840 mutex_exit(&zilog->zl_lock); 1841 if (txg) 1842 txg_wait_synced(zilog->zl_dmu_pool, txg); 1843 1844 if (zilog_is_dirty(zilog)) 1845 zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg); 1846 VERIFY(!zilog_is_dirty(zilog)); 1847 1848 taskq_destroy(zilog->zl_clean_taskq); 1849 zilog->zl_clean_taskq = NULL; 1850 zilog->zl_get_data = NULL; 1851 1852 /* 1853 * We should have only one LWB left on the list; remove it now. 1854 */ 1855 mutex_enter(&zilog->zl_lock); 1856 lwb = list_head(&zilog->zl_lwb_list); 1857 if (lwb != NULL) { 1858 ASSERT(lwb == list_tail(&zilog->zl_lwb_list)); 1859 list_remove(&zilog->zl_lwb_list, lwb); 1860 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 1861 kmem_cache_free(zil_lwb_cache, lwb); 1862 } 1863 mutex_exit(&zilog->zl_lock); 1864 } 1865 1866 static char *suspend_tag = "zil suspending"; 1867 1868 /* 1869 * Suspend an intent log. While in suspended mode, we still honor 1870 * synchronous semantics, but we rely on txg_wait_synced() to do it. 1871 * On old version pools, we suspend the log briefly when taking a 1872 * snapshot so that it will have an empty intent log. 1873 * 1874 * Long holds are not really intended to be used the way we do here -- 1875 * held for such a short time. A concurrent caller of dsl_dataset_long_held() 1876 * could fail. Therefore we take pains to only put a long hold if it is 1877 * actually necessary. Fortunately, it will only be necessary if the 1878 * objset is currently mounted (or the ZVOL equivalent). In that case it 1879 * will already have a long hold, so we are not really making things any worse. 1880 * 1881 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or 1882 * zvol_state_t), and use their mechanism to prevent their hold from being 1883 * dropped (e.g. VFS_HOLD()). However, that would be even more pain for 1884 * very little gain. 1885 * 1886 * if cookiep == NULL, this does both the suspend & resume. 1887 * Otherwise, it returns with the dataset "long held", and the cookie 1888 * should be passed into zil_resume(). 1889 */ 1890 int 1891 zil_suspend(const char *osname, void **cookiep) 1892 { 1893 objset_t *os; 1894 zilog_t *zilog; 1895 const zil_header_t *zh; 1896 int error; 1897 1898 error = dmu_objset_hold(osname, suspend_tag, &os); 1899 if (error != 0) 1900 return (error); 1901 zilog = dmu_objset_zil(os); 1902 1903 mutex_enter(&zilog->zl_lock); 1904 zh = zilog->zl_header; 1905 1906 if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ 1907 mutex_exit(&zilog->zl_lock); 1908 dmu_objset_rele(os, suspend_tag); 1909 return (SET_ERROR(EBUSY)); 1910 } 1911 1912 /* 1913 * Don't put a long hold in the cases where we can avoid it. This 1914 * is when there is no cookie so we are doing a suspend & resume 1915 * (i.e. called from zil_vdev_offline()), and there's nothing to do 1916 * for the suspend because it's already suspended, or there's no ZIL. 1917 */ 1918 if (cookiep == NULL && !zilog->zl_suspending && 1919 (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) { 1920 mutex_exit(&zilog->zl_lock); 1921 dmu_objset_rele(os, suspend_tag); 1922 return (0); 1923 } 1924 1925 dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag); 1926 dsl_pool_rele(dmu_objset_pool(os), suspend_tag); 1927 1928 zilog->zl_suspend++; 1929 1930 if (zilog->zl_suspend > 1) { 1931 /* 1932 * Someone else is already suspending it. 1933 * Just wait for them to finish. 1934 */ 1935 1936 while (zilog->zl_suspending) 1937 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); 1938 mutex_exit(&zilog->zl_lock); 1939 1940 if (cookiep == NULL) 1941 zil_resume(os); 1942 else 1943 *cookiep = os; 1944 return (0); 1945 } 1946 1947 /* 1948 * If there is no pointer to an on-disk block, this ZIL must not 1949 * be active (e.g. filesystem not mounted), so there's nothing 1950 * to clean up. 1951 */ 1952 if (BP_IS_HOLE(&zh->zh_log)) { 1953 ASSERT(cookiep != NULL); /* fast path already handled */ 1954 1955 *cookiep = os; 1956 mutex_exit(&zilog->zl_lock); 1957 return (0); 1958 } 1959 1960 zilog->zl_suspending = B_TRUE; 1961 mutex_exit(&zilog->zl_lock); 1962 1963 zil_commit(zilog, 0); 1964 1965 zil_destroy(zilog, B_FALSE); 1966 1967 mutex_enter(&zilog->zl_lock); 1968 zilog->zl_suspending = B_FALSE; 1969 cv_broadcast(&zilog->zl_cv_suspend); 1970 mutex_exit(&zilog->zl_lock); 1971 1972 if (cookiep == NULL) 1973 zil_resume(os); 1974 else 1975 *cookiep = os; 1976 return (0); 1977 } 1978 1979 void 1980 zil_resume(void *cookie) 1981 { 1982 objset_t *os = cookie; 1983 zilog_t *zilog = dmu_objset_zil(os); 1984 1985 mutex_enter(&zilog->zl_lock); 1986 ASSERT(zilog->zl_suspend != 0); 1987 zilog->zl_suspend--; 1988 mutex_exit(&zilog->zl_lock); 1989 dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); 1990 dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); 1991 } 1992 1993 typedef struct zil_replay_arg { 1994 zil_replay_func_t **zr_replay; 1995 void *zr_arg; 1996 boolean_t zr_byteswap; 1997 char *zr_lr; 1998 } zil_replay_arg_t; 1999 2000 static int 2001 zil_replay_error(zilog_t *zilog, lr_t *lr, int error) 2002 { 2003 char name[ZFS_MAX_DATASET_NAME_LEN]; 2004 2005 zilog->zl_replaying_seq--; /* didn't actually replay this one */ 2006 2007 dmu_objset_name(zilog->zl_os, name); 2008 2009 cmn_err(CE_WARN, "ZFS replay transaction error %d, " 2010 "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name, 2011 (u_longlong_t)lr->lrc_seq, 2012 (u_longlong_t)(lr->lrc_txtype & ~TX_CI), 2013 (lr->lrc_txtype & TX_CI) ? "CI" : ""); 2014 2015 return (error); 2016 } 2017 2018 static int 2019 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg) 2020 { 2021 zil_replay_arg_t *zr = zra; 2022 const zil_header_t *zh = zilog->zl_header; 2023 uint64_t reclen = lr->lrc_reclen; 2024 uint64_t txtype = lr->lrc_txtype; 2025 int error = 0; 2026 2027 zilog->zl_replaying_seq = lr->lrc_seq; 2028 2029 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ 2030 return (0); 2031 2032 if (lr->lrc_txg < claim_txg) /* already committed */ 2033 return (0); 2034 2035 /* Strip case-insensitive bit, still present in log record */ 2036 txtype &= ~TX_CI; 2037 2038 if (txtype == 0 || txtype >= TX_MAX_TYPE) 2039 return (zil_replay_error(zilog, lr, EINVAL)); 2040 2041 /* 2042 * If this record type can be logged out of order, the object 2043 * (lr_foid) may no longer exist. That's legitimate, not an error. 2044 */ 2045 if (TX_OOO(txtype)) { 2046 error = dmu_object_info(zilog->zl_os, 2047 ((lr_ooo_t *)lr)->lr_foid, NULL); 2048 if (error == ENOENT || error == EEXIST) 2049 return (0); 2050 } 2051 2052 /* 2053 * Make a copy of the data so we can revise and extend it. 2054 */ 2055 bcopy(lr, zr->zr_lr, reclen); 2056 2057 /* 2058 * If this is a TX_WRITE with a blkptr, suck in the data. 2059 */ 2060 if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { 2061 error = zil_read_log_data(zilog, (lr_write_t *)lr, 2062 zr->zr_lr + reclen); 2063 if (error != 0) 2064 return (zil_replay_error(zilog, lr, error)); 2065 } 2066 2067 /* 2068 * The log block containing this lr may have been byteswapped 2069 * so that we can easily examine common fields like lrc_txtype. 2070 * However, the log is a mix of different record types, and only the 2071 * replay vectors know how to byteswap their records. Therefore, if 2072 * the lr was byteswapped, undo it before invoking the replay vector. 2073 */ 2074 if (zr->zr_byteswap) 2075 byteswap_uint64_array(zr->zr_lr, reclen); 2076 2077 /* 2078 * We must now do two things atomically: replay this log record, 2079 * and update the log header sequence number to reflect the fact that 2080 * we did so. At the end of each replay function the sequence number 2081 * is updated if we are in replay mode. 2082 */ 2083 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); 2084 if (error != 0) { 2085 /* 2086 * The DMU's dnode layer doesn't see removes until the txg 2087 * commits, so a subsequent claim can spuriously fail with 2088 * EEXIST. So if we receive any error we try syncing out 2089 * any removes then retry the transaction. Note that we 2090 * specify B_FALSE for byteswap now, so we don't do it twice. 2091 */ 2092 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); 2093 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); 2094 if (error != 0) 2095 return (zil_replay_error(zilog, lr, error)); 2096 } 2097 return (0); 2098 } 2099 2100 /* ARGSUSED */ 2101 static int 2102 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg) 2103 { 2104 zilog->zl_replay_blks++; 2105 2106 return (0); 2107 } 2108 2109 /* 2110 * If this dataset has a non-empty intent log, replay it and destroy it. 2111 */ 2112 void 2113 zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE]) 2114 { 2115 zilog_t *zilog = dmu_objset_zil(os); 2116 const zil_header_t *zh = zilog->zl_header; 2117 zil_replay_arg_t zr; 2118 2119 if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) { 2120 zil_destroy(zilog, B_TRUE); 2121 return; 2122 } 2123 2124 zr.zr_replay = replay_func; 2125 zr.zr_arg = arg; 2126 zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log); 2127 zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP); 2128 2129 /* 2130 * Wait for in-progress removes to sync before starting replay. 2131 */ 2132 txg_wait_synced(zilog->zl_dmu_pool, 0); 2133 2134 zilog->zl_replay = B_TRUE; 2135 zilog->zl_replay_time = ddi_get_lbolt(); 2136 ASSERT(zilog->zl_replay_blks == 0); 2137 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr, 2138 zh->zh_claim_txg); 2139 kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE); 2140 2141 zil_destroy(zilog, B_FALSE); 2142 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 2143 zilog->zl_replay = B_FALSE; 2144 } 2145 2146 boolean_t 2147 zil_replaying(zilog_t *zilog, dmu_tx_t *tx) 2148 { 2149 if (zilog->zl_sync == ZFS_SYNC_DISABLED) 2150 return (B_TRUE); 2151 2152 if (zilog->zl_replay) { 2153 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 2154 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = 2155 zilog->zl_replaying_seq; 2156 return (B_TRUE); 2157 } 2158 2159 return (B_FALSE); 2160 } 2161 2162 /* ARGSUSED */ 2163 int 2164 zil_vdev_offline(const char *osname, void *arg) 2165 { 2166 int error; 2167 2168 error = zil_suspend(osname, NULL); 2169 if (error != 0) 2170 return (SET_ERROR(EEXIST)); 2171 return (0); 2172 } 2173