1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2018 by Delphix. All rights reserved. 24 * Copyright (c) 2014 Integros [integros.com] 25 * Copyright (c) 2018 Datto Inc. 26 */ 27 28 /* Portions Copyright 2010 Robert Milkowski */ 29 30 #include <sys/zfs_context.h> 31 #include <sys/spa.h> 32 #include <sys/spa_impl.h> 33 #include <sys/dmu.h> 34 #include <sys/zap.h> 35 #include <sys/arc.h> 36 #include <sys/stat.h> 37 #include <sys/zil.h> 38 #include <sys/zil_impl.h> 39 #include <sys/dsl_dataset.h> 40 #include <sys/vdev_impl.h> 41 #include <sys/dmu_tx.h> 42 #include <sys/dsl_pool.h> 43 #include <sys/metaslab.h> 44 #include <sys/trace_zfs.h> 45 #include <sys/abd.h> 46 47 /* 48 * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system 49 * calls that change the file system. Each itx has enough information to 50 * be able to replay them after a system crash, power loss, or 51 * equivalent failure mode. These are stored in memory until either: 52 * 53 * 1. they are committed to the pool by the DMU transaction group 54 * (txg), at which point they can be discarded; or 55 * 2. they are committed to the on-disk ZIL for the dataset being 56 * modified (e.g. due to an fsync, O_DSYNC, or other synchronous 57 * requirement). 58 * 59 * In the event of a crash or power loss, the itxs contained by each 60 * dataset's on-disk ZIL will be replayed when that dataset is first 61 * instantiated (e.g. if the dataset is a normal filesystem, when it is 62 * first mounted). 63 * 64 * As hinted at above, there is one ZIL per dataset (both the in-memory 65 * representation, and the on-disk representation). The on-disk format 66 * consists of 3 parts: 67 * 68 * - a single, per-dataset, ZIL header; which points to a chain of 69 * - zero or more ZIL blocks; each of which contains 70 * - zero or more ZIL records 71 * 72 * A ZIL record holds the information necessary to replay a single 73 * system call transaction. A ZIL block can hold many ZIL records, and 74 * the blocks are chained together, similarly to a singly linked list. 75 * 76 * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL 77 * block in the chain, and the ZIL header points to the first block in 78 * the chain. 79 * 80 * Note, there is not a fixed place in the pool to hold these ZIL 81 * blocks; they are dynamically allocated and freed as needed from the 82 * blocks available on the pool, though they can be preferentially 83 * allocated from a dedicated "log" vdev. 
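 *
 * An illustrative sketch of the on-disk layout (simplified; where the
 * chain header lives within a block depends on the block's checksum
 * type, see zil_read_log_block()):
 *
 *   zil_header_t            ZIL block                 ZIL block
 *   +-------------+    +------------------+     +------------------+
 *   | zh_log -----+--->| lr, lr, lr, ...  |  +->| lr, lr, ...      |
 *   +-------------+    | zc_next_blk -----+--+  | zc_next_blk ...  |
 *                      +------------------+     +------------------+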
84 */ 85 86 /* 87 * This controls the amount of time that a ZIL block (lwb) will remain 88 * "open" when it isn't "full", and it has a thread waiting for it to be 89 * committed to stable storage. Please refer to the zil_commit_waiter() 90 * function (and the comments within it) for more details. 91 */ 92 int zfs_commit_timeout_pct = 5; 93 94 /* 95 * See zil.h for more information about these fields. 96 */ 97 zil_stats_t zil_stats = { 98 { "zil_commit_count", KSTAT_DATA_UINT64 }, 99 { "zil_commit_writer_count", KSTAT_DATA_UINT64 }, 100 { "zil_itx_count", KSTAT_DATA_UINT64 }, 101 { "zil_itx_indirect_count", KSTAT_DATA_UINT64 }, 102 { "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 }, 103 { "zil_itx_copied_count", KSTAT_DATA_UINT64 }, 104 { "zil_itx_copied_bytes", KSTAT_DATA_UINT64 }, 105 { "zil_itx_needcopy_count", KSTAT_DATA_UINT64 }, 106 { "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 }, 107 { "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 }, 108 { "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 }, 109 { "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 }, 110 { "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 }, 111 }; 112 113 static kstat_t *zil_ksp; 114 115 /* 116 * Disable intent logging replay. This global ZIL switch affects all pools. 117 */ 118 int zil_replay_disable = 0; 119 120 /* 121 * Disable the DKIOCFLUSHWRITECACHE commands that are normally sent to 122 * the disk(s) by the ZIL after an LWB write has completed. Setting this 123 * will cause ZIL corruption on power loss if a volatile out-of-order 124 * write cache is enabled. 125 */ 126 int zil_nocacheflush = 0; 127 128 /* 129 * Limit SLOG write size per commit executed with synchronous priority. 130 * Any writes above that will be executed with lower (asynchronous) priority 131 * to limit potential SLOG device abuse by single active ZIL writer. 
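 *
 * For example, with the default of 768KB: once the commit's zl_cur_used
 * grows past this value, subsequent lwb writes to a slog device are
 * issued at ZIO_PRIORITY_ASYNC_WRITE rather than ZIO_PRIORITY_SYNC_WRITE
 * (see the priority selection in zil_lwb_write_open()).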
132 */ 133 unsigned long zil_slog_bulk = 768 * 1024; 134 135 static kmem_cache_t *zil_lwb_cache; 136 static kmem_cache_t *zil_zcw_cache; 137 138 #define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \ 139 sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused)) 140 141 static int 142 zil_bp_compare(const void *x1, const void *x2) 143 { 144 const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva; 145 const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva; 146 147 int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2)); 148 if (likely(cmp)) 149 return (cmp); 150 151 return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2))); 152 } 153 154 static void 155 zil_bp_tree_init(zilog_t *zilog) 156 { 157 avl_create(&zilog->zl_bp_tree, zil_bp_compare, 158 sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node)); 159 } 160 161 static void 162 zil_bp_tree_fini(zilog_t *zilog) 163 { 164 avl_tree_t *t = &zilog->zl_bp_tree; 165 zil_bp_node_t *zn; 166 void *cookie = NULL; 167 168 while ((zn = avl_destroy_nodes(t, &cookie)) != NULL) 169 kmem_free(zn, sizeof (zil_bp_node_t)); 170 171 avl_destroy(t); 172 } 173 174 int 175 zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp) 176 { 177 avl_tree_t *t = &zilog->zl_bp_tree; 178 const dva_t *dva; 179 zil_bp_node_t *zn; 180 avl_index_t where; 181 182 if (BP_IS_EMBEDDED(bp)) 183 return (0); 184 185 dva = BP_IDENTITY(bp); 186 187 if (avl_find(t, dva, &where) != NULL) 188 return (SET_ERROR(EEXIST)); 189 190 zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP); 191 zn->zn_dva = *dva; 192 avl_insert(t, zn, where); 193 194 return (0); 195 } 196 197 static zil_header_t * 198 zil_header_in_syncing_context(zilog_t *zilog) 199 { 200 return ((zil_header_t *)zilog->zl_header); 201 } 202 203 static void 204 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp) 205 { 206 zio_cksum_t *zc = &bp->blk_cksum; 207 208 (void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0], 209 sizeof (zc->zc_word[ZIL_ZC_GUID_0])); 210 (void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1], 211 sizeof (zc->zc_word[ZIL_ZC_GUID_1])); 212 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os); 213 zc->zc_word[ZIL_ZC_SEQ] = 1ULL; 214 } 215 216 /* 217 * Read a log block and make sure it's valid. 218 */ 219 static int 220 zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp, 221 blkptr_t *nbp, void *dst, char **end) 222 { 223 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 224 arc_flags_t aflags = ARC_FLAG_WAIT; 225 arc_buf_t *abuf = NULL; 226 zbookmark_phys_t zb; 227 int error; 228 229 if (zilog->zl_header->zh_claim_txg == 0) 230 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 231 232 if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 233 zio_flags |= ZIO_FLAG_SPECULATIVE; 234 235 if (!decrypt) 236 zio_flags |= ZIO_FLAG_RAW; 237 238 SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET], 239 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]); 240 241 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, 242 &abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 243 244 if (error == 0) { 245 zio_cksum_t cksum = bp->blk_cksum; 246 247 /* 248 * Validate the checksummed log block. 249 * 250 * Sequence numbers should be... sequential. The checksum 251 * verifier for the next block should be bp's checksum plus 1. 252 * 253 * Also check the log chain linkage and size used. 
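 *
 * For example, if this block's ZIL_ZC_SEQ checksum word is 7, the
 * next-block pointer embedded in this block must carry 8 in that word
 * (and matching GUID/objset words); any mismatch, or a hole where the
 * next block should be, is treated as the end of the chain (ECKSUM).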
254 */ 255 cksum.zc_word[ZIL_ZC_SEQ]++; 256 257 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 258 zil_chain_t *zilc = abuf->b_data; 259 char *lr = (char *)(zilc + 1); 260 uint64_t len = zilc->zc_nused - sizeof (zil_chain_t); 261 262 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 263 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) { 264 error = SET_ERROR(ECKSUM); 265 } else { 266 ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE); 267 bcopy(lr, dst, len); 268 *end = (char *)dst + len; 269 *nbp = zilc->zc_next_blk; 270 } 271 } else { 272 char *lr = abuf->b_data; 273 uint64_t size = BP_GET_LSIZE(bp); 274 zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1; 275 276 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 277 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) || 278 (zilc->zc_nused > (size - sizeof (*zilc)))) { 279 error = SET_ERROR(ECKSUM); 280 } else { 281 ASSERT3U(zilc->zc_nused, <=, 282 SPA_OLD_MAXBLOCKSIZE); 283 bcopy(lr, dst, zilc->zc_nused); 284 *end = (char *)dst + zilc->zc_nused; 285 *nbp = zilc->zc_next_blk; 286 } 287 } 288 289 arc_buf_destroy(abuf, &abuf); 290 } 291 292 return (error); 293 } 294 295 /* 296 * Read a TX_WRITE log data block. 297 */ 298 static int 299 zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf) 300 { 301 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 302 const blkptr_t *bp = &lr->lr_blkptr; 303 arc_flags_t aflags = ARC_FLAG_WAIT; 304 arc_buf_t *abuf = NULL; 305 zbookmark_phys_t zb; 306 int error; 307 308 if (BP_IS_HOLE(bp)) { 309 if (wbuf != NULL) 310 bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length)); 311 return (0); 312 } 313 314 if (zilog->zl_header->zh_claim_txg == 0) 315 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 316 317 /* 318 * If we are not using the resulting data, we are just checking that 319 * it hasn't been corrupted so we don't need to waste CPU time 320 * decompressing and decrypting it. 321 */ 322 if (wbuf == NULL) 323 zio_flags |= ZIO_FLAG_RAW; 324 325 SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid, 326 ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp)); 327 328 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 329 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 330 331 if (error == 0) { 332 if (wbuf != NULL) 333 bcopy(abuf->b_data, wbuf, arc_buf_size(abuf)); 334 arc_buf_destroy(abuf, &abuf); 335 } 336 337 return (error); 338 } 339 340 /* 341 * Parse the intent log, and call parse_func for each valid record within. 342 */ 343 int 344 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, 345 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg, 346 boolean_t decrypt) 347 { 348 const zil_header_t *zh = zilog->zl_header; 349 boolean_t claimed = !!zh->zh_claim_txg; 350 uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX; 351 uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX; 352 uint64_t max_blk_seq = 0; 353 uint64_t max_lr_seq = 0; 354 uint64_t blk_count = 0; 355 uint64_t lr_count = 0; 356 blkptr_t blk, next_blk; 357 char *lrbuf, *lrp; 358 int error = 0; 359 360 bzero(&next_blk, sizeof (blkptr_t)); 361 362 /* 363 * Old logs didn't record the maximum zh_claim_lr_seq. 364 */ 365 if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 366 claim_lr_seq = UINT64_MAX; 367 368 /* 369 * Starting at the block pointed to by zh_log we read the log chain. 370 * For each block in the chain we strongly check that block to 371 * ensure its validity. We stop when an invalid block is found. 372 * For each block pointer in the chain we call parse_blk_func(). 
373 * For each record in each valid block we call parse_lr_func(). 374 * If the log has been claimed, stop if we encounter a sequence 375 * number greater than the highest claimed sequence number. 376 */ 377 lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE); 378 zil_bp_tree_init(zilog); 379 380 for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) { 381 uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ]; 382 int reclen; 383 char *end = NULL; 384 385 if (blk_seq > claim_blk_seq) 386 break; 387 388 error = parse_blk_func(zilog, &blk, arg, txg); 389 if (error != 0) 390 break; 391 ASSERT3U(max_blk_seq, <, blk_seq); 392 max_blk_seq = blk_seq; 393 blk_count++; 394 395 if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq) 396 break; 397 398 error = zil_read_log_block(zilog, decrypt, &blk, &next_blk, 399 lrbuf, &end); 400 if (error != 0) 401 break; 402 403 for (lrp = lrbuf; lrp < end; lrp += reclen) { 404 lr_t *lr = (lr_t *)lrp; 405 reclen = lr->lrc_reclen; 406 ASSERT3U(reclen, >=, sizeof (lr_t)); 407 if (lr->lrc_seq > claim_lr_seq) 408 goto done; 409 410 error = parse_lr_func(zilog, lr, arg, txg); 411 if (error != 0) 412 goto done; 413 ASSERT3U(max_lr_seq, <, lr->lrc_seq); 414 max_lr_seq = lr->lrc_seq; 415 lr_count++; 416 } 417 } 418 done: 419 zilog->zl_parse_error = error; 420 zilog->zl_parse_blk_seq = max_blk_seq; 421 zilog->zl_parse_lr_seq = max_lr_seq; 422 zilog->zl_parse_blk_count = blk_count; 423 zilog->zl_parse_lr_count = lr_count; 424 425 ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) || 426 (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq) || 427 (decrypt && error == EIO)); 428 429 zil_bp_tree_fini(zilog); 430 zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE); 431 432 return (error); 433 } 434 435 /* ARGSUSED */ 436 static int 437 zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx, 438 uint64_t first_txg) 439 { 440 ASSERT(!BP_IS_HOLE(bp)); 441 442 /* 443 * As we call this function from the context of a rewind to a 444 * checkpoint, each ZIL block whose txg is later than the txg 445 * that we rewind to is invalid. Thus, we return -1 so 446 * zil_parse() doesn't attempt to read it. 447 */ 448 if (bp->blk_birth >= first_txg) 449 return (-1); 450 451 if (zil_bp_tree_add(zilog, bp) != 0) 452 return (0); 453 454 zio_free(zilog->zl_spa, first_txg, bp); 455 return (0); 456 } 457 458 /* ARGSUSED */ 459 static int 460 zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx, 461 uint64_t first_txg) 462 { 463 return (0); 464 } 465 466 static int 467 zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx, 468 uint64_t first_txg) 469 { 470 /* 471 * Claim log block if not already committed and not already claimed. 472 * If tx == NULL, just verify that the block is claimable. 473 */ 474 if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg || 475 zil_bp_tree_add(zilog, bp) != 0) 476 return (0); 477 478 return (zio_wait(zio_claim(NULL, zilog->zl_spa, 479 tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL, 480 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB))); 481 } 482 483 static int 484 zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx, 485 uint64_t first_txg) 486 { 487 lr_write_t *lr = (lr_write_t *)lrc; 488 int error; 489 490 if (lrc->lrc_txtype != TX_WRITE) 491 return (0); 492 493 /* 494 * If the block is not readable, don't claim it. This can happen 495 * in normal operation when a log block is written to disk before 496 * some of the dmu_sync() blocks it points to. 
In this case, the 497 * transaction cannot have been committed to anyone (we would have 498 * waited for all writes to be stable first), so it is semantically 499 * correct to declare this the end of the log. 500 */ 501 if (lr->lr_blkptr.blk_birth >= first_txg) { 502 error = zil_read_log_data(zilog, lr, NULL); 503 if (error != 0) 504 return (error); 505 } 506 507 return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg)); 508 } 509 510 /* ARGSUSED */ 511 static int 512 zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx, 513 uint64_t claim_txg) 514 { 515 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 516 517 return (0); 518 } 519 520 static int 521 zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx, 522 uint64_t claim_txg) 523 { 524 lr_write_t *lr = (lr_write_t *)lrc; 525 blkptr_t *bp = &lr->lr_blkptr; 526 527 /* 528 * If we previously claimed it, we need to free it. 529 */ 530 if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE && 531 bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 && 532 !BP_IS_HOLE(bp)) 533 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 534 535 return (0); 536 } 537 538 static int 539 zil_lwb_vdev_compare(const void *x1, const void *x2) 540 { 541 const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev; 542 const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev; 543 544 return (TREE_CMP(v1, v2)); 545 } 546 547 static lwb_t * 548 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg, 549 boolean_t fastwrite) 550 { 551 lwb_t *lwb; 552 553 lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP); 554 lwb->lwb_zilog = zilog; 555 lwb->lwb_blk = *bp; 556 lwb->lwb_fastwrite = fastwrite; 557 lwb->lwb_slog = slog; 558 lwb->lwb_state = LWB_STATE_CLOSED; 559 lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp)); 560 lwb->lwb_max_txg = txg; 561 lwb->lwb_write_zio = NULL; 562 lwb->lwb_root_zio = NULL; 563 lwb->lwb_tx = NULL; 564 lwb->lwb_issued_timestamp = 0; 565 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 566 lwb->lwb_nused = sizeof (zil_chain_t); 567 lwb->lwb_sz = BP_GET_LSIZE(bp); 568 } else { 569 lwb->lwb_nused = 0; 570 lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t); 571 } 572 573 mutex_enter(&zilog->zl_lock); 574 list_insert_tail(&zilog->zl_lwb_list, lwb); 575 mutex_exit(&zilog->zl_lock); 576 577 ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock)); 578 ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); 579 VERIFY(list_is_empty(&lwb->lwb_waiters)); 580 VERIFY(list_is_empty(&lwb->lwb_itxs)); 581 582 return (lwb); 583 } 584 585 static void 586 zil_free_lwb(zilog_t *zilog, lwb_t *lwb) 587 { 588 ASSERT(MUTEX_HELD(&zilog->zl_lock)); 589 ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock)); 590 VERIFY(list_is_empty(&lwb->lwb_waiters)); 591 VERIFY(list_is_empty(&lwb->lwb_itxs)); 592 ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); 593 ASSERT3P(lwb->lwb_write_zio, ==, NULL); 594 ASSERT3P(lwb->lwb_root_zio, ==, NULL); 595 ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa)); 596 ASSERT(lwb->lwb_state == LWB_STATE_CLOSED || 597 lwb->lwb_state == LWB_STATE_FLUSH_DONE); 598 599 /* 600 * Clear the zilog's field to indicate this lwb is no longer 601 * valid, and prevent use-after-free errors. 602 */ 603 if (zilog->zl_last_lwb_opened == lwb) 604 zilog->zl_last_lwb_opened = NULL; 605 606 kmem_cache_free(zil_lwb_cache, lwb); 607 } 608 609 /* 610 * Called when we create in-memory log transactions so that we know 611 * to cleanup the itxs at the end of spa_sync(). 
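 *
 * Specifically, the zilog is added to the pool's dp_dirty_zilogs list
 * for the given txg and a hold is taken on the dataset's dbuf; both are
 * released once that txg has synced and zil_clean() has been given the
 * chance to dispatch cleanup of the itxs.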
612 */ 613 static void 614 zilog_dirty(zilog_t *zilog, uint64_t txg) 615 { 616 dsl_pool_t *dp = zilog->zl_dmu_pool; 617 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os); 618 619 ASSERT(spa_writeable(zilog->zl_spa)); 620 621 if (ds->ds_is_snapshot) 622 panic("dirtying snapshot!"); 623 624 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) { 625 /* up the hold count until we can be written out */ 626 dmu_buf_add_ref(ds->ds_dbuf, zilog); 627 628 zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg); 629 } 630 } 631 632 /* 633 * Determine if the zil is dirty in the specified txg. Callers wanting to 634 * ensure that the dirty state does not change must hold the itxg_lock for 635 * the specified txg. Holding the lock will ensure that the zil cannot be 636 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current 637 * state. 638 */ 639 static boolean_t __maybe_unused 640 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg) 641 { 642 dsl_pool_t *dp = zilog->zl_dmu_pool; 643 644 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK)) 645 return (B_TRUE); 646 return (B_FALSE); 647 } 648 649 /* 650 * Determine if the zil is dirty. The zil is considered dirty if it has 651 * any pending itx records that have not been cleaned by zil_clean(). 652 */ 653 static boolean_t 654 zilog_is_dirty(zilog_t *zilog) 655 { 656 dsl_pool_t *dp = zilog->zl_dmu_pool; 657 658 for (int t = 0; t < TXG_SIZE; t++) { 659 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t)) 660 return (B_TRUE); 661 } 662 return (B_FALSE); 663 } 664 665 /* 666 * Create an on-disk intent log. 667 */ 668 static lwb_t * 669 zil_create(zilog_t *zilog) 670 { 671 const zil_header_t *zh = zilog->zl_header; 672 lwb_t *lwb = NULL; 673 uint64_t txg = 0; 674 dmu_tx_t *tx = NULL; 675 blkptr_t blk; 676 int error = 0; 677 boolean_t fastwrite = FALSE; 678 boolean_t slog = FALSE; 679 680 /* 681 * Wait for any previous destroy to complete. 682 */ 683 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 684 685 ASSERT(zh->zh_claim_txg == 0); 686 ASSERT(zh->zh_replay_seq == 0); 687 688 blk = zh->zh_log; 689 690 /* 691 * Allocate an initial log block if: 692 * - there isn't one already 693 * - the existing block is the wrong endianness 694 */ 695 if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) { 696 tx = dmu_tx_create(zilog->zl_os); 697 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 698 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 699 txg = dmu_tx_get_txg(tx); 700 701 if (!BP_IS_HOLE(&blk)) { 702 zio_free(zilog->zl_spa, txg, &blk); 703 BP_ZERO(&blk); 704 } 705 706 error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk, 707 ZIL_MIN_BLKSZ, &slog); 708 fastwrite = TRUE; 709 710 if (error == 0) 711 zil_init_log_chain(zilog, &blk); 712 } 713 714 /* 715 * Allocate a log write block (lwb) for the first log block. 716 */ 717 if (error == 0) 718 lwb = zil_alloc_lwb(zilog, &blk, slog, txg, fastwrite); 719 720 /* 721 * If we just allocated the first log block, commit our transaction 722 * and wait for zil_sync() to stuff the block pointer into zh_log. 723 * (zh is part of the MOS, so we cannot modify it in open context.) 724 */ 725 if (tx != NULL) { 726 dmu_tx_commit(tx); 727 txg_wait_synced(zilog->zl_dmu_pool, txg); 728 } 729 730 ASSERT(error != 0 || bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0); 731 IMPLY(error == 0, lwb != NULL); 732 733 return (lwb); 734 } 735 736 /* 737 * In one tx, free all log blocks and clear the log header. If keep_first 738 * is set, then we're replaying a log with no content. 
We want to keep the 739 * first block, however, so that the first synchronous transaction doesn't 740 * require a txg_wait_synced() in zil_create(). We don't need to 741 * txg_wait_synced() here either when keep_first is set, because both 742 * zil_create() and zil_destroy() will wait for any in-progress destroys 743 * to complete. 744 */ 745 void 746 zil_destroy(zilog_t *zilog, boolean_t keep_first) 747 { 748 const zil_header_t *zh = zilog->zl_header; 749 lwb_t *lwb; 750 dmu_tx_t *tx; 751 uint64_t txg; 752 753 /* 754 * Wait for any previous destroy to complete. 755 */ 756 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 757 758 zilog->zl_old_header = *zh; /* debugging aid */ 759 760 if (BP_IS_HOLE(&zh->zh_log)) 761 return; 762 763 tx = dmu_tx_create(zilog->zl_os); 764 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 765 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 766 txg = dmu_tx_get_txg(tx); 767 768 mutex_enter(&zilog->zl_lock); 769 770 ASSERT3U(zilog->zl_destroy_txg, <, txg); 771 zilog->zl_destroy_txg = txg; 772 zilog->zl_keep_first = keep_first; 773 774 if (!list_is_empty(&zilog->zl_lwb_list)) { 775 ASSERT(zh->zh_claim_txg == 0); 776 VERIFY(!keep_first); 777 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 778 if (lwb->lwb_fastwrite) 779 metaslab_fastwrite_unmark(zilog->zl_spa, 780 &lwb->lwb_blk); 781 782 list_remove(&zilog->zl_lwb_list, lwb); 783 if (lwb->lwb_buf != NULL) 784 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 785 zio_free(zilog->zl_spa, txg, &lwb->lwb_blk); 786 zil_free_lwb(zilog, lwb); 787 } 788 } else if (!keep_first) { 789 zil_destroy_sync(zilog, tx); 790 } 791 mutex_exit(&zilog->zl_lock); 792 793 dmu_tx_commit(tx); 794 } 795 796 void 797 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx) 798 { 799 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 800 (void) zil_parse(zilog, zil_free_log_block, 801 zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE); 802 } 803 804 int 805 zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg) 806 { 807 dmu_tx_t *tx = txarg; 808 zilog_t *zilog; 809 uint64_t first_txg; 810 zil_header_t *zh; 811 objset_t *os; 812 int error; 813 814 error = dmu_objset_own_obj(dp, ds->ds_object, 815 DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os); 816 if (error != 0) { 817 /* 818 * EBUSY indicates that the objset is inconsistent, in which 819 * case it can not have a ZIL. 820 */ 821 if (error != EBUSY) { 822 cmn_err(CE_WARN, "can't open objset for %llu, error %u", 823 (unsigned long long)ds->ds_object, error); 824 } 825 826 return (0); 827 } 828 829 zilog = dmu_objset_zil(os); 830 zh = zil_header_in_syncing_context(zilog); 831 ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa)); 832 first_txg = spa_min_claim_txg(zilog->zl_spa); 833 834 /* 835 * If the spa_log_state is not set to be cleared, check whether 836 * the current uberblock is a checkpoint one and if the current 837 * header has been claimed before moving on. 838 * 839 * If the current uberblock is a checkpointed uberblock then 840 * one of the following scenarios took place: 841 * 842 * 1] We are currently rewinding to the checkpoint of the pool. 843 * 2] We crashed in the middle of a checkpoint rewind but we 844 * did manage to write the checkpointed uberblock to the 845 * vdev labels, so when we tried to import the pool again 846 * the checkpointed uberblock was selected from the import 847 * procedure. 848 * 849 * In both cases we want to zero out all the ZIL blocks, except 850 * the ones that have been claimed at the time of the checkpoint 851 * (their zh_claim_txg != 0). 
The reason is that these blocks 852 * may be corrupted since we may have reused their locations on 853 * disk after we took the checkpoint. 854 * 855 * We could try to set spa_log_state to SPA_LOG_CLEAR earlier 856 * when we first figure out whether the current uberblock is 857 * checkpointed or not. Unfortunately, that would discard all 858 * the logs, including the ones that are claimed, and we would 859 * leak space. 860 */ 861 if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR || 862 (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 && 863 zh->zh_claim_txg == 0)) { 864 if (!BP_IS_HOLE(&zh->zh_log)) { 865 (void) zil_parse(zilog, zil_clear_log_block, 866 zil_noop_log_record, tx, first_txg, B_FALSE); 867 } 868 BP_ZERO(&zh->zh_log); 869 if (os->os_encrypted) 870 os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE; 871 dsl_dataset_dirty(dmu_objset_ds(os), tx); 872 dmu_objset_disown(os, B_FALSE, FTAG); 873 return (0); 874 } 875 876 /* 877 * If we are not rewinding and opening the pool normally, then 878 * the min_claim_txg should be equal to the first txg of the pool. 879 */ 880 ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa)); 881 882 /* 883 * Claim all log blocks if we haven't already done so, and remember 884 * the highest claimed sequence number. This ensures that if we can 885 * read only part of the log now (e.g. due to a missing device), 886 * but we can read the entire log later, we will not try to replay 887 * or destroy beyond the last block we successfully claimed. 888 */ 889 ASSERT3U(zh->zh_claim_txg, <=, first_txg); 890 if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) { 891 (void) zil_parse(zilog, zil_claim_log_block, 892 zil_claim_log_record, tx, first_txg, B_FALSE); 893 zh->zh_claim_txg = first_txg; 894 zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq; 895 zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq; 896 if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1) 897 zh->zh_flags |= ZIL_REPLAY_NEEDED; 898 zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID; 899 if (os->os_encrypted) 900 os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE; 901 dsl_dataset_dirty(dmu_objset_ds(os), tx); 902 } 903 904 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1)); 905 dmu_objset_disown(os, B_FALSE, FTAG); 906 return (0); 907 } 908 909 /* 910 * Check the log by walking the log chain. 911 * Checksum errors are ok as they indicate the end of the chain. 912 * Any other error (no device or read failure) returns an error. 913 */ 914 /* ARGSUSED */ 915 int 916 zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx) 917 { 918 zilog_t *zilog; 919 objset_t *os; 920 blkptr_t *bp; 921 int error; 922 923 ASSERT(tx == NULL); 924 925 error = dmu_objset_from_ds(ds, &os); 926 if (error != 0) { 927 cmn_err(CE_WARN, "can't open objset %llu, error %d", 928 (unsigned long long)ds->ds_object, error); 929 return (0); 930 } 931 932 zilog = dmu_objset_zil(os); 933 bp = (blkptr_t *)&zilog->zl_header->zh_log; 934 935 if (!BP_IS_HOLE(bp)) { 936 vdev_t *vd; 937 boolean_t valid = B_TRUE; 938 939 /* 940 * Check the first block and determine if it's on a log device 941 * which may have been removed or faulted prior to loading this 942 * pool. If so, there's no point in checking the rest of the 943 * log as its content should have already been synced to the 944 * pool. 
945 */ 946 spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER); 947 vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0])); 948 if (vd->vdev_islog && vdev_is_dead(vd)) 949 valid = vdev_log_state_valid(vd); 950 spa_config_exit(os->os_spa, SCL_STATE, FTAG); 951 952 if (!valid) 953 return (0); 954 955 /* 956 * Check whether the current uberblock is checkpointed (e.g. 957 * we are rewinding) and whether the current header has been 958 * claimed or not. If it hasn't then skip verifying it. We 959 * do this because its ZIL blocks may be part of the pool's 960 * state before the rewind, which is no longer valid. 961 */ 962 zil_header_t *zh = zil_header_in_syncing_context(zilog); 963 if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 && 964 zh->zh_claim_txg == 0) 965 return (0); 966 } 967 968 /* 969 * Because tx == NULL, zil_claim_log_block() will not actually claim 970 * any blocks, but just determine whether it is possible to do so. 971 * In addition to checking the log chain, zil_claim_log_block() 972 * will invoke zio_claim() with a done func of spa_claim_notify(), 973 * which will update spa_max_claim_txg. See spa_load() for details. 974 */ 975 error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx, 976 zilog->zl_header->zh_claim_txg ? -1ULL : 977 spa_min_claim_txg(os->os_spa), B_FALSE); 978 979 return ((error == ECKSUM || error == ENOENT) ? 0 : error); 980 } 981 982 /* 983 * When an itx is "skipped", this function is used to properly mark the 984 * waiter as "done, and signal any thread(s) waiting on it. An itx can 985 * be skipped (and not committed to an lwb) for a variety of reasons, 986 * one of them being that the itx was committed via spa_sync(), prior to 987 * it being committed to an lwb; this can happen if a thread calling 988 * zil_commit() is racing with spa_sync(). 989 */ 990 static void 991 zil_commit_waiter_skip(zil_commit_waiter_t *zcw) 992 { 993 mutex_enter(&zcw->zcw_lock); 994 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 995 zcw->zcw_done = B_TRUE; 996 cv_broadcast(&zcw->zcw_cv); 997 mutex_exit(&zcw->zcw_lock); 998 } 999 1000 /* 1001 * This function is used when the given waiter is to be linked into an 1002 * lwb's "lwb_waiter" list; i.e. when the itx is committed to the lwb. 1003 * At this point, the waiter will no longer be referenced by the itx, 1004 * and instead, will be referenced by the lwb. 1005 */ 1006 static void 1007 zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb) 1008 { 1009 /* 1010 * The lwb_waiters field of the lwb is protected by the zilog's 1011 * zl_lock, thus it must be held when calling this function. 1012 */ 1013 ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_lock)); 1014 1015 mutex_enter(&zcw->zcw_lock); 1016 ASSERT(!list_link_active(&zcw->zcw_node)); 1017 ASSERT3P(zcw->zcw_lwb, ==, NULL); 1018 ASSERT3P(lwb, !=, NULL); 1019 ASSERT(lwb->lwb_state == LWB_STATE_OPENED || 1020 lwb->lwb_state == LWB_STATE_ISSUED || 1021 lwb->lwb_state == LWB_STATE_WRITE_DONE); 1022 1023 list_insert_tail(&lwb->lwb_waiters, zcw); 1024 zcw->zcw_lwb = lwb; 1025 mutex_exit(&zcw->zcw_lock); 1026 } 1027 1028 /* 1029 * This function is used when zio_alloc_zil() fails to allocate a ZIL 1030 * block, and the given waiter must be linked to the "nolwb waiters" 1031 * list inside of zil_process_commit_list(). 
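 *
 * Waiters on the "nolwb" list are not attached to any lwb; after
 * zil_commit_writer_stall() has waited for their itxs to be committed
 * by the DMU, they are marked "done" via zil_commit_waiter_skip()
 * (see zil_process_commit_list()).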
1032 */ 1033 static void 1034 zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb) 1035 { 1036 mutex_enter(&zcw->zcw_lock); 1037 ASSERT(!list_link_active(&zcw->zcw_node)); 1038 ASSERT3P(zcw->zcw_lwb, ==, NULL); 1039 list_insert_tail(nolwb, zcw); 1040 mutex_exit(&zcw->zcw_lock); 1041 } 1042 1043 void 1044 zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp) 1045 { 1046 avl_tree_t *t = &lwb->lwb_vdev_tree; 1047 avl_index_t where; 1048 zil_vdev_node_t *zv, zvsearch; 1049 int ndvas = BP_GET_NDVAS(bp); 1050 int i; 1051 1052 if (zil_nocacheflush) 1053 return; 1054 1055 mutex_enter(&lwb->lwb_vdev_lock); 1056 for (i = 0; i < ndvas; i++) { 1057 zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 1058 if (avl_find(t, &zvsearch, &where) == NULL) { 1059 zv = kmem_alloc(sizeof (*zv), KM_SLEEP); 1060 zv->zv_vdev = zvsearch.zv_vdev; 1061 avl_insert(t, zv, where); 1062 } 1063 } 1064 mutex_exit(&lwb->lwb_vdev_lock); 1065 } 1066 1067 static void 1068 zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb) 1069 { 1070 avl_tree_t *src = &lwb->lwb_vdev_tree; 1071 avl_tree_t *dst = &nlwb->lwb_vdev_tree; 1072 void *cookie = NULL; 1073 zil_vdev_node_t *zv; 1074 1075 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE); 1076 ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE); 1077 ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); 1078 1079 /* 1080 * At this point in its lifetime, 'lwb' no longer needs the protection 1081 * of lwb_vdev_lock for its lwb_vdev_tree (it will only be modified 1082 * while holding zilog->zl_lock), as its writes and those of its 1083 * children have all completed. The younger 'nlwb' may be waiting on 1084 * future writes to additional vdevs. 1085 */ 1086 mutex_enter(&nlwb->lwb_vdev_lock); 1087 /* 1088 * Tear down the 'lwb' vdev tree, ensuring that entries which do not 1089 * exist in 'nlwb' are moved to it, freeing any would-be duplicates. 1090 */ 1091 while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) { 1092 avl_index_t where; 1093 1094 if (avl_find(dst, zv, &where) == NULL) { 1095 avl_insert(dst, zv, where); 1096 } else { 1097 kmem_free(zv, sizeof (*zv)); 1098 } 1099 } 1100 mutex_exit(&nlwb->lwb_vdev_lock); 1101 } 1102 1103 void 1104 zil_lwb_add_txg(lwb_t *lwb, uint64_t txg) 1105 { 1106 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg); 1107 } 1108 1109 /* 1110 * This function is called after all vdevs associated with a given lwb 1111 * write have completed their DKIOCFLUSHWRITECACHE command; or as soon 1112 * as the lwb write completes, if "zil_nocacheflush" is set. Further, 1113 * all "previous" lwb's will have completed before this function is 1114 * called; i.e. this function is called for all previous lwbs before 1115 * it's called for "this" lwb (enforced via the zio dependencies 1116 * configured in zil_lwb_set_zio_dependency()). 1117 * 1118 * The intention is for this function to be called as soon as the 1119 * contents of an lwb are considered "stable" on disk, and will survive 1120 * any sudden loss of power. At this point, any threads waiting for the 1121 * lwb to reach this state are signalled, and the "waiter" structures 1122 * are marked "done".
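 *
 * For reference, an lwb moves through the following states (see the
 * lwb_state_t definition in zil_impl.h):
 *
 *   CLOSED -> OPENED -> ISSUED -> WRITE_DONE -> FLUSH_DONE
 *
 * zil_lwb_write_open() performs the CLOSED -> OPENED transition,
 * zil_lwb_write_issue() OPENED -> ISSUED, zil_lwb_write_done()
 * ISSUED -> WRITE_DONE, and this function performs the final
 * WRITE_DONE -> FLUSH_DONE transition.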
1123 */ 1124 static void 1125 zil_lwb_flush_vdevs_done(zio_t *zio) 1126 { 1127 lwb_t *lwb = zio->io_private; 1128 zilog_t *zilog = lwb->lwb_zilog; 1129 dmu_tx_t *tx = lwb->lwb_tx; 1130 zil_commit_waiter_t *zcw; 1131 itx_t *itx; 1132 1133 spa_config_exit(zilog->zl_spa, SCL_STATE, lwb); 1134 1135 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 1136 1137 mutex_enter(&zilog->zl_lock); 1138 1139 /* 1140 * Ensure the lwb buffer pointer is cleared before releasing the 1141 * txg. If we have had an allocation failure and the txg is 1142 * waiting to sync then we want zil_sync() to remove the lwb so 1143 * that it's not picked up as the next new one in 1144 * zil_process_commit_list(). zil_sync() will only remove the 1145 * lwb if lwb_buf is null. 1146 */ 1147 lwb->lwb_buf = NULL; 1148 lwb->lwb_tx = NULL; 1149 1150 ASSERT3U(lwb->lwb_issued_timestamp, >, 0); 1151 zilog->zl_last_lwb_latency = gethrtime() - lwb->lwb_issued_timestamp; 1152 1153 lwb->lwb_root_zio = NULL; 1154 1155 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE); 1156 lwb->lwb_state = LWB_STATE_FLUSH_DONE; 1157 1158 if (zilog->zl_last_lwb_opened == lwb) { 1159 /* 1160 * Remember the highest committed log sequence number 1161 * for ztest. We only update this value when all the log 1162 * writes succeeded, because ztest wants to ASSERT that 1163 * it got the whole log chain. 1164 */ 1165 zilog->zl_commit_lr_seq = zilog->zl_lr_seq; 1166 } 1167 1168 while ((itx = list_head(&lwb->lwb_itxs)) != NULL) { 1169 list_remove(&lwb->lwb_itxs, itx); 1170 zil_itx_destroy(itx); 1171 } 1172 1173 while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) { 1174 mutex_enter(&zcw->zcw_lock); 1175 1176 ASSERT(list_link_active(&zcw->zcw_node)); 1177 list_remove(&lwb->lwb_waiters, zcw); 1178 1179 ASSERT3P(zcw->zcw_lwb, ==, lwb); 1180 zcw->zcw_lwb = NULL; 1181 1182 zcw->zcw_zio_error = zio->io_error; 1183 1184 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 1185 zcw->zcw_done = B_TRUE; 1186 cv_broadcast(&zcw->zcw_cv); 1187 1188 mutex_exit(&zcw->zcw_lock); 1189 } 1190 1191 mutex_exit(&zilog->zl_lock); 1192 1193 /* 1194 * Now that we've written this log block, we have a stable pointer 1195 * to the next block in the chain, so it's OK to let the txg in 1196 * which we allocated the next block sync. 1197 */ 1198 dmu_tx_commit(tx); 1199 } 1200 1201 /* 1202 * This is called when an lwb's write zio completes. The callback's 1203 * purpose is to issue the DKIOCFLUSHWRITECACHE commands for the vdevs 1204 * in the lwb's lwb_vdev_tree. The tree will contain the vdevs involved 1205 * in writing out this specific lwb's data, and in the case that cache 1206 * flushes have been deferred, vdevs involved in writing the data for 1207 * previous lwbs. The writes corresponding to all the vdevs in the 1208 * lwb_vdev_tree will have completed by the time this is called, due to 1209 * the zio dependencies configured in zil_lwb_set_zio_dependency(), 1210 * which takes deferred flushes into account. The lwb will be "done" 1211 * once zil_lwb_flush_vdevs_done() is called, which occurs in the zio 1212 * completion callback for the lwb's root zio. 
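 *
 * Note that the flush zios issued here are created as children of the
 * lwb's root zio (via zio_flush(lwb->lwb_root_zio, vd)), so the root
 * zio, and therefore zil_lwb_flush_vdevs_done(), cannot complete until
 * both the lwb write and all of these flushes have finished.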
1213 */ 1214 static void 1215 zil_lwb_write_done(zio_t *zio) 1216 { 1217 lwb_t *lwb = zio->io_private; 1218 spa_t *spa = zio->io_spa; 1219 zilog_t *zilog = lwb->lwb_zilog; 1220 avl_tree_t *t = &lwb->lwb_vdev_tree; 1221 void *cookie = NULL; 1222 zil_vdev_node_t *zv; 1223 lwb_t *nlwb; 1224 1225 ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0); 1226 1227 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); 1228 ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG); 1229 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 1230 ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER); 1231 ASSERT(!BP_IS_GANG(zio->io_bp)); 1232 ASSERT(!BP_IS_HOLE(zio->io_bp)); 1233 ASSERT(BP_GET_FILL(zio->io_bp) == 0); 1234 1235 abd_free(zio->io_abd); 1236 1237 mutex_enter(&zilog->zl_lock); 1238 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED); 1239 lwb->lwb_state = LWB_STATE_WRITE_DONE; 1240 lwb->lwb_write_zio = NULL; 1241 lwb->lwb_fastwrite = FALSE; 1242 nlwb = list_next(&zilog->zl_lwb_list, lwb); 1243 mutex_exit(&zilog->zl_lock); 1244 1245 if (avl_numnodes(t) == 0) 1246 return; 1247 1248 /* 1249 * If there was an IO error, we're not going to call zio_flush() 1250 * on these vdevs, so we simply empty the tree and free the 1251 * nodes. We avoid calling zio_flush() since there isn't any 1252 * good reason for doing so, after the lwb block failed to be 1253 * written out. 1254 */ 1255 if (zio->io_error != 0) { 1256 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) 1257 kmem_free(zv, sizeof (*zv)); 1258 return; 1259 } 1260 1261 /* 1262 * If this lwb does not have any threads waiting for it to 1263 * complete, we want to defer issuing the DKIOCFLUSHWRITECACHE 1264 * command to the vdevs written to by "this" lwb, and instead 1265 * rely on the "next" lwb to handle the DKIOCFLUSHWRITECACHE 1266 * command for those vdevs. Thus, we merge the vdev tree of 1267 * "this" lwb with the vdev tree of the "next" lwb in the list, 1268 * and assume the "next" lwb will handle flushing the vdevs (or 1269 * deferring the flush(s) again). 1270 * 1271 * This is a useful performance optimization, especially for 1272 * workloads with lots of async write activity and few sync 1273 * write and/or fsync activity, as it has the potential to 1274 * coalesce multiple flush commands to a vdev into one. 1275 */ 1276 if (list_head(&lwb->lwb_waiters) == NULL && nlwb != NULL) { 1277 zil_lwb_flush_defer(lwb, nlwb); 1278 ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); 1279 return; 1280 } 1281 1282 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) { 1283 vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev); 1284 if (vd != NULL) 1285 zio_flush(lwb->lwb_root_zio, vd); 1286 kmem_free(zv, sizeof (*zv)); 1287 } 1288 } 1289 1290 static void 1291 zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb) 1292 { 1293 lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened; 1294 1295 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1296 ASSERT(MUTEX_HELD(&zilog->zl_lock)); 1297 1298 /* 1299 * The zilog's "zl_last_lwb_opened" field is used to build the 1300 * lwb/zio dependency chain, which is used to preserve the 1301 * ordering of lwb completions that is required by the semantics 1302 * of the ZIL. Each new lwb zio becomes a parent of the 1303 * "previous" lwb zio, such that the new lwb's zio cannot 1304 * complete until the "previous" lwb's zio completes. 
1305 * 1306 * This is required by the semantics of zil_commit(); the commit 1307 * waiters attached to the lwbs will be woken in the lwb zio's 1308 * completion callback, so this zio dependency graph ensures the 1309 * waiters are woken in the correct order (the same order the 1310 * lwbs were created). 1311 */ 1312 if (last_lwb_opened != NULL && 1313 last_lwb_opened->lwb_state != LWB_STATE_FLUSH_DONE) { 1314 ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED || 1315 last_lwb_opened->lwb_state == LWB_STATE_ISSUED || 1316 last_lwb_opened->lwb_state == LWB_STATE_WRITE_DONE); 1317 1318 ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL); 1319 zio_add_child(lwb->lwb_root_zio, 1320 last_lwb_opened->lwb_root_zio); 1321 1322 /* 1323 * If the previous lwb's write hasn't already completed, 1324 * we also want to order the completion of the lwb write 1325 * zios (above, we only order the completion of the lwb 1326 * root zios). This is required because of how we can 1327 * defer the DKIOCFLUSHWRITECACHE commands for each lwb. 1328 * 1329 * When the DKIOCFLUSHWRITECACHE commands are deferred, 1330 * the previous lwb will rely on this lwb to flush the 1331 * vdevs written to by that previous lwb. Thus, we need 1332 * to ensure this lwb doesn't issue the flush until 1333 * after the previous lwb's write completes. We ensure 1334 * this ordering by setting the zio parent/child 1335 * relationship here. 1336 * 1337 * Without this relationship on the lwb's write zio, 1338 * it's possible for this lwb's write to complete prior 1339 * to the previous lwb's write completing; and thus, the 1340 * vdevs for the previous lwb would be flushed prior to 1341 * that lwb's data being written to those vdevs (the 1342 * vdevs are flushed in the lwb write zio's completion 1343 * handler, zil_lwb_write_done()). 1344 */ 1345 if (last_lwb_opened->lwb_state != LWB_STATE_WRITE_DONE) { 1346 ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED || 1347 last_lwb_opened->lwb_state == LWB_STATE_ISSUED); 1348 1349 ASSERT3P(last_lwb_opened->lwb_write_zio, !=, NULL); 1350 zio_add_child(lwb->lwb_write_zio, 1351 last_lwb_opened->lwb_write_zio); 1352 } 1353 } 1354 } 1355 1356 1357 /* 1358 * This function's purpose is to "open" an lwb such that it is ready to 1359 * accept new itxs being committed to it. To do this, the lwb's zio 1360 * structures are created, and linked to the lwb. This function is 1361 * idempotent; if the passed in lwb has already been opened, this 1362 * function is essentially a no-op. 
1363 */ 1364 static void 1365 zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb) 1366 { 1367 zbookmark_phys_t zb; 1368 zio_priority_t prio; 1369 1370 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1371 ASSERT3P(lwb, !=, NULL); 1372 EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED); 1373 EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED); 1374 1375 SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET], 1376 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, 1377 lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]); 1378 1379 /* Lock so zil_sync() doesn't fastwrite_unmark after zio is created */ 1380 mutex_enter(&zilog->zl_lock); 1381 if (lwb->lwb_root_zio == NULL) { 1382 abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, 1383 BP_GET_LSIZE(&lwb->lwb_blk)); 1384 1385 if (!lwb->lwb_fastwrite) { 1386 metaslab_fastwrite_mark(zilog->zl_spa, &lwb->lwb_blk); 1387 lwb->lwb_fastwrite = 1; 1388 } 1389 1390 if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk) 1391 prio = ZIO_PRIORITY_SYNC_WRITE; 1392 else 1393 prio = ZIO_PRIORITY_ASYNC_WRITE; 1394 1395 lwb->lwb_root_zio = zio_root(zilog->zl_spa, 1396 zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL); 1397 ASSERT3P(lwb->lwb_root_zio, !=, NULL); 1398 1399 lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, 1400 zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd, 1401 BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb, 1402 prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | 1403 ZIO_FLAG_FASTWRITE, &zb); 1404 ASSERT3P(lwb->lwb_write_zio, !=, NULL); 1405 1406 lwb->lwb_state = LWB_STATE_OPENED; 1407 1408 zil_lwb_set_zio_dependency(zilog, lwb); 1409 zilog->zl_last_lwb_opened = lwb; 1410 } 1411 mutex_exit(&zilog->zl_lock); 1412 1413 ASSERT3P(lwb->lwb_root_zio, !=, NULL); 1414 ASSERT3P(lwb->lwb_write_zio, !=, NULL); 1415 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 1416 } 1417 1418 /* 1419 * Define a limited set of intent log block sizes. 1420 * 1421 * These must be a multiple of 4KB. Note only the amount used (again 1422 * aligned to 4KB) actually gets written. However, we can't always just 1423 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted. 1424 */ 1425 struct { 1426 uint64_t limit; 1427 uint64_t blksz; 1428 } zil_block_buckets[] = { 1429 { 4096, 4096 }, /* non TX_WRITE */ 1430 { 8192 + 4096, 8192 + 4096 }, /* database */ 1431 { 32768 + 4096, 32768 + 4096 }, /* NFS writes */ 1432 { 65536 + 4096, 65536 + 4096 }, /* 64KB writes */ 1433 { 131072, 131072 }, /* < 128KB writes */ 1434 { 131072 +4096, 65536 + 4096 }, /* 128KB writes */ 1435 { UINT64_MAX, SPA_OLD_MAXBLOCKSIZE}, /* > 128KB writes */ 1436 }; 1437 1438 /* 1439 * Maximum block size used by the ZIL. This is picked up when the ZIL is 1440 * initialized. Otherwise this should not be used directly; see 1441 * zl_max_block_size instead. 1442 */ 1443 int zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE; 1444 1445 /* 1446 * Start a log block write and advance to the next log block. 1447 * Calls are serialized. 
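 * (Serialization is provided by the zl_issuer_lock, which the caller
 * must hold; see the ASSERT at the top of this function.)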
1448 */ 1449 static lwb_t * 1450 zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb) 1451 { 1452 lwb_t *nlwb = NULL; 1453 zil_chain_t *zilc; 1454 spa_t *spa = zilog->zl_spa; 1455 blkptr_t *bp; 1456 dmu_tx_t *tx; 1457 uint64_t txg; 1458 uint64_t zil_blksz, wsz; 1459 int i, error; 1460 boolean_t slog; 1461 1462 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1463 ASSERT3P(lwb->lwb_root_zio, !=, NULL); 1464 ASSERT3P(lwb->lwb_write_zio, !=, NULL); 1465 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 1466 1467 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 1468 zilc = (zil_chain_t *)lwb->lwb_buf; 1469 bp = &zilc->zc_next_blk; 1470 } else { 1471 zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz); 1472 bp = &zilc->zc_next_blk; 1473 } 1474 1475 ASSERT(lwb->lwb_nused <= lwb->lwb_sz); 1476 1477 /* 1478 * Allocate the next block and save its address in this block 1479 * before writing it in order to establish the log chain. 1480 * Note that if the allocation of nlwb synced before we wrote 1481 * the block that points at it (lwb), we'd leak it if we crashed. 1482 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done(). 1483 * We dirty the dataset to ensure that zil_sync() will be called 1484 * to clean up in the event of allocation failure or I/O failure. 1485 */ 1486 1487 tx = dmu_tx_create(zilog->zl_os); 1488 1489 /* 1490 * Since we are not going to create any new dirty data, and we 1491 * can even help with clearing the existing dirty data, we 1492 * should not be subject to the dirty data based delays. We 1493 * use TXG_NOTHROTTLE to bypass the delay mechanism. 1494 */ 1495 VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE)); 1496 1497 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 1498 txg = dmu_tx_get_txg(tx); 1499 1500 lwb->lwb_tx = tx; 1501 1502 /* 1503 * Log blocks are pre-allocated. Here we select the size of the next 1504 * block, based on size used in the last block. 1505 * - first find the smallest bucket that will fit the block from a 1506 * limited set of block sizes. This is because it's faster to write 1507 * blocks allocated from the same metaslab as they are adjacent or 1508 * close. 1509 * - next find the maximum from the new suggested size and an array of 1510 * previous sizes. This lessens a picket fence effect of wrongly 1511 * guessing the size if we have a stream of say 2k, 64k, 2k, 64k 1512 * requests. 1513 * 1514 * Note we only write what is used, but we can't just allocate 1515 * the maximum block size because we can exhaust the available 1516 * pool log space. 
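 *
 * As an illustrative example: if zl_cur_used plus the chain header
 * comes to ~20KB, the smallest bucket whose limit covers it is the
 * 36KB (32K + 4K) one, so 36KB becomes the new suggestion; and if one
 * of the ZIL_PREV_BLKS most recent blocks was sized at 68KB, the MAX()
 * over that history keeps the next block at 68KB rather than shrinking
 * it immediately.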
1517 */ 1518 zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t); 1519 for (i = 0; zil_blksz > zil_block_buckets[i].limit; i++) 1520 continue; 1521 zil_blksz = MIN(zil_block_buckets[i].blksz, zilog->zl_max_block_size); 1522 zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz; 1523 for (i = 0; i < ZIL_PREV_BLKS; i++) 1524 zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]); 1525 zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1); 1526 1527 BP_ZERO(bp); 1528 error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, zil_blksz, &slog); 1529 if (slog) { 1530 ZIL_STAT_BUMP(zil_itx_metaslab_slog_count); 1531 ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused); 1532 } else { 1533 ZIL_STAT_BUMP(zil_itx_metaslab_normal_count); 1534 ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused); 1535 } 1536 if (error == 0) { 1537 ASSERT3U(bp->blk_birth, ==, txg); 1538 bp->blk_cksum = lwb->lwb_blk.blk_cksum; 1539 bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++; 1540 1541 /* 1542 * Allocate a new log write block (lwb). 1543 */ 1544 nlwb = zil_alloc_lwb(zilog, bp, slog, txg, TRUE); 1545 } 1546 1547 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 1548 /* For Slim ZIL only write what is used. */ 1549 wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t); 1550 ASSERT3U(wsz, <=, lwb->lwb_sz); 1551 zio_shrink(lwb->lwb_write_zio, wsz); 1552 1553 } else { 1554 wsz = lwb->lwb_sz; 1555 } 1556 1557 zilc->zc_pad = 0; 1558 zilc->zc_nused = lwb->lwb_nused; 1559 zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum; 1560 1561 /* 1562 * clear unused data for security 1563 */ 1564 bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused); 1565 1566 spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER); 1567 1568 zil_lwb_add_block(lwb, &lwb->lwb_blk); 1569 lwb->lwb_issued_timestamp = gethrtime(); 1570 lwb->lwb_state = LWB_STATE_ISSUED; 1571 1572 zio_nowait(lwb->lwb_root_zio); 1573 zio_nowait(lwb->lwb_write_zio); 1574 1575 /* 1576 * If there was an allocation failure then nlwb will be null which 1577 * forces a txg_wait_synced(). 1578 */ 1579 return (nlwb); 1580 } 1581 1582 /* 1583 * Maximum amount of write data that can be put into single log block. 1584 */ 1585 uint64_t 1586 zil_max_log_data(zilog_t *zilog) 1587 { 1588 return (zilog->zl_max_block_size - 1589 sizeof (zil_chain_t) - sizeof (lr_write_t)); 1590 } 1591 1592 /* 1593 * Maximum amount of log space we agree to waste to reduce number of 1594 * WR_NEED_COPY chunks to reduce zl_get_data() overhead (~12%). 1595 */ 1596 static inline uint64_t 1597 zil_max_waste_space(zilog_t *zilog) 1598 { 1599 return (zil_max_log_data(zilog) / 8); 1600 } 1601 1602 /* 1603 * Maximum amount of write data for WR_COPIED. For correctness, consumers 1604 * must fall back to WR_NEED_COPY if we can't fit the entire record into one 1605 * maximum sized log block, because each WR_COPIED record must fit in a 1606 * single log block. For space efficiency, we want to fit two records into a 1607 * max-sized log block. 
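 *
 * For example, with the default 128KB maximum block size this works
 * out to a little under 64KB of data per WR_COPIED record.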
1608 */ 1609 uint64_t 1610 zil_max_copied_data(zilog_t *zilog) 1611 { 1612 return ((zilog->zl_max_block_size - sizeof (zil_chain_t)) / 2 - 1613 sizeof (lr_write_t)); 1614 } 1615 1616 static lwb_t * 1617 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb) 1618 { 1619 lr_t *lrcb, *lrc; 1620 lr_write_t *lrwb, *lrw; 1621 char *lr_buf; 1622 uint64_t dlen, dnow, lwb_sp, reclen, txg, max_log_data; 1623 1624 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1625 ASSERT3P(lwb, !=, NULL); 1626 ASSERT3P(lwb->lwb_buf, !=, NULL); 1627 1628 zil_lwb_write_open(zilog, lwb); 1629 1630 lrc = &itx->itx_lr; 1631 lrw = (lr_write_t *)lrc; 1632 1633 /* 1634 * A commit itx doesn't represent any on-disk state; instead 1635 * it's simply used as a place holder on the commit list, and 1636 * provides a mechanism for attaching a "commit waiter" onto the 1637 * correct lwb (such that the waiter can be signalled upon 1638 * completion of that lwb). Thus, we don't process this itx's 1639 * log record if it's a commit itx (these itx's don't have log 1640 * records), and instead link the itx's waiter onto the lwb's 1641 * list of waiters. 1642 * 1643 * For more details, see the comment above zil_commit(). 1644 */ 1645 if (lrc->lrc_txtype == TX_COMMIT) { 1646 mutex_enter(&zilog->zl_lock); 1647 zil_commit_waiter_link_lwb(itx->itx_private, lwb); 1648 itx->itx_private = NULL; 1649 mutex_exit(&zilog->zl_lock); 1650 return (lwb); 1651 } 1652 1653 if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) { 1654 dlen = P2ROUNDUP_TYPED( 1655 lrw->lr_length, sizeof (uint64_t), uint64_t); 1656 } else { 1657 dlen = 0; 1658 } 1659 reclen = lrc->lrc_reclen; 1660 zilog->zl_cur_used += (reclen + dlen); 1661 txg = lrc->lrc_txg; 1662 1663 ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen)); 1664 1665 cont: 1666 /* 1667 * If this record won't fit in the current log block, start a new one. 1668 * For WR_NEED_COPY optimize layout for minimal number of chunks. 1669 */ 1670 lwb_sp = lwb->lwb_sz - lwb->lwb_nused; 1671 max_log_data = zil_max_log_data(zilog); 1672 if (reclen > lwb_sp || (reclen + dlen > lwb_sp && 1673 lwb_sp < zil_max_waste_space(zilog) && 1674 (dlen % max_log_data == 0 || 1675 lwb_sp < reclen + dlen % max_log_data))) { 1676 lwb = zil_lwb_write_issue(zilog, lwb); 1677 if (lwb == NULL) 1678 return (NULL); 1679 zil_lwb_write_open(zilog, lwb); 1680 ASSERT(LWB_EMPTY(lwb)); 1681 lwb_sp = lwb->lwb_sz - lwb->lwb_nused; 1682 1683 /* 1684 * There must be enough space in the new, empty log block to 1685 * hold reclen. For WR_COPIED, we need to fit the whole 1686 * record in one block, and reclen is the header size + the 1687 * data size. For WR_NEED_COPY, we can create multiple 1688 * records, splitting the data into multiple blocks, so we 1689 * only need to fit one word of data per block; in this case 1690 * reclen is just the header size (no data). 1691 */ 1692 ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp); 1693 } 1694 1695 dnow = MIN(dlen, lwb_sp - reclen); 1696 lr_buf = lwb->lwb_buf + lwb->lwb_nused; 1697 bcopy(lrc, lr_buf, reclen); 1698 lrcb = (lr_t *)lr_buf; /* Like lrc, but inside lwb. */ 1699 lrwb = (lr_write_t *)lrcb; /* Like lrw, but inside lwb. */ 1700 1701 ZIL_STAT_BUMP(zil_itx_count); 1702 1703 /* 1704 * If it's a write, fetch the data or get its blkptr as appropriate. 
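 *
 * A TX_WRITE record is handled according to its itx_wr_state:
 *
 *  WR_COPIED     the data was copied into the record when the itx was
 *                created, so nothing more needs to be fetched here;
 *  WR_NEED_COPY  the data is fetched now via zl_get_data() and copied
 *                directly into this log block (splitting the write
 *                across records/blocks if necessary);
 *  WR_INDIRECT   the data is written to its final location by
 *                dmu_sync(), and only the resulting block pointer is
 *                recorded here (also via zl_get_data()).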
1705 */ 1706 if (lrc->lrc_txtype == TX_WRITE) { 1707 if (txg > spa_freeze_txg(zilog->zl_spa)) 1708 txg_wait_synced(zilog->zl_dmu_pool, txg); 1709 if (itx->itx_wr_state == WR_COPIED) { 1710 ZIL_STAT_BUMP(zil_itx_copied_count); 1711 ZIL_STAT_INCR(zil_itx_copied_bytes, lrw->lr_length); 1712 } else { 1713 char *dbuf; 1714 int error; 1715 1716 if (itx->itx_wr_state == WR_NEED_COPY) { 1717 dbuf = lr_buf + reclen; 1718 lrcb->lrc_reclen += dnow; 1719 if (lrwb->lr_length > dnow) 1720 lrwb->lr_length = dnow; 1721 lrw->lr_offset += dnow; 1722 lrw->lr_length -= dnow; 1723 ZIL_STAT_BUMP(zil_itx_needcopy_count); 1724 ZIL_STAT_INCR(zil_itx_needcopy_bytes, dnow); 1725 } else { 1726 ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT); 1727 dbuf = NULL; 1728 ZIL_STAT_BUMP(zil_itx_indirect_count); 1729 ZIL_STAT_INCR(zil_itx_indirect_bytes, 1730 lrw->lr_length); 1731 } 1732 1733 /* 1734 * We pass in the "lwb_write_zio" rather than 1735 * "lwb_root_zio" so that the "lwb_write_zio" 1736 * becomes the parent of any zio's created by 1737 * the "zl_get_data" callback. The vdevs are 1738 * flushed after the "lwb_write_zio" completes, 1739 * so we want to make sure that completion 1740 * callback waits for these additional zio's, 1741 * such that the vdevs used by those zio's will 1742 * be included in the lwb's vdev tree, and those 1743 * vdevs will be properly flushed. If we passed 1744 * in "lwb_root_zio" here, then these additional 1745 * vdevs may not be flushed; e.g. if these zio's 1746 * completed after "lwb_write_zio" completed. 1747 */ 1748 error = zilog->zl_get_data(itx->itx_private, 1749 itx->itx_gen, lrwb, dbuf, lwb, 1750 lwb->lwb_write_zio); 1751 1752 if (error == EIO) { 1753 txg_wait_synced(zilog->zl_dmu_pool, txg); 1754 return (lwb); 1755 } 1756 if (error != 0) { 1757 ASSERT(error == ENOENT || error == EEXIST || 1758 error == EALREADY); 1759 return (lwb); 1760 } 1761 } 1762 } 1763 1764 /* 1765 * We're actually making an entry, so update lrc_seq to be the 1766 * log record sequence number. Note that this is generally not 1767 * equal to the itx sequence number because not all transactions 1768 * are synchronous, and sometimes spa_sync() gets there first. 1769 */ 1770 lrcb->lrc_seq = ++zilog->zl_lr_seq; 1771 lwb->lwb_nused += reclen + dnow; 1772 1773 zil_lwb_add_txg(lwb, txg); 1774 1775 ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz); 1776 ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t))); 1777 1778 dlen -= dnow; 1779 if (dlen > 0) { 1780 zilog->zl_cur_used += reclen; 1781 goto cont; 1782 } 1783 1784 return (lwb); 1785 } 1786 1787 itx_t * 1788 zil_itx_create(uint64_t txtype, size_t lrsize) 1789 { 1790 size_t itxsize; 1791 itx_t *itx; 1792 1793 lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t); 1794 itxsize = offsetof(itx_t, itx_lr) + lrsize; 1795 1796 itx = zio_data_buf_alloc(itxsize); 1797 itx->itx_lr.lrc_txtype = txtype; 1798 itx->itx_lr.lrc_reclen = lrsize; 1799 itx->itx_lr.lrc_seq = 0; /* defensive */ 1800 itx->itx_sync = B_TRUE; /* default is synchronous */ 1801 itx->itx_callback = NULL; 1802 itx->itx_callback_data = NULL; 1803 itx->itx_size = itxsize; 1804 1805 return (itx); 1806 } 1807 1808 void 1809 zil_itx_destroy(itx_t *itx) 1810 { 1811 IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL); 1812 IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); 1813 1814 if (itx->itx_callback != NULL) 1815 itx->itx_callback(itx->itx_callback_data); 1816 1817 zio_data_buf_free(itx, itx->itx_size); 1818 } 1819 1820 /* 1821 * Free up the sync and async itxs. 
The itxs_t has already been detached 1822 * so no locks are needed. 1823 */ 1824 static void 1825 zil_itxg_clean(itxs_t *itxs) 1826 { 1827 itx_t *itx; 1828 list_t *list; 1829 avl_tree_t *t; 1830 void *cookie; 1831 itx_async_node_t *ian; 1832 1833 list = &itxs->i_sync_list; 1834 while ((itx = list_head(list)) != NULL) { 1835 /* 1836 * In the general case, commit itxs will not be found 1837 * here, as they'll be committed to an lwb via 1838 * zil_lwb_commit(), and free'd in that function. Having 1839 * said that, it is still possible for commit itxs to be 1840 * found here, due to the following race: 1841 * 1842 * - a thread calls zil_commit() which assigns the 1843 * commit itx to a per-txg i_sync_list 1844 * - zil_itxg_clean() is called (e.g. via spa_sync()) 1845 * while the waiter is still on the i_sync_list 1846 * 1847 * There's nothing to prevent syncing the txg while the 1848 * waiter is on the i_sync_list. This normally doesn't 1849 * happen because spa_sync() is slower than zil_commit(), 1850 * but if zil_commit() calls txg_wait_synced() (e.g. 1851 * because zil_create() or zil_commit_writer_stall() is 1852 * called) we will hit this case. 1853 */ 1854 if (itx->itx_lr.lrc_txtype == TX_COMMIT) 1855 zil_commit_waiter_skip(itx->itx_private); 1856 1857 list_remove(list, itx); 1858 zil_itx_destroy(itx); 1859 } 1860 1861 cookie = NULL; 1862 t = &itxs->i_async_tree; 1863 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 1864 list = &ian->ia_list; 1865 while ((itx = list_head(list)) != NULL) { 1866 list_remove(list, itx); 1867 /* commit itxs should never be on the async lists. */ 1868 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 1869 zil_itx_destroy(itx); 1870 } 1871 list_destroy(list); 1872 kmem_free(ian, sizeof (itx_async_node_t)); 1873 } 1874 avl_destroy(t); 1875 1876 kmem_free(itxs, sizeof (itxs_t)); 1877 } 1878 1879 static int 1880 zil_aitx_compare(const void *x1, const void *x2) 1881 { 1882 const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid; 1883 const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid; 1884 1885 return (TREE_CMP(o1, o2)); 1886 } 1887 1888 /* 1889 * Remove all async itx with the given oid. 1890 */ 1891 void 1892 zil_remove_async(zilog_t *zilog, uint64_t oid) 1893 { 1894 uint64_t otxg, txg; 1895 itx_async_node_t *ian; 1896 avl_tree_t *t; 1897 avl_index_t where; 1898 list_t clean_list; 1899 itx_t *itx; 1900 1901 ASSERT(oid != 0); 1902 list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); 1903 1904 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1905 otxg = ZILTEST_TXG; 1906 else 1907 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1908 1909 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1910 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1911 1912 mutex_enter(&itxg->itxg_lock); 1913 if (itxg->itxg_txg != txg) { 1914 mutex_exit(&itxg->itxg_lock); 1915 continue; 1916 } 1917 1918 /* 1919 * Locate the object node and append its list. 1920 */ 1921 t = &itxg->itxg_itxs->i_async_tree; 1922 ian = avl_find(t, &oid, &where); 1923 if (ian != NULL) 1924 list_move_tail(&clean_list, &ian->ia_list); 1925 mutex_exit(&itxg->itxg_lock); 1926 } 1927 while ((itx = list_head(&clean_list)) != NULL) { 1928 list_remove(&clean_list, itx); 1929 /* commit itxs should never be on the async lists. 
*/ 1930 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 1931 zil_itx_destroy(itx); 1932 } 1933 list_destroy(&clean_list); 1934 } 1935 1936 void 1937 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) 1938 { 1939 uint64_t txg; 1940 itxg_t *itxg; 1941 itxs_t *itxs, *clean = NULL; 1942 1943 /* 1944 * Ensure the data of a renamed file is committed before the rename. 1945 */ 1946 if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME) 1947 zil_async_to_sync(zilog, itx->itx_oid); 1948 1949 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) 1950 txg = ZILTEST_TXG; 1951 else 1952 txg = dmu_tx_get_txg(tx); 1953 1954 itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1955 mutex_enter(&itxg->itxg_lock); 1956 itxs = itxg->itxg_itxs; 1957 if (itxg->itxg_txg != txg) { 1958 if (itxs != NULL) { 1959 /* 1960 * The zil_clean callback hasn't got around to cleaning 1961 * this itxg. Save the itxs for release below. 1962 * This should be rare. 1963 */ 1964 zfs_dbgmsg("zil_itx_assign: missed itx cleanup for " 1965 "txg %llu", (u_longlong_t)itxg->itxg_txg); 1966 clean = itxg->itxg_itxs; 1967 } 1968 itxg->itxg_txg = txg; 1969 itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), 1970 KM_SLEEP); 1971 1972 list_create(&itxs->i_sync_list, sizeof (itx_t), 1973 offsetof(itx_t, itx_node)); 1974 avl_create(&itxs->i_async_tree, zil_aitx_compare, 1975 sizeof (itx_async_node_t), 1976 offsetof(itx_async_node_t, ia_node)); 1977 } 1978 if (itx->itx_sync) { 1979 list_insert_tail(&itxs->i_sync_list, itx); 1980 } else { 1981 avl_tree_t *t = &itxs->i_async_tree; 1982 uint64_t foid = 1983 LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid); 1984 itx_async_node_t *ian; 1985 avl_index_t where; 1986 1987 ian = avl_find(t, &foid, &where); 1988 if (ian == NULL) { 1989 ian = kmem_alloc(sizeof (itx_async_node_t), 1990 KM_SLEEP); 1991 list_create(&ian->ia_list, sizeof (itx_t), 1992 offsetof(itx_t, itx_node)); 1993 ian->ia_foid = foid; 1994 avl_insert(t, ian, where); 1995 } 1996 list_insert_tail(&ian->ia_list, itx); 1997 } 1998 1999 itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx); 2000 2001 /* 2002 * We don't want to dirty the ZIL using ZILTEST_TXG, because 2003 * zil_clean() will never be called using ZILTEST_TXG. Thus, we 2004 * need to be careful to always dirty the ZIL using the "real" 2005 * TXG (not itxg_txg) even when the SPA is frozen. 2006 */ 2007 zilog_dirty(zilog, dmu_tx_get_txg(tx)); 2008 mutex_exit(&itxg->itxg_lock); 2009 2010 /* Release the old itxs now we've dropped the lock */ 2011 if (clean != NULL) 2012 zil_itxg_clean(clean); 2013 } 2014 2015 /* 2016 * If there are any in-memory intent log transactions which have now been 2017 * synced then start up a taskq to free them. We should only do this after we 2018 * have written out the uberblocks (i.e. txg has been committed) so that 2019 * don't inadvertently clean out in-memory log records that would be required 2020 * by zil_commit(). 
2021 */ 2022 void 2023 zil_clean(zilog_t *zilog, uint64_t synced_txg) 2024 { 2025 itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK]; 2026 itxs_t *clean_me; 2027 2028 ASSERT3U(synced_txg, <, ZILTEST_TXG); 2029 2030 mutex_enter(&itxg->itxg_lock); 2031 if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) { 2032 mutex_exit(&itxg->itxg_lock); 2033 return; 2034 } 2035 ASSERT3U(itxg->itxg_txg, <=, synced_txg); 2036 ASSERT3U(itxg->itxg_txg, !=, 0); 2037 clean_me = itxg->itxg_itxs; 2038 itxg->itxg_itxs = NULL; 2039 itxg->itxg_txg = 0; 2040 mutex_exit(&itxg->itxg_lock); 2041 /* 2042 * Preferably start a task queue to free up the old itxs but 2043 * if taskq_dispatch can't allocate resources to do that then 2044 * free it in-line. This should be rare. Note, using TQ_SLEEP 2045 * created a bad performance problem. 2046 */ 2047 ASSERT3P(zilog->zl_dmu_pool, !=, NULL); 2048 ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL); 2049 taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq, 2050 (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP); 2051 if (id == TASKQID_INVALID) 2052 zil_itxg_clean(clean_me); 2053 } 2054 2055 /* 2056 * This function will traverse the queue of itxs that need to be 2057 * committed, and move them onto the ZIL's zl_itx_commit_list. 2058 */ 2059 static void 2060 zil_get_commit_list(zilog_t *zilog) 2061 { 2062 uint64_t otxg, txg; 2063 list_t *commit_list = &zilog->zl_itx_commit_list; 2064 2065 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2066 2067 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 2068 otxg = ZILTEST_TXG; 2069 else 2070 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 2071 2072 /* 2073 * This is inherently racy, since there is nothing to prevent 2074 * the last synced txg from changing. That's okay since we'll 2075 * only commit things in the future. 2076 */ 2077 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 2078 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2079 2080 mutex_enter(&itxg->itxg_lock); 2081 if (itxg->itxg_txg != txg) { 2082 mutex_exit(&itxg->itxg_lock); 2083 continue; 2084 } 2085 2086 /* 2087 * If we're adding itx records to the zl_itx_commit_list, 2088 * then the zil better be dirty in this "txg". We can assert 2089 * that here since we're holding the itxg_lock which will 2090 * prevent spa_sync from cleaning it. Once we add the itxs 2091 * to the zl_itx_commit_list we must commit it to disk even 2092 * if it's unnecessary (i.e. the txg was synced). 2093 */ 2094 ASSERT(zilog_is_dirty_in_txg(zilog, txg) || 2095 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX); 2096 list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list); 2097 2098 mutex_exit(&itxg->itxg_lock); 2099 } 2100 } 2101 2102 /* 2103 * Move the async itxs for a specified object to commit into sync lists. 2104 */ 2105 void 2106 zil_async_to_sync(zilog_t *zilog, uint64_t foid) 2107 { 2108 uint64_t otxg, txg; 2109 itx_async_node_t *ian; 2110 avl_tree_t *t; 2111 avl_index_t where; 2112 2113 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 2114 otxg = ZILTEST_TXG; 2115 else 2116 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 2117 2118 /* 2119 * This is inherently racy, since there is nothing to prevent 2120 * the last synced txg from changing. 
2121 */ 2122 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 2123 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2124 2125 mutex_enter(&itxg->itxg_lock); 2126 if (itxg->itxg_txg != txg) { 2127 mutex_exit(&itxg->itxg_lock); 2128 continue; 2129 } 2130 2131 /* 2132 * If a foid is specified then find that node and append its 2133 * list. Otherwise walk the tree appending all the lists 2134 * to the sync list. We add to the end rather than the 2135 * beginning to ensure the create has happened. 2136 */ 2137 t = &itxg->itxg_itxs->i_async_tree; 2138 if (foid != 0) { 2139 ian = avl_find(t, &foid, &where); 2140 if (ian != NULL) { 2141 list_move_tail(&itxg->itxg_itxs->i_sync_list, 2142 &ian->ia_list); 2143 } 2144 } else { 2145 void *cookie = NULL; 2146 2147 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 2148 list_move_tail(&itxg->itxg_itxs->i_sync_list, 2149 &ian->ia_list); 2150 list_destroy(&ian->ia_list); 2151 kmem_free(ian, sizeof (itx_async_node_t)); 2152 } 2153 } 2154 mutex_exit(&itxg->itxg_lock); 2155 } 2156 } 2157 2158 /* 2159 * This function will prune commit itxs that are at the head of the 2160 * commit list (it won't prune past the first non-commit itx), and 2161 * either: a) attach them to the last lwb that's still pending 2162 * completion, or b) skip them altogether. 2163 * 2164 * This is used as a performance optimization to prevent commit itxs 2165 * from generating new lwbs when it's unnecessary to do so. 2166 */ 2167 static void 2168 zil_prune_commit_list(zilog_t *zilog) 2169 { 2170 itx_t *itx; 2171 2172 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2173 2174 while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) { 2175 lr_t *lrc = &itx->itx_lr; 2176 if (lrc->lrc_txtype != TX_COMMIT) 2177 break; 2178 2179 mutex_enter(&zilog->zl_lock); 2180 2181 lwb_t *last_lwb = zilog->zl_last_lwb_opened; 2182 if (last_lwb == NULL || 2183 last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) { 2184 /* 2185 * All of the itxs this waiter was waiting on 2186 * must have already completed (or there were 2187 * never any itx's for it to wait on), so it's 2188 * safe to skip this waiter and mark it done. 2189 */ 2190 zil_commit_waiter_skip(itx->itx_private); 2191 } else { 2192 zil_commit_waiter_link_lwb(itx->itx_private, last_lwb); 2193 itx->itx_private = NULL; 2194 } 2195 2196 mutex_exit(&zilog->zl_lock); 2197 2198 list_remove(&zilog->zl_itx_commit_list, itx); 2199 zil_itx_destroy(itx); 2200 } 2201 2202 IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); 2203 } 2204 2205 static void 2206 zil_commit_writer_stall(zilog_t *zilog) 2207 { 2208 /* 2209 * When zio_alloc_zil() fails to allocate the next lwb block on 2210 * disk, we must call txg_wait_synced() to ensure all of the 2211 * lwbs in the zilog's zl_lwb_list are synced and then freed (in 2212 * zil_sync()), such that any subsequent ZIL writer (i.e. a call 2213 * to zil_process_commit_list()) will have to call zil_create(), 2214 * and start a new ZIL chain. 2215 * 2216 * Since zio_alloc_zil() failed, the lwb that was previously 2217 * issued does not have a pointer to the "next" lwb on disk. 2218 * Thus, if another ZIL writer thread was to allocate the "next" 2219 * on-disk lwb, that block could be leaked in the event of a 2220 * crash (because the previous lwb on-disk would not point to 2221 * it).
2222 * 2223 * We must hold the zilog's zl_issuer_lock while we do this, to 2224 * ensure no new threads enter zil_process_commit_list() until 2225 * all lwb's in the zl_lwb_list have been synced and freed 2226 * (which is achieved via the txg_wait_synced() call). 2227 */ 2228 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2229 txg_wait_synced(zilog->zl_dmu_pool, 0); 2230 ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL); 2231 } 2232 2233 /* 2234 * This function will traverse the commit list, creating new lwbs as 2235 * needed, and committing the itxs from the commit list to these newly 2236 * created lwbs. Additionally, as a new lwb is created, the previous 2237 * lwb will be issued to the zio layer to be written to disk. 2238 */ 2239 static void 2240 zil_process_commit_list(zilog_t *zilog) 2241 { 2242 spa_t *spa = zilog->zl_spa; 2243 list_t nolwb_itxs; 2244 list_t nolwb_waiters; 2245 lwb_t *lwb; 2246 itx_t *itx; 2247 2248 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2249 2250 /* 2251 * Return if there's nothing to commit before we dirty the fs by 2252 * calling zil_create(). 2253 */ 2254 if (list_head(&zilog->zl_itx_commit_list) == NULL) 2255 return; 2256 2257 list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node)); 2258 list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t), 2259 offsetof(zil_commit_waiter_t, zcw_node)); 2260 2261 lwb = list_tail(&zilog->zl_lwb_list); 2262 if (lwb == NULL) { 2263 lwb = zil_create(zilog); 2264 } else { 2265 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 2266 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE); 2267 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); 2268 } 2269 2270 while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) { 2271 lr_t *lrc = &itx->itx_lr; 2272 uint64_t txg = lrc->lrc_txg; 2273 2274 ASSERT3U(txg, !=, 0); 2275 2276 if (lrc->lrc_txtype == TX_COMMIT) { 2277 DTRACE_PROBE2(zil__process__commit__itx, 2278 zilog_t *, zilog, itx_t *, itx); 2279 } else { 2280 DTRACE_PROBE2(zil__process__normal__itx, 2281 zilog_t *, zilog, itx_t *, itx); 2282 } 2283 2284 list_remove(&zilog->zl_itx_commit_list, itx); 2285 2286 boolean_t synced = txg <= spa_last_synced_txg(spa); 2287 boolean_t frozen = txg > spa_freeze_txg(spa); 2288 2289 /* 2290 * If the txg of this itx has already been synced out, then 2291 * we don't need to commit this itx to an lwb. This is 2292 * because the data of this itx will have already been 2293 * written to the main pool. This is inherently racy, and 2294 * it's still ok to commit an itx whose txg has already 2295 * been synced; this will result in a write that's 2296 * unnecessary, but will do no harm. 2297 * 2298 * With that said, we always want to commit TX_COMMIT itxs 2299 * to an lwb, regardless of whether or not that itx's txg 2300 * has been synced out. We do this to ensure any OPENED lwb 2301 * will always have at least one zil_commit_waiter_t linked 2302 * to the lwb. 2303 * 2304 * As a counter-example, if we skipped TX_COMMIT itx's 2305 * whose txg had already been synced, the following 2306 * situation could occur if we happened to be racing with 2307 * spa_sync: 2308 * 2309 * 1. We commit a non-TX_COMMIT itx to an lwb, where the 2310 * itx's txg is 10 and the last synced txg is 9. 2311 * 2. spa_sync finishes syncing out txg 10. 2312 * 3. We move to the next itx in the list, it's a TX_COMMIT 2313 * whose txg is 10, so we skip it rather than committing 2314 * it to the lwb used in (1). 
2315 * 2316 * If the itx that is skipped in (3) is the last TX_COMMIT 2317 * itx in the commit list, than it's possible for the lwb 2318 * used in (1) to remain in the OPENED state indefinitely. 2319 * 2320 * To prevent the above scenario from occurring, ensuring 2321 * that once an lwb is OPENED it will transition to ISSUED 2322 * and eventually DONE, we always commit TX_COMMIT itx's to 2323 * an lwb here, even if that itx's txg has already been 2324 * synced. 2325 * 2326 * Finally, if the pool is frozen, we _always_ commit the 2327 * itx. The point of freezing the pool is to prevent data 2328 * from being written to the main pool via spa_sync, and 2329 * instead rely solely on the ZIL to persistently store the 2330 * data; i.e. when the pool is frozen, the last synced txg 2331 * value can't be trusted. 2332 */ 2333 if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) { 2334 if (lwb != NULL) { 2335 lwb = zil_lwb_commit(zilog, itx, lwb); 2336 2337 if (lwb == NULL) 2338 list_insert_tail(&nolwb_itxs, itx); 2339 else 2340 list_insert_tail(&lwb->lwb_itxs, itx); 2341 } else { 2342 if (lrc->lrc_txtype == TX_COMMIT) { 2343 zil_commit_waiter_link_nolwb( 2344 itx->itx_private, &nolwb_waiters); 2345 } 2346 2347 list_insert_tail(&nolwb_itxs, itx); 2348 } 2349 } else { 2350 ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT); 2351 zil_itx_destroy(itx); 2352 } 2353 } 2354 2355 if (lwb == NULL) { 2356 /* 2357 * This indicates zio_alloc_zil() failed to allocate the 2358 * "next" lwb on-disk. When this happens, we must stall 2359 * the ZIL write pipeline; see the comment within 2360 * zil_commit_writer_stall() for more details. 2361 */ 2362 zil_commit_writer_stall(zilog); 2363 2364 /* 2365 * Additionally, we have to signal and mark the "nolwb" 2366 * waiters as "done" here, since without an lwb, we 2367 * can't do this via zil_lwb_flush_vdevs_done() like 2368 * normal. 2369 */ 2370 zil_commit_waiter_t *zcw; 2371 while ((zcw = list_head(&nolwb_waiters)) != NULL) { 2372 zil_commit_waiter_skip(zcw); 2373 list_remove(&nolwb_waiters, zcw); 2374 } 2375 2376 /* 2377 * And finally, we have to destroy the itx's that 2378 * couldn't be committed to an lwb; this will also call 2379 * the itx's callback if one exists for the itx. 2380 */ 2381 while ((itx = list_head(&nolwb_itxs)) != NULL) { 2382 list_remove(&nolwb_itxs, itx); 2383 zil_itx_destroy(itx); 2384 } 2385 } else { 2386 ASSERT(list_is_empty(&nolwb_waiters)); 2387 ASSERT3P(lwb, !=, NULL); 2388 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 2389 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE); 2390 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); 2391 2392 /* 2393 * At this point, the ZIL block pointed at by the "lwb" 2394 * variable is in one of the following states: "closed" 2395 * or "open". 2396 * 2397 * If it's "closed", then no itxs have been committed to 2398 * it, so there's no point in issuing its zio (i.e. it's 2399 * "empty"). 2400 * 2401 * If it's "open", then it contains one or more itxs that 2402 * eventually need to be committed to stable storage. In 2403 * this case we intentionally do not issue the lwb's zio 2404 * to disk yet, and instead rely on one of the following 2405 * two mechanisms for issuing the zio: 2406 * 2407 * 1. Ideally, there will be more ZIL activity occurring 2408 * on the system, such that this function will be 2409 * immediately called again (not necessarily by the same 2410 * thread) and this lwb's zio will be issued via 2411 * zil_lwb_commit(). 
This way, the lwb is guaranteed to 2412 * be "full" when it is issued to disk, and we'll make 2413 * use of the lwb's size the best we can. 2414 * 2415 * 2. If there isn't sufficient ZIL activity occurring on 2416 * the system, such that this lwb's zio isn't issued via 2417 * zil_lwb_commit(), zil_commit_waiter() will issue the 2418 * lwb's zio. If this occurs, the lwb is not guaranteed 2419 * to be "full" by the time its zio is issued, which means 2420 * the size of the lwb was "too large" given the amount 2421 * of ZIL activity occurring on the system at that time. 2422 * 2423 * We do this for a couple of reasons: 2424 * 2425 * 1. To try and reduce the number of IOPs needed to 2426 * write the same number of itxs. If an lwb has space 2427 * available in its buffer for more itxs, and more itxs 2428 * will be committed relatively soon (relative to the 2429 * latency of performing a write), then it's beneficial 2430 * to wait for these "next" itxs. This way, more itxs 2431 * can be committed to stable storage with fewer writes. 2432 * 2433 * 2. To try and use the largest lwb block size that the 2434 * incoming rate of itxs can support. Again, this is to 2435 * try and pack as many itxs into as few lwbs as 2436 * possible, without significantly impacting the latency 2437 * of each individual itx. 2438 */ 2439 } 2440 } 2441 2442 /* 2443 * This function is responsible for ensuring the passed in commit waiter 2444 * (and associated commit itx) is committed to an lwb. If the waiter is 2445 * not already committed to an lwb, all itxs in the zilog's queue of 2446 * itxs will be processed. The assumption is that the passed in waiter's 2447 * commit itx will be found in the queue just like the other non-commit 2448 * itxs, such that when the entire queue is processed, the waiter will 2449 * have been committed to an lwb. 2450 * 2451 * The lwb associated with the passed in waiter is not guaranteed to 2452 * have been issued by the time this function completes. If the lwb is 2453 * not issued, we rely on future calls to zil_commit_writer() to issue 2454 * the lwb, or the timeout mechanism found in zil_commit_waiter(). 2455 */ 2456 static void 2457 zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw) 2458 { 2459 ASSERT(!MUTEX_HELD(&zilog->zl_lock)); 2460 ASSERT(spa_writeable(zilog->zl_spa)); 2461 2462 mutex_enter(&zilog->zl_issuer_lock); 2463 2464 if (zcw->zcw_lwb != NULL || zcw->zcw_done) { 2465 /* 2466 * It's possible that, while we were waiting to acquire 2467 * the "zl_issuer_lock", another thread committed this 2468 * waiter to an lwb. If that occurs, we bail out early, 2469 * without processing any of the zilog's queue of itxs. 2470 * 2471 * On certain workloads and system configurations, the 2472 * "zl_issuer_lock" can become highly contended. In an 2473 * attempt to reduce this contention, we immediately drop 2474 * the lock if the waiter has already been processed. 2475 * 2476 * We've measured this optimization to reduce CPU spent 2477 * contending on this lock by up to 5%, using a system 2478 * with 32 CPUs, low latency storage (~50 usec writes), 2479 * and 1024 threads performing sync writes.
2480 */ 2481 goto out; 2482 } 2483 2484 ZIL_STAT_BUMP(zil_commit_writer_count); 2485 2486 zil_get_commit_list(zilog); 2487 zil_prune_commit_list(zilog); 2488 zil_process_commit_list(zilog); 2489 2490 out: 2491 mutex_exit(&zilog->zl_issuer_lock); 2492 } 2493 2494 static void 2495 zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw) 2496 { 2497 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); 2498 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 2499 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 2500 2501 lwb_t *lwb = zcw->zcw_lwb; 2502 ASSERT3P(lwb, !=, NULL); 2503 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED); 2504 2505 /* 2506 * If the lwb has already been issued by another thread, we can 2507 * immediately return since there's no work to be done (the 2508 * point of this function is to issue the lwb). Additionally, we 2509 * do this prior to acquiring the zl_issuer_lock, to avoid 2510 * acquiring it when it's not necessary to do so. 2511 */ 2512 if (lwb->lwb_state == LWB_STATE_ISSUED || 2513 lwb->lwb_state == LWB_STATE_WRITE_DONE || 2514 lwb->lwb_state == LWB_STATE_FLUSH_DONE) 2515 return; 2516 2517 /* 2518 * In order to call zil_lwb_write_issue() we must hold the 2519 * zilog's "zl_issuer_lock". We can't simply acquire that lock, 2520 * since we're already holding the commit waiter's "zcw_lock", 2521 * and those two locks are acquired in the opposite order 2522 * elsewhere. 2523 */ 2524 mutex_exit(&zcw->zcw_lock); 2525 mutex_enter(&zilog->zl_issuer_lock); 2526 mutex_enter(&zcw->zcw_lock); 2527 2528 /* 2529 * Since we just dropped and re-acquired the commit waiter's 2530 * lock, we have to re-check to see if the waiter was marked 2531 * "done" during that process. If the waiter was marked "done", 2532 * the "lwb" pointer is no longer valid (it can be free'd after 2533 * the waiter is marked "done"), so without this check we could 2534 * wind up with a use-after-free error below. 2535 */ 2536 if (zcw->zcw_done) 2537 goto out; 2538 2539 ASSERT3P(lwb, ==, zcw->zcw_lwb); 2540 2541 /* 2542 * We've already checked this above, but since we hadn't acquired 2543 * the zilog's zl_issuer_lock, we have to perform this check a 2544 * second time while holding the lock. 2545 * 2546 * We don't need to hold the zl_lock since the lwb cannot transition 2547 * from OPENED to ISSUED while we hold the zl_issuer_lock. The lwb 2548 * _can_ transition from ISSUED to DONE, but it's OK to race with 2549 * that transition since we treat the lwb the same, whether it's in 2550 * the ISSUED or DONE states. 2551 * 2552 * The important thing, is we treat the lwb differently depending on 2553 * if it's ISSUED or OPENED, and block any other threads that might 2554 * attempt to issue this lwb. For that reason we hold the 2555 * zl_issuer_lock when checking the lwb_state; we must not call 2556 * zil_lwb_write_issue() if the lwb had already been issued. 2557 * 2558 * See the comment above the lwb_state_t structure definition for 2559 * more details on the lwb states, and locking requirements. 2560 */ 2561 if (lwb->lwb_state == LWB_STATE_ISSUED || 2562 lwb->lwb_state == LWB_STATE_WRITE_DONE || 2563 lwb->lwb_state == LWB_STATE_FLUSH_DONE) 2564 goto out; 2565 2566 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 2567 2568 /* 2569 * As described in the comments above zil_commit_waiter() and 2570 * zil_process_commit_list(), we need to issue this lwb's zio 2571 * since we've reached the commit waiter's timeout and it still 2572 * hasn't been issued. 
2573 */ 2574 lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb); 2575 2576 IMPLY(nlwb != NULL, lwb->lwb_state != LWB_STATE_OPENED); 2577 2578 /* 2579 * Since the lwb's zio hadn't been issued by the time this thread 2580 * reached its timeout, we reset the zilog's "zl_cur_used" field 2581 * to influence the zil block size selection algorithm. 2582 * 2583 * By having to issue the lwb's zio here, it means the size of the 2584 * lwb was too large, given the incoming throughput of itxs. By 2585 * setting "zl_cur_used" to zero, we communicate this fact to the 2586 * block size selection algorithm, so it can take this information 2587 * into account, and potentially select a smaller size for the 2588 * next lwb block that is allocated. 2589 */ 2590 zilog->zl_cur_used = 0; 2591 2592 if (nlwb == NULL) { 2593 /* 2594 * When zil_lwb_write_issue() returns NULL, this 2595 * indicates zio_alloc_zil() failed to allocate the 2596 * "next" lwb on-disk. When this occurs, the ZIL write 2597 * pipeline must be stalled; see the comment within the 2598 * zil_commit_writer_stall() function for more details. 2599 * 2600 * We must drop the commit waiter's lock prior to 2601 * calling zil_commit_writer_stall() or else we can wind 2602 * up with the following deadlock: 2603 * 2604 * - This thread is waiting for the txg to sync while 2605 * holding the waiter's lock; txg_wait_synced() is 2606 * used within zil_commit_writer_stall(). 2607 * 2608 * - The txg can't sync because it is waiting for this 2609 * lwb's zio callback to call dmu_tx_commit(). 2610 * 2611 * - The lwb's zio callback can't call dmu_tx_commit() 2612 * because it's blocked trying to acquire the waiter's 2613 * lock, which occurs prior to calling dmu_tx_commit(). 2614 */ 2615 mutex_exit(&zcw->zcw_lock); 2616 zil_commit_writer_stall(zilog); 2617 mutex_enter(&zcw->zcw_lock); 2618 } 2619 2620 out: 2621 mutex_exit(&zilog->zl_issuer_lock); 2622 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 2623 } 2624 2625 /* 2626 * This function is responsible for performing the following two tasks: 2627 * 2628 * 1. its primary responsibility is to block until the given "commit 2629 * waiter" is considered "done". 2630 * 2631 * 2. its secondary responsibility is to issue the zio for the lwb that 2632 * the given "commit waiter" is waiting on, if this function has 2633 * waited "long enough" and the lwb is still in the "open" state. 2634 * 2635 * Given a sufficient amount of itxs being generated and written using 2636 * the ZIL, the lwb's zio will be issued via the zil_lwb_commit() 2637 * function. If this does not occur, this secondary responsibility will 2638 * ensure the lwb is issued even if there is no other synchronous 2639 * activity on the system. 2640 * 2641 * For more details, see zil_process_commit_list(); more specifically, 2642 * the comment at the bottom of that function. 2643 */ 2644 static void 2645 zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw) 2646 { 2647 ASSERT(!MUTEX_HELD(&zilog->zl_lock)); 2648 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); 2649 ASSERT(spa_writeable(zilog->zl_spa)); 2650 2651 mutex_enter(&zcw->zcw_lock); 2652 2653 /* 2654 * The timeout is scaled based on the lwb latency to avoid 2655 * significantly impacting the latency of each individual itx. 2656 * For more details, see the comment at the bottom of the 2657 * zil_process_commit_list() function.
2658 */ 2659 int pct = MAX(zfs_commit_timeout_pct, 1); 2660 hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100; 2661 hrtime_t wakeup = gethrtime() + sleep; 2662 boolean_t timedout = B_FALSE; 2663 2664 while (!zcw->zcw_done) { 2665 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 2666 2667 lwb_t *lwb = zcw->zcw_lwb; 2668 2669 /* 2670 * Usually, the waiter will have a non-NULL lwb field here, 2671 * but it's possible for it to be NULL as a result of 2672 * zil_commit() racing with spa_sync(). 2673 * 2674 * When zil_clean() is called, it's possible for the itxg 2675 * list (which may be cleaned via a taskq) to contain 2676 * commit itxs. When this occurs, the commit waiters linked 2677 * off of these commit itxs will not be committed to an 2678 * lwb. Additionally, these commit waiters will not be 2679 * marked done until zil_commit_waiter_skip() is called via 2680 * zil_itxg_clean(). 2681 * 2682 * Thus, it's possible for this commit waiter (i.e. the 2683 * "zcw" variable) to be found in this "in between" state, 2684 * where its "zcw_lwb" field is NULL, and it hasn't yet 2685 * been skipped, so its "zcw_done" field is still B_FALSE. 2686 */ 2687 IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED); 2688 2689 if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) { 2690 ASSERT3B(timedout, ==, B_FALSE); 2691 2692 /* 2693 * If the lwb hasn't been issued yet, then we 2694 * need to wait with a timeout, in case this 2695 * function needs to issue the lwb after the 2696 * timeout is reached; responsibility (2) from 2697 * the comment above this function. 2698 */ 2699 int rc = cv_timedwait_hires(&zcw->zcw_cv, 2700 &zcw->zcw_lock, wakeup, USEC2NSEC(1), 2701 CALLOUT_FLAG_ABSOLUTE); 2702 2703 if (rc != -1 || zcw->zcw_done) 2704 continue; 2705 2706 timedout = B_TRUE; 2707 zil_commit_waiter_timeout(zilog, zcw); 2708 2709 if (!zcw->zcw_done) { 2710 /* 2711 * If the commit waiter has already been 2712 * marked "done", it's possible for the 2713 * waiter's lwb structure to have already 2714 * been freed. Thus, we can only reliably 2715 * make these assertions if the waiter 2716 * isn't done. 2717 */ 2718 ASSERT3P(lwb, ==, zcw->zcw_lwb); 2719 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED); 2720 } 2721 } else { 2722 /* 2723 * If the lwb isn't open, then it must have already 2724 * been issued. In that case, there's no need to 2725 * use a timeout when waiting for the lwb to 2726 * complete. 2727 * 2728 * Additionally, if the lwb is NULL, the waiter 2729 * will soon be signaled and marked done via 2730 * zil_clean() and zil_itxg_clean(), so no timeout 2731 * is required.
2732 */ 2733 2734 IMPLY(lwb != NULL, 2735 lwb->lwb_state == LWB_STATE_ISSUED || 2736 lwb->lwb_state == LWB_STATE_WRITE_DONE || 2737 lwb->lwb_state == LWB_STATE_FLUSH_DONE); 2738 cv_wait(&zcw->zcw_cv, &zcw->zcw_lock); 2739 } 2740 } 2741 2742 mutex_exit(&zcw->zcw_lock); 2743 } 2744 2745 static zil_commit_waiter_t * 2746 zil_alloc_commit_waiter(void) 2747 { 2748 zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP); 2749 2750 cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL); 2751 mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL); 2752 list_link_init(&zcw->zcw_node); 2753 zcw->zcw_lwb = NULL; 2754 zcw->zcw_done = B_FALSE; 2755 zcw->zcw_zio_error = 0; 2756 2757 return (zcw); 2758 } 2759 2760 static void 2761 zil_free_commit_waiter(zil_commit_waiter_t *zcw) 2762 { 2763 ASSERT(!list_link_active(&zcw->zcw_node)); 2764 ASSERT3P(zcw->zcw_lwb, ==, NULL); 2765 ASSERT3B(zcw->zcw_done, ==, B_TRUE); 2766 mutex_destroy(&zcw->zcw_lock); 2767 cv_destroy(&zcw->zcw_cv); 2768 kmem_cache_free(zil_zcw_cache, zcw); 2769 } 2770 2771 /* 2772 * This function is used to create a TX_COMMIT itx and assign it. This 2773 * way, it will be linked into the ZIL's list of synchronous itxs, and 2774 * then later committed to an lwb (or skipped) when 2775 * zil_process_commit_list() is called. 2776 */ 2777 static void 2778 zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw) 2779 { 2780 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); 2781 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 2782 2783 itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t)); 2784 itx->itx_sync = B_TRUE; 2785 itx->itx_private = zcw; 2786 2787 zil_itx_assign(zilog, itx, tx); 2788 2789 dmu_tx_commit(tx); 2790 } 2791 2792 /* 2793 * Commit ZFS Intent Log transactions (itxs) to stable storage. 2794 * 2795 * When writing ZIL transactions to the on-disk representation of the 2796 * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple 2797 * itxs can be committed to a single lwb. Once a lwb is written and 2798 * committed to stable storage (i.e. the lwb is written, and vdevs have 2799 * been flushed), each itx that was committed to that lwb is also 2800 * considered to be committed to stable storage. 2801 * 2802 * When an itx is committed to an lwb, the log record (lr_t) contained 2803 * by the itx is copied into the lwb's zio buffer, and once this buffer 2804 * is written to disk, it becomes an on-disk ZIL block. 2805 * 2806 * As itxs are generated, they're inserted into the ZIL's queue of 2807 * uncommitted itxs. The semantics of zil_commit() are such that it will 2808 * block until all itxs that were in the queue when it was called, are 2809 * committed to stable storage. 2810 * 2811 * If "foid" is zero, this means all "synchronous" and "asynchronous" 2812 * itxs, for all objects in the dataset, will be committed to stable 2813 * storage prior to zil_commit() returning. If "foid" is non-zero, all 2814 * "synchronous" itxs for all objects, but only "asynchronous" itxs 2815 * that correspond to the foid passed in, will be committed to stable 2816 * storage prior to zil_commit() returning. 2817 * 2818 * Generally speaking, when zil_commit() is called, the consumer doesn't 2819 * actually care about _all_ of the uncommitted itxs. Instead, they're 2820 * simply trying to wait for a specific itx to be committed to disk, 2821 * but the interface(s) for interacting with the ZIL don't allow such 2822 * fine-grained communication.
A better interface would allow a consumer 2823 * to create and assign an itx, and then pass a reference to this itx to 2824 * zil_commit(), such that zil_commit() would return as soon as that 2825 * specific itx was committed to disk (instead of waiting for _all_ 2826 * itxs to be committed). 2827 * 2828 * When a thread calls zil_commit() a special "commit itx" will be 2829 * generated, along with a corresponding "waiter" for this commit itx. 2830 * zil_commit() will wait on this waiter's CV, such that when the waiter 2831 * is marked done, and signaled, zil_commit() will return. 2832 * 2833 * This commit itx is inserted into the queue of uncommitted itxs. This 2834 * provides an easy mechanism for determining which itxs were in the 2835 * queue prior to zil_commit() having been called, and which itxs were 2836 * added after zil_commit() was called. 2837 * 2838 * The commit itx is special; it doesn't have any on-disk representation. 2839 * When a commit itx is "committed" to an lwb, the waiter associated 2840 * with it is linked onto the lwb's list of waiters. Then, when that lwb 2841 * completes, each waiter on the lwb's list is marked done and signaled 2842 * -- allowing the thread waiting on the waiter to return from zil_commit(). 2843 * 2844 * It's important to point out a few critical factors that allow us 2845 * to make use of the commit itxs, commit waiters, per-lwb lists of 2846 * commit waiters, and zio completion callbacks like we're doing: 2847 * 2848 * 1. The list of waiters for each lwb is traversed, and each commit 2849 * waiter is marked "done" and signaled, in the zio completion 2850 * callback of the lwb's zio[*]. 2851 * 2852 * * Actually, the waiters are signaled in the zio completion 2853 * callback of the root zio for the DKIOCFLUSHWRITECACHE commands 2854 * that are sent to the vdevs upon completion of the lwb zio. 2855 * 2856 * 2. When the itxs are inserted into the ZIL's queue of uncommitted 2857 * itxs, the order in which they are inserted is preserved[*]; as 2858 * itxs are added to the queue, they are added to the tail of 2859 * in-memory linked lists. 2860 * 2861 * When committing the itxs to lwbs (to be written to disk), they 2862 * are committed in the same order in which the itxs were added to 2863 * the uncommitted queue's linked list(s); i.e. the linked list of 2864 * itxs to commit is traversed from head to tail, and each itx is 2865 * committed to an lwb in that order. 2866 * 2867 * * To clarify: 2868 * 2869 * - the order of "sync" itxs is preserved w.r.t. other 2870 * "sync" itxs, regardless of the corresponding objects. 2871 * - the order of "async" itxs is preserved w.r.t. other 2872 * "async" itxs corresponding to the same object. 2873 * - the order of "async" itxs is *not* preserved w.r.t. other 2874 * "async" itxs corresponding to different objects. 2875 * - the order of "sync" itxs w.r.t. "async" itxs (or vice 2876 * versa) is *not* preserved, even for itxs that correspond 2877 * to the same object. 2878 * 2879 * For more details, see: zil_itx_assign(), zil_async_to_sync(), 2880 * zil_get_commit_list(), and zil_process_commit_list(). 2881 * 2882 * 3. The lwbs represent a linked list of blocks on disk. Thus, any 2883 * lwb cannot be considered committed to stable storage, until its 2884 * "previous" lwb is also committed to stable storage. This fact, 2885 * coupled with the fact described above, means that itxs are 2886 * committed in (roughly) the order in which they were generated. 2887 * This is essential because itxs are dependent on prior itxs.
2888 * Thus, we *must not* deem an itx as being committed to stable 2889 * storage, until *all* prior itxs have also been committed to 2890 * stable storage. 2891 * 2892 * To enforce this ordering of lwb zio's, while still leveraging as 2893 * much of the underlying storage performance as possible, we rely 2894 * on two fundamental concepts: 2895 * 2896 * 1. The creation and issuance of lwb zio's is protected by 2897 * the zilog's "zl_issuer_lock", which ensures only a single 2898 * thread is creating and/or issuing lwb's at a time 2899 * 2. The "previous" lwb is a child of the "current" lwb 2900 * (leveraging the zio parent-child dependency graph) 2901 * 2902 * By relying on this parent-child zio relationship, we can have 2903 * many lwb zio's concurrently issued to the underlying storage, 2904 * but the order in which they complete will be the same order in 2905 * which they were created. 2906 */ 2907 void 2908 zil_commit(zilog_t *zilog, uint64_t foid) 2909 { 2910 /* 2911 * We should never attempt to call zil_commit on a snapshot for 2912 * a couple of reasons: 2913 * 2914 * 1. A snapshot may never be modified, thus it cannot have any 2915 * in-flight itxs that would have modified the dataset. 2916 * 2917 * 2. By design, when zil_commit() is called, a commit itx will 2918 * be assigned to this zilog; as a result, the zilog will be 2919 * dirtied. We must not dirty the zilog of a snapshot; there's 2920 * checks in the code that enforce this invariant, and will 2921 * cause a panic if it's not upheld. 2922 */ 2923 ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE); 2924 2925 if (zilog->zl_sync == ZFS_SYNC_DISABLED) 2926 return; 2927 2928 if (!spa_writeable(zilog->zl_spa)) { 2929 /* 2930 * If the SPA is not writable, there should never be any 2931 * pending itxs waiting to be committed to disk. If that 2932 * weren't true, we'd skip writing those itxs out, and 2933 * would break the semantics of zil_commit(); thus, we're 2934 * verifying that truth before we return to the caller. 2935 */ 2936 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 2937 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); 2938 for (int i = 0; i < TXG_SIZE; i++) 2939 ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL); 2940 return; 2941 } 2942 2943 /* 2944 * If the ZIL is suspended, we don't want to dirty it by calling 2945 * zil_commit_itx_assign() below, nor can we write out 2946 * lwbs like would be done in zil_commit_write(). Thus, we 2947 * simply rely on txg_wait_synced() to maintain the necessary 2948 * semantics, and avoid calling those functions altogether. 2949 */ 2950 if (zilog->zl_suspend > 0) { 2951 txg_wait_synced(zilog->zl_dmu_pool, 0); 2952 return; 2953 } 2954 2955 zil_commit_impl(zilog, foid); 2956 } 2957 2958 void 2959 zil_commit_impl(zilog_t *zilog, uint64_t foid) 2960 { 2961 ZIL_STAT_BUMP(zil_commit_count); 2962 2963 /* 2964 * Move the "async" itxs for the specified foid to the "sync" 2965 * queues, such that they will be later committed (or skipped) 2966 * to an lwb when zil_process_commit_list() is called. 2967 * 2968 * Since these "async" itxs must be committed prior to this 2969 * call to zil_commit returning, we must perform this operation 2970 * before we call zil_commit_itx_assign(). 2971 */ 2972 zil_async_to_sync(zilog, foid); 2973 2974 /* 2975 * We allocate a new "waiter" structure which will initially be 2976 * linked to the commit itx using the itx's "itx_private" field. 
2977 * Since the commit itx doesn't represent any on-disk state, 2978 * when it's committed to an lwb, rather than copying the its 2979 * lr_t into the lwb's buffer, the commit itx's "waiter" will be 2980 * added to the lwb's list of waiters. Then, when the lwb is 2981 * committed to stable storage, each waiter in the lwb's list of 2982 * waiters will be marked "done", and signalled. 2983 * 2984 * We must create the waiter and assign the commit itx prior to 2985 * calling zil_commit_writer(), or else our specific commit itx 2986 * is not guaranteed to be committed to an lwb prior to calling 2987 * zil_commit_waiter(). 2988 */ 2989 zil_commit_waiter_t *zcw = zil_alloc_commit_waiter(); 2990 zil_commit_itx_assign(zilog, zcw); 2991 2992 zil_commit_writer(zilog, zcw); 2993 zil_commit_waiter(zilog, zcw); 2994 2995 if (zcw->zcw_zio_error != 0) { 2996 /* 2997 * If there was an error writing out the ZIL blocks that 2998 * this thread is waiting on, then we fallback to 2999 * relying on spa_sync() to write out the data this 3000 * thread is waiting on. Obviously this has performance 3001 * implications, but the expectation is for this to be 3002 * an exceptional case, and shouldn't occur often. 3003 */ 3004 DTRACE_PROBE2(zil__commit__io__error, 3005 zilog_t *, zilog, zil_commit_waiter_t *, zcw); 3006 txg_wait_synced(zilog->zl_dmu_pool, 0); 3007 } 3008 3009 zil_free_commit_waiter(zcw); 3010 } 3011 3012 /* 3013 * Called in syncing context to free committed log blocks and update log header. 3014 */ 3015 void 3016 zil_sync(zilog_t *zilog, dmu_tx_t *tx) 3017 { 3018 zil_header_t *zh = zil_header_in_syncing_context(zilog); 3019 uint64_t txg = dmu_tx_get_txg(tx); 3020 spa_t *spa = zilog->zl_spa; 3021 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK]; 3022 lwb_t *lwb; 3023 3024 /* 3025 * We don't zero out zl_destroy_txg, so make sure we don't try 3026 * to destroy it twice. 3027 */ 3028 if (spa_sync_pass(spa) != 1) 3029 return; 3030 3031 mutex_enter(&zilog->zl_lock); 3032 3033 ASSERT(zilog->zl_stop_sync == 0); 3034 3035 if (*replayed_seq != 0) { 3036 ASSERT(zh->zh_replay_seq < *replayed_seq); 3037 zh->zh_replay_seq = *replayed_seq; 3038 *replayed_seq = 0; 3039 } 3040 3041 if (zilog->zl_destroy_txg == txg) { 3042 blkptr_t blk = zh->zh_log; 3043 3044 ASSERT(list_head(&zilog->zl_lwb_list) == NULL); 3045 3046 bzero(zh, sizeof (zil_header_t)); 3047 bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq)); 3048 3049 if (zilog->zl_keep_first) { 3050 /* 3051 * If this block was part of log chain that couldn't 3052 * be claimed because a device was missing during 3053 * zil_claim(), but that device later returns, 3054 * then this block could erroneously appear valid. 3055 * To guard against this, assign a new GUID to the new 3056 * log chain so it doesn't matter what blk points to. 3057 */ 3058 zil_init_log_chain(zilog, &blk); 3059 zh->zh_log = blk; 3060 } 3061 } 3062 3063 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 3064 zh->zh_log = lwb->lwb_blk; 3065 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg) 3066 break; 3067 list_remove(&zilog->zl_lwb_list, lwb); 3068 zio_free(spa, txg, &lwb->lwb_blk); 3069 zil_free_lwb(zilog, lwb); 3070 3071 /* 3072 * If we don't have anything left in the lwb list then 3073 * we've had an allocation failure and we need to zero 3074 * out the zil_header blkptr so that we don't end 3075 * up freeing the same block twice. 
3076 */ 3077 if (list_head(&zilog->zl_lwb_list) == NULL) 3078 BP_ZERO(&zh->zh_log); 3079 } 3080 3081 /* 3082 * Remove fastwrite on any blocks that have been pre-allocated for 3083 * the next commit. This prevents fastwrite counter pollution by 3084 * unused, long-lived LWBs. 3085 */ 3086 for (; lwb != NULL; lwb = list_next(&zilog->zl_lwb_list, lwb)) { 3087 if (lwb->lwb_fastwrite && !lwb->lwb_write_zio) { 3088 metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk); 3089 lwb->lwb_fastwrite = 0; 3090 } 3091 } 3092 3093 mutex_exit(&zilog->zl_lock); 3094 } 3095 3096 /* ARGSUSED */ 3097 static int 3098 zil_lwb_cons(void *vbuf, void *unused, int kmflag) 3099 { 3100 lwb_t *lwb = vbuf; 3101 list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node)); 3102 list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t), 3103 offsetof(zil_commit_waiter_t, zcw_node)); 3104 avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare, 3105 sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); 3106 mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 3107 return (0); 3108 } 3109 3110 /* ARGSUSED */ 3111 static void 3112 zil_lwb_dest(void *vbuf, void *unused) 3113 { 3114 lwb_t *lwb = vbuf; 3115 mutex_destroy(&lwb->lwb_vdev_lock); 3116 avl_destroy(&lwb->lwb_vdev_tree); 3117 list_destroy(&lwb->lwb_waiters); 3118 list_destroy(&lwb->lwb_itxs); 3119 } 3120 3121 void 3122 zil_init(void) 3123 { 3124 zil_lwb_cache = kmem_cache_create("zil_lwb_cache", 3125 sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0); 3126 3127 zil_zcw_cache = kmem_cache_create("zil_zcw_cache", 3128 sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 3129 3130 zil_ksp = kstat_create("zfs", 0, "zil", "misc", 3131 KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t), 3132 KSTAT_FLAG_VIRTUAL); 3133 3134 if (zil_ksp != NULL) { 3135 zil_ksp->ks_data = &zil_stats; 3136 kstat_install(zil_ksp); 3137 } 3138 } 3139 3140 void 3141 zil_fini(void) 3142 { 3143 kmem_cache_destroy(zil_zcw_cache); 3144 kmem_cache_destroy(zil_lwb_cache); 3145 3146 if (zil_ksp != NULL) { 3147 kstat_delete(zil_ksp); 3148 zil_ksp = NULL; 3149 } 3150 } 3151 3152 void 3153 zil_set_sync(zilog_t *zilog, uint64_t sync) 3154 { 3155 zilog->zl_sync = sync; 3156 } 3157 3158 void 3159 zil_set_logbias(zilog_t *zilog, uint64_t logbias) 3160 { 3161 zilog->zl_logbias = logbias; 3162 } 3163 3164 zilog_t * 3165 zil_alloc(objset_t *os, zil_header_t *zh_phys) 3166 { 3167 zilog_t *zilog; 3168 3169 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); 3170 3171 zilog->zl_header = zh_phys; 3172 zilog->zl_os = os; 3173 zilog->zl_spa = dmu_objset_spa(os); 3174 zilog->zl_dmu_pool = dmu_objset_pool(os); 3175 zilog->zl_destroy_txg = TXG_INITIAL - 1; 3176 zilog->zl_logbias = dmu_objset_logbias(os); 3177 zilog->zl_sync = dmu_objset_syncprop(os); 3178 zilog->zl_dirty_max_txg = 0; 3179 zilog->zl_last_lwb_opened = NULL; 3180 zilog->zl_last_lwb_latency = 0; 3181 zilog->zl_max_block_size = zil_maxblocksize; 3182 3183 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); 3184 mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL); 3185 3186 for (int i = 0; i < TXG_SIZE; i++) { 3187 mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, 3188 MUTEX_DEFAULT, NULL); 3189 } 3190 3191 list_create(&zilog->zl_lwb_list, sizeof (lwb_t), 3192 offsetof(lwb_t, lwb_node)); 3193 3194 list_create(&zilog->zl_itx_commit_list, sizeof (itx_t), 3195 offsetof(itx_t, itx_node)); 3196 3197 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); 3198 3199 return (zilog); 3200 } 3201 
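/*
 * Illustrative sketch (comment only, not part of the build): a minimal,
 * hedged view of how a dataset consumer is expected to drive the
 * interfaces implemented in this file. The callback "my_get_data", the
 * assigned DMU transaction "tx", and the object id "foid" are
 * hypothetical placeholders; the zil_*() calls shown are the functions
 * defined in this file.
 *
 *	zilog_t *zl = zil_open(os, my_get_data);
 *
 *	// For each logged operation, while holding an assigned dmu_tx_t:
 *	itx_t *itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t));
 *	// ... fill in the itx's lr_write_t fields (for WR_COPIED the
 *	// lrsize passed above would also include the copied data) ...
 *	zil_itx_assign(zl, itx, tx);
 *
 *	// On fsync(2) or O_DSYNC, block until the object's itxs are stable:
 *	zil_commit(zl, foid);
 *
 *	// At unmount / teardown:
 *	zil_close(zl);
 */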
3202 void 3203 zil_free(zilog_t *zilog) 3204 { 3205 int i; 3206 3207 zilog->zl_stop_sync = 1; 3208 3209 ASSERT0(zilog->zl_suspend); 3210 ASSERT0(zilog->zl_suspending); 3211 3212 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 3213 list_destroy(&zilog->zl_lwb_list); 3214 3215 ASSERT(list_is_empty(&zilog->zl_itx_commit_list)); 3216 list_destroy(&zilog->zl_itx_commit_list); 3217 3218 for (i = 0; i < TXG_SIZE; i++) { 3219 /* 3220 * It's possible for an itx to be generated that doesn't dirty 3221 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean() 3222 * callback to remove the entry. We remove those here. 3223 * 3224 * Also free up the ziltest itxs. 3225 */ 3226 if (zilog->zl_itxg[i].itxg_itxs) 3227 zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs); 3228 mutex_destroy(&zilog->zl_itxg[i].itxg_lock); 3229 } 3230 3231 mutex_destroy(&zilog->zl_issuer_lock); 3232 mutex_destroy(&zilog->zl_lock); 3233 3234 cv_destroy(&zilog->zl_cv_suspend); 3235 3236 kmem_free(zilog, sizeof (zilog_t)); 3237 } 3238 3239 /* 3240 * Open an intent log. 3241 */ 3242 zilog_t * 3243 zil_open(objset_t *os, zil_get_data_t *get_data) 3244 { 3245 zilog_t *zilog = dmu_objset_zil(os); 3246 3247 ASSERT3P(zilog->zl_get_data, ==, NULL); 3248 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); 3249 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 3250 3251 zilog->zl_get_data = get_data; 3252 3253 return (zilog); 3254 } 3255 3256 /* 3257 * Close an intent log. 3258 */ 3259 void 3260 zil_close(zilog_t *zilog) 3261 { 3262 lwb_t *lwb; 3263 uint64_t txg; 3264 3265 if (!dmu_objset_is_snapshot(zilog->zl_os)) { 3266 zil_commit(zilog, 0); 3267 } else { 3268 ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL); 3269 ASSERT0(zilog->zl_dirty_max_txg); 3270 ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE); 3271 } 3272 3273 mutex_enter(&zilog->zl_lock); 3274 lwb = list_tail(&zilog->zl_lwb_list); 3275 if (lwb == NULL) 3276 txg = zilog->zl_dirty_max_txg; 3277 else 3278 txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg); 3279 mutex_exit(&zilog->zl_lock); 3280 3281 /* 3282 * We need to use txg_wait_synced() to wait long enough for the 3283 * ZIL to be clean, and to wait for all pending lwbs to be 3284 * written out. 3285 */ 3286 if (txg != 0) 3287 txg_wait_synced(zilog->zl_dmu_pool, txg); 3288 3289 if (zilog_is_dirty(zilog)) 3290 zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog, 3291 (u_longlong_t)txg); 3292 if (txg < spa_freeze_txg(zilog->zl_spa)) 3293 VERIFY(!zilog_is_dirty(zilog)); 3294 3295 zilog->zl_get_data = NULL; 3296 3297 /* 3298 * We should have only one lwb left on the list; remove it now. 3299 */ 3300 mutex_enter(&zilog->zl_lock); 3301 lwb = list_head(&zilog->zl_lwb_list); 3302 if (lwb != NULL) { 3303 ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list)); 3304 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 3305 3306 if (lwb->lwb_fastwrite) 3307 metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk); 3308 3309 list_remove(&zilog->zl_lwb_list, lwb); 3310 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 3311 zil_free_lwb(zilog, lwb); 3312 } 3313 mutex_exit(&zilog->zl_lock); 3314 } 3315 3316 static char *suspend_tag = "zil suspending"; 3317 3318 /* 3319 * Suspend an intent log. While in suspended mode, we still honor 3320 * synchronous semantics, but we rely on txg_wait_synced() to do it. 3321 * On old version pools, we suspend the log briefly when taking a 3322 * snapshot so that it will have an empty intent log. 3323 * 3324 * Long holds are not really intended to be used the way we do here -- 3325 * held for such a short time. 
A concurrent caller of dsl_dataset_long_held() 3326 * could fail. Therefore we take pains to only put a long hold if it is 3327 * actually necessary. Fortunately, it will only be necessary if the 3328 * objset is currently mounted (or the ZVOL equivalent). In that case it 3329 * will already have a long hold, so we are not really making things any worse. 3330 * 3331 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or 3332 * zvol_state_t), and use their mechanism to prevent their hold from being 3333 * dropped (e.g. VFS_HOLD()). However, that would be even more pain for 3334 * very little gain. 3335 * 3336 * if cookiep == NULL, this does both the suspend & resume. 3337 * Otherwise, it returns with the dataset "long held", and the cookie 3338 * should be passed into zil_resume(). 3339 */ 3340 int 3341 zil_suspend(const char *osname, void **cookiep) 3342 { 3343 objset_t *os; 3344 zilog_t *zilog; 3345 const zil_header_t *zh; 3346 int error; 3347 3348 error = dmu_objset_hold(osname, suspend_tag, &os); 3349 if (error != 0) 3350 return (error); 3351 zilog = dmu_objset_zil(os); 3352 3353 mutex_enter(&zilog->zl_lock); 3354 zh = zilog->zl_header; 3355 3356 if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ 3357 mutex_exit(&zilog->zl_lock); 3358 dmu_objset_rele(os, suspend_tag); 3359 return (SET_ERROR(EBUSY)); 3360 } 3361 3362 /* 3363 * Don't put a long hold in the cases where we can avoid it. This 3364 * is when there is no cookie so we are doing a suspend & resume 3365 * (i.e. called from zil_vdev_offline()), and there's nothing to do 3366 * for the suspend because it's already suspended, or there's no ZIL. 3367 */ 3368 if (cookiep == NULL && !zilog->zl_suspending && 3369 (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) { 3370 mutex_exit(&zilog->zl_lock); 3371 dmu_objset_rele(os, suspend_tag); 3372 return (0); 3373 } 3374 3375 dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag); 3376 dsl_pool_rele(dmu_objset_pool(os), suspend_tag); 3377 3378 zilog->zl_suspend++; 3379 3380 if (zilog->zl_suspend > 1) { 3381 /* 3382 * Someone else is already suspending it. 3383 * Just wait for them to finish. 3384 */ 3385 3386 while (zilog->zl_suspending) 3387 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); 3388 mutex_exit(&zilog->zl_lock); 3389 3390 if (cookiep == NULL) 3391 zil_resume(os); 3392 else 3393 *cookiep = os; 3394 return (0); 3395 } 3396 3397 /* 3398 * If there is no pointer to an on-disk block, this ZIL must not 3399 * be active (e.g. filesystem not mounted), so there's nothing 3400 * to clean up. 3401 */ 3402 if (BP_IS_HOLE(&zh->zh_log)) { 3403 ASSERT(cookiep != NULL); /* fast path already handled */ 3404 3405 *cookiep = os; 3406 mutex_exit(&zilog->zl_lock); 3407 return (0); 3408 } 3409 3410 /* 3411 * The ZIL has work to do. Ensure that the associated encryption 3412 * key will remain mapped while we are committing the log by 3413 * grabbing a reference to it. If the key isn't loaded we have no 3414 * choice but to return an error until the wrapping key is loaded. 
3415 */ 3416 if (os->os_encrypted && 3417 dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) { 3418 zilog->zl_suspend--; 3419 mutex_exit(&zilog->zl_lock); 3420 dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); 3421 dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); 3422 return (SET_ERROR(EACCES)); 3423 } 3424 3425 zilog->zl_suspending = B_TRUE; 3426 mutex_exit(&zilog->zl_lock); 3427 3428 /* 3429 * We need to use zil_commit_impl to ensure we wait for all 3430 * LWB_STATE_OPENED and LWB_STATE_ISSUED lwbs to be committed 3431 * to disk before proceeding. If we used zil_commit instead, it 3432 * would just call txg_wait_synced(), because zl_suspend is set. 3433 * txg_wait_synced() doesn't wait for these lwb's to be 3434 * LWB_STATE_FLUSH_DONE before returning. 3435 */ 3436 zil_commit_impl(zilog, 0); 3437 3438 /* 3439 * Now that we've ensured all lwb's are LWB_STATE_FLUSH_DONE, we 3440 * use txg_wait_synced() to ensure the data from the zilog has 3441 * migrated to the main pool before calling zil_destroy(). 3442 */ 3443 txg_wait_synced(zilog->zl_dmu_pool, 0); 3444 3445 zil_destroy(zilog, B_FALSE); 3446 3447 mutex_enter(&zilog->zl_lock); 3448 zilog->zl_suspending = B_FALSE; 3449 cv_broadcast(&zilog->zl_cv_suspend); 3450 mutex_exit(&zilog->zl_lock); 3451 3452 if (os->os_encrypted) 3453 dsl_dataset_remove_key_mapping(dmu_objset_ds(os)); 3454 3455 if (cookiep == NULL) 3456 zil_resume(os); 3457 else 3458 *cookiep = os; 3459 return (0); 3460 } 3461 3462 void 3463 zil_resume(void *cookie) 3464 { 3465 objset_t *os = cookie; 3466 zilog_t *zilog = dmu_objset_zil(os); 3467 3468 mutex_enter(&zilog->zl_lock); 3469 ASSERT(zilog->zl_suspend != 0); 3470 zilog->zl_suspend--; 3471 mutex_exit(&zilog->zl_lock); 3472 dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); 3473 dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); 3474 } 3475 3476 typedef struct zil_replay_arg { 3477 zil_replay_func_t **zr_replay; 3478 void *zr_arg; 3479 boolean_t zr_byteswap; 3480 char *zr_lr; 3481 } zil_replay_arg_t; 3482 3483 static int 3484 zil_replay_error(zilog_t *zilog, const lr_t *lr, int error) 3485 { 3486 char name[ZFS_MAX_DATASET_NAME_LEN]; 3487 3488 zilog->zl_replaying_seq--; /* didn't actually replay this one */ 3489 3490 dmu_objset_name(zilog->zl_os, name); 3491 3492 cmn_err(CE_WARN, "ZFS replay transaction error %d, " 3493 "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name, 3494 (u_longlong_t)lr->lrc_seq, 3495 (u_longlong_t)(lr->lrc_txtype & ~TX_CI), 3496 (lr->lrc_txtype & TX_CI) ? "CI" : ""); 3497 3498 return (error); 3499 } 3500 3501 static int 3502 zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra, 3503 uint64_t claim_txg) 3504 { 3505 zil_replay_arg_t *zr = zra; 3506 const zil_header_t *zh = zilog->zl_header; 3507 uint64_t reclen = lr->lrc_reclen; 3508 uint64_t txtype = lr->lrc_txtype; 3509 int error = 0; 3510 3511 zilog->zl_replaying_seq = lr->lrc_seq; 3512 3513 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ 3514 return (0); 3515 3516 if (lr->lrc_txg < claim_txg) /* already committed */ 3517 return (0); 3518 3519 /* Strip case-insensitive bit, still present in log record */ 3520 txtype &= ~TX_CI; 3521 3522 if (txtype == 0 || txtype >= TX_MAX_TYPE) 3523 return (zil_replay_error(zilog, lr, EINVAL)); 3524 3525 /* 3526 * If this record type can be logged out of order, the object 3527 * (lr_foid) may no longer exist. That's legitimate, not an error. 
3528 */ 3529 if (TX_OOO(txtype)) { 3530 error = dmu_object_info(zilog->zl_os, 3531 LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL); 3532 if (error == ENOENT || error == EEXIST) 3533 return (0); 3534 } 3535 3536 /* 3537 * Make a copy of the data so we can revise and extend it. 3538 */ 3539 bcopy(lr, zr->zr_lr, reclen); 3540 3541 /* 3542 * If this is a TX_WRITE with a blkptr, suck in the data. 3543 */ 3544 if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { 3545 error = zil_read_log_data(zilog, (lr_write_t *)lr, 3546 zr->zr_lr + reclen); 3547 if (error != 0) 3548 return (zil_replay_error(zilog, lr, error)); 3549 } 3550 3551 /* 3552 * The log block containing this lr may have been byteswapped 3553 * so that we can easily examine common fields like lrc_txtype. 3554 * However, the log is a mix of different record types, and only the 3555 * replay vectors know how to byteswap their records. Therefore, if 3556 * the lr was byteswapped, undo it before invoking the replay vector. 3557 */ 3558 if (zr->zr_byteswap) 3559 byteswap_uint64_array(zr->zr_lr, reclen); 3560 3561 /* 3562 * We must now do two things atomically: replay this log record, 3563 * and update the log header sequence number to reflect the fact that 3564 * we did so. At the end of each replay function the sequence number 3565 * is updated if we are in replay mode. 3566 */ 3567 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); 3568 if (error != 0) { 3569 /* 3570 * The DMU's dnode layer doesn't see removes until the txg 3571 * commits, so a subsequent claim can spuriously fail with 3572 * EEXIST. So if we receive any error we try syncing out 3573 * any removes then retry the transaction. Note that we 3574 * specify B_FALSE for byteswap now, so we don't do it twice. 3575 */ 3576 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); 3577 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); 3578 if (error != 0) 3579 return (zil_replay_error(zilog, lr, error)); 3580 } 3581 return (0); 3582 } 3583 3584 /* ARGSUSED */ 3585 static int 3586 zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg) 3587 { 3588 zilog->zl_replay_blks++; 3589 3590 return (0); 3591 } 3592 3593 /* 3594 * If this dataset has a non-empty intent log, replay it and destroy it. 3595 */ 3596 void 3597 zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE]) 3598 { 3599 zilog_t *zilog = dmu_objset_zil(os); 3600 const zil_header_t *zh = zilog->zl_header; 3601 zil_replay_arg_t zr; 3602 3603 if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) { 3604 zil_destroy(zilog, B_TRUE); 3605 return; 3606 } 3607 3608 zr.zr_replay = replay_func; 3609 zr.zr_arg = arg; 3610 zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log); 3611 zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP); 3612 3613 /* 3614 * Wait for in-progress removes to sync before starting replay. 
         */
        txg_wait_synced(zilog->zl_dmu_pool, 0);

        zilog->zl_replay = B_TRUE;
        zilog->zl_replay_time = ddi_get_lbolt();
        ASSERT(zilog->zl_replay_blks == 0);
        (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
            zh->zh_claim_txg, B_TRUE);
        vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);

        zil_destroy(zilog, B_FALSE);
        txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
        zilog->zl_replay = B_FALSE;
}

boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
        if (zilog->zl_sync == ZFS_SYNC_DISABLED)
                return (B_TRUE);

        if (zilog->zl_replay) {
                dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
                zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
                    zilog->zl_replaying_seq;
                return (B_TRUE);
        }

        return (B_FALSE);
}

/* ARGSUSED */
int
zil_reset(const char *osname, void *arg)
{
        int error;

        error = zil_suspend(osname, NULL);
        /* EACCES means crypto key not loaded */
        if ((error == EACCES) || (error == EBUSY))
                return (SET_ERROR(error));
        if (error != 0)
                return (SET_ERROR(EEXIST));
        return (0);
}

EXPORT_SYMBOL(zil_alloc);
EXPORT_SYMBOL(zil_free);
EXPORT_SYMBOL(zil_open);
EXPORT_SYMBOL(zil_close);
EXPORT_SYMBOL(zil_replay);
EXPORT_SYMBOL(zil_replaying);
EXPORT_SYMBOL(zil_destroy);
EXPORT_SYMBOL(zil_destroy_sync);
EXPORT_SYMBOL(zil_itx_create);
EXPORT_SYMBOL(zil_itx_destroy);
EXPORT_SYMBOL(zil_itx_assign);
EXPORT_SYMBOL(zil_commit);
EXPORT_SYMBOL(zil_claim);
EXPORT_SYMBOL(zil_check_log_chain);
EXPORT_SYMBOL(zil_sync);
EXPORT_SYMBOL(zil_clean);
EXPORT_SYMBOL(zil_suspend);
EXPORT_SYMBOL(zil_resume);
EXPORT_SYMBOL(zil_lwb_add_block);
EXPORT_SYMBOL(zil_bp_tree_add);
EXPORT_SYMBOL(zil_set_sync);
EXPORT_SYMBOL(zil_set_logbias);

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, INT, ZMOD_RW,
        "ZIL block open timeout percentage");

ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW,
        "Disable intent logging replay");

ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW,
        "Disable ZIL cache flushes");

ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, ULONG, ZMOD_RW,
        "Limit in bytes of slog sync writes per commit");

ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, INT, ZMOD_RW,
        "Limit in bytes of ZIL log block size");
/* END CSTYLED */
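/*
 * Illustrative sketch only, not part of this file's implementation: how a
 * consumer's intent-log builder typically consults zil_replaying() above.
 * During replay, zil_replaying() records the replayed sequence number for
 * the transaction's txg and returns B_TRUE, so an operation being replayed
 * is not logged a second time; it also returns B_TRUE when sync=disabled.
 * The names example_op_t and example_log_op are hypothetical; the calls to
 * zil_itx_create() and zil_itx_assign() use the interfaces exported above.
 */
#if 0
typedef struct example_op {
        uint64_t op_txtype;     /* one of the TX_* record types */
        size_t op_lrsize;       /* full size of the log record to allocate */
} example_op_t;

static void
example_log_op(zilog_t *zilog, dmu_tx_t *tx, const example_op_t *op)
{
        itx_t *itx;

        /*
         * Nothing to log if this transaction is itself the replay of a
         * ZIL record (or if intent logging is administratively disabled).
         */
        if (zil_replaying(zilog, tx))
                return;

        itx = zil_itx_create(op->op_txtype, op->op_lrsize);
        /* Fill in the type-specific fields of itx->itx_lr here. */
        zil_itx_assign(zilog, itx, tx);
}
#endif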