/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2018 Datto Inc.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/wmsum.h>

/*
 * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
 * calls that change the file system. Each itx has enough information to
 * be able to replay it after a system crash, power loss, or
 * equivalent failure mode. These are stored in memory until either:
 *
 *   1. they are committed to the pool by the DMU transaction group
 *      (txg), at which point they can be discarded; or
 *   2. they are committed to the on-disk ZIL for the dataset being
 *      modified (e.g. due to an fsync, O_DSYNC, or other synchronous
 *      requirement).
 *
 * In the event of a crash or power loss, the itxs contained by each
 * dataset's on-disk ZIL will be replayed when that dataset is first
 * instantiated (e.g. if the dataset is a normal filesystem, when it is
 * first mounted).
 *
 * As hinted at above, there is one ZIL per dataset (both the in-memory
 * representation, and the on-disk representation). The on-disk format
 * consists of 3 parts:
 *
 *	- a single, per-dataset, ZIL header; which points to a chain of
 *	- zero or more ZIL blocks; each of which contains
 *	- zero or more ZIL records
 *
 * A ZIL record holds the information necessary to replay a single
 * system call transaction. A ZIL block can hold many ZIL records, and
 * the blocks are chained together, similarly to a singly linked list.
 *
 * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
 * block in the chain, and the ZIL header points to the first block in
 * the chain.
 *
 * Note, there is not a fixed place in the pool to hold these ZIL
 * blocks; they are dynamically allocated and freed as needed from the
 * blocks available on the pool, though they can be preferentially
 * allocated from a dedicated "log" vdev.
 */
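/*
 * Rough usage sketch (illustrative only; the actual call sites live in
 * the ZPL/ZVOL code, not in this file):
 *
 *	itx = zil_itx_create(txtype, size);	// build an in-memory record
 *	... fill in itx->itx_lr ...
 *	zil_itx_assign(zilog, itx, tx);		// queue it on the in-memory ZIL
 *	zil_commit(zilog, foid);		// e.g. fsync(): force it to disk
 *
 * After a crash, zil_replay() walks the on-disk chain described above
 * and re-applies any records that had not yet made it out via spa_sync().
 */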

/*
 * This controls the amount of time that a ZIL block (lwb) will remain
 * "open" when it isn't "full", and it has a thread waiting for it to be
 * committed to stable storage. Please refer to the zil_commit_waiter()
 * function (and the comments within it) for more details.
 */
static uint_t zfs_commit_timeout_pct = 5;

/*
 * Minimal time we care to delay commit waiting for more ZIL records.
 * At least the FreeBSD kernel can't sleep for less than 2us at its best.
 * So requests to sleep for less than 5us are a waste of CPU time with
 * a risk of significant log latency increase due to oversleep.
 */
static uint64_t zil_min_commit_timeout = 5000;

/*
 * See zil.h for more information about these fields.
 */
static zil_kstat_values_t zil_stats = {
	{ "zil_commit_count", KSTAT_DATA_UINT64 },
	{ "zil_commit_writer_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
};

static zil_sums_t zil_sums_global;
static kstat_t *zil_kstats_global;

/*
 * Disable intent logging replay. This global ZIL switch affects all pools.
 */
int zil_replay_disable = 0;

/*
 * Disable the DKIOCFLUSHWRITECACHE commands that are normally sent to
 * the disk(s) by the ZIL after an LWB write has completed. Setting this
 * will cause ZIL corruption on power loss if a volatile out-of-order
 * write cache is enabled.
 */
static int zil_nocacheflush = 0;

/*
 * Limit SLOG write size per commit executed with synchronous priority.
 * Any writes above that will be executed with lower (asynchronous) priority
 * to limit potential SLOG device abuse by a single active ZIL writer.
 */
static uint64_t zil_slog_bulk = 768 * 1024;

static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;

#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
    sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))

static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
	if (likely(cmp))
		return (cmp);

	return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}

static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}

int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva;
	zil_bp_node_t *zn;
	avl_index_t where;

	if (BP_IS_EMBEDDED(bp))
		return (0);

	dva = BP_IDENTITY(bp);

	if (avl_find(t, dva, &where) != NULL)
		return (SET_ERROR(EEXIST));

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0],
	    sizeof (zc->zc_word[ZIL_ZC_GUID_0]));
	(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1],
	    sizeof (zc->zc_word[ZIL_ZC_GUID_1]));
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

static int
zil_kstats_global_update(kstat_t *ksp, int rw)
{
	zil_kstat_values_t *zs = ksp->ks_data;
	ASSERT3P(&zil_stats, ==, zs);

	if (rw == KSTAT_WRITE) {
		return (SET_ERROR(EACCES));
	}

	zil_kstat_values_update(zs, &zil_sums_global);

	return (0);
}

/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
    blkptr_t *nbp, void *dst, char **end)
{
	zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	if (!decrypt)
		zio_flags |= ZIO_FLAG_RAW;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
	    &abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential.
		 * The checksum verifier for the next block should be
		 * bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = abuf->b_data;
			char *lr = (char *)(zilc + 1);
			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);

			if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
				memcpy(dst, lr, len);
				*end = (char *)dst + len;
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = abuf->b_data;
			uint64_t size = BP_GET_LSIZE(bp);
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(zilc->zc_nused, <=,
				    SPA_OLD_MAXBLOCKSIZE);
				memcpy(dst, lr, zilc->zc_nused);
				*end = (char *)dst + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}

		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}

/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			memset(wbuf, 0, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	/*
	 * If we are not using the resulting data, we are just checking that
	 * it hasn't been corrupted so we don't need to waste CPU time
	 * decompressing and decrypting it.
	 */
	if (wbuf == NULL)
		zio_flags |= ZIO_FLAG_RAW;

	ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			memcpy(wbuf, abuf->b_data, arc_buf_size(abuf));
		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}

void
zil_sums_init(zil_sums_t *zs)
{
	wmsum_init(&zs->zil_commit_count, 0);
	wmsum_init(&zs->zil_commit_writer_count, 0);
	wmsum_init(&zs->zil_itx_count, 0);
	wmsum_init(&zs->zil_itx_indirect_count, 0);
	wmsum_init(&zs->zil_itx_indirect_bytes, 0);
	wmsum_init(&zs->zil_itx_copied_count, 0);
	wmsum_init(&zs->zil_itx_copied_bytes, 0);
	wmsum_init(&zs->zil_itx_needcopy_count, 0);
	wmsum_init(&zs->zil_itx_needcopy_bytes, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_count, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_bytes, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_count, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_bytes, 0);
}

void
zil_sums_fini(zil_sums_t *zs)
{
	wmsum_fini(&zs->zil_commit_count);
	wmsum_fini(&zs->zil_commit_writer_count);
	wmsum_fini(&zs->zil_itx_count);
	wmsum_fini(&zs->zil_itx_indirect_count);
	wmsum_fini(&zs->zil_itx_indirect_bytes);
	wmsum_fini(&zs->zil_itx_copied_count);
	wmsum_fini(&zs->zil_itx_copied_bytes);
	wmsum_fini(&zs->zil_itx_needcopy_count);
	wmsum_fini(&zs->zil_itx_needcopy_bytes);
	wmsum_fini(&zs->zil_itx_metaslab_normal_count);
	wmsum_fini(&zs->zil_itx_metaslab_normal_bytes);
	wmsum_fini(&zs->zil_itx_metaslab_slog_count);
	wmsum_fini(&zs->zil_itx_metaslab_slog_bytes);
}

void
zil_kstat_values_update(zil_kstat_values_t *zs, zil_sums_t *zil_sums)
{
	zs->zil_commit_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_commit_count);
	zs->zil_commit_writer_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_commit_writer_count);
	zs->zil_itx_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_count);
	zs->zil_itx_indirect_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_indirect_count);
	zs->zil_itx_indirect_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_indirect_bytes);
	zs->zil_itx_copied_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_copied_count);
	zs->zil_itx_copied_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_copied_bytes);
	zs->zil_itx_needcopy_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_needcopy_count);
	zs->zil_itx_needcopy_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_needcopy_bytes);
	zs->zil_itx_metaslab_normal_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_count);
	zs->zil_itx_metaslab_normal_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_bytes);
	zs->zil_itx_metaslab_slog_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_count);
	zs->zil_itx_metaslab_slog_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_bytes);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
    boolean_t decrypt)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk = {{{{0}}}};
	char *lrbuf, *lrp;
	int error = 0;

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity.  We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *end = NULL;

		if (blk_seq > claim_blk_seq)
			break;

		error = parse_blk_func(zilog, &blk, arg, txg);
		if (error != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
		    lrbuf, &end);
		if (error != 0) {
			if (claimed) {
				char name[ZFS_MAX_DATASET_NAME_LEN];

				dmu_objset_name(zilog->zl_os, name);

				cmn_err(CE_WARN, "ZFS read log block error %d, "
				    "dataset %s, seq 0x%llx\n", error, name,
				    (u_longlong_t)blk_seq);
			}
			break;
		}

		for (lrp = lrbuf; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			if (lr->lrc_seq > claim_lr_seq)
				goto done;

			error = parse_lr_func(zilog, lr, arg, txg);
			if (error != 0)
				goto done;
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	zil_bp_tree_fini(zilog);
	zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);

	return (error);
}

static int
zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t first_txg)
{
	(void) tx;
	ASSERT(!BP_IS_HOLE(bp));

	/*
	 * As we call this function from the context of a rewind to a
	 * checkpoint, each ZIL block whose txg is later than the txg
	 * that we rewind to is invalid.  Thus, we return -1 so
	 * zil_parse() doesn't attempt to read it.
	 */
	if (bp->blk_birth >= first_txg)
		return (-1);

	if (zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	zio_free(zilog->zl_spa, first_txg, bp);
	return (0);
}

static int
zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t first_txg)
{
	(void) zilog, (void) lrc, (void) tx, (void) first_txg;
	return (0);
}

static int
zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
	    zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}

static int
zil_claim_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	ASSERT(lrc->lrc_txtype == TX_WRITE);

	/*
	 * If the block is not readable, don't claim it.  This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to.  In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (lr->lr_blkptr.blk_birth >= first_txg) {
		error = zil_read_log_data(zilog, lr, NULL);
		if (error != 0)
			return (error);
	}

	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}

static int
zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
{
	const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
	const blkptr_t *bp;
	spa_t *spa;
	uint_t ii;

	ASSERT(lrc->lrc_txtype == TX_CLONE_RANGE);

	if (tx == NULL) {
		return (0);
	}

	/*
	 * XXX: Do we need to byteswap lr?
	 */

	spa = zilog->zl_spa;

	for (ii = 0; ii < lr->lr_nbps; ii++) {
		bp = &lr->lr_bps[ii];

		/*
		 * When data is embedded into the BP, there is no need to
		 * create a BRT entry as there is no data block.  Just copy
		 * the BP as it contains the data.
		 */
		if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
			brt_pending_add(spa, bp, tx);
		}
	}

	return (0);
}

static int
zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t first_txg)
{

	switch (lrc->lrc_txtype) {
	case TX_WRITE:
		return (zil_claim_write(zilog, lrc, tx, first_txg));
	case TX_CLONE_RANGE:
		return (zil_claim_clone_range(zilog, lrc, tx));
	default:
		return (0);
	}
}

static int
zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t claim_txg)
{
	(void) claim_txg;

	zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static int
zil_free_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	ASSERT(lrc->lrc_txtype == TX_WRITE);

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
	    !BP_IS_HOLE(bp)) {
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
	}

	return (0);
}

static int
zil_free_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
{
	const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
	const blkptr_t *bp;
	spa_t *spa;
	uint_t ii;

	ASSERT(lrc->lrc_txtype == TX_CLONE_RANGE);

	if (tx == NULL) {
		return (0);
	}

	spa = zilog->zl_spa;

	for (ii = 0; ii < lr->lr_nbps; ii++) {
		bp = &lr->lr_bps[ii];

		if (!BP_IS_HOLE(bp)) {
			zio_free(spa, dmu_tx_get_txg(tx), bp);
		}
	}

	return (0);
}

static int
zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t claim_txg)
{

	if (claim_txg == 0) {
		return (0);
	}

	switch (lrc->lrc_txtype) {
	case TX_WRITE:
		return (zil_free_write(zilog, lrc, tx, claim_txg));
	case TX_CLONE_RANGE:
		return (zil_free_clone_range(zilog, lrc, tx));
	default:
		return (0);
	}
}

static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	return (TREE_CMP(v1, v2));
}

static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg,
    boolean_t fastwrite)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	lwb->lwb_blk = *bp;
	lwb->lwb_fastwrite = fastwrite;
	lwb->lwb_slog = slog;
	lwb->lwb_state = LWB_STATE_CLOSED;
	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
	lwb->lwb_max_txg = txg;
	lwb->lwb_write_zio = NULL;
	lwb->lwb_root_zio = NULL;
	lwb->lwb_issued_timestamp = 0;
	lwb->lwb_issued_txg = 0;
	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
		lwb->lwb_nused = sizeof (zil_chain_t);
		lwb->lwb_sz = BP_GET_LSIZE(bp);
	} else {
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
	}

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
	VERIFY(list_is_empty(&lwb->lwb_waiters));
	VERIFY(list_is_empty(&lwb->lwb_itxs));

	return (lwb);
}

static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
	ASSERT(MUTEX_HELD(&zilog->zl_lock));
	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
	VERIFY(list_is_empty(&lwb->lwb_waiters));
	VERIFY(list_is_empty(&lwb->lwb_itxs));
	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
	ASSERT3P(lwb->lwb_write_zio, ==, NULL);
	ASSERT3P(lwb->lwb_root_zio, ==, NULL);
	ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
	ASSERT(lwb->lwb_state == LWB_STATE_CLOSED ||
	    lwb->lwb_state == LWB_STATE_FLUSH_DONE);

	/*
	 * Clear the zilog's field to indicate this lwb is no longer
	 * valid, and prevent use-after-free errors.
	 */
	if (zilog->zl_last_lwb_opened == lwb)
		zilog->zl_last_lwb_opened = NULL;

	kmem_cache_free(zil_lwb_cache, lwb);
}

/*
 * Called when we create in-memory log transactions so that we know
 * to cleanup the itxs at the end of spa_sync().
 */
static void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	ASSERT(spa_writeable(zilog->zl_spa));

	if (ds->ds_is_snapshot)
		panic("dirtying snapshot!");

	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, zilog);

		zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
	}
}

/*
 * Determine if the zil is dirty in the specified txg. Callers wanting to
 * ensure that the dirty state does not change must hold the itxg_lock for
 * the specified txg. Holding the lock will ensure that the zil cannot be
 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
 * state.
 */
static boolean_t __maybe_unused
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
		return (B_TRUE);
	return (B_FALSE);
}

/*
 * Determine if the zil is dirty. The zil is considered dirty if it has
 * any pending itx records that have not been cleaned by zil_clean().
 */
static boolean_t
zilog_is_dirty(zilog_t *zilog)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	for (int t = 0; t < TXG_SIZE; t++) {
		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * It's called in zil_commit context (zil_process_commit_list()/zil_create()).
 * It activates the SPA_FEATURE_ZILSAXATTR feature, if it's enabled.
 * Check dsl_dataset_feature_is_active to avoid txg_wait_synced() on every
 * zil_commit.
 */
static void
zil_commit_activate_saxattr_feature(zilog_t *zilog)
{
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;

	if (spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
	    dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL &&
	    !dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
		dsl_dataset_dirty(ds, tx);
		txg = dmu_tx_get_txg(tx);

		mutex_enter(&ds->ds_lock);
		ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
		    (void *)B_TRUE;
		mutex_exit(&ds->ds_lock);
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}
}

/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;
	boolean_t fastwrite = FALSE;
	boolean_t slog = FALSE;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);


	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
		    ZIL_MIN_BLKSZ, &slog);
		fastwrite = TRUE;

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write block (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, &blk, slog, txg, fastwrite);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		/*
		 * If the "zilsaxattr" feature is enabled on the zpool, then
		 * activate it now when we're creating the ZIL chain. We can't
		 * wait with this until we write the first xattr log record
		 * because we need to wait for the feature activation to
		 * sync out.
		 */
		if (spa_feature_is_enabled(zilog->zl_spa,
		    SPA_FEATURE_ZILSAXATTR) && dmu_objset_type(zilog->zl_os) !=
		    DMU_OST_ZVOL) {
			mutex_enter(&ds->ds_lock);
			ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
			    (void *)B_TRUE;
			mutex_exit(&ds->ds_lock);
		}

		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	} else {
		/*
		 * This branch covers the case where we enable the feature on a
		 * zpool that has existing ZIL headers.
		 */
		zil_commit_activate_saxattr_feature(zilog);
	}
	IMPLY(spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
	    dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL,
	    dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR));

	ASSERT(error != 0 || memcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
	IMPLY(error == 0, lwb != NULL);

	return (lwb);
}

/*
 * In one tx, free all log blocks and clear the log header. If keep_first
 * is set, then we're replaying a log with no content. We want to keep the
 * first block, however, so that the first synchronous transaction doesn't
 * require a txg_wait_synced() in zil_create(). We don't need to
 * txg_wait_synced() here either when keep_first is set, because both
 * zil_create() and zil_destroy() will wait for any in-progress destroys
 * to complete.
 * Return B_TRUE if there were any entries to replay.
 */
boolean_t
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return (B_FALSE);

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		VERIFY(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			if (lwb->lwb_fastwrite)
				metaslab_fastwrite_unmark(zilog->zl_spa,
				    &lwb->lwb_blk);

			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
			zil_free_lwb(zilog, lwb);
		}
	} else if (!keep_first) {
		zil_destroy_sync(zilog, tx);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);

	return (B_TRUE);
}

void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	(void) zil_parse(zilog, zil_free_log_block,
	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
}

int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
	dmu_tx_t *tx = txarg;
	zilog_t *zilog;
	uint64_t first_txg;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_own_obj(dp, ds->ds_object,
	    DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
	if (error != 0) {
		/*
		 * EBUSY indicates that the objset is inconsistent, in which
		 * case it can not have a ZIL.
		 */
		if (error != EBUSY) {
			cmn_err(CE_WARN, "can't open objset for %llu, error %u",
			    (unsigned long long)ds->ds_object, error);
		}

		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);
	ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
	first_txg = spa_min_claim_txg(zilog->zl_spa);

	/*
	 * If the spa_log_state is not set to be cleared, check whether
	 * the current uberblock is a checkpoint one and if the current
	 * header has been claimed before moving on.
	 *
	 * If the current uberblock is a checkpointed uberblock then
	 * one of the following scenarios took place:
	 *
	 * 1] We are currently rewinding to the checkpoint of the pool.
	 * 2] We crashed in the middle of a checkpoint rewind but we
	 *    did manage to write the checkpointed uberblock to the
	 *    vdev labels, so when we tried to import the pool again
	 *    the checkpointed uberblock was selected from the import
	 *    procedure.
	 *
	 * In both cases we want to zero out all the ZIL blocks, except
	 * the ones that have been claimed at the time of the checkpoint
	 * (their zh_claim_txg != 0). The reason is that these blocks
	 * may be corrupted since we may have reused their locations on
	 * disk after we took the checkpoint.
	 *
	 * We could try to set spa_log_state to SPA_LOG_CLEAR earlier
	 * when we first figure out whether the current uberblock is
	 * checkpointed or not. Unfortunately, that would discard all
	 * the logs, including the ones that are claimed, and we would
	 * leak space.
	 */
	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
	    (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
	    zh->zh_claim_txg == 0)) {
		if (!BP_IS_HOLE(&zh->zh_log)) {
			(void) zil_parse(zilog, zil_clear_log_block,
			    zil_noop_log_record, tx, first_txg, B_FALSE);
		}
		BP_ZERO(&zh->zh_log);
		if (os->os_encrypted)
			os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_disown(os, B_FALSE, FTAG);
		return (0);
	}

	/*
	 * If we are not rewinding and opening the pool normally, then
	 * the min_claim_txg should be equal to the first txg of the pool.
	 */
	ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number.  This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg, B_FALSE);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		if (os->os_encrypted)
			os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_disown(os, B_FALSE, FTAG);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
	(void) dp;
	zilog_t *zilog;
	objset_t *os;
	blkptr_t *bp;
	int error;

	ASSERT(tx == NULL);

	error = dmu_objset_from_ds(ds, &os);
	if (error != 0) {
		cmn_err(CE_WARN, "can't open objset %llu, error %d",
		    (unsigned long long)ds->ds_object, error);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	bp = (blkptr_t *)&zilog->zl_header->zh_log;

	if (!BP_IS_HOLE(bp)) {
		vdev_t *vd;
		boolean_t valid = B_TRUE;

		/*
		 * Check the first block and determine if it's on a log device
		 * which may have been removed or faulted prior to loading this
		 * pool.  If so, there's no point in checking the rest of the
		 * log as its content should have already been synced to the
		 * pool.
		 */
		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
		if (vd->vdev_islog && vdev_is_dead(vd))
			valid = vdev_log_state_valid(vd);
		spa_config_exit(os->os_spa, SCL_STATE, FTAG);

		if (!valid)
			return (0);

		/*
		 * Check whether the current uberblock is checkpointed (e.g.
		 * we are rewinding) and whether the current header has been
		 * claimed or not.  If it hasn't then skip verifying it.
		 * We do this because its ZIL blocks may be part of the pool's
		 * state before the rewind, which is no longer valid.
		 */
		zil_header_t *zh = zil_header_in_syncing_context(zilog);
		if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
		    zh->zh_claim_txg == 0)
			return (0);
	}

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg.  See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL :
	    spa_min_claim_txg(os->os_spa), B_FALSE);

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}

/*
 * When an itx is "skipped", this function is used to properly mark the
 * waiter as "done", and signal any thread(s) waiting on it. An itx can
 * be skipped (and not committed to an lwb) for a variety of reasons,
 * one of them being that the itx was committed via spa_sync(), prior to
 * it being committed to an lwb; this can happen if a thread calling
 * zil_commit() is racing with spa_sync().
 */
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
	mutex_enter(&zcw->zcw_lock);
	ASSERT3B(zcw->zcw_done, ==, B_FALSE);
	zcw->zcw_done = B_TRUE;
	cv_broadcast(&zcw->zcw_cv);
	mutex_exit(&zcw->zcw_lock);
}

/*
 * This function is used when the given waiter is to be linked into an
 * lwb's "lwb_waiters" list; i.e. when the itx is committed to the lwb.
 * At this point, the waiter will no longer be referenced by the itx,
 * and instead, will be referenced by the lwb.
 */
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
	/*
	 * The lwb_waiters field of the lwb is protected by the zilog's
	 * zl_lock, thus it must be held when calling this function.
	 */
	ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_lock));

	mutex_enter(&zcw->zcw_lock);
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	ASSERT3P(lwb, !=, NULL);
	ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
	    lwb->lwb_state == LWB_STATE_ISSUED ||
	    lwb->lwb_state == LWB_STATE_WRITE_DONE);

	list_insert_tail(&lwb->lwb_waiters, zcw);
	zcw->zcw_lwb = lwb;
	mutex_exit(&zcw->zcw_lock);
}

/*
 * This function is used when zio_alloc_zil() fails to allocate a ZIL
 * block, and the given waiter must be linked to the "nolwb waiters"
 * list inside of zil_process_commit_list().
 */
static void
zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
	mutex_enter(&zcw->zcw_lock);
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	list_insert_tail(nolwb, zcw);
	mutex_exit(&zcw->zcw_lock);
}

void
zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
{
	avl_tree_t *t = &lwb->lwb_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zil_nocacheflush)
		return;

	mutex_enter(&lwb->lwb_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&lwb->lwb_vdev_lock);
}

static void
zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
{
	avl_tree_t *src = &lwb->lwb_vdev_tree;
	avl_tree_t *dst = &nlwb->lwb_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;

	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
	ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
	ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);

	/*
	 * While 'lwb' is at a point in its lifetime where lwb_vdev_tree does
	 * not need the protection of lwb_vdev_lock (it will only be modified
	 * while holding zilog->zl_lock) as its writes and those of its
	 * children have all completed.  The younger 'nlwb' may be waiting on
	 * future writes to additional vdevs.
	 */
	mutex_enter(&nlwb->lwb_vdev_lock);
	/*
	 * Tear down the 'lwb' vdev tree, ensuring that entries which do not
	 * exist in 'nlwb' are moved to it, freeing any would-be duplicates.
	 */
	while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) {
		avl_index_t where;

		if (avl_find(dst, zv, &where) == NULL) {
			avl_insert(dst, zv, where);
		} else {
			kmem_free(zv, sizeof (*zv));
		}
	}
	mutex_exit(&nlwb->lwb_vdev_lock);
}

void
zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
{
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}

/*
 * This function is called after all vdevs associated with a given lwb
 * write have completed their DKIOCFLUSHWRITECACHE command; or as soon
 * as the lwb write completes, if "zil_nocacheflush" is set. Further,
 * all "previous" lwb's will have completed before this function is
 * called; i.e. this function is called for all previous lwbs before
 * it's called for "this" lwb (enforced via the zio dependencies
 * configured in zil_lwb_set_zio_dependency()).
 *
 * The intention is for this function to be called as soon as the
 * contents of an lwb are considered "stable" on disk, and will survive
 * any sudden loss of power. At this point, any threads waiting for the
 * lwb to reach this state are signalled, and the "waiter" structures
 * are marked "done".
 */
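/*
 * Reading aid: a rough summary of the lwb life cycle as used by the
 * functions below (the lwb_state_t definition in zil_impl.h is the
 * authoritative description):
 *
 *	LWB_STATE_CLOSED	allocated by zil_alloc_lwb(), no zios yet
 *	LWB_STATE_OPENED	zios created in zil_lwb_write_open();
 *				itxs may be committed to it
 *	LWB_STATE_ISSUED	write zio issued by zil_lwb_write_issue()
 *	LWB_STATE_WRITE_DONE	zil_lwb_write_done() ran; cache flushes
 *				are issued or deferred to the next lwb
 *	LWB_STATE_FLUSH_DONE	zil_lwb_flush_vdevs_done() ran; waiters
 *				are signalled and the lwb can be freed
 */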
static void
zil_lwb_flush_vdevs_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	zil_commit_waiter_t *zcw;
	itx_t *itx;
	uint64_t txg;

	spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);

	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);

	mutex_enter(&zilog->zl_lock);

	/*
	 * If we have had an allocation failure and the txg is
	 * waiting to sync then we want zil_sync() to remove the lwb so
	 * that it's not picked up as the next new one in
	 * zil_process_commit_list(). zil_sync() will only remove the
	 * lwb if lwb_buf is null.
	 */
	lwb->lwb_buf = NULL;

	ASSERT3U(lwb->lwb_issued_timestamp, >, 0);
	zilog->zl_last_lwb_latency = (zilog->zl_last_lwb_latency * 3 +
	    gethrtime() - lwb->lwb_issued_timestamp) / 4;

	lwb->lwb_root_zio = NULL;

	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
	lwb->lwb_state = LWB_STATE_FLUSH_DONE;

	if (zilog->zl_last_lwb_opened == lwb) {
		/*
		 * Remember the highest committed log sequence number
		 * for ztest. We only update this value when all the log
		 * writes succeeded, because ztest wants to ASSERT that
		 * it got the whole log chain.
		 */
		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
	}

	while ((itx = list_head(&lwb->lwb_itxs)) != NULL) {
		list_remove(&lwb->lwb_itxs, itx);
		zil_itx_destroy(itx);
	}

	while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) {
		mutex_enter(&zcw->zcw_lock);

		ASSERT(list_link_active(&zcw->zcw_node));
		list_remove(&lwb->lwb_waiters, zcw);

		ASSERT3P(zcw->zcw_lwb, ==, lwb);
		zcw->zcw_lwb = NULL;
		/*
		 * We expect any ZIO errors from child ZIOs to have been
		 * propagated "up" to this specific LWB's root ZIO, in
		 * order for this error handling to work correctly. This
		 * includes ZIO errors from either this LWB's write or
		 * flush, as well as any errors from other dependent LWBs
		 * (e.g. a root LWB ZIO that might be a child of this LWB).
		 *
		 * With that said, it's important to note that LWB flush
		 * errors are not propagated up to the LWB root ZIO.
		 * This is incorrect behavior, and results in VDEV flush
		 * errors not being handled correctly here. See the
		 * comment above the call to "zio_flush" for details.
		 */

		zcw->zcw_zio_error = zio->io_error;

		ASSERT3B(zcw->zcw_done, ==, B_FALSE);
		zcw->zcw_done = B_TRUE;
		cv_broadcast(&zcw->zcw_cv);

		mutex_exit(&zcw->zcw_lock);
	}

	mutex_exit(&zilog->zl_lock);

	mutex_enter(&zilog->zl_lwb_io_lock);
	txg = lwb->lwb_issued_txg;
	ASSERT3U(zilog->zl_lwb_inflight[txg & TXG_MASK], >, 0);
	zilog->zl_lwb_inflight[txg & TXG_MASK]--;
	if (zilog->zl_lwb_inflight[txg & TXG_MASK] == 0)
		cv_broadcast(&zilog->zl_lwb_io_cv);
	mutex_exit(&zilog->zl_lwb_io_lock);
}

/*
 * Wait for the completion of all writes/flushes issued for the given txg.
 * This guarantees that zil_lwb_flush_vdevs_done() has been called and has
 * returned for each of them.
 */
static void
zil_lwb_flush_wait_all(zilog_t *zilog, uint64_t txg)
{
	ASSERT3U(txg, ==, spa_syncing_txg(zilog->zl_spa));

	mutex_enter(&zilog->zl_lwb_io_lock);
	while (zilog->zl_lwb_inflight[txg & TXG_MASK] > 0)
		cv_wait(&zilog->zl_lwb_io_cv, &zilog->zl_lwb_io_lock);
	mutex_exit(&zilog->zl_lwb_io_lock);

#ifdef ZFS_DEBUG
	mutex_enter(&zilog->zl_lock);
	mutex_enter(&zilog->zl_lwb_io_lock);
	lwb_t *lwb = list_head(&zilog->zl_lwb_list);
	while (lwb != NULL && lwb->lwb_max_txg <= txg) {
		if (lwb->lwb_issued_txg <= txg) {
			ASSERT(lwb->lwb_state != LWB_STATE_ISSUED);
			ASSERT(lwb->lwb_state != LWB_STATE_WRITE_DONE);
			IMPLY(lwb->lwb_issued_txg > 0,
			    lwb->lwb_state == LWB_STATE_FLUSH_DONE);
		}
		IMPLY(lwb->lwb_state == LWB_STATE_FLUSH_DONE,
		    lwb->lwb_buf == NULL);
		lwb = list_next(&zilog->zl_lwb_list, lwb);
	}
	mutex_exit(&zilog->zl_lwb_io_lock);
	mutex_exit(&zilog->zl_lock);
#endif
}

/*
 * This is called when an lwb's write zio completes. The callback's
 * purpose is to issue the DKIOCFLUSHWRITECACHE commands for the vdevs
 * in the lwb's lwb_vdev_tree. The tree will contain the vdevs involved
 * in writing out this specific lwb's data, and in the case that cache
 * flushes have been deferred, vdevs involved in writing the data for
 * previous lwbs. The writes corresponding to all the vdevs in the
 * lwb_vdev_tree will have completed by the time this is called, due to
 * the zio dependencies configured in zil_lwb_set_zio_dependency(),
 * which takes deferred flushes into account. The lwb will be "done"
 * once zil_lwb_flush_vdevs_done() is called, which occurs in the zio
 * completion callback for the lwb's root zio.
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	spa_t *spa = zio->io_spa;
	zilog_t *zilog = lwb->lwb_zilog;
	avl_tree_t *t = &lwb->lwb_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	lwb_t *nlwb;

	ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(BP_GET_FILL(zio->io_bp) == 0);

	abd_free(zio->io_abd);

	mutex_enter(&zilog->zl_lock);
	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
	lwb->lwb_state = LWB_STATE_WRITE_DONE;
	lwb->lwb_write_zio = NULL;
	lwb->lwb_fastwrite = FALSE;
	nlwb = list_next(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	if (avl_numnodes(t) == 0)
		return;

	/*
	 * If there was an IO error, we're not going to call zio_flush()
	 * on these vdevs, so we simply empty the tree and free the
	 * nodes. We avoid calling zio_flush() since there isn't any
	 * good reason for doing so, after the lwb block failed to be
	 * written out.
	 *
	 * Additionally, we don't perform any further error handling at
	 * this point (e.g. setting "zcw_zio_error" appropriately), as
	 * we expect that to occur in "zil_lwb_flush_vdevs_done" (thus,
	 * we expect any error seen here, to have been propagated to
	 * that function).
	 */
	if (zio->io_error != 0) {
		while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
			kmem_free(zv, sizeof (*zv));
		return;
	}

	/*
	 * If this lwb does not have any threads waiting for it to
	 * complete, we want to defer issuing the DKIOCFLUSHWRITECACHE
	 * command to the vdevs written to by "this" lwb, and instead
	 * rely on the "next" lwb to handle the DKIOCFLUSHWRITECACHE
	 * command for those vdevs. Thus, we merge the vdev tree of
	 * "this" lwb with the vdev tree of the "next" lwb in the list,
	 * and assume the "next" lwb will handle flushing the vdevs (or
	 * deferring the flush(es) again).
	 *
	 * This is a useful performance optimization, especially for
	 * workloads with lots of async write activity and little sync
	 * write and/or fsync activity, as it has the potential to
	 * coalesce multiple flush commands to a vdev into one.
	 */
	if (list_head(&lwb->lwb_waiters) == NULL && nlwb != NULL) {
		zil_lwb_flush_defer(lwb, nlwb);
		ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
		return;
	}

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL) {
			/*
			 * The "ZIO_FLAG_DONT_PROPAGATE" is currently
			 * always used within "zio_flush". This means
			 * any errors when flushing the vdev(s) will
			 * (unfortunately) not be handled correctly,
			 * since these "zio_flush" errors will not be
			 * propagated up to "zil_lwb_flush_vdevs_done".
			 */
			zio_flush(lwb->lwb_root_zio, vd);
		}
		kmem_free(zv, sizeof (*zv));
	}
}

static void
zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened;

	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
	ASSERT(MUTEX_HELD(&zilog->zl_lock));

	/*
	 * The zilog's "zl_last_lwb_opened" field is used to build the
	 * lwb/zio dependency chain, which is used to preserve the
	 * ordering of lwb completions that is required by the semantics
	 * of the ZIL. Each new lwb zio becomes a parent of the
	 * "previous" lwb zio, such that the new lwb's zio cannot
	 * complete until the "previous" lwb's zio completes.
	 *
	 * This is required by the semantics of zil_commit(); the commit
	 * waiters attached to the lwbs will be woken in the lwb zio's
	 * completion callback, so this zio dependency graph ensures the
	 * waiters are woken in the correct order (the same order the
	 * lwbs were created).
	 */
	if (last_lwb_opened != NULL &&
	    last_lwb_opened->lwb_state != LWB_STATE_FLUSH_DONE) {
		ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
		    last_lwb_opened->lwb_state == LWB_STATE_ISSUED ||
		    last_lwb_opened->lwb_state == LWB_STATE_WRITE_DONE);

		ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL);
		zio_add_child(lwb->lwb_root_zio,
		    last_lwb_opened->lwb_root_zio);

		/*
		 * If the previous lwb's write hasn't already completed,
		 * we also want to order the completion of the lwb write
		 * zios (above, we only order the completion of the lwb
		 * root zios). This is required because of how we can
		 * defer the DKIOCFLUSHWRITECACHE commands for each lwb.
		 *
		 * When the DKIOCFLUSHWRITECACHE commands are deferred,
		 * the previous lwb will rely on this lwb to flush the
		 * vdevs written to by that previous lwb.
		 * Thus, we need to ensure this lwb doesn't issue the
		 * flush until after the previous lwb's write completes.
		 * We ensure this ordering by setting the zio
		 * parent/child relationship here.
		 *
		 * Without this relationship on the lwb's write zio,
		 * it's possible for this lwb's write to complete prior
		 * to the previous lwb's write completing; and thus, the
		 * vdevs for the previous lwb would be flushed prior to
		 * that lwb's data being written to those vdevs (the
		 * vdevs are flushed in the lwb write zio's completion
		 * handler, zil_lwb_write_done()).
		 */
		if (last_lwb_opened->lwb_state != LWB_STATE_WRITE_DONE) {
			ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
			    last_lwb_opened->lwb_state == LWB_STATE_ISSUED);

			ASSERT3P(last_lwb_opened->lwb_write_zio, !=, NULL);
			zio_add_child(lwb->lwb_write_zio,
			    last_lwb_opened->lwb_write_zio);
		}
	}
}


/*
 * This function's purpose is to "open" an lwb such that it is ready to
 * accept new itxs being committed to it. To do this, the lwb's zio
 * structures are created, and linked to the lwb. This function is
 * idempotent; if the passed in lwb has already been opened, this
 * function is essentially a no-op.
 */
static void
zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_phys_t zb;
	zio_priority_t prio;

	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
	ASSERT3P(lwb, !=, NULL);
	EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED);
	EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED);

	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);

	/* Lock so zil_sync() doesn't fastwrite_unmark after zio is created */
	mutex_enter(&zilog->zl_lock);
	if (lwb->lwb_root_zio == NULL) {
		abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf,
		    BP_GET_LSIZE(&lwb->lwb_blk));

		if (!lwb->lwb_fastwrite) {
			metaslab_fastwrite_mark(zilog->zl_spa, &lwb->lwb_blk);
			lwb->lwb_fastwrite = 1;
		}

		if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
			prio = ZIO_PRIORITY_SYNC_WRITE;
		else
			prio = ZIO_PRIORITY_ASYNC_WRITE;

		lwb->lwb_root_zio = zio_root(zilog->zl_spa,
		    zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL);
		ASSERT3P(lwb->lwb_root_zio, !=, NULL);

		lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio,
		    zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd,
		    BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb,
		    prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_FASTWRITE, &zb);
		ASSERT3P(lwb->lwb_write_zio, !=, NULL);

		lwb->lwb_state = LWB_STATE_OPENED;

		zil_lwb_set_zio_dependency(zilog, lwb);
		zilog->zl_last_lwb_opened = lwb;
	}
	mutex_exit(&zilog->zl_lock);

	ASSERT3P(lwb->lwb_root_zio, !=, NULL);
	ASSERT3P(lwb->lwb_write_zio, !=, NULL);
	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
}

/*
 * Define a limited set of intent log block sizes.
 *
 * These must be a multiple of 4KB. Note only the amount used (again
 * aligned to 4KB) actually gets written. However, we can't always just
 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
1740 */ 1741 static const struct { 1742 uint64_t limit; 1743 uint64_t blksz; 1744 } zil_block_buckets[] = { 1745 { 4096, 4096 }, /* non TX_WRITE */ 1746 { 8192 + 4096, 8192 + 4096 }, /* database */ 1747 { 32768 + 4096, 32768 + 4096 }, /* NFS writes */ 1748 { 65536 + 4096, 65536 + 4096 }, /* 64KB writes */ 1749 { 131072, 131072 }, /* < 128KB writes */ 1750 { 131072 +4096, 65536 + 4096 }, /* 128KB writes */ 1751 { UINT64_MAX, SPA_OLD_MAXBLOCKSIZE}, /* > 128KB writes */ 1752 }; 1753 1754 /* 1755 * Maximum block size used by the ZIL. This is picked up when the ZIL is 1756 * initialized. Otherwise this should not be used directly; see 1757 * zl_max_block_size instead. 1758 */ 1759 static uint_t zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE; 1760 1761 /* 1762 * Start a log block write and advance to the next log block. 1763 * Calls are serialized. 1764 */ 1765 static lwb_t * 1766 zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb) 1767 { 1768 lwb_t *nlwb = NULL; 1769 zil_chain_t *zilc; 1770 spa_t *spa = zilog->zl_spa; 1771 blkptr_t *bp; 1772 dmu_tx_t *tx; 1773 uint64_t txg; 1774 uint64_t zil_blksz, wsz; 1775 int i, error; 1776 boolean_t slog; 1777 1778 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1779 ASSERT3P(lwb->lwb_root_zio, !=, NULL); 1780 ASSERT3P(lwb->lwb_write_zio, !=, NULL); 1781 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 1782 1783 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 1784 zilc = (zil_chain_t *)lwb->lwb_buf; 1785 bp = &zilc->zc_next_blk; 1786 } else { 1787 zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz); 1788 bp = &zilc->zc_next_blk; 1789 } 1790 1791 ASSERT(lwb->lwb_nused <= lwb->lwb_sz); 1792 1793 /* 1794 * Allocate the next block and save its address in this block 1795 * before writing it in order to establish the log chain. 1796 */ 1797 1798 tx = dmu_tx_create(zilog->zl_os); 1799 1800 /* 1801 * Since we are not going to create any new dirty data, and we 1802 * can even help with clearing the existing dirty data, we 1803 * should not be subject to the dirty data based delays. We 1804 * use TXG_NOTHROTTLE to bypass the delay mechanism. 1805 */ 1806 VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE)); 1807 1808 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 1809 txg = dmu_tx_get_txg(tx); 1810 1811 mutex_enter(&zilog->zl_lwb_io_lock); 1812 lwb->lwb_issued_txg = txg; 1813 zilog->zl_lwb_inflight[txg & TXG_MASK]++; 1814 zilog->zl_lwb_max_issued_txg = MAX(txg, zilog->zl_lwb_max_issued_txg); 1815 mutex_exit(&zilog->zl_lwb_io_lock); 1816 1817 /* 1818 * Log blocks are pre-allocated. Here we select the size of the next 1819 * block, based on size used in the last block. 1820 * - first find the smallest bucket that will fit the block from a 1821 * limited set of block sizes. This is because it's faster to write 1822 * blocks allocated from the same metaslab as they are adjacent or 1823 * close. 1824 * - next find the maximum from the new suggested size and an array of 1825 * previous sizes. This lessens a picket fence effect of wrongly 1826 * guessing the size if we have a stream of say 2k, 64k, 2k, 64k 1827 * requests. 1828 * 1829 * Note we only write what is used, but we can't just allocate 1830 * the maximum block size because we can exhaust the available 1831 * pool log space. 
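 *
 * Worked example (added annotation; the numbers are hypothetical): if
 * zl_cur_used plus the zil_chain_t trailer comes to roughly 20KB, the
 * bucket scan below selects the 36KB block size; if zl_prev_blks still
 * holds a 128KB entry from a recent burst of large writes, the MAX()
 * pass keeps the estimate at 128KB, smoothing the picket fence effect
 * described above.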
1832 */ 1833 zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t); 1834 for (i = 0; zil_blksz > zil_block_buckets[i].limit; i++) 1835 continue; 1836 zil_blksz = MIN(zil_block_buckets[i].blksz, zilog->zl_max_block_size); 1837 zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz; 1838 for (i = 0; i < ZIL_PREV_BLKS; i++) 1839 zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]); 1840 zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1); 1841 1842 BP_ZERO(bp); 1843 error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, zil_blksz, &slog); 1844 if (slog) { 1845 ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count); 1846 ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes, 1847 lwb->lwb_nused); 1848 } else { 1849 ZIL_STAT_BUMP(zilog, zil_itx_metaslab_normal_count); 1850 ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_bytes, 1851 lwb->lwb_nused); 1852 } 1853 if (error == 0) { 1854 ASSERT3U(bp->blk_birth, ==, txg); 1855 bp->blk_cksum = lwb->lwb_blk.blk_cksum; 1856 bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++; 1857 1858 /* 1859 * Allocate a new log write block (lwb). 1860 */ 1861 nlwb = zil_alloc_lwb(zilog, bp, slog, txg, TRUE); 1862 } 1863 1864 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 1865 /* For Slim ZIL only write what is used. */ 1866 wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t); 1867 ASSERT3U(wsz, <=, lwb->lwb_sz); 1868 zio_shrink(lwb->lwb_write_zio, wsz); 1869 1870 } else { 1871 wsz = lwb->lwb_sz; 1872 } 1873 1874 zilc->zc_pad = 0; 1875 zilc->zc_nused = lwb->lwb_nused; 1876 zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum; 1877 1878 /* 1879 * clear unused data for security 1880 */ 1881 memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused); 1882 1883 spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER); 1884 1885 zil_lwb_add_block(lwb, &lwb->lwb_blk); 1886 lwb->lwb_issued_timestamp = gethrtime(); 1887 lwb->lwb_state = LWB_STATE_ISSUED; 1888 1889 zio_nowait(lwb->lwb_root_zio); 1890 zio_nowait(lwb->lwb_write_zio); 1891 1892 dmu_tx_commit(tx); 1893 1894 /* 1895 * If there was an allocation failure then nlwb will be null which 1896 * forces a txg_wait_synced(). 1897 */ 1898 return (nlwb); 1899 } 1900 1901 /* 1902 * Maximum amount of data that can be put into single log block. 1903 */ 1904 uint64_t 1905 zil_max_log_data(zilog_t *zilog, size_t hdrsize) 1906 { 1907 return (zilog->zl_max_block_size - sizeof (zil_chain_t) - hdrsize); 1908 } 1909 1910 /* 1911 * Maximum amount of log space we agree to waste to reduce number of 1912 * WR_NEED_COPY chunks to reduce zl_get_data() overhead (~12%). 1913 */ 1914 static inline uint64_t 1915 zil_max_waste_space(zilog_t *zilog) 1916 { 1917 return (zil_max_log_data(zilog, sizeof (lr_write_t)) / 8); 1918 } 1919 1920 /* 1921 * Maximum amount of write data for WR_COPIED. For correctness, consumers 1922 * must fall back to WR_NEED_COPY if we can't fit the entire record into one 1923 * maximum sized log block, because each WR_COPIED record must fit in a 1924 * single log block. For space efficiency, we want to fit two records into a 1925 * max-sized log block. 
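 *
 * Rough arithmetic (added annotation, assuming the default 128KB
 * zl_max_block_size): the limit works out to
 * (131072 - sizeof (zil_chain_t)) / 2 - sizeof (lr_write_t), i.e. a
 * little under 64KB of write data per WR_COPIED record.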
1926 */ 1927 uint64_t 1928 zil_max_copied_data(zilog_t *zilog) 1929 { 1930 return ((zilog->zl_max_block_size - sizeof (zil_chain_t)) / 2 - 1931 sizeof (lr_write_t)); 1932 } 1933 1934 static lwb_t * 1935 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb) 1936 { 1937 lr_t *lrcb, *lrc; 1938 lr_write_t *lrwb, *lrw; 1939 char *lr_buf; 1940 uint64_t dlen, dnow, dpad, lwb_sp, reclen, txg, max_log_data; 1941 1942 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1943 ASSERT3P(lwb, !=, NULL); 1944 ASSERT3P(lwb->lwb_buf, !=, NULL); 1945 1946 zil_lwb_write_open(zilog, lwb); 1947 1948 lrc = &itx->itx_lr; 1949 lrw = (lr_write_t *)lrc; 1950 1951 /* 1952 * A commit itx doesn't represent any on-disk state; instead 1953 * it's simply used as a place holder on the commit list, and 1954 * provides a mechanism for attaching a "commit waiter" onto the 1955 * correct lwb (such that the waiter can be signalled upon 1956 * completion of that lwb). Thus, we don't process this itx's 1957 * log record if it's a commit itx (these itx's don't have log 1958 * records), and instead link the itx's waiter onto the lwb's 1959 * list of waiters. 1960 * 1961 * For more details, see the comment above zil_commit(). 1962 */ 1963 if (lrc->lrc_txtype == TX_COMMIT) { 1964 mutex_enter(&zilog->zl_lock); 1965 zil_commit_waiter_link_lwb(itx->itx_private, lwb); 1966 itx->itx_private = NULL; 1967 mutex_exit(&zilog->zl_lock); 1968 return (lwb); 1969 } 1970 1971 if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) { 1972 dlen = P2ROUNDUP_TYPED( 1973 lrw->lr_length, sizeof (uint64_t), uint64_t); 1974 dpad = dlen - lrw->lr_length; 1975 } else { 1976 dlen = dpad = 0; 1977 } 1978 reclen = lrc->lrc_reclen; 1979 zilog->zl_cur_used += (reclen + dlen); 1980 txg = lrc->lrc_txg; 1981 1982 ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen)); 1983 1984 cont: 1985 /* 1986 * If this record won't fit in the current log block, start a new one. 1987 * For WR_NEED_COPY optimize layout for minimal number of chunks. 1988 */ 1989 lwb_sp = lwb->lwb_sz - lwb->lwb_nused; 1990 max_log_data = zil_max_log_data(zilog, sizeof (lr_write_t)); 1991 if (reclen > lwb_sp || (reclen + dlen > lwb_sp && 1992 lwb_sp < zil_max_waste_space(zilog) && 1993 (dlen % max_log_data == 0 || 1994 lwb_sp < reclen + dlen % max_log_data))) { 1995 lwb = zil_lwb_write_issue(zilog, lwb); 1996 if (lwb == NULL) 1997 return (NULL); 1998 zil_lwb_write_open(zilog, lwb); 1999 ASSERT(LWB_EMPTY(lwb)); 2000 lwb_sp = lwb->lwb_sz - lwb->lwb_nused; 2001 2002 /* 2003 * There must be enough space in the new, empty log block to 2004 * hold reclen. For WR_COPIED, we need to fit the whole 2005 * record in one block, and reclen is the header size + the 2006 * data size. For WR_NEED_COPY, we can create multiple 2007 * records, splitting the data into multiple blocks, so we 2008 * only need to fit one word of data per block; in this case 2009 * reclen is just the header size (no data). 2010 */ 2011 ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp); 2012 } 2013 2014 dnow = MIN(dlen, lwb_sp - reclen); 2015 lr_buf = lwb->lwb_buf + lwb->lwb_nused; 2016 memcpy(lr_buf, lrc, reclen); 2017 lrcb = (lr_t *)lr_buf; /* Like lrc, but inside lwb. */ 2018 lrwb = (lr_write_t *)lrcb; /* Like lrw, but inside lwb. */ 2019 2020 ZIL_STAT_BUMP(zilog, zil_itx_count); 2021 2022 /* 2023 * If it's a write, fetch the data or get its blkptr as appropriate. 
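 *
 * In short (summary annotation of the code below): a WR_COPIED record
 * already carries its data inside the log record; a WR_NEED_COPY
 * record has up to "dnow" bytes copied into the lwb buffer via the
 * ->zl_get_data() callback; a WR_INDIRECT record passes a NULL dbuf so
 * that ->zl_get_data() fills in the block pointer instead of copying
 * any data.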
2024 */ 2025 if (lrc->lrc_txtype == TX_WRITE) { 2026 if (txg > spa_freeze_txg(zilog->zl_spa)) 2027 txg_wait_synced(zilog->zl_dmu_pool, txg); 2028 if (itx->itx_wr_state == WR_COPIED) { 2029 ZIL_STAT_BUMP(zilog, zil_itx_copied_count); 2030 ZIL_STAT_INCR(zilog, zil_itx_copied_bytes, 2031 lrw->lr_length); 2032 } else { 2033 char *dbuf; 2034 int error; 2035 2036 if (itx->itx_wr_state == WR_NEED_COPY) { 2037 dbuf = lr_buf + reclen; 2038 lrcb->lrc_reclen += dnow; 2039 if (lrwb->lr_length > dnow) 2040 lrwb->lr_length = dnow; 2041 lrw->lr_offset += dnow; 2042 lrw->lr_length -= dnow; 2043 ZIL_STAT_BUMP(zilog, zil_itx_needcopy_count); 2044 ZIL_STAT_INCR(zilog, zil_itx_needcopy_bytes, 2045 dnow); 2046 } else { 2047 ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT); 2048 dbuf = NULL; 2049 ZIL_STAT_BUMP(zilog, zil_itx_indirect_count); 2050 ZIL_STAT_INCR(zilog, zil_itx_indirect_bytes, 2051 lrw->lr_length); 2052 } 2053 2054 /* 2055 * We pass in the "lwb_write_zio" rather than 2056 * "lwb_root_zio" so that the "lwb_write_zio" 2057 * becomes the parent of any zio's created by 2058 * the "zl_get_data" callback. The vdevs are 2059 * flushed after the "lwb_write_zio" completes, 2060 * so we want to make sure that completion 2061 * callback waits for these additional zio's, 2062 * such that the vdevs used by those zio's will 2063 * be included in the lwb's vdev tree, and those 2064 * vdevs will be properly flushed. If we passed 2065 * in "lwb_root_zio" here, then these additional 2066 * vdevs may not be flushed; e.g. if these zio's 2067 * completed after "lwb_write_zio" completed. 2068 */ 2069 error = zilog->zl_get_data(itx->itx_private, 2070 itx->itx_gen, lrwb, dbuf, lwb, 2071 lwb->lwb_write_zio); 2072 if (dbuf != NULL && error == 0 && dnow == dlen) 2073 /* Zero any padding bytes in the last block. */ 2074 memset((char *)dbuf + lrwb->lr_length, 0, dpad); 2075 2076 /* 2077 * Typically, the only return values we should see from 2078 * ->zl_get_data() are 0, EIO, ENOENT, EEXIST or 2079 * EALREADY. However, it is also possible to see other 2080 * error values such as ENOSPC or EINVAL from 2081 * dmu_read() -> dnode_hold() -> dnode_hold_impl() or 2082 * ENXIO as well as a multitude of others from the 2083 * block layer through dmu_buf_hold() -> dbuf_read() 2084 * -> zio_wait(), as well as through dmu_read() -> 2085 * dnode_hold() -> dnode_hold_impl() -> dbuf_read() -> 2086 * zio_wait(). When these errors happen, we can assume 2087 * that neither an immediate write nor an indirect 2088 * write occurred, so we need to fall back to 2089 * txg_wait_synced(). This is unusual, so we print to 2090 * dmesg whenever one of these errors occurs. 2091 */ 2092 switch (error) { 2093 case 0: 2094 break; 2095 default: 2096 cmn_err(CE_WARN, "zil_lwb_commit() received " 2097 "unexpected error %d from ->zl_get_data()" 2098 ". Falling back to txg_wait_synced().", 2099 error); 2100 zfs_fallthrough; 2101 case EIO: 2102 txg_wait_synced(zilog->zl_dmu_pool, txg); 2103 zfs_fallthrough; 2104 case ENOENT: 2105 zfs_fallthrough; 2106 case EEXIST: 2107 zfs_fallthrough; 2108 case EALREADY: 2109 return (lwb); 2110 } 2111 } 2112 } 2113 2114 /* 2115 * We're actually making an entry, so update lrc_seq to be the 2116 * log record sequence number. Note that this is generally not 2117 * equal to the itx sequence number because not all transactions 2118 * are synchronous, and sometimes spa_sync() gets there first. 
2119 */ 2120 lrcb->lrc_seq = ++zilog->zl_lr_seq; 2121 lwb->lwb_nused += reclen + dnow; 2122 2123 zil_lwb_add_txg(lwb, txg); 2124 2125 ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz); 2126 ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t))); 2127 2128 dlen -= dnow; 2129 if (dlen > 0) { 2130 zilog->zl_cur_used += reclen; 2131 goto cont; 2132 } 2133 2134 return (lwb); 2135 } 2136 2137 itx_t * 2138 zil_itx_create(uint64_t txtype, size_t olrsize) 2139 { 2140 size_t itxsize, lrsize; 2141 itx_t *itx; 2142 2143 lrsize = P2ROUNDUP_TYPED(olrsize, sizeof (uint64_t), size_t); 2144 itxsize = offsetof(itx_t, itx_lr) + lrsize; 2145 2146 itx = zio_data_buf_alloc(itxsize); 2147 itx->itx_lr.lrc_txtype = txtype; 2148 itx->itx_lr.lrc_reclen = lrsize; 2149 itx->itx_lr.lrc_seq = 0; /* defensive */ 2150 memset((char *)&itx->itx_lr + olrsize, 0, lrsize - olrsize); 2151 itx->itx_sync = B_TRUE; /* default is synchronous */ 2152 itx->itx_callback = NULL; 2153 itx->itx_callback_data = NULL; 2154 itx->itx_size = itxsize; 2155 2156 return (itx); 2157 } 2158 2159 void 2160 zil_itx_destroy(itx_t *itx) 2161 { 2162 IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL); 2163 IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); 2164 2165 if (itx->itx_callback != NULL) 2166 itx->itx_callback(itx->itx_callback_data); 2167 2168 zio_data_buf_free(itx, itx->itx_size); 2169 } 2170 2171 /* 2172 * Free up the sync and async itxs. The itxs_t has already been detached 2173 * so no locks are needed. 2174 */ 2175 static void 2176 zil_itxg_clean(void *arg) 2177 { 2178 itx_t *itx; 2179 list_t *list; 2180 avl_tree_t *t; 2181 void *cookie; 2182 itxs_t *itxs = arg; 2183 itx_async_node_t *ian; 2184 2185 list = &itxs->i_sync_list; 2186 while ((itx = list_head(list)) != NULL) { 2187 /* 2188 * In the general case, commit itxs will not be found 2189 * here, as they'll be committed to an lwb via 2190 * zil_lwb_commit(), and free'd in that function. Having 2191 * said that, it is still possible for commit itxs to be 2192 * found here, due to the following race: 2193 * 2194 * - a thread calls zil_commit() which assigns the 2195 * commit itx to a per-txg i_sync_list 2196 * - zil_itxg_clean() is called (e.g. via spa_sync()) 2197 * while the waiter is still on the i_sync_list 2198 * 2199 * There's nothing to prevent syncing the txg while the 2200 * waiter is on the i_sync_list. This normally doesn't 2201 * happen because spa_sync() is slower than zil_commit(), 2202 * but if zil_commit() calls txg_wait_synced() (e.g. 2203 * because zil_create() or zil_commit_writer_stall() is 2204 * called) we will hit this case. 2205 */ 2206 if (itx->itx_lr.lrc_txtype == TX_COMMIT) 2207 zil_commit_waiter_skip(itx->itx_private); 2208 2209 list_remove(list, itx); 2210 zil_itx_destroy(itx); 2211 } 2212 2213 cookie = NULL; 2214 t = &itxs->i_async_tree; 2215 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 2216 list = &ian->ia_list; 2217 while ((itx = list_head(list)) != NULL) { 2218 list_remove(list, itx); 2219 /* commit itxs should never be on the async lists. 
*/ 2220 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 2221 zil_itx_destroy(itx); 2222 } 2223 list_destroy(list); 2224 kmem_free(ian, sizeof (itx_async_node_t)); 2225 } 2226 avl_destroy(t); 2227 2228 kmem_free(itxs, sizeof (itxs_t)); 2229 } 2230 2231 static int 2232 zil_aitx_compare(const void *x1, const void *x2) 2233 { 2234 const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid; 2235 const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid; 2236 2237 return (TREE_CMP(o1, o2)); 2238 } 2239 2240 /* 2241 * Remove all async itx with the given oid. 2242 */ 2243 void 2244 zil_remove_async(zilog_t *zilog, uint64_t oid) 2245 { 2246 uint64_t otxg, txg; 2247 itx_async_node_t *ian; 2248 avl_tree_t *t; 2249 avl_index_t where; 2250 list_t clean_list; 2251 itx_t *itx; 2252 2253 ASSERT(oid != 0); 2254 list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); 2255 2256 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 2257 otxg = ZILTEST_TXG; 2258 else 2259 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 2260 2261 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 2262 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2263 2264 mutex_enter(&itxg->itxg_lock); 2265 if (itxg->itxg_txg != txg) { 2266 mutex_exit(&itxg->itxg_lock); 2267 continue; 2268 } 2269 2270 /* 2271 * Locate the object node and append its list. 2272 */ 2273 t = &itxg->itxg_itxs->i_async_tree; 2274 ian = avl_find(t, &oid, &where); 2275 if (ian != NULL) 2276 list_move_tail(&clean_list, &ian->ia_list); 2277 mutex_exit(&itxg->itxg_lock); 2278 } 2279 while ((itx = list_head(&clean_list)) != NULL) { 2280 list_remove(&clean_list, itx); 2281 /* commit itxs should never be on the async lists. */ 2282 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 2283 zil_itx_destroy(itx); 2284 } 2285 list_destroy(&clean_list); 2286 } 2287 2288 void 2289 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) 2290 { 2291 uint64_t txg; 2292 itxg_t *itxg; 2293 itxs_t *itxs, *clean = NULL; 2294 2295 /* 2296 * Ensure the data of a renamed file is committed before the rename. 2297 */ 2298 if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME) 2299 zil_async_to_sync(zilog, itx->itx_oid); 2300 2301 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) 2302 txg = ZILTEST_TXG; 2303 else 2304 txg = dmu_tx_get_txg(tx); 2305 2306 itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2307 mutex_enter(&itxg->itxg_lock); 2308 itxs = itxg->itxg_itxs; 2309 if (itxg->itxg_txg != txg) { 2310 if (itxs != NULL) { 2311 /* 2312 * The zil_clean callback hasn't got around to cleaning 2313 * this itxg. Save the itxs for release below. 2314 * This should be rare. 
2315 */ 2316 zfs_dbgmsg("zil_itx_assign: missed itx cleanup for " 2317 "txg %llu", (u_longlong_t)itxg->itxg_txg); 2318 clean = itxg->itxg_itxs; 2319 } 2320 itxg->itxg_txg = txg; 2321 itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), 2322 KM_SLEEP); 2323 2324 list_create(&itxs->i_sync_list, sizeof (itx_t), 2325 offsetof(itx_t, itx_node)); 2326 avl_create(&itxs->i_async_tree, zil_aitx_compare, 2327 sizeof (itx_async_node_t), 2328 offsetof(itx_async_node_t, ia_node)); 2329 } 2330 if (itx->itx_sync) { 2331 list_insert_tail(&itxs->i_sync_list, itx); 2332 } else { 2333 avl_tree_t *t = &itxs->i_async_tree; 2334 uint64_t foid = 2335 LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid); 2336 itx_async_node_t *ian; 2337 avl_index_t where; 2338 2339 ian = avl_find(t, &foid, &where); 2340 if (ian == NULL) { 2341 ian = kmem_alloc(sizeof (itx_async_node_t), 2342 KM_SLEEP); 2343 list_create(&ian->ia_list, sizeof (itx_t), 2344 offsetof(itx_t, itx_node)); 2345 ian->ia_foid = foid; 2346 avl_insert(t, ian, where); 2347 } 2348 list_insert_tail(&ian->ia_list, itx); 2349 } 2350 2351 itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx); 2352 2353 /* 2354 * We don't want to dirty the ZIL using ZILTEST_TXG, because 2355 * zil_clean() will never be called using ZILTEST_TXG. Thus, we 2356 * need to be careful to always dirty the ZIL using the "real" 2357 * TXG (not itxg_txg) even when the SPA is frozen. 2358 */ 2359 zilog_dirty(zilog, dmu_tx_get_txg(tx)); 2360 mutex_exit(&itxg->itxg_lock); 2361 2362 /* Release the old itxs now we've dropped the lock */ 2363 if (clean != NULL) 2364 zil_itxg_clean(clean); 2365 } 2366 2367 /* 2368 * If there are any in-memory intent log transactions which have now been 2369 * synced then start up a taskq to free them. We should only do this after we 2370 * have written out the uberblocks (i.e. txg has been committed) so that we 2371 * don't inadvertently clean out in-memory log records that would be required 2372 * by zil_commit(). 2373 */ 2374 void 2375 zil_clean(zilog_t *zilog, uint64_t synced_txg) 2376 { 2377 itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK]; 2378 itxs_t *clean_me; 2379 2380 ASSERT3U(synced_txg, <, ZILTEST_TXG); 2381 2382 mutex_enter(&itxg->itxg_lock); 2383 if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) { 2384 mutex_exit(&itxg->itxg_lock); 2385 return; 2386 } 2387 ASSERT3U(itxg->itxg_txg, <=, synced_txg); 2388 ASSERT3U(itxg->itxg_txg, !=, 0); 2389 clean_me = itxg->itxg_itxs; 2390 itxg->itxg_itxs = NULL; 2391 itxg->itxg_txg = 0; 2392 mutex_exit(&itxg->itxg_lock); 2393 /* 2394 * Preferably start a task queue to free up the old itxs but 2395 * if taskq_dispatch can't allocate resources to do that then 2396 * free it in-line. This should be rare. Note, using TQ_SLEEP 2397 * created a bad performance problem. 2398 */ 2399 ASSERT3P(zilog->zl_dmu_pool, !=, NULL); 2400 ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL); 2401 taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq, 2402 zil_itxg_clean, clean_me, TQ_NOSLEEP); 2403 if (id == TASKQID_INVALID) 2404 zil_itxg_clean(clean_me); 2405 } 2406 2407 /* 2408 * This function will traverse the queue of itxs that need to be 2409 * committed, and move them onto the ZIL's zl_itx_commit_list.
2410 */ 2411 static void 2412 zil_get_commit_list(zilog_t *zilog) 2413 { 2414 uint64_t otxg, txg; 2415 list_t *commit_list = &zilog->zl_itx_commit_list; 2416 2417 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2418 2419 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 2420 otxg = ZILTEST_TXG; 2421 else 2422 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 2423 2424 /* 2425 * This is inherently racy, since there is nothing to prevent 2426 * the last synced txg from changing. That's okay since we'll 2427 * only commit things in the future. 2428 */ 2429 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 2430 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2431 2432 mutex_enter(&itxg->itxg_lock); 2433 if (itxg->itxg_txg != txg) { 2434 mutex_exit(&itxg->itxg_lock); 2435 continue; 2436 } 2437 2438 /* 2439 * If we're adding itx records to the zl_itx_commit_list, 2440 * then the zil better be dirty in this "txg". We can assert 2441 * that here since we're holding the itxg_lock which will 2442 * prevent spa_sync from cleaning it. Once we add the itxs 2443 * to the zl_itx_commit_list we must commit it to disk even 2444 * if it's unnecessary (i.e. the txg was synced). 2445 */ 2446 ASSERT(zilog_is_dirty_in_txg(zilog, txg) || 2447 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX); 2448 list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list); 2449 2450 mutex_exit(&itxg->itxg_lock); 2451 } 2452 } 2453 2454 /* 2455 * Move the async itxs for a specified object to commit into sync lists. 2456 */ 2457 void 2458 zil_async_to_sync(zilog_t *zilog, uint64_t foid) 2459 { 2460 uint64_t otxg, txg; 2461 itx_async_node_t *ian; 2462 avl_tree_t *t; 2463 avl_index_t where; 2464 2465 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 2466 otxg = ZILTEST_TXG; 2467 else 2468 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 2469 2470 /* 2471 * This is inherently racy, since there is nothing to prevent 2472 * the last synced txg from changing. 2473 */ 2474 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 2475 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2476 2477 mutex_enter(&itxg->itxg_lock); 2478 if (itxg->itxg_txg != txg) { 2479 mutex_exit(&itxg->itxg_lock); 2480 continue; 2481 } 2482 2483 /* 2484 * If a foid is specified then find that node and append its 2485 * list. Otherwise walk the tree appending all the lists 2486 * to the sync list. We add to the end rather than the 2487 * beginning to ensure the create has happened. 2488 */ 2489 t = &itxg->itxg_itxs->i_async_tree; 2490 if (foid != 0) { 2491 ian = avl_find(t, &foid, &where); 2492 if (ian != NULL) { 2493 list_move_tail(&itxg->itxg_itxs->i_sync_list, 2494 &ian->ia_list); 2495 } 2496 } else { 2497 void *cookie = NULL; 2498 2499 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 2500 list_move_tail(&itxg->itxg_itxs->i_sync_list, 2501 &ian->ia_list); 2502 list_destroy(&ian->ia_list); 2503 kmem_free(ian, sizeof (itx_async_node_t)); 2504 } 2505 } 2506 mutex_exit(&itxg->itxg_lock); 2507 } 2508 } 2509 2510 /* 2511 * This function will prune commit itxs that are at the head of the 2512 * commit list (it won't prune past the first non-commit itx), and 2513 * either: a) attach them to the last lwb that's still pending 2514 * completion, or b) skip them altogether. 2515 * 2516 * This is used as a performance optimization to prevent commit itxs 2517 * from generating new lwbs when it's unnecessary to do so. 
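 *
 * For example (added annotation): if every lwb issued so far has
 * already reached LWB_STATE_FLUSH_DONE by the time a commit itx is
 * examined, there is nothing left for its waiter to wait on, so the
 * waiter is simply skipped and marked done; otherwise the waiter is
 * linked onto zl_last_lwb_opened and will be signalled when that lwb
 * completes.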
2518 */ 2519 static void 2520 zil_prune_commit_list(zilog_t *zilog) 2521 { 2522 itx_t *itx; 2523 2524 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2525 2526 while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) { 2527 lr_t *lrc = &itx->itx_lr; 2528 if (lrc->lrc_txtype != TX_COMMIT) 2529 break; 2530 2531 mutex_enter(&zilog->zl_lock); 2532 2533 lwb_t *last_lwb = zilog->zl_last_lwb_opened; 2534 if (last_lwb == NULL || 2535 last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) { 2536 /* 2537 * All of the itxs this waiter was waiting on 2538 * must have already completed (or there were 2539 * never any itx's for it to wait on), so it's 2540 * safe to skip this waiter and mark it done. 2541 */ 2542 zil_commit_waiter_skip(itx->itx_private); 2543 } else { 2544 zil_commit_waiter_link_lwb(itx->itx_private, last_lwb); 2545 itx->itx_private = NULL; 2546 } 2547 2548 mutex_exit(&zilog->zl_lock); 2549 2550 list_remove(&zilog->zl_itx_commit_list, itx); 2551 zil_itx_destroy(itx); 2552 } 2553 2554 IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); 2555 } 2556 2557 static void 2558 zil_commit_writer_stall(zilog_t *zilog) 2559 { 2560 /* 2561 * When zio_alloc_zil() fails to allocate the next lwb block on 2562 * disk, we must call txg_wait_synced() to ensure all of the 2563 * lwbs in the zilog's zl_lwb_list are synced and then freed (in 2564 * zil_sync()), such that any subsequent ZIL writer (i.e. a call 2565 * to zil_process_commit_list()) will have to call zil_create(), 2566 * and start a new ZIL chain. 2567 * 2568 * Since zio_alloc_zil() failed, the lwb that was previously 2569 * issued does not have a pointer to the "next" lwb on disk. 2570 * Thus, if another ZIL writer thread was to allocate the "next" 2571 * on-disk lwb, that block could be leaked in the event of a 2572 * crash (because the previous lwb on-disk would not point to 2573 * it). 2574 * 2575 * We must hold the zilog's zl_issuer_lock while we do this, to 2576 * ensure no new threads enter zil_process_commit_list() until 2577 * all lwb's in the zl_lwb_list have been synced and freed 2578 * (which is achieved via the txg_wait_synced() call). 2579 */ 2580 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2581 txg_wait_synced(zilog->zl_dmu_pool, 0); 2582 ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL); 2583 } 2584 2585 /* 2586 * This function will traverse the commit list, creating new lwbs as 2587 * needed, and committing the itxs from the commit list to these newly 2588 * created lwbs. Additionally, as a new lwb is created, the previous 2589 * lwb will be issued to the zio layer to be written to disk. 2590 */ 2591 static void 2592 zil_process_commit_list(zilog_t *zilog) 2593 { 2594 spa_t *spa = zilog->zl_spa; 2595 list_t nolwb_itxs; 2596 list_t nolwb_waiters; 2597 lwb_t *lwb, *plwb; 2598 itx_t *itx; 2599 boolean_t first = B_TRUE; 2600 2601 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2602 2603 /* 2604 * Return if there's nothing to commit before we dirty the fs by 2605 * calling zil_create(). 2606 */ 2607 if (list_head(&zilog->zl_itx_commit_list) == NULL) 2608 return; 2609 2610 list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node)); 2611 list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t), 2612 offsetof(zil_commit_waiter_t, zcw_node)); 2613 2614 lwb = list_tail(&zilog->zl_lwb_list); 2615 if (lwb == NULL) { 2616 lwb = zil_create(zilog); 2617 } else { 2618 /* 2619 * Activate SPA_FEATURE_ZILSAXATTR for the cases where ZIL will 2620 * have already been created (zl_lwb_list not empty).
2621 */ 2622 zil_commit_activate_saxattr_feature(zilog); 2623 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 2624 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE); 2625 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); 2626 first = (lwb->lwb_state != LWB_STATE_OPENED) && 2627 ((plwb = list_prev(&zilog->zl_lwb_list, lwb)) == NULL || 2628 plwb->lwb_state == LWB_STATE_FLUSH_DONE); 2629 } 2630 2631 while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) { 2632 lr_t *lrc = &itx->itx_lr; 2633 uint64_t txg = lrc->lrc_txg; 2634 2635 ASSERT3U(txg, !=, 0); 2636 2637 if (lrc->lrc_txtype == TX_COMMIT) { 2638 DTRACE_PROBE2(zil__process__commit__itx, 2639 zilog_t *, zilog, itx_t *, itx); 2640 } else { 2641 DTRACE_PROBE2(zil__process__normal__itx, 2642 zilog_t *, zilog, itx_t *, itx); 2643 } 2644 2645 list_remove(&zilog->zl_itx_commit_list, itx); 2646 2647 boolean_t synced = txg <= spa_last_synced_txg(spa); 2648 boolean_t frozen = txg > spa_freeze_txg(spa); 2649 2650 /* 2651 * If the txg of this itx has already been synced out, then 2652 * we don't need to commit this itx to an lwb. This is 2653 * because the data of this itx will have already been 2654 * written to the main pool. This is inherently racy, and 2655 * it's still ok to commit an itx whose txg has already 2656 * been synced; this will result in a write that's 2657 * unnecessary, but will do no harm. 2658 * 2659 * With that said, we always want to commit TX_COMMIT itxs 2660 * to an lwb, regardless of whether or not that itx's txg 2661 * has been synced out. We do this to ensure any OPENED lwb 2662 * will always have at least one zil_commit_waiter_t linked 2663 * to the lwb. 2664 * 2665 * As a counter-example, if we skipped TX_COMMIT itx's 2666 * whose txg had already been synced, the following 2667 * situation could occur if we happened to be racing with 2668 * spa_sync: 2669 * 2670 * 1. We commit a non-TX_COMMIT itx to an lwb, where the 2671 * itx's txg is 10 and the last synced txg is 9. 2672 * 2. spa_sync finishes syncing out txg 10. 2673 * 3. We move to the next itx in the list, it's a TX_COMMIT 2674 * whose txg is 10, so we skip it rather than committing 2675 * it to the lwb used in (1). 2676 * 2677 * If the itx that is skipped in (3) is the last TX_COMMIT 2678 * itx in the commit list, then it's possible for the lwb 2679 * used in (1) to remain in the OPENED state indefinitely. 2680 * 2681 * To prevent the above scenario from occurring, ensuring 2682 * that once an lwb is OPENED it will transition to ISSUED 2683 * and eventually DONE, we always commit TX_COMMIT itx's to 2684 * an lwb here, even if that itx's txg has already been 2685 * synced. 2686 * 2687 * Finally, if the pool is frozen, we _always_ commit the 2688 * itx. The point of freezing the pool is to prevent data 2689 * from being written to the main pool via spa_sync, and 2690 * instead rely solely on the ZIL to persistently store the 2691 * data; i.e. when the pool is frozen, the last synced txg 2692 * value can't be trusted.
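 *
 * Summarized (added annotation): the test below commits the itx to an
 * lwb when the pool is frozen, when the itx's txg has not yet synced,
 * or when the itx is a TX_COMMIT; any other itx whose txg has already
 * synced is simply destroyed.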
2693 */ 2694 if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) { 2695 if (lwb != NULL) { 2696 lwb = zil_lwb_commit(zilog, itx, lwb); 2697 2698 if (lwb == NULL) 2699 list_insert_tail(&nolwb_itxs, itx); 2700 else 2701 list_insert_tail(&lwb->lwb_itxs, itx); 2702 } else { 2703 if (lrc->lrc_txtype == TX_COMMIT) { 2704 zil_commit_waiter_link_nolwb( 2705 itx->itx_private, &nolwb_waiters); 2706 } 2707 2708 list_insert_tail(&nolwb_itxs, itx); 2709 } 2710 } else { 2711 ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT); 2712 zil_itx_destroy(itx); 2713 } 2714 } 2715 2716 if (lwb == NULL) { 2717 /* 2718 * This indicates zio_alloc_zil() failed to allocate the 2719 * "next" lwb on-disk. When this happens, we must stall 2720 * the ZIL write pipeline; see the comment within 2721 * zil_commit_writer_stall() for more details. 2722 */ 2723 zil_commit_writer_stall(zilog); 2724 2725 /* 2726 * Additionally, we have to signal and mark the "nolwb" 2727 * waiters as "done" here, since without an lwb, we 2728 * can't do this via zil_lwb_flush_vdevs_done() like 2729 * normal. 2730 */ 2731 zil_commit_waiter_t *zcw; 2732 while ((zcw = list_head(&nolwb_waiters)) != NULL) { 2733 zil_commit_waiter_skip(zcw); 2734 list_remove(&nolwb_waiters, zcw); 2735 } 2736 2737 /* 2738 * And finally, we have to destroy the itx's that 2739 * couldn't be committed to an lwb; this will also call 2740 * the itx's callback if one exists for the itx. 2741 */ 2742 while ((itx = list_head(&nolwb_itxs)) != NULL) { 2743 list_remove(&nolwb_itxs, itx); 2744 zil_itx_destroy(itx); 2745 } 2746 } else { 2747 ASSERT(list_is_empty(&nolwb_waiters)); 2748 ASSERT3P(lwb, !=, NULL); 2749 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 2750 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE); 2751 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); 2752 2753 /* 2754 * At this point, the ZIL block pointed at by the "lwb" 2755 * variable is in one of the following states: "closed" 2756 * or "open". 2757 * 2758 * If it's "closed", then no itxs have been committed to 2759 * it, so there's no point in issuing its zio (i.e. it's 2760 * "empty"). 2761 * 2762 * If it's "open", then it contains one or more itxs that 2763 * eventually need to be committed to stable storage. In 2764 * this case we intentionally do not issue the lwb's zio 2765 * to disk yet, and instead rely on one of the following 2766 * two mechanisms for issuing the zio: 2767 * 2768 * 1. Ideally, there will be more ZIL activity occurring 2769 * on the system, such that this function will be 2770 * immediately called again (not necessarily by the same 2771 * thread) and this lwb's zio will be issued via 2772 * zil_lwb_commit(). This way, the lwb is guaranteed to 2773 * be "full" when it is issued to disk, and we'll make 2774 * use of the lwb's size the best we can. 2775 * 2776 * 2. If there isn't sufficient ZIL activity occurring on 2777 * the system, such that this lwb's zio isn't issued via 2778 * zil_lwb_commit(), zil_commit_waiter() will issue the 2779 * lwb's zio. If this occurs, the lwb is not guaranteed 2780 * to be "full" by the time its zio is issued, and means 2781 * the size of the lwb was "too large" given the amount 2782 * of ZIL activity occurring on the system at that time. 2783 * 2784 * We do this for a couple of reasons: 2785 * 2786 * 1. To try and reduce the number of IOPs needed to 2787 * write the same number of itxs. 
If an lwb has space 2788 * available in its buffer for more itxs, and more itxs 2789 * will be committed relatively soon (relative to the 2790 * latency of performing a write), then it's beneficial 2791 * to wait for these "next" itxs. This way, more itxs 2792 * can be committed to stable storage with fewer writes. 2793 * 2794 * 2. To try and use the largest lwb block size that the 2795 * incoming rate of itxs can support. Again, this is to 2796 * try and pack as many itxs into as few lwbs as 2797 * possible, without significantly impacting the latency 2798 * of each individual itx. 2799 * 2800 * If we have no already running or open LWBs, it may be 2801 * that the workload is single-threaded. And if the ZIL write 2802 * latency is very small or if the LWB is almost full, it 2803 * may be cheaper to bypass the delay. 2804 */ 2805 if (lwb->lwb_state == LWB_STATE_OPENED && first) { 2806 hrtime_t sleep = zilog->zl_last_lwb_latency * 2807 zfs_commit_timeout_pct / 100; 2808 if (sleep < zil_min_commit_timeout || 2809 lwb->lwb_sz - lwb->lwb_nused < lwb->lwb_sz / 8) { 2810 lwb = zil_lwb_write_issue(zilog, lwb); 2811 zilog->zl_cur_used = 0; 2812 if (lwb == NULL) 2813 zil_commit_writer_stall(zilog); 2814 } 2815 } 2816 } 2817 } 2818 2819 /* 2820 * This function is responsible for ensuring the passed in commit waiter 2821 * (and associated commit itx) is committed to an lwb. If the waiter is 2822 * not already committed to an lwb, all itxs in the zilog's queue of 2823 * itxs will be processed. The assumption is the passed in waiter's 2824 * commit itx will be found in the queue just like the other non-commit 2825 * itxs, such that when the entire queue is processed, the waiter will 2826 * have been committed to an lwb. 2827 * 2828 * The lwb associated with the passed in waiter is not guaranteed to 2829 * have been issued by the time this function completes. If the lwb is 2830 * not issued, we rely on future calls to zil_commit_writer() to issue 2831 * the lwb, or the timeout mechanism found in zil_commit_waiter(). 2832 */ 2833 static void 2834 zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw) 2835 { 2836 ASSERT(!MUTEX_HELD(&zilog->zl_lock)); 2837 ASSERT(spa_writeable(zilog->zl_spa)); 2838 2839 mutex_enter(&zilog->zl_issuer_lock); 2840 2841 if (zcw->zcw_lwb != NULL || zcw->zcw_done) { 2842 /* 2843 * It's possible that, while we were waiting to acquire 2844 * the "zl_issuer_lock", another thread committed this 2845 * waiter to an lwb. If that occurs, we bail out early, 2846 * without processing any of the zilog's queue of itxs. 2847 * 2848 * On certain workloads and system configurations, the 2849 * "zl_issuer_lock" can become highly contended. In an 2850 * attempt to reduce this contention, we immediately drop 2851 * the lock if the waiter has already been processed. 2852 * 2853 * We've measured this optimization to reduce CPU spent 2854 * contending on this lock by up to 5%, using a system 2855 * with 32 CPUs, low latency storage (~50 usec writes), 2856 * and 1024 threads performing sync writes.
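 *
 * (Added annotation) In other words, the zcw_lwb / zcw_done check
 * above acts as a cheap early-out: a waiter that another thread has
 * already linked to an lwb skips the commit list processing entirely
 * and just drops the lock.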
2857 */ 2858 goto out; 2859 } 2860 2861 ZIL_STAT_BUMP(zilog, zil_commit_writer_count); 2862 2863 zil_get_commit_list(zilog); 2864 zil_prune_commit_list(zilog); 2865 zil_process_commit_list(zilog); 2866 2867 out: 2868 mutex_exit(&zilog->zl_issuer_lock); 2869 } 2870 2871 static void 2872 zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw) 2873 { 2874 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); 2875 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 2876 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 2877 2878 lwb_t *lwb = zcw->zcw_lwb; 2879 ASSERT3P(lwb, !=, NULL); 2880 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED); 2881 2882 /* 2883 * If the lwb has already been issued by another thread, we can 2884 * immediately return since there's no work to be done (the 2885 * point of this function is to issue the lwb). Additionally, we 2886 * do this prior to acquiring the zl_issuer_lock, to avoid 2887 * acquiring it when it's not necessary to do so. 2888 */ 2889 if (lwb->lwb_state == LWB_STATE_ISSUED || 2890 lwb->lwb_state == LWB_STATE_WRITE_DONE || 2891 lwb->lwb_state == LWB_STATE_FLUSH_DONE) 2892 return; 2893 2894 /* 2895 * In order to call zil_lwb_write_issue() we must hold the 2896 * zilog's "zl_issuer_lock". We can't simply acquire that lock, 2897 * since we're already holding the commit waiter's "zcw_lock", 2898 * and those two locks are acquired in the opposite order 2899 * elsewhere. 2900 */ 2901 mutex_exit(&zcw->zcw_lock); 2902 mutex_enter(&zilog->zl_issuer_lock); 2903 mutex_enter(&zcw->zcw_lock); 2904 2905 /* 2906 * Since we just dropped and re-acquired the commit waiter's 2907 * lock, we have to re-check to see if the waiter was marked 2908 * "done" during that process. If the waiter was marked "done", 2909 * the "lwb" pointer is no longer valid (it can be free'd after 2910 * the waiter is marked "done"), so without this check we could 2911 * wind up with a use-after-free error below. 2912 */ 2913 if (zcw->zcw_done) 2914 goto out; 2915 2916 ASSERT3P(lwb, ==, zcw->zcw_lwb); 2917 2918 /* 2919 * We've already checked this above, but since we hadn't acquired 2920 * the zilog's zl_issuer_lock, we have to perform this check a 2921 * second time while holding the lock. 2922 * 2923 * We don't need to hold the zl_lock since the lwb cannot transition 2924 * from OPENED to ISSUED while we hold the zl_issuer_lock. The lwb 2925 * _can_ transition from ISSUED to DONE, but it's OK to race with 2926 * that transition since we treat the lwb the same, whether it's in 2927 * the ISSUED or DONE states. 2928 * 2929 * The important thing, is we treat the lwb differently depending on 2930 * if it's ISSUED or OPENED, and block any other threads that might 2931 * attempt to issue this lwb. For that reason we hold the 2932 * zl_issuer_lock when checking the lwb_state; we must not call 2933 * zil_lwb_write_issue() if the lwb had already been issued. 2934 * 2935 * See the comment above the lwb_state_t structure definition for 2936 * more details on the lwb states, and locking requirements. 2937 */ 2938 if (lwb->lwb_state == LWB_STATE_ISSUED || 2939 lwb->lwb_state == LWB_STATE_WRITE_DONE || 2940 lwb->lwb_state == LWB_STATE_FLUSH_DONE) 2941 goto out; 2942 2943 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 2944 2945 /* 2946 * As described in the comments above zil_commit_waiter() and 2947 * zil_process_commit_list(), we need to issue this lwb's zio 2948 * since we've reached the commit waiter's timeout and it still 2949 * hasn't been issued. 
2950 */ 2951 lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb); 2952 2953 IMPLY(nlwb != NULL, lwb->lwb_state != LWB_STATE_OPENED); 2954 2955 /* 2956 * Since the lwb's zio hadn't been issued by the time this thread 2957 * reached its timeout, we reset the zilog's "zl_cur_used" field 2958 * to influence the zil block size selection algorithm. 2959 * 2960 * By having to issue the lwb's zio here, it means the size of the 2961 * lwb was too large, given the incoming throughput of itxs. By 2962 * setting "zl_cur_used" to zero, we communicate this fact to the 2963 * block size selection algorithm, so it can take this information 2964 * into account, and potentially select a smaller size for the 2965 * next lwb block that is allocated. 2966 */ 2967 zilog->zl_cur_used = 0; 2968 2969 if (nlwb == NULL) { 2970 /* 2971 * When zil_lwb_write_issue() returns NULL, this 2972 * indicates zio_alloc_zil() failed to allocate the 2973 * "next" lwb on-disk. When this occurs, the ZIL write 2974 * pipeline must be stalled; see the comment within the 2975 * zil_commit_writer_stall() function for more details. 2976 * 2977 * We must drop the commit waiter's lock prior to 2978 * calling zil_commit_writer_stall() or else we can wind 2979 * up with the following deadlock: 2980 * 2981 * - This thread is waiting for the txg to sync while 2982 * holding the waiter's lock; txg_wait_synced() is 2983 * used within zil_commit_writer_stall(). 2984 * 2985 * - The txg can't sync because it is waiting for this 2986 * lwb's zio callback to call dmu_tx_commit(). 2987 * 2988 * - The lwb's zio callback can't call dmu_tx_commit() 2989 * because it's blocked trying to acquire the waiter's 2990 * lock, which occurs prior to calling dmu_tx_commit() 2991 */ 2992 mutex_exit(&zcw->zcw_lock); 2993 zil_commit_writer_stall(zilog); 2994 mutex_enter(&zcw->zcw_lock); 2995 } 2996 2997 out: 2998 mutex_exit(&zilog->zl_issuer_lock); 2999 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 3000 } 3001 3002 /* 3003 * This function is responsible for performing the following two tasks: 3004 * 3005 * 1. its primary responsibility is to block until the given "commit 3006 * waiter" is considered "done". 3007 * 3008 * 2. its secondary responsibility is to issue the zio for the lwb that 3009 * the given "commit waiter" is waiting on, if this function has 3010 * waited "long enough" and the lwb is still in the "open" state. 3011 * 3012 * Given a sufficient amount of itxs being generated and written using 3013 * the ZIL, the lwb's zio will be issued via the zil_lwb_commit() 3014 * function. If this does not occur, this secondary responsibility will 3015 * ensure the lwb is issued even if there is no other synchronous 3016 * activity on the system. 3017 * 3018 * For more details, see zil_process_commit_list(); more specifically, 3019 * the comment at the bottom of that function. 3020 */ 3021 static void 3022 zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw) 3023 { 3024 ASSERT(!MUTEX_HELD(&zilog->zl_lock)); 3025 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); 3026 ASSERT(spa_writeable(zilog->zl_spa)); 3027 3028 mutex_enter(&zcw->zcw_lock); 3029 3030 /* 3031 * The timeout is scaled based on the lwb latency to avoid 3032 * significantly impacting the latency of each individual itx. 3033 * For more details, see the comment at the bottom of the 3034 * zil_process_commit_list() function.
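 *
 * For instance (added annotation; illustrative numbers): with
 * zfs_commit_timeout_pct at its default of 5 and a recent lwb latency
 * of 1ms, the waiter below sleeps for roughly 50us before taking on
 * responsibility (2) and issuing the lwb itself.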
3035 */ 3036 int pct = MAX(zfs_commit_timeout_pct, 1); 3037 hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100; 3038 hrtime_t wakeup = gethrtime() + sleep; 3039 boolean_t timedout = B_FALSE; 3040 3041 while (!zcw->zcw_done) { 3042 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 3043 3044 lwb_t *lwb = zcw->zcw_lwb; 3045 3046 /* 3047 * Usually, the waiter will have a non-NULL lwb field here, 3048 * but it's possible for it to be NULL as a result of 3049 * zil_commit() racing with spa_sync(). 3050 * 3051 * When zil_clean() is called, it's possible for the itxg 3052 * list (which may be cleaned via a taskq) to contain 3053 * commit itxs. When this occurs, the commit waiters linked 3054 * off of these commit itxs will not be committed to an 3055 * lwb. Additionally, these commit waiters will not be 3056 * marked done until zil_commit_waiter_skip() is called via 3057 * zil_itxg_clean(). 3058 * 3059 * Thus, it's possible for this commit waiter (i.e. the 3060 * "zcw" variable) to be found in this "in between" state; 3061 * where its "zcw_lwb" field is NULL, and it hasn't yet 3062 * been skipped, so its "zcw_done" field is still B_FALSE. 3063 */ 3064 IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED); 3065 3066 if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) { 3067 ASSERT3B(timedout, ==, B_FALSE); 3068 3069 /* 3070 * If the lwb hasn't been issued yet, then we 3071 * need to wait with a timeout, in case this 3072 * function needs to issue the lwb after the 3073 * timeout is reached; responsibility (2) from 3074 * the comment above this function. 3075 */ 3076 int rc = cv_timedwait_hires(&zcw->zcw_cv, 3077 &zcw->zcw_lock, wakeup, USEC2NSEC(1), 3078 CALLOUT_FLAG_ABSOLUTE); 3079 3080 if (rc != -1 || zcw->zcw_done) 3081 continue; 3082 3083 timedout = B_TRUE; 3084 zil_commit_waiter_timeout(zilog, zcw); 3085 3086 if (!zcw->zcw_done) { 3087 /* 3088 * If the commit waiter has already been 3089 * marked "done", it's possible for the 3090 * waiter's lwb structure to have already 3091 * been freed. Thus, we can only reliably 3092 * make these assertions if the waiter 3093 * isn't done. 3094 */ 3095 ASSERT3P(lwb, ==, zcw->zcw_lwb); 3096 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED); 3097 } 3098 } else { 3099 /* 3100 * If the lwb isn't open, then it must have already 3101 * been issued. In that case, there's no need to 3102 * use a timeout when waiting for the lwb to 3103 * complete. 3104 * 3105 * Additionally, if the lwb is NULL, the waiter 3106 * will soon be signaled and marked done via 3107 * zil_clean() and zil_itxg_clean(), so no timeout 3108 * is required.
3109 */ 3110 3111 IMPLY(lwb != NULL, 3112 lwb->lwb_state == LWB_STATE_ISSUED || 3113 lwb->lwb_state == LWB_STATE_WRITE_DONE || 3114 lwb->lwb_state == LWB_STATE_FLUSH_DONE); 3115 cv_wait(&zcw->zcw_cv, &zcw->zcw_lock); 3116 } 3117 } 3118 3119 mutex_exit(&zcw->zcw_lock); 3120 } 3121 3122 static zil_commit_waiter_t * 3123 zil_alloc_commit_waiter(void) 3124 { 3125 zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP); 3126 3127 cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL); 3128 mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL); 3129 list_link_init(&zcw->zcw_node); 3130 zcw->zcw_lwb = NULL; 3131 zcw->zcw_done = B_FALSE; 3132 zcw->zcw_zio_error = 0; 3133 3134 return (zcw); 3135 } 3136 3137 static void 3138 zil_free_commit_waiter(zil_commit_waiter_t *zcw) 3139 { 3140 ASSERT(!list_link_active(&zcw->zcw_node)); 3141 ASSERT3P(zcw->zcw_lwb, ==, NULL); 3142 ASSERT3B(zcw->zcw_done, ==, B_TRUE); 3143 mutex_destroy(&zcw->zcw_lock); 3144 cv_destroy(&zcw->zcw_cv); 3145 kmem_cache_free(zil_zcw_cache, zcw); 3146 } 3147 3148 /* 3149 * This function is used to create a TX_COMMIT itx and assign it. This 3150 * way, it will be linked into the ZIL's list of synchronous itxs, and 3151 * then later committed to an lwb (or skipped) when 3152 * zil_process_commit_list() is called. 3153 */ 3154 static void 3155 zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw) 3156 { 3157 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); 3158 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 3159 3160 itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t)); 3161 itx->itx_sync = B_TRUE; 3162 itx->itx_private = zcw; 3163 3164 zil_itx_assign(zilog, itx, tx); 3165 3166 dmu_tx_commit(tx); 3167 } 3168 3169 /* 3170 * Commit ZFS Intent Log transactions (itxs) to stable storage. 3171 * 3172 * When writing ZIL transactions to the on-disk representation of the 3173 * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple 3174 * itxs can be committed to a single lwb. Once a lwb is written and 3175 * committed to stable storage (i.e. the lwb is written, and vdevs have 3176 * been flushed), each itx that was committed to that lwb is also 3177 * considered to be committed to stable storage. 3178 * 3179 * When an itx is committed to an lwb, the log record (lr_t) contained 3180 * by the itx is copied into the lwb's zio buffer, and once this buffer 3181 * is written to disk, it becomes an on-disk ZIL block. 3182 * 3183 * As itxs are generated, they're inserted into the ZIL's queue of 3184 * uncommitted itxs. The semantics of zil_commit() are such that it will 3185 * block until all itxs that were in the queue when it was called, are 3186 * committed to stable storage. 3187 * 3188 * If "foid" is zero, this means all "synchronous" and "asynchronous" 3189 * itxs, for all objects in the dataset, will be committed to stable 3190 * storage prior to zil_commit() returning. If "foid" is non-zero, all 3191 * "synchronous" itxs for all objects, but only "asynchronous" itxs 3192 * that correspond to the foid passed in, will be committed to stable 3193 * storage prior to zil_commit() returning. 3194 * 3195 * Generally speaking, when zil_commit() is called, the consumer doesn't 3196 * actually care about _all_ of the uncommitted itxs. Instead, they're 3197 * simply trying to wait for a specific itx to be committed to disk, 3198 * but the interface(s) for interacting with the ZIL don't allow such 3199 * fine-grained communication.
A better interface would allow a consumer 3200 * to create and assign an itx, and then pass a reference to this itx to 3201 * zil_commit(); such that zil_commit() would return as soon as that 3202 * specific itx was committed to disk (instead of waiting for _all_ 3203 * itxs to be committed). 3204 * 3205 * When a thread calls zil_commit() a special "commit itx" will be 3206 * generated, along with a corresponding "waiter" for this commit itx. 3207 * zil_commit() will wait on this waiter's CV, such that when the waiter 3208 * is marked done, and signaled, zil_commit() will return. 3209 * 3210 * This commit itx is inserted into the queue of uncommitted itxs. This 3211 * provides an easy mechanism for determining which itxs were in the 3212 * queue prior to zil_commit() having been called, and which itxs were 3213 * added after zil_commit() was called. 3214 * 3215 * The commit itx is special; it doesn't have any on-disk representation. 3216 * When a commit itx is "committed" to an lwb, the waiter associated 3217 * with it is linked onto the lwb's list of waiters. Then, when that lwb 3218 * completes, each waiter on the lwb's list is marked done and signaled 3219 * -- allowing the thread waiting on the waiter to return from zil_commit(). 3220 * 3221 * It's important to point out a few critical factors that allow us 3222 * to make use of the commit itxs, commit waiters, per-lwb lists of 3223 * commit waiters, and zio completion callbacks like we're doing: 3224 * 3225 * 1. The list of waiters for each lwb is traversed, and each commit 3226 * waiter is marked "done" and signaled, in the zio completion 3227 * callback of the lwb's zio[*]. 3228 * 3229 * * Actually, the waiters are signaled in the zio completion 3230 * callback of the root zio for the DKIOCFLUSHWRITECACHE commands 3231 * that are sent to the vdevs upon completion of the lwb zio. 3232 * 3233 * 2. When the itxs are inserted into the ZIL's queue of uncommitted 3234 * itxs, the order in which they are inserted is preserved[*]; as 3235 * itxs are added to the queue, they are added to the tail of 3236 * in-memory linked lists. 3237 * 3238 * When committing the itxs to lwbs (to be written to disk), they 3239 * are committed in the same order in which the itxs were added to 3240 * the uncommitted queue's linked list(s); i.e. the linked list of 3241 * itxs to commit is traversed from head to tail, and each itx is 3242 * committed to an lwb in that order. 3243 * 3244 * * To clarify: 3245 * 3246 * - the order of "sync" itxs is preserved w.r.t. other 3247 * "sync" itxs, regardless of the corresponding objects. 3248 * - the order of "async" itxs is preserved w.r.t. other 3249 * "async" itxs corresponding to the same object. 3250 * - the order of "async" itxs is *not* preserved w.r.t. other 3251 * "async" itxs corresponding to different objects. 3252 * - the order of "sync" itxs w.r.t. "async" itxs (or vice 3253 * versa) is *not* preserved, even for itxs that correspond 3254 * to the same object. 3255 * 3256 * For more details, see: zil_itx_assign(), zil_async_to_sync(), 3257 * zil_get_commit_list(), and zil_process_commit_list(). 3258 * 3259 * 3. The lwbs represent a linked list of blocks on disk. Thus, any 3260 * lwb cannot be considered committed to stable storage, until its 3261 * "previous" lwb is also committed to stable storage. This fact, 3262 * coupled with the fact described above, means that itxs are 3263 * committed in (roughly) the order in which they were generated. 3264 * This is essential because itxs are dependent on prior itxs. 
3265 * Thus, we *must not* deem an itx as being committed to stable 3266 * storage, until *all* prior itxs have also been committed to 3267 * stable storage. 3268 * 3269 * To enforce this ordering of lwb zio's, while still leveraging as 3270 * much of the underlying storage performance as possible, we rely 3271 * on two fundamental concepts: 3272 * 3273 * 1. The creation and issuance of lwb zio's is protected by 3274 * the zilog's "zl_issuer_lock", which ensures only a single 3275 * thread is creating and/or issuing lwb's at a time 3276 * 2. The "previous" lwb is a child of the "current" lwb 3277 * (leveraging the zio parent-child dependency graph) 3278 * 3279 * By relying on this parent-child zio relationship, we can have 3280 * many lwb zio's concurrently issued to the underlying storage, 3281 * but the order in which they complete will be the same order in 3282 * which they were created. 3283 */ 3284 void 3285 zil_commit(zilog_t *zilog, uint64_t foid) 3286 { 3287 /* 3288 * We should never attempt to call zil_commit on a snapshot for 3289 * a couple of reasons: 3290 * 3291 * 1. A snapshot may never be modified, thus it cannot have any 3292 * in-flight itxs that would have modified the dataset. 3293 * 3294 * 2. By design, when zil_commit() is called, a commit itx will 3295 * be assigned to this zilog; as a result, the zilog will be 3296 * dirtied. We must not dirty the zilog of a snapshot; there's 3297 * checks in the code that enforce this invariant, and will 3298 * cause a panic if it's not upheld. 3299 */ 3300 ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE); 3301 3302 if (zilog->zl_sync == ZFS_SYNC_DISABLED) 3303 return; 3304 3305 if (!spa_writeable(zilog->zl_spa)) { 3306 /* 3307 * If the SPA is not writable, there should never be any 3308 * pending itxs waiting to be committed to disk. If that 3309 * weren't true, we'd skip writing those itxs out, and 3310 * would break the semantics of zil_commit(); thus, we're 3311 * verifying that truth before we return to the caller. 3312 */ 3313 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 3314 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); 3315 for (int i = 0; i < TXG_SIZE; i++) 3316 ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL); 3317 return; 3318 } 3319 3320 /* 3321 * If the ZIL is suspended, we don't want to dirty it by calling 3322 * zil_commit_itx_assign() below, nor can we write out 3323 * lwbs like would be done in zil_commit_write(). Thus, we 3324 * simply rely on txg_wait_synced() to maintain the necessary 3325 * semantics, and avoid calling those functions altogether. 3326 */ 3327 if (zilog->zl_suspend > 0) { 3328 txg_wait_synced(zilog->zl_dmu_pool, 0); 3329 return; 3330 } 3331 3332 zil_commit_impl(zilog, foid); 3333 } 3334 3335 void 3336 zil_commit_impl(zilog_t *zilog, uint64_t foid) 3337 { 3338 ZIL_STAT_BUMP(zilog, zil_commit_count); 3339 3340 /* 3341 * Move the "async" itxs for the specified foid to the "sync" 3342 * queues, such that they will be later committed (or skipped) 3343 * to an lwb when zil_process_commit_list() is called. 3344 * 3345 * Since these "async" itxs must be committed prior to this 3346 * call to zil_commit returning, we must perform this operation 3347 * before we call zil_commit_itx_assign(). 3348 */ 3349 zil_async_to_sync(zilog, foid); 3350 3351 /* 3352 * We allocate a new "waiter" structure which will initially be 3353 * linked to the commit itx using the itx's "itx_private" field. 
3354 * Since the commit itx doesn't represent any on-disk state, 3355 * when it's committed to an lwb, rather than copying its 3356 * lr_t into the lwb's buffer, the commit itx's "waiter" will be 3357 * added to the lwb's list of waiters. Then, when the lwb is 3358 * committed to stable storage, each waiter in the lwb's list of 3359 * waiters will be marked "done", and signalled. 3360 * 3361 * We must create the waiter and assign the commit itx prior to 3362 * calling zil_commit_writer(), or else our specific commit itx 3363 * is not guaranteed to be committed to an lwb prior to calling 3364 * zil_commit_waiter(). 3365 */ 3366 zil_commit_waiter_t *zcw = zil_alloc_commit_waiter(); 3367 zil_commit_itx_assign(zilog, zcw); 3368 3369 zil_commit_writer(zilog, zcw); 3370 zil_commit_waiter(zilog, zcw); 3371 3372 if (zcw->zcw_zio_error != 0) { 3373 /* 3374 * If there was an error writing out the ZIL blocks that 3375 * this thread is waiting on, then we fall back to 3376 * relying on spa_sync() to write out the data this 3377 * thread is waiting on. Obviously this has performance 3378 * implications, but the expectation is for this to be 3379 * an exceptional case that shouldn't occur often. 3380 */ 3381 DTRACE_PROBE2(zil__commit__io__error, 3382 zilog_t *, zilog, zil_commit_waiter_t *, zcw); 3383 txg_wait_synced(zilog->zl_dmu_pool, 0); 3384 } 3385 3386 zil_free_commit_waiter(zcw); 3387 } 3388 3389 /* 3390 * Called in syncing context to free committed log blocks and update the log header. 3391 */ 3392 void 3393 zil_sync(zilog_t *zilog, dmu_tx_t *tx) 3394 { 3395 zil_header_t *zh = zil_header_in_syncing_context(zilog); 3396 uint64_t txg = dmu_tx_get_txg(tx); 3397 spa_t *spa = zilog->zl_spa; 3398 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK]; 3399 lwb_t *lwb; 3400 3401 /* 3402 * We don't zero out zl_destroy_txg, so make sure we don't try 3403 * to destroy it twice. 3404 */ 3405 if (spa_sync_pass(spa) != 1) 3406 return; 3407 3408 zil_lwb_flush_wait_all(zilog, txg); 3409 3410 mutex_enter(&zilog->zl_lock); 3411 3412 ASSERT(zilog->zl_stop_sync == 0); 3413 3414 if (*replayed_seq != 0) { 3415 ASSERT(zh->zh_replay_seq < *replayed_seq); 3416 zh->zh_replay_seq = *replayed_seq; 3417 *replayed_seq = 0; 3418 } 3419 3420 if (zilog->zl_destroy_txg == txg) { 3421 blkptr_t blk = zh->zh_log; 3422 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os); 3423 3424 ASSERT(list_head(&zilog->zl_lwb_list) == NULL); 3425 3426 memset(zh, 0, sizeof (zil_header_t)); 3427 memset(zilog->zl_replayed_seq, 0, 3428 sizeof (zilog->zl_replayed_seq)); 3429 3430 if (zilog->zl_keep_first) { 3431 /* 3432 * If this block was part of a log chain that couldn't 3433 * be claimed because a device was missing during 3434 * zil_claim(), but that device later returns, 3435 * then this block could erroneously appear valid. 3436 * To guard against this, assign a new GUID to the new 3437 * log chain so it doesn't matter what blk points to. 3438 */ 3439 zil_init_log_chain(zilog, &blk); 3440 zh->zh_log = blk; 3441 } else { 3442 /* 3443 * A destroyed ZIL chain can't contain any TX_SETSAXATTR 3444 * records. So, deactivate the feature for this dataset. 3445 * We activate it again when we start a new ZIL chain.
3446 */ 3447 if (dsl_dataset_feature_is_active(ds, 3448 SPA_FEATURE_ZILSAXATTR)) 3449 dsl_dataset_deactivate_feature(ds, 3450 SPA_FEATURE_ZILSAXATTR, tx); 3451 } 3452 } 3453 3454 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 3455 zh->zh_log = lwb->lwb_blk; 3456 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg) 3457 break; 3458 list_remove(&zilog->zl_lwb_list, lwb); 3459 zio_free(spa, txg, &lwb->lwb_blk); 3460 zil_free_lwb(zilog, lwb); 3461 3462 /* 3463 * If we don't have anything left in the lwb list then 3464 * we've had an allocation failure and we need to zero 3465 * out the zil_header blkptr so that we don't end 3466 * up freeing the same block twice. 3467 */ 3468 if (list_head(&zilog->zl_lwb_list) == NULL) 3469 BP_ZERO(&zh->zh_log); 3470 } 3471 3472 /* 3473 * Remove fastwrite on any blocks that have been pre-allocated for 3474 * the next commit. This prevents fastwrite counter pollution by 3475 * unused, long-lived LWBs. 3476 */ 3477 for (; lwb != NULL; lwb = list_next(&zilog->zl_lwb_list, lwb)) { 3478 if (lwb->lwb_fastwrite && !lwb->lwb_write_zio) { 3479 metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk); 3480 lwb->lwb_fastwrite = 0; 3481 } 3482 } 3483 3484 mutex_exit(&zilog->zl_lock); 3485 } 3486 3487 static int 3488 zil_lwb_cons(void *vbuf, void *unused, int kmflag) 3489 { 3490 (void) unused, (void) kmflag; 3491 lwb_t *lwb = vbuf; 3492 list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node)); 3493 list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t), 3494 offsetof(zil_commit_waiter_t, zcw_node)); 3495 avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare, 3496 sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); 3497 mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 3498 return (0); 3499 } 3500 3501 static void 3502 zil_lwb_dest(void *vbuf, void *unused) 3503 { 3504 (void) unused; 3505 lwb_t *lwb = vbuf; 3506 mutex_destroy(&lwb->lwb_vdev_lock); 3507 avl_destroy(&lwb->lwb_vdev_tree); 3508 list_destroy(&lwb->lwb_waiters); 3509 list_destroy(&lwb->lwb_itxs); 3510 } 3511 3512 void 3513 zil_init(void) 3514 { 3515 zil_lwb_cache = kmem_cache_create("zil_lwb_cache", 3516 sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0); 3517 3518 zil_zcw_cache = kmem_cache_create("zil_zcw_cache", 3519 sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 3520 3521 zil_sums_init(&zil_sums_global); 3522 zil_kstats_global = kstat_create("zfs", 0, "zil", "misc", 3523 KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t), 3524 KSTAT_FLAG_VIRTUAL); 3525 3526 if (zil_kstats_global != NULL) { 3527 zil_kstats_global->ks_data = &zil_stats; 3528 zil_kstats_global->ks_update = zil_kstats_global_update; 3529 zil_kstats_global->ks_private = NULL; 3530 kstat_install(zil_kstats_global); 3531 } 3532 } 3533 3534 void 3535 zil_fini(void) 3536 { 3537 kmem_cache_destroy(zil_zcw_cache); 3538 kmem_cache_destroy(zil_lwb_cache); 3539 3540 if (zil_kstats_global != NULL) { 3541 kstat_delete(zil_kstats_global); 3542 zil_kstats_global = NULL; 3543 } 3544 3545 zil_sums_fini(&zil_sums_global); 3546 } 3547 3548 void 3549 zil_set_sync(zilog_t *zilog, uint64_t sync) 3550 { 3551 zilog->zl_sync = sync; 3552 } 3553 3554 void 3555 zil_set_logbias(zilog_t *zilog, uint64_t logbias) 3556 { 3557 zilog->zl_logbias = logbias; 3558 } 3559 3560 zilog_t * 3561 zil_alloc(objset_t *os, zil_header_t *zh_phys) 3562 { 3563 zilog_t *zilog; 3564 3565 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); 3566 3567 zilog->zl_header = zh_phys; 3568 zilog->zl_os = os; 3569 
zilog->zl_spa = dmu_objset_spa(os); 3570 zilog->zl_dmu_pool = dmu_objset_pool(os); 3571 zilog->zl_destroy_txg = TXG_INITIAL - 1; 3572 zilog->zl_logbias = dmu_objset_logbias(os); 3573 zilog->zl_sync = dmu_objset_syncprop(os); 3574 zilog->zl_dirty_max_txg = 0; 3575 zilog->zl_last_lwb_opened = NULL; 3576 zilog->zl_last_lwb_latency = 0; 3577 zilog->zl_max_block_size = zil_maxblocksize; 3578 3579 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); 3580 mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL); 3581 mutex_init(&zilog->zl_lwb_io_lock, NULL, MUTEX_DEFAULT, NULL); 3582 3583 for (int i = 0; i < TXG_SIZE; i++) { 3584 mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, 3585 MUTEX_DEFAULT, NULL); 3586 } 3587 3588 list_create(&zilog->zl_lwb_list, sizeof (lwb_t), 3589 offsetof(lwb_t, lwb_node)); 3590 3591 list_create(&zilog->zl_itx_commit_list, sizeof (itx_t), 3592 offsetof(itx_t, itx_node)); 3593 3594 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); 3595 cv_init(&zilog->zl_lwb_io_cv, NULL, CV_DEFAULT, NULL); 3596 3597 return (zilog); 3598 } 3599 3600 void 3601 zil_free(zilog_t *zilog) 3602 { 3603 int i; 3604 3605 zilog->zl_stop_sync = 1; 3606 3607 ASSERT0(zilog->zl_suspend); 3608 ASSERT0(zilog->zl_suspending); 3609 3610 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 3611 list_destroy(&zilog->zl_lwb_list); 3612 3613 ASSERT(list_is_empty(&zilog->zl_itx_commit_list)); 3614 list_destroy(&zilog->zl_itx_commit_list); 3615 3616 for (i = 0; i < TXG_SIZE; i++) { 3617 /* 3618 * It's possible for an itx to be generated that doesn't dirty 3619 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean() 3620 * callback to remove the entry. We remove those here. 3621 * 3622 * Also free up the ziltest itxs. 3623 */ 3624 if (zilog->zl_itxg[i].itxg_itxs) 3625 zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs); 3626 mutex_destroy(&zilog->zl_itxg[i].itxg_lock); 3627 } 3628 3629 mutex_destroy(&zilog->zl_issuer_lock); 3630 mutex_destroy(&zilog->zl_lock); 3631 mutex_destroy(&zilog->zl_lwb_io_lock); 3632 3633 cv_destroy(&zilog->zl_cv_suspend); 3634 cv_destroy(&zilog->zl_lwb_io_cv); 3635 3636 kmem_free(zilog, sizeof (zilog_t)); 3637 } 3638 3639 /* 3640 * Open an intent log. 3641 */ 3642 zilog_t * 3643 zil_open(objset_t *os, zil_get_data_t *get_data, zil_sums_t *zil_sums) 3644 { 3645 zilog_t *zilog = dmu_objset_zil(os); 3646 3647 ASSERT3P(zilog->zl_get_data, ==, NULL); 3648 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); 3649 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 3650 3651 zilog->zl_get_data = get_data; 3652 zilog->zl_sums = zil_sums; 3653 3654 return (zilog); 3655 } 3656 3657 /* 3658 * Close an intent log. 3659 */ 3660 void 3661 zil_close(zilog_t *zilog) 3662 { 3663 lwb_t *lwb; 3664 uint64_t txg; 3665 3666 if (!dmu_objset_is_snapshot(zilog->zl_os)) { 3667 zil_commit(zilog, 0); 3668 } else { 3669 ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL); 3670 ASSERT0(zilog->zl_dirty_max_txg); 3671 ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE); 3672 } 3673 3674 mutex_enter(&zilog->zl_lock); 3675 lwb = list_tail(&zilog->zl_lwb_list); 3676 if (lwb == NULL) 3677 txg = zilog->zl_dirty_max_txg; 3678 else 3679 txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg); 3680 mutex_exit(&zilog->zl_lock); 3681 3682 /* 3683 * zl_lwb_max_issued_txg may be larger than lwb_max_txg. It depends 3684 * on the time when the dmu_tx transaction is assigned in 3685 * zil_lwb_write_issue(). 
3686 */ 3687 mutex_enter(&zilog->zl_lwb_io_lock); 3688 txg = MAX(zilog->zl_lwb_max_issued_txg, txg); 3689 mutex_exit(&zilog->zl_lwb_io_lock); 3690 3691 /* 3692 * We need to use txg_wait_synced() to wait until that txg is synced. 3693 * zil_sync() will guarantee all lwbs up to that txg have been 3694 * written out, flushed, and cleaned. 3695 */ 3696 if (txg != 0) 3697 txg_wait_synced(zilog->zl_dmu_pool, txg); 3698 3699 if (zilog_is_dirty(zilog)) 3700 zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog, 3701 (u_longlong_t)txg); 3702 if (txg < spa_freeze_txg(zilog->zl_spa)) 3703 VERIFY(!zilog_is_dirty(zilog)); 3704 3705 zilog->zl_get_data = NULL; 3706 3707 /* 3708 * We should have only one lwb left on the list; remove it now. 3709 */ 3710 mutex_enter(&zilog->zl_lock); 3711 lwb = list_head(&zilog->zl_lwb_list); 3712 if (lwb != NULL) { 3713 ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list)); 3714 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 3715 3716 if (lwb->lwb_fastwrite) 3717 metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk); 3718 3719 list_remove(&zilog->zl_lwb_list, lwb); 3720 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 3721 zil_free_lwb(zilog, lwb); 3722 } 3723 mutex_exit(&zilog->zl_lock); 3724 } 3725 3726 static const char *suspend_tag = "zil suspending"; 3727 3728 /* 3729 * Suspend an intent log. While in suspended mode, we still honor 3730 * synchronous semantics, but we rely on txg_wait_synced() to do it. 3731 * On old version pools, we suspend the log briefly when taking a 3732 * snapshot so that it will have an empty intent log. 3733 * 3734 * Long holds are not really intended to be used the way we do here -- 3735 * held for such a short time. A concurrent caller of dsl_dataset_long_held() 3736 * could fail. Therefore we take pains to only put a long hold if it is 3737 * actually necessary. Fortunately, it will only be necessary if the 3738 * objset is currently mounted (or the ZVOL equivalent). In that case it 3739 * will already have a long hold, so we are not really making things any worse. 3740 * 3741 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or 3742 * zvol_state_t), and use their mechanism to prevent their hold from being 3743 * dropped (e.g. VFS_HOLD()). However, that would be even more pain for 3744 * very little gain. 3745 * 3746 * if cookiep == NULL, this does both the suspend & resume. 3747 * Otherwise, it returns with the dataset "long held", and the cookie 3748 * should be passed into zil_resume(). 3749 */ 3750 int 3751 zil_suspend(const char *osname, void **cookiep) 3752 { 3753 objset_t *os; 3754 zilog_t *zilog; 3755 const zil_header_t *zh; 3756 int error; 3757 3758 error = dmu_objset_hold(osname, suspend_tag, &os); 3759 if (error != 0) 3760 return (error); 3761 zilog = dmu_objset_zil(os); 3762 3763 mutex_enter(&zilog->zl_lock); 3764 zh = zilog->zl_header; 3765 3766 if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ 3767 mutex_exit(&zilog->zl_lock); 3768 dmu_objset_rele(os, suspend_tag); 3769 return (SET_ERROR(EBUSY)); 3770 } 3771 3772 /* 3773 * Don't put a long hold in the cases where we can avoid it. This 3774 * is when there is no cookie so we are doing a suspend & resume 3775 * (i.e. called from zil_vdev_offline()), and there's nothing to do 3776 * for the suspend because it's already suspended, or there's no ZIL. 
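 *
 * As a usage sketch (hypothetical callers, not code from this file),
 * the two calling conventions described above look roughly like:
 *
 *     (void) zil_suspend(osname, NULL);        suspend and resume in one call
 *
 *     void *cookie;
 *     error = zil_suspend(osname, &cookie);    dataset is left "long held"
 *     ...
 *     zil_resume(cookie);                      until the caller resumes it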
3777 */ 3778 if (cookiep == NULL && !zilog->zl_suspending && 3779 (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) { 3780 mutex_exit(&zilog->zl_lock); 3781 dmu_objset_rele(os, suspend_tag); 3782 return (0); 3783 } 3784 3785 dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag); 3786 dsl_pool_rele(dmu_objset_pool(os), suspend_tag); 3787 3788 zilog->zl_suspend++; 3789 3790 if (zilog->zl_suspend > 1) { 3791 /* 3792 * Someone else is already suspending it. 3793 * Just wait for them to finish. 3794 */ 3795 3796 while (zilog->zl_suspending) 3797 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); 3798 mutex_exit(&zilog->zl_lock); 3799 3800 if (cookiep == NULL) 3801 zil_resume(os); 3802 else 3803 *cookiep = os; 3804 return (0); 3805 } 3806 3807 /* 3808 * If there is no pointer to an on-disk block, this ZIL must not 3809 * be active (e.g. filesystem not mounted), so there's nothing 3810 * to clean up. 3811 */ 3812 if (BP_IS_HOLE(&zh->zh_log)) { 3813 ASSERT(cookiep != NULL); /* fast path already handled */ 3814 3815 *cookiep = os; 3816 mutex_exit(&zilog->zl_lock); 3817 return (0); 3818 } 3819 3820 /* 3821 * The ZIL has work to do. Ensure that the associated encryption 3822 * key will remain mapped while we are committing the log by 3823 * grabbing a reference to it. If the key isn't loaded we have no 3824 * choice but to return an error until the wrapping key is loaded. 3825 */ 3826 if (os->os_encrypted && 3827 dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) { 3828 zilog->zl_suspend--; 3829 mutex_exit(&zilog->zl_lock); 3830 dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); 3831 dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); 3832 return (SET_ERROR(EACCES)); 3833 } 3834 3835 zilog->zl_suspending = B_TRUE; 3836 mutex_exit(&zilog->zl_lock); 3837 3838 /* 3839 * We need to use zil_commit_impl to ensure we wait for all 3840 * LWB_STATE_OPENED and LWB_STATE_ISSUED lwbs to be committed 3841 * to disk before proceeding. If we used zil_commit instead, it 3842 * would just call txg_wait_synced(), because zl_suspend is set. 3843 * txg_wait_synced() doesn't wait for these lwb's to be 3844 * LWB_STATE_FLUSH_DONE before returning. 3845 */ 3846 zil_commit_impl(zilog, 0); 3847 3848 /* 3849 * Now that we've ensured all lwb's are LWB_STATE_FLUSH_DONE, we 3850 * use txg_wait_synced() to ensure the data from the zilog has 3851 * migrated to the main pool before calling zil_destroy(). 
3852 */ 3853 txg_wait_synced(zilog->zl_dmu_pool, 0); 3854 3855 zil_destroy(zilog, B_FALSE); 3856 3857 mutex_enter(&zilog->zl_lock); 3858 zilog->zl_suspending = B_FALSE; 3859 cv_broadcast(&zilog->zl_cv_suspend); 3860 mutex_exit(&zilog->zl_lock); 3861 3862 if (os->os_encrypted) 3863 dsl_dataset_remove_key_mapping(dmu_objset_ds(os)); 3864 3865 if (cookiep == NULL) 3866 zil_resume(os); 3867 else 3868 *cookiep = os; 3869 return (0); 3870 } 3871 3872 void 3873 zil_resume(void *cookie) 3874 { 3875 objset_t *os = cookie; 3876 zilog_t *zilog = dmu_objset_zil(os); 3877 3878 mutex_enter(&zilog->zl_lock); 3879 ASSERT(zilog->zl_suspend != 0); 3880 zilog->zl_suspend--; 3881 mutex_exit(&zilog->zl_lock); 3882 dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); 3883 dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); 3884 } 3885 3886 typedef struct zil_replay_arg { 3887 zil_replay_func_t *const *zr_replay; 3888 void *zr_arg; 3889 boolean_t zr_byteswap; 3890 char *zr_lr; 3891 } zil_replay_arg_t; 3892 3893 static int 3894 zil_replay_error(zilog_t *zilog, const lr_t *lr, int error) 3895 { 3896 char name[ZFS_MAX_DATASET_NAME_LEN]; 3897 3898 zilog->zl_replaying_seq--; /* didn't actually replay this one */ 3899 3900 dmu_objset_name(zilog->zl_os, name); 3901 3902 cmn_err(CE_WARN, "ZFS replay transaction error %d, " 3903 "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name, 3904 (u_longlong_t)lr->lrc_seq, 3905 (u_longlong_t)(lr->lrc_txtype & ~TX_CI), 3906 (lr->lrc_txtype & TX_CI) ? "CI" : ""); 3907 3908 return (error); 3909 } 3910 3911 static int 3912 zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra, 3913 uint64_t claim_txg) 3914 { 3915 zil_replay_arg_t *zr = zra; 3916 const zil_header_t *zh = zilog->zl_header; 3917 uint64_t reclen = lr->lrc_reclen; 3918 uint64_t txtype = lr->lrc_txtype; 3919 int error = 0; 3920 3921 zilog->zl_replaying_seq = lr->lrc_seq; 3922 3923 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ 3924 return (0); 3925 3926 if (lr->lrc_txg < claim_txg) /* already committed */ 3927 return (0); 3928 3929 /* Strip case-insensitive bit, still present in log record */ 3930 txtype &= ~TX_CI; 3931 3932 if (txtype == 0 || txtype >= TX_MAX_TYPE) 3933 return (zil_replay_error(zilog, lr, EINVAL)); 3934 3935 /* 3936 * If this record type can be logged out of order, the object 3937 * (lr_foid) may no longer exist. That's legitimate, not an error. 3938 */ 3939 if (TX_OOO(txtype)) { 3940 error = dmu_object_info(zilog->zl_os, 3941 LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL); 3942 if (error == ENOENT || error == EEXIST) 3943 return (0); 3944 } 3945 3946 /* 3947 * Make a copy of the data so we can revise and extend it. 3948 */ 3949 memcpy(zr->zr_lr, lr, reclen); 3950 3951 /* 3952 * If this is a TX_WRITE with a blkptr, suck in the data. 3953 */ 3954 if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { 3955 error = zil_read_log_data(zilog, (lr_write_t *)lr, 3956 zr->zr_lr + reclen); 3957 if (error != 0) 3958 return (zil_replay_error(zilog, lr, error)); 3959 } 3960 3961 /* 3962 * The log block containing this lr may have been byteswapped 3963 * so that we can easily examine common fields like lrc_txtype. 3964 * However, the log is a mix of different record types, and only the 3965 * replay vectors know how to byteswap their records. Therefore, if 3966 * the lr was byteswapped, undo it before invoking the replay vector. 
3967 */ 3968 if (zr->zr_byteswap) 3969 byteswap_uint64_array(zr->zr_lr, reclen); 3970 3971 /* 3972 * We must now do two things atomically: replay this log record, 3973 * and update the log header sequence number to reflect the fact that 3974 * we did so. At the end of each replay function the sequence number 3975 * is updated if we are in replay mode. 3976 */ 3977 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); 3978 if (error != 0) { 3979 /* 3980 * The DMU's dnode layer doesn't see removes until the txg 3981 * commits, so a subsequent claim can spuriously fail with 3982 * EEXIST. So if we receive any error we try syncing out 3983 * any removes then retry the transaction. Note that we 3984 * specify B_FALSE for byteswap now, so we don't do it twice. 3985 */ 3986 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); 3987 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); 3988 if (error != 0) 3989 return (zil_replay_error(zilog, lr, error)); 3990 } 3991 return (0); 3992 } 3993 3994 static int 3995 zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg) 3996 { 3997 (void) bp, (void) arg, (void) claim_txg; 3998 3999 zilog->zl_replay_blks++; 4000 4001 return (0); 4002 } 4003 4004 /* 4005 * If this dataset has a non-empty intent log, replay it and destroy it. 4006 * Return B_TRUE if there were any entries to replay. 4007 */ 4008 boolean_t 4009 zil_replay(objset_t *os, void *arg, 4010 zil_replay_func_t *const replay_func[TX_MAX_TYPE]) 4011 { 4012 zilog_t *zilog = dmu_objset_zil(os); 4013 const zil_header_t *zh = zilog->zl_header; 4014 zil_replay_arg_t zr; 4015 4016 if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) { 4017 return (zil_destroy(zilog, B_TRUE)); 4018 } 4019 4020 zr.zr_replay = replay_func; 4021 zr.zr_arg = arg; 4022 zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log); 4023 zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP); 4024 4025 /* 4026 * Wait for in-progress removes to sync before starting replay. 
4027 */ 4028 txg_wait_synced(zilog->zl_dmu_pool, 0); 4029 4030 zilog->zl_replay = B_TRUE; 4031 zilog->zl_replay_time = ddi_get_lbolt(); 4032 ASSERT(zilog->zl_replay_blks == 0); 4033 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr, 4034 zh->zh_claim_txg, B_TRUE); 4035 vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE); 4036 4037 zil_destroy(zilog, B_FALSE); 4038 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 4039 zilog->zl_replay = B_FALSE; 4040 4041 return (B_TRUE); 4042 } 4043 4044 boolean_t 4045 zil_replaying(zilog_t *zilog, dmu_tx_t *tx) 4046 { 4047 if (zilog->zl_sync == ZFS_SYNC_DISABLED) 4048 return (B_TRUE); 4049 4050 if (zilog->zl_replay) { 4051 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 4052 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = 4053 zilog->zl_replaying_seq; 4054 return (B_TRUE); 4055 } 4056 4057 return (B_FALSE); 4058 } 4059 4060 int 4061 zil_reset(const char *osname, void *arg) 4062 { 4063 (void) arg; 4064 4065 int error = zil_suspend(osname, NULL); 4066 /* EACCES means crypto key not loaded */ 4067 if ((error == EACCES) || (error == EBUSY)) 4068 return (SET_ERROR(error)); 4069 if (error != 0) 4070 return (SET_ERROR(EEXIST)); 4071 return (0); 4072 } 4073 4074 EXPORT_SYMBOL(zil_alloc); 4075 EXPORT_SYMBOL(zil_free); 4076 EXPORT_SYMBOL(zil_open); 4077 EXPORT_SYMBOL(zil_close); 4078 EXPORT_SYMBOL(zil_replay); 4079 EXPORT_SYMBOL(zil_replaying); 4080 EXPORT_SYMBOL(zil_destroy); 4081 EXPORT_SYMBOL(zil_destroy_sync); 4082 EXPORT_SYMBOL(zil_itx_create); 4083 EXPORT_SYMBOL(zil_itx_destroy); 4084 EXPORT_SYMBOL(zil_itx_assign); 4085 EXPORT_SYMBOL(zil_commit); 4086 EXPORT_SYMBOL(zil_claim); 4087 EXPORT_SYMBOL(zil_check_log_chain); 4088 EXPORT_SYMBOL(zil_sync); 4089 EXPORT_SYMBOL(zil_clean); 4090 EXPORT_SYMBOL(zil_suspend); 4091 EXPORT_SYMBOL(zil_resume); 4092 EXPORT_SYMBOL(zil_lwb_add_block); 4093 EXPORT_SYMBOL(zil_bp_tree_add); 4094 EXPORT_SYMBOL(zil_set_sync); 4095 EXPORT_SYMBOL(zil_set_logbias); 4096 EXPORT_SYMBOL(zil_sums_init); 4097 EXPORT_SYMBOL(zil_sums_fini); 4098 EXPORT_SYMBOL(zil_kstat_values_update); 4099 4100 ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, UINT, ZMOD_RW, 4101 "ZIL block open timeout percentage"); 4102 4103 ZFS_MODULE_PARAM(zfs_zil, zil_, min_commit_timeout, U64, ZMOD_RW, 4104 "Minimum time to delay ZIL block commit waiting for more records"); 4105 4106 ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW, 4107 "Disable intent logging replay"); 4108 4109 ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW, 4110 "Disable ZIL cache flushes"); 4111 4112 ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, U64, ZMOD_RW, 4113 "Limit in bytes of slog sync writes per commit"); 4114 4115 ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, UINT, ZMOD_RW, 4116 "Maximum ZIL log block size in bytes"); 4117
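/*
 * Usage note (assuming the usual Linux module-parameter plumbing for
 * OpenZFS; the paths below are illustrative): the tunables declared
 * above surface read-write under /sys/module/zfs/parameters/, e.g.
 *
 *     echo 10 > /sys/module/zfs/parameters/zfs_commit_timeout_pct
 *     echo 1 > /sys/module/zfs/parameters/zil_replay_disable
 *
 * and can also be set at module load time (e.g. via an "options zfs"
 * line in /etc/modprobe.d/zfs.conf).
 */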