1 // SPDX-License-Identifier: CDDL-1.0 2 /* 3 * CDDL HEADER START 4 * 5 * The contents of this file are subject to the terms of the 6 * Common Development and Distribution License (the "License"). 7 * You may not use this file except in compliance with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or https://opensource.org/licenses/CDDL-1.0. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2018 by Delphix. All rights reserved. 25 * Copyright (c) 2014 Integros [integros.com] 26 * Copyright (c) 2018 Datto Inc. 27 */ 28 29 /* Portions Copyright 2010 Robert Milkowski */ 30 31 #include <sys/zfs_context.h> 32 #include <sys/spa.h> 33 #include <sys/spa_impl.h> 34 #include <sys/dmu.h> 35 #include <sys/zap.h> 36 #include <sys/arc.h> 37 #include <sys/stat.h> 38 #include <sys/zil.h> 39 #include <sys/zil_impl.h> 40 #include <sys/dsl_dataset.h> 41 #include <sys/vdev_impl.h> 42 #include <sys/dmu_tx.h> 43 #include <sys/dsl_pool.h> 44 #include <sys/metaslab.h> 45 #include <sys/trace_zfs.h> 46 #include <sys/abd.h> 47 #include <sys/brt.h> 48 #include <sys/wmsum.h> 49 50 /* 51 * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system 52 * calls that change the file system. Each itx has enough information to 53 * be able to replay them after a system crash, power loss, or 54 * equivalent failure mode. These are stored in memory until either: 55 * 56 * 1. they are committed to the pool by the DMU transaction group 57 * (txg), at which point they can be discarded; or 58 * 2. they are committed to the on-disk ZIL for the dataset being 59 * modified (e.g. due to an fsync, O_DSYNC, or other synchronous 60 * requirement). 61 * 62 * In the event of a crash or power loss, the itxs contained by each 63 * dataset's on-disk ZIL will be replayed when that dataset is first 64 * instantiated (e.g. if the dataset is a normal filesystem, when it is 65 * first mounted). 66 * 67 * As hinted at above, there is one ZIL per dataset (both the in-memory 68 * representation, and the on-disk representation). The on-disk format 69 * consists of 3 parts: 70 * 71 * - a single, per-dataset, ZIL header; which points to a chain of 72 * - zero or more ZIL blocks; each of which contains 73 * - zero or more ZIL records 74 * 75 * A ZIL record holds the information necessary to replay a single 76 * system call transaction. A ZIL block can hold many ZIL records, and 77 * the blocks are chained together, similarly to a singly linked list. 78 * 79 * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL 80 * block in the chain, and the ZIL header points to the first block in 81 * the chain. 82 * 83 * Note, there is not a fixed place in the pool to hold these ZIL 84 * blocks; they are dynamically allocated and freed as needed from the 85 * blocks available on the pool, though they can be preferentially 86 * allocated from a dedicated "log" vdev. 
87 */ 88 89 /* 90 * This controls the amount of time that a ZIL block (lwb) will remain 91 * "open" when it isn't "full", and it has a thread waiting for it to be 92 * committed to stable storage. Please refer to the zil_commit_waiter() 93 * function (and the comments within it) for more details. 94 */ 95 static uint_t zfs_commit_timeout_pct = 10; 96 97 /* 98 * See zil.h for more information about these fields. 99 */ 100 static zil_kstat_values_t zil_stats = { 101 { "zil_commit_count", KSTAT_DATA_UINT64 }, 102 { "zil_commit_writer_count", KSTAT_DATA_UINT64 }, 103 { "zil_commit_error_count", KSTAT_DATA_UINT64 }, 104 { "zil_commit_stall_count", KSTAT_DATA_UINT64 }, 105 { "zil_commit_suspend_count", KSTAT_DATA_UINT64 }, 106 { "zil_itx_count", KSTAT_DATA_UINT64 }, 107 { "zil_itx_indirect_count", KSTAT_DATA_UINT64 }, 108 { "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 }, 109 { "zil_itx_copied_count", KSTAT_DATA_UINT64 }, 110 { "zil_itx_copied_bytes", KSTAT_DATA_UINT64 }, 111 { "zil_itx_needcopy_count", KSTAT_DATA_UINT64 }, 112 { "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 }, 113 { "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 }, 114 { "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 }, 115 { "zil_itx_metaslab_normal_write", KSTAT_DATA_UINT64 }, 116 { "zil_itx_metaslab_normal_alloc", KSTAT_DATA_UINT64 }, 117 { "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 }, 118 { "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 }, 119 { "zil_itx_metaslab_slog_write", KSTAT_DATA_UINT64 }, 120 { "zil_itx_metaslab_slog_alloc", KSTAT_DATA_UINT64 }, 121 }; 122 123 static zil_sums_t zil_sums_global; 124 static kstat_t *zil_kstats_global; 125 126 /* 127 * Disable intent logging replay. This global ZIL switch affects all pools. 128 */ 129 int zil_replay_disable = 0; 130 131 /* 132 * Disable the flush commands that are normally sent to the disk(s) by the ZIL 133 * after an LWB write has completed. Setting this will cause ZIL corruption on 134 * power loss if a volatile out-of-order write cache is enabled. 135 */ 136 static int zil_nocacheflush = 0; 137 138 /* 139 * Limit SLOG write size per commit executed with synchronous priority. 140 * Any writes above that will be executed with lower (asynchronous) priority 141 * to limit potential SLOG device abuse by single active ZIL writer. 
142 */ 143 static uint64_t zil_slog_bulk = 64 * 1024 * 1024; 144 145 static kmem_cache_t *zil_lwb_cache; 146 static kmem_cache_t *zil_zcw_cache; 147 148 static void zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx); 149 static itx_t *zil_itx_clone(itx_t *oitx); 150 static uint64_t zil_max_waste_space(zilog_t *zilog); 151 152 static int 153 zil_bp_compare(const void *x1, const void *x2) 154 { 155 const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva; 156 const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva; 157 158 int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2)); 159 if (likely(cmp)) 160 return (cmp); 161 162 return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2))); 163 } 164 165 static void 166 zil_bp_tree_init(zilog_t *zilog) 167 { 168 avl_create(&zilog->zl_bp_tree, zil_bp_compare, 169 sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node)); 170 } 171 172 static void 173 zil_bp_tree_fini(zilog_t *zilog) 174 { 175 avl_tree_t *t = &zilog->zl_bp_tree; 176 zil_bp_node_t *zn; 177 void *cookie = NULL; 178 179 while ((zn = avl_destroy_nodes(t, &cookie)) != NULL) 180 kmem_free(zn, sizeof (zil_bp_node_t)); 181 182 avl_destroy(t); 183 } 184 185 int 186 zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp) 187 { 188 avl_tree_t *t = &zilog->zl_bp_tree; 189 const dva_t *dva; 190 zil_bp_node_t *zn; 191 avl_index_t where; 192 193 if (BP_IS_EMBEDDED(bp)) 194 return (0); 195 196 dva = BP_IDENTITY(bp); 197 198 if (avl_find(t, dva, &where) != NULL) 199 return (SET_ERROR(EEXIST)); 200 201 zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP); 202 zn->zn_dva = *dva; 203 avl_insert(t, zn, where); 204 205 return (0); 206 } 207 208 static zil_header_t * 209 zil_header_in_syncing_context(zilog_t *zilog) 210 { 211 return ((zil_header_t *)zilog->zl_header); 212 } 213 214 static void 215 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp) 216 { 217 zio_cksum_t *zc = &bp->blk_cksum; 218 219 (void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0], 220 sizeof (zc->zc_word[ZIL_ZC_GUID_0])); 221 (void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1], 222 sizeof (zc->zc_word[ZIL_ZC_GUID_1])); 223 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os); 224 zc->zc_word[ZIL_ZC_SEQ] = 1ULL; 225 } 226 227 static int 228 zil_kstats_global_update(kstat_t *ksp, int rw) 229 { 230 zil_kstat_values_t *zs = ksp->ks_data; 231 ASSERT3P(&zil_stats, ==, zs); 232 233 if (rw == KSTAT_WRITE) { 234 return (SET_ERROR(EACCES)); 235 } 236 237 zil_kstat_values_update(zs, &zil_sums_global); 238 239 return (0); 240 } 241 242 /* 243 * Read a log block and make sure it's valid. 244 */ 245 static int 246 zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp, 247 blkptr_t *nbp, char **begin, char **end, arc_buf_t **abuf) 248 { 249 zio_flag_t zio_flags = ZIO_FLAG_CANFAIL; 250 arc_flags_t aflags = ARC_FLAG_WAIT; 251 zbookmark_phys_t zb; 252 int error; 253 254 if (zilog->zl_header->zh_claim_txg == 0) 255 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 256 257 if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 258 zio_flags |= ZIO_FLAG_SPECULATIVE; 259 260 if (!decrypt) 261 zio_flags |= ZIO_FLAG_RAW; 262 263 SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET], 264 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]); 265 266 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, 267 abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 268 269 if (error == 0) { 270 zio_cksum_t cksum = bp->blk_cksum; 271 272 /* 273 * Validate the checksummed log block. 
274 * 275 * Sequence numbers should be... sequential. The checksum 276 * verifier for the next block should be bp's checksum plus 1. 277 * 278 * Also check the log chain linkage and size used. 279 */ 280 cksum.zc_word[ZIL_ZC_SEQ]++; 281 282 uint64_t size = BP_GET_LSIZE(bp); 283 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 284 zil_chain_t *zilc = (*abuf)->b_data; 285 char *lr = (char *)(zilc + 1); 286 287 if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 288 sizeof (cksum)) || 289 zilc->zc_nused < sizeof (*zilc) || 290 zilc->zc_nused > size) { 291 error = SET_ERROR(ECKSUM); 292 } else { 293 *begin = lr; 294 *end = lr + zilc->zc_nused - sizeof (*zilc); 295 *nbp = zilc->zc_next_blk; 296 } 297 } else { 298 char *lr = (*abuf)->b_data; 299 zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1; 300 301 if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 302 sizeof (cksum)) || 303 (zilc->zc_nused > (size - sizeof (*zilc)))) { 304 error = SET_ERROR(ECKSUM); 305 } else { 306 *begin = lr; 307 *end = lr + zilc->zc_nused; 308 *nbp = zilc->zc_next_blk; 309 } 310 } 311 } 312 313 return (error); 314 } 315 316 /* 317 * Read a TX_WRITE log data block. 318 */ 319 static int 320 zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf) 321 { 322 zio_flag_t zio_flags = ZIO_FLAG_CANFAIL; 323 const blkptr_t *bp = &lr->lr_blkptr; 324 arc_flags_t aflags = ARC_FLAG_WAIT; 325 arc_buf_t *abuf = NULL; 326 zbookmark_phys_t zb; 327 int error; 328 329 if (BP_IS_HOLE(bp)) { 330 if (wbuf != NULL) 331 memset(wbuf, 0, MAX(BP_GET_LSIZE(bp), lr->lr_length)); 332 return (0); 333 } 334 335 if (zilog->zl_header->zh_claim_txg == 0) 336 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 337 338 /* 339 * If we are not using the resulting data, we are just checking that 340 * it hasn't been corrupted so we don't need to waste CPU time 341 * decompressing and decrypting it. 
342 */ 343 if (wbuf == NULL) 344 zio_flags |= ZIO_FLAG_RAW; 345 346 ASSERT3U(BP_GET_LSIZE(bp), !=, 0); 347 SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid, 348 ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp)); 349 350 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 351 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 352 353 if (error == 0) { 354 if (wbuf != NULL) 355 memcpy(wbuf, abuf->b_data, arc_buf_size(abuf)); 356 arc_buf_destroy(abuf, &abuf); 357 } 358 359 return (error); 360 } 361 362 void 363 zil_sums_init(zil_sums_t *zs) 364 { 365 wmsum_init(&zs->zil_commit_count, 0); 366 wmsum_init(&zs->zil_commit_writer_count, 0); 367 wmsum_init(&zs->zil_commit_error_count, 0); 368 wmsum_init(&zs->zil_commit_stall_count, 0); 369 wmsum_init(&zs->zil_commit_suspend_count, 0); 370 wmsum_init(&zs->zil_itx_count, 0); 371 wmsum_init(&zs->zil_itx_indirect_count, 0); 372 wmsum_init(&zs->zil_itx_indirect_bytes, 0); 373 wmsum_init(&zs->zil_itx_copied_count, 0); 374 wmsum_init(&zs->zil_itx_copied_bytes, 0); 375 wmsum_init(&zs->zil_itx_needcopy_count, 0); 376 wmsum_init(&zs->zil_itx_needcopy_bytes, 0); 377 wmsum_init(&zs->zil_itx_metaslab_normal_count, 0); 378 wmsum_init(&zs->zil_itx_metaslab_normal_bytes, 0); 379 wmsum_init(&zs->zil_itx_metaslab_normal_write, 0); 380 wmsum_init(&zs->zil_itx_metaslab_normal_alloc, 0); 381 wmsum_init(&zs->zil_itx_metaslab_slog_count, 0); 382 wmsum_init(&zs->zil_itx_metaslab_slog_bytes, 0); 383 wmsum_init(&zs->zil_itx_metaslab_slog_write, 0); 384 wmsum_init(&zs->zil_itx_metaslab_slog_alloc, 0); 385 } 386 387 void 388 zil_sums_fini(zil_sums_t *zs) 389 { 390 wmsum_fini(&zs->zil_commit_count); 391 wmsum_fini(&zs->zil_commit_writer_count); 392 wmsum_fini(&zs->zil_commit_error_count); 393 wmsum_fini(&zs->zil_commit_stall_count); 394 wmsum_fini(&zs->zil_commit_suspend_count); 395 wmsum_fini(&zs->zil_itx_count); 396 wmsum_fini(&zs->zil_itx_indirect_count); 397 wmsum_fini(&zs->zil_itx_indirect_bytes); 398 wmsum_fini(&zs->zil_itx_copied_count); 399 wmsum_fini(&zs->zil_itx_copied_bytes); 400 wmsum_fini(&zs->zil_itx_needcopy_count); 401 wmsum_fini(&zs->zil_itx_needcopy_bytes); 402 wmsum_fini(&zs->zil_itx_metaslab_normal_count); 403 wmsum_fini(&zs->zil_itx_metaslab_normal_bytes); 404 wmsum_fini(&zs->zil_itx_metaslab_normal_write); 405 wmsum_fini(&zs->zil_itx_metaslab_normal_alloc); 406 wmsum_fini(&zs->zil_itx_metaslab_slog_count); 407 wmsum_fini(&zs->zil_itx_metaslab_slog_bytes); 408 wmsum_fini(&zs->zil_itx_metaslab_slog_write); 409 wmsum_fini(&zs->zil_itx_metaslab_slog_alloc); 410 } 411 412 void 413 zil_kstat_values_update(zil_kstat_values_t *zs, zil_sums_t *zil_sums) 414 { 415 zs->zil_commit_count.value.ui64 = 416 wmsum_value(&zil_sums->zil_commit_count); 417 zs->zil_commit_writer_count.value.ui64 = 418 wmsum_value(&zil_sums->zil_commit_writer_count); 419 zs->zil_commit_error_count.value.ui64 = 420 wmsum_value(&zil_sums->zil_commit_error_count); 421 zs->zil_commit_stall_count.value.ui64 = 422 wmsum_value(&zil_sums->zil_commit_stall_count); 423 zs->zil_commit_suspend_count.value.ui64 = 424 wmsum_value(&zil_sums->zil_commit_suspend_count); 425 zs->zil_itx_count.value.ui64 = 426 wmsum_value(&zil_sums->zil_itx_count); 427 zs->zil_itx_indirect_count.value.ui64 = 428 wmsum_value(&zil_sums->zil_itx_indirect_count); 429 zs->zil_itx_indirect_bytes.value.ui64 = 430 wmsum_value(&zil_sums->zil_itx_indirect_bytes); 431 zs->zil_itx_copied_count.value.ui64 = 432 wmsum_value(&zil_sums->zil_itx_copied_count); 433 zs->zil_itx_copied_bytes.value.ui64 = 434 
wmsum_value(&zil_sums->zil_itx_copied_bytes); 435 zs->zil_itx_needcopy_count.value.ui64 = 436 wmsum_value(&zil_sums->zil_itx_needcopy_count); 437 zs->zil_itx_needcopy_bytes.value.ui64 = 438 wmsum_value(&zil_sums->zil_itx_needcopy_bytes); 439 zs->zil_itx_metaslab_normal_count.value.ui64 = 440 wmsum_value(&zil_sums->zil_itx_metaslab_normal_count); 441 zs->zil_itx_metaslab_normal_bytes.value.ui64 = 442 wmsum_value(&zil_sums->zil_itx_metaslab_normal_bytes); 443 zs->zil_itx_metaslab_normal_write.value.ui64 = 444 wmsum_value(&zil_sums->zil_itx_metaslab_normal_write); 445 zs->zil_itx_metaslab_normal_alloc.value.ui64 = 446 wmsum_value(&zil_sums->zil_itx_metaslab_normal_alloc); 447 zs->zil_itx_metaslab_slog_count.value.ui64 = 448 wmsum_value(&zil_sums->zil_itx_metaslab_slog_count); 449 zs->zil_itx_metaslab_slog_bytes.value.ui64 = 450 wmsum_value(&zil_sums->zil_itx_metaslab_slog_bytes); 451 zs->zil_itx_metaslab_slog_write.value.ui64 = 452 wmsum_value(&zil_sums->zil_itx_metaslab_slog_write); 453 zs->zil_itx_metaslab_slog_alloc.value.ui64 = 454 wmsum_value(&zil_sums->zil_itx_metaslab_slog_alloc); 455 } 456 457 /* 458 * Parse the intent log, and call parse_func for each valid record within. 459 */ 460 int 461 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, 462 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg, 463 boolean_t decrypt) 464 { 465 const zil_header_t *zh = zilog->zl_header; 466 boolean_t claimed = !!zh->zh_claim_txg; 467 uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX; 468 uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX; 469 uint64_t max_blk_seq = 0; 470 uint64_t max_lr_seq = 0; 471 uint64_t blk_count = 0; 472 uint64_t lr_count = 0; 473 blkptr_t blk, next_blk = {{{{0}}}}; 474 int error = 0; 475 476 /* 477 * Old logs didn't record the maximum zh_claim_lr_seq. 478 */ 479 if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 480 claim_lr_seq = UINT64_MAX; 481 482 /* 483 * Starting at the block pointed to by zh_log we read the log chain. 484 * For each block in the chain we strongly check that block to 485 * ensure its validity. We stop when an invalid block is found. 486 * For each block pointer in the chain we call parse_blk_func(). 487 * For each record in each valid block we call parse_lr_func(). 488 * If the log has been claimed, stop if we encounter a sequence 489 * number greater than the highest claimed sequence number. 490 */ 491 zil_bp_tree_init(zilog); 492 493 for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) { 494 uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ]; 495 int reclen; 496 char *lrp, *end; 497 arc_buf_t *abuf = NULL; 498 499 if (blk_seq > claim_blk_seq) 500 break; 501 502 error = parse_blk_func(zilog, &blk, arg, txg); 503 if (error != 0) 504 break; 505 ASSERT3U(max_blk_seq, <, blk_seq); 506 max_blk_seq = blk_seq; 507 blk_count++; 508 509 if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq) 510 break; 511 512 error = zil_read_log_block(zilog, decrypt, &blk, &next_blk, 513 &lrp, &end, &abuf); 514 if (error != 0) { 515 if (abuf) 516 arc_buf_destroy(abuf, &abuf); 517 if (claimed) { 518 char name[ZFS_MAX_DATASET_NAME_LEN]; 519 520 dmu_objset_name(zilog->zl_os, name); 521 522 cmn_err(CE_WARN, "ZFS read log block error %d, " 523 "dataset %s, seq 0x%llx\n", error, name, 524 (u_longlong_t)blk_seq); 525 } 526 break; 527 } 528 529 for (; lrp < end; lrp += reclen) { 530 lr_t *lr = (lr_t *)lrp; 531 532 /* 533 * Are the remaining bytes large enough to hold an 534 * log record? 
535 */ 536 if ((char *)(lr + 1) > end) { 537 cmn_err(CE_WARN, "zil_parse: lr_t overrun"); 538 error = SET_ERROR(ECKSUM); 539 arc_buf_destroy(abuf, &abuf); 540 goto done; 541 } 542 reclen = lr->lrc_reclen; 543 if (reclen < sizeof (lr_t) || reclen > end - lrp) { 544 cmn_err(CE_WARN, 545 "zil_parse: lr_t has an invalid reclen"); 546 error = SET_ERROR(ECKSUM); 547 arc_buf_destroy(abuf, &abuf); 548 goto done; 549 } 550 551 if (lr->lrc_seq > claim_lr_seq) { 552 arc_buf_destroy(abuf, &abuf); 553 goto done; 554 } 555 556 error = parse_lr_func(zilog, lr, arg, txg); 557 if (error != 0) { 558 arc_buf_destroy(abuf, &abuf); 559 goto done; 560 } 561 ASSERT3U(max_lr_seq, <, lr->lrc_seq); 562 max_lr_seq = lr->lrc_seq; 563 lr_count++; 564 } 565 arc_buf_destroy(abuf, &abuf); 566 } 567 done: 568 zilog->zl_parse_error = error; 569 zilog->zl_parse_blk_seq = max_blk_seq; 570 zilog->zl_parse_lr_seq = max_lr_seq; 571 zilog->zl_parse_blk_count = blk_count; 572 zilog->zl_parse_lr_count = lr_count; 573 574 zil_bp_tree_fini(zilog); 575 576 return (error); 577 } 578 579 static int 580 zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx, 581 uint64_t first_txg) 582 { 583 (void) tx; 584 ASSERT(!BP_IS_HOLE(bp)); 585 586 /* 587 * As we call this function from the context of a rewind to a 588 * checkpoint, each ZIL block whose txg is later than the txg 589 * that we rewind to is invalid. Thus, we return -1 so 590 * zil_parse() doesn't attempt to read it. 591 */ 592 if (BP_GET_LOGICAL_BIRTH(bp) >= first_txg) 593 return (-1); 594 595 if (zil_bp_tree_add(zilog, bp) != 0) 596 return (0); 597 598 zio_free(zilog->zl_spa, first_txg, bp); 599 return (0); 600 } 601 602 static int 603 zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx, 604 uint64_t first_txg) 605 { 606 (void) zilog, (void) lrc, (void) tx, (void) first_txg; 607 return (0); 608 } 609 610 static int 611 zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx, 612 uint64_t first_txg) 613 { 614 /* 615 * Claim log block if not already committed and not already claimed. 616 * If tx == NULL, just verify that the block is claimable. 617 */ 618 if (BP_IS_HOLE(bp) || BP_GET_LOGICAL_BIRTH(bp) < first_txg || 619 zil_bp_tree_add(zilog, bp) != 0) 620 return (0); 621 622 return (zio_wait(zio_claim(NULL, zilog->zl_spa, 623 tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL, 624 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB))); 625 } 626 627 static int 628 zil_claim_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t first_txg) 629 { 630 lr_write_t *lr = (lr_write_t *)lrc; 631 int error; 632 633 ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr)); 634 635 /* 636 * If the block is not readable, don't claim it. This can happen 637 * in normal operation when a log block is written to disk before 638 * some of the dmu_sync() blocks it points to. In this case, the 639 * transaction cannot have been committed to anyone (we would have 640 * waited for all writes to be stable first), so it is semantically 641 * correct to declare this the end of the log. 
642 */ 643 if (BP_GET_LOGICAL_BIRTH(&lr->lr_blkptr) >= first_txg) { 644 error = zil_read_log_data(zilog, lr, NULL); 645 if (error != 0) 646 return (error); 647 } 648 649 return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg)); 650 } 651 652 static int 653 zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx, 654 uint64_t first_txg) 655 { 656 const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc; 657 const blkptr_t *bp; 658 spa_t *spa = zilog->zl_spa; 659 uint_t ii; 660 661 ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr)); 662 ASSERT3U(lrc->lrc_reclen, >=, offsetof(lr_clone_range_t, 663 lr_bps[lr->lr_nbps])); 664 665 if (tx == NULL) { 666 return (0); 667 } 668 669 /* 670 * XXX: Do we need to byteswap lr? 671 */ 672 673 for (ii = 0; ii < lr->lr_nbps; ii++) { 674 bp = &lr->lr_bps[ii]; 675 676 /* 677 * When data is embedded into the BP there is no need to create 678 * BRT entry as there is no data block. Just copy the BP as it 679 * contains the data. 680 */ 681 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 682 continue; 683 684 /* 685 * We can not handle block pointers from the future, since they 686 * are not yet allocated. It should not normally happen, but 687 * just in case lets be safe and just stop here now instead of 688 * corrupting the pool. 689 */ 690 if (BP_GET_BIRTH(bp) >= first_txg) 691 return (SET_ERROR(ENOENT)); 692 693 /* 694 * Assert the block is really allocated before we reference it. 695 */ 696 metaslab_check_free(spa, bp); 697 } 698 699 for (ii = 0; ii < lr->lr_nbps; ii++) { 700 bp = &lr->lr_bps[ii]; 701 if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) 702 brt_pending_add(spa, bp, tx); 703 } 704 705 return (0); 706 } 707 708 static int 709 zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx, 710 uint64_t first_txg) 711 { 712 713 switch (lrc->lrc_txtype) { 714 case TX_WRITE: 715 return (zil_claim_write(zilog, lrc, tx, first_txg)); 716 case TX_CLONE_RANGE: 717 return (zil_claim_clone_range(zilog, lrc, tx, first_txg)); 718 default: 719 return (0); 720 } 721 } 722 723 static int 724 zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx, 725 uint64_t claim_txg) 726 { 727 (void) claim_txg; 728 729 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 730 731 return (0); 732 } 733 734 static int 735 zil_free_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t claim_txg) 736 { 737 lr_write_t *lr = (lr_write_t *)lrc; 738 blkptr_t *bp = &lr->lr_blkptr; 739 740 ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr)); 741 742 /* 743 * If we previously claimed it, we need to free it. 
744 */ 745 if (BP_GET_LOGICAL_BIRTH(bp) >= claim_txg && 746 zil_bp_tree_add(zilog, bp) == 0 && !BP_IS_HOLE(bp)) { 747 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 748 } 749 750 return (0); 751 } 752 753 static int 754 zil_free_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx) 755 { 756 const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc; 757 const blkptr_t *bp; 758 spa_t *spa; 759 uint_t ii; 760 761 ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr)); 762 ASSERT3U(lrc->lrc_reclen, >=, offsetof(lr_clone_range_t, 763 lr_bps[lr->lr_nbps])); 764 765 if (tx == NULL) { 766 return (0); 767 } 768 769 spa = zilog->zl_spa; 770 771 for (ii = 0; ii < lr->lr_nbps; ii++) { 772 bp = &lr->lr_bps[ii]; 773 774 if (!BP_IS_HOLE(bp)) { 775 zio_free(spa, dmu_tx_get_txg(tx), bp); 776 } 777 } 778 779 return (0); 780 } 781 782 static int 783 zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx, 784 uint64_t claim_txg) 785 { 786 787 if (claim_txg == 0) { 788 return (0); 789 } 790 791 switch (lrc->lrc_txtype) { 792 case TX_WRITE: 793 return (zil_free_write(zilog, lrc, tx, claim_txg)); 794 case TX_CLONE_RANGE: 795 return (zil_free_clone_range(zilog, lrc, tx)); 796 default: 797 return (0); 798 } 799 } 800 801 static int 802 zil_lwb_vdev_compare(const void *x1, const void *x2) 803 { 804 const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev; 805 const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev; 806 807 return (TREE_CMP(v1, v2)); 808 } 809 810 /* 811 * Allocate a new lwb. We may already have a block pointer for it, in which 812 * case we get size and version from there. Or we may not yet, in which case 813 * we choose them here and later make the block allocation match. 814 */ 815 static lwb_t * 816 zil_alloc_lwb(zilog_t *zilog, int sz, blkptr_t *bp, boolean_t slog, 817 uint64_t txg, lwb_state_t state) 818 { 819 lwb_t *lwb; 820 821 lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP); 822 lwb->lwb_zilog = zilog; 823 if (bp) { 824 lwb->lwb_blk = *bp; 825 lwb->lwb_slim = (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2); 826 sz = BP_GET_LSIZE(bp); 827 } else { 828 BP_ZERO(&lwb->lwb_blk); 829 lwb->lwb_slim = (spa_version(zilog->zl_spa) >= 830 SPA_VERSION_SLIM_ZIL); 831 } 832 lwb->lwb_slog = slog; 833 lwb->lwb_error = 0; 834 if (lwb->lwb_slim) { 835 lwb->lwb_nmax = sz; 836 lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t); 837 } else { 838 lwb->lwb_nmax = sz - sizeof (zil_chain_t); 839 lwb->lwb_nused = lwb->lwb_nfilled = 0; 840 } 841 lwb->lwb_sz = sz; 842 lwb->lwb_state = state; 843 lwb->lwb_buf = zio_buf_alloc(sz); 844 lwb->lwb_child_zio = NULL; 845 lwb->lwb_write_zio = NULL; 846 lwb->lwb_root_zio = NULL; 847 lwb->lwb_issued_timestamp = 0; 848 lwb->lwb_issued_txg = 0; 849 lwb->lwb_alloc_txg = txg; 850 lwb->lwb_max_txg = 0; 851 852 mutex_enter(&zilog->zl_lock); 853 list_insert_tail(&zilog->zl_lwb_list, lwb); 854 if (state != LWB_STATE_NEW) 855 zilog->zl_last_lwb_opened = lwb; 856 mutex_exit(&zilog->zl_lock); 857 858 return (lwb); 859 } 860 861 static void 862 zil_free_lwb(zilog_t *zilog, lwb_t *lwb) 863 { 864 ASSERT(MUTEX_HELD(&zilog->zl_lock)); 865 ASSERT(lwb->lwb_state == LWB_STATE_NEW || 866 lwb->lwb_state == LWB_STATE_FLUSH_DONE); 867 ASSERT3P(lwb->lwb_child_zio, ==, NULL); 868 ASSERT3P(lwb->lwb_write_zio, ==, NULL); 869 ASSERT3P(lwb->lwb_root_zio, ==, NULL); 870 ASSERT3U(lwb->lwb_alloc_txg, <=, spa_syncing_txg(zilog->zl_spa)); 871 ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa)); 872 VERIFY(list_is_empty(&lwb->lwb_itxs)); 873 VERIFY(list_is_empty(&lwb->lwb_waiters)); 874 
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); 875 ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock)); 876 877 /* 878 * Clear the zilog's field to indicate this lwb is no longer 879 * valid, and prevent use-after-free errors. 880 */ 881 if (zilog->zl_last_lwb_opened == lwb) 882 zilog->zl_last_lwb_opened = NULL; 883 884 kmem_cache_free(zil_lwb_cache, lwb); 885 } 886 887 /* 888 * Called when we create in-memory log transactions so that we know 889 * to cleanup the itxs at the end of spa_sync(). 890 */ 891 static void 892 zilog_dirty(zilog_t *zilog, uint64_t txg) 893 { 894 dsl_pool_t *dp = zilog->zl_dmu_pool; 895 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os); 896 897 ASSERT(spa_writeable(zilog->zl_spa)); 898 899 if (ds->ds_is_snapshot) 900 panic("dirtying snapshot!"); 901 902 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) { 903 /* up the hold count until we can be written out */ 904 dmu_buf_add_ref(ds->ds_dbuf, zilog); 905 906 zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg); 907 } 908 } 909 910 /* 911 * Determine if the zil is dirty in the specified txg. Callers wanting to 912 * ensure that the dirty state does not change must hold the itxg_lock for 913 * the specified txg. Holding the lock will ensure that the zil cannot be 914 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current 915 * state. 916 */ 917 static boolean_t __maybe_unused 918 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg) 919 { 920 dsl_pool_t *dp = zilog->zl_dmu_pool; 921 922 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK)) 923 return (B_TRUE); 924 return (B_FALSE); 925 } 926 927 /* 928 * Determine if the zil is dirty. The zil is considered dirty if it has 929 * any pending itx records that have not been cleaned by zil_clean(). 930 */ 931 static boolean_t 932 zilog_is_dirty(zilog_t *zilog) 933 { 934 dsl_pool_t *dp = zilog->zl_dmu_pool; 935 936 for (int t = 0; t < TXG_SIZE; t++) { 937 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t)) 938 return (B_TRUE); 939 } 940 return (B_FALSE); 941 } 942 943 /* 944 * Its called in zil_commit context (zil_process_commit_list()/zil_create()). 945 * It activates SPA_FEATURE_ZILSAXATTR feature, if its enabled. 946 * Check dsl_dataset_feature_is_active to avoid txg_wait_synced() on every 947 * zil_commit. 948 */ 949 static void 950 zil_commit_activate_saxattr_feature(zilog_t *zilog) 951 { 952 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os); 953 uint64_t txg = 0; 954 dmu_tx_t *tx = NULL; 955 956 if (spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) && 957 dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL && 958 !dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR)) { 959 tx = dmu_tx_create(zilog->zl_os); 960 VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT)); 961 dsl_dataset_dirty(ds, tx); 962 txg = dmu_tx_get_txg(tx); 963 964 mutex_enter(&ds->ds_lock); 965 ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] = 966 (void *)B_TRUE; 967 mutex_exit(&ds->ds_lock); 968 dmu_tx_commit(tx); 969 txg_wait_synced(zilog->zl_dmu_pool, txg); 970 } 971 } 972 973 /* 974 * Create an on-disk intent log. 975 */ 976 static lwb_t * 977 zil_create(zilog_t *zilog) 978 { 979 const zil_header_t *zh = zilog->zl_header; 980 lwb_t *lwb = NULL; 981 uint64_t txg = 0; 982 dmu_tx_t *tx = NULL; 983 blkptr_t blk; 984 int error = 0; 985 boolean_t slog = FALSE; 986 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os); 987 988 989 /* 990 * Wait for any previous destroy to complete. 
991 */ 992 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 993 994 ASSERT(zh->zh_claim_txg == 0); 995 ASSERT(zh->zh_replay_seq == 0); 996 997 blk = zh->zh_log; 998 999 /* 1000 * Allocate an initial log block if: 1001 * - there isn't one already 1002 * - the existing block is the wrong endianness 1003 */ 1004 if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) { 1005 tx = dmu_tx_create(zilog->zl_os); 1006 VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT)); 1007 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 1008 txg = dmu_tx_get_txg(tx); 1009 1010 if (!BP_IS_HOLE(&blk)) { 1011 zio_free(zilog->zl_spa, txg, &blk); 1012 BP_ZERO(&blk); 1013 } 1014 1015 error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk, 1016 ZIL_MIN_BLKSZ, &slog); 1017 if (error == 0) 1018 zil_init_log_chain(zilog, &blk); 1019 } 1020 1021 /* 1022 * Allocate a log write block (lwb) for the first log block. 1023 */ 1024 if (error == 0) 1025 lwb = zil_alloc_lwb(zilog, 0, &blk, slog, txg, LWB_STATE_NEW); 1026 1027 /* 1028 * If we just allocated the first log block, commit our transaction 1029 * and wait for zil_sync() to stuff the block pointer into zh_log. 1030 * (zh is part of the MOS, so we cannot modify it in open context.) 1031 */ 1032 if (tx != NULL) { 1033 /* 1034 * If "zilsaxattr" feature is enabled on zpool, then activate 1035 * it now when we're creating the ZIL chain. We can't wait with 1036 * this until we write the first xattr log record because we 1037 * need to wait for the feature activation to sync out. 1038 */ 1039 if (spa_feature_is_enabled(zilog->zl_spa, 1040 SPA_FEATURE_ZILSAXATTR) && dmu_objset_type(zilog->zl_os) != 1041 DMU_OST_ZVOL) { 1042 mutex_enter(&ds->ds_lock); 1043 ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] = 1044 (void *)B_TRUE; 1045 mutex_exit(&ds->ds_lock); 1046 } 1047 1048 dmu_tx_commit(tx); 1049 txg_wait_synced(zilog->zl_dmu_pool, txg); 1050 } else { 1051 /* 1052 * This branch covers the case where we enable the feature on a 1053 * zpool that has existing ZIL headers. 1054 */ 1055 zil_commit_activate_saxattr_feature(zilog); 1056 } 1057 IMPLY(spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) && 1058 dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL, 1059 dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR)); 1060 1061 ASSERT(error != 0 || memcmp(&blk, &zh->zh_log, sizeof (blk)) == 0); 1062 IMPLY(error == 0, lwb != NULL); 1063 1064 return (lwb); 1065 } 1066 1067 /* 1068 * In one tx, free all log blocks and clear the log header. If keep_first 1069 * is set, then we're replaying a log with no content. We want to keep the 1070 * first block, however, so that the first synchronous transaction doesn't 1071 * require a txg_wait_synced() in zil_create(). We don't need to 1072 * txg_wait_synced() here either when keep_first is set, because both 1073 * zil_create() and zil_destroy() will wait for any in-progress destroys 1074 * to complete. 1075 * Return B_TRUE if there were any entries to replay. 1076 */ 1077 boolean_t 1078 zil_destroy(zilog_t *zilog, boolean_t keep_first) 1079 { 1080 const zil_header_t *zh = zilog->zl_header; 1081 lwb_t *lwb; 1082 dmu_tx_t *tx; 1083 uint64_t txg; 1084 1085 /* 1086 * Wait for any previous destroy to complete. 
1087 */ 1088 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 1089 1090 zilog->zl_old_header = *zh; /* debugging aid */ 1091 1092 if (BP_IS_HOLE(&zh->zh_log)) 1093 return (B_FALSE); 1094 1095 tx = dmu_tx_create(zilog->zl_os); 1096 VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT)); 1097 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 1098 txg = dmu_tx_get_txg(tx); 1099 1100 mutex_enter(&zilog->zl_lock); 1101 1102 ASSERT3U(zilog->zl_destroy_txg, <, txg); 1103 zilog->zl_destroy_txg = txg; 1104 zilog->zl_keep_first = keep_first; 1105 1106 if (!list_is_empty(&zilog->zl_lwb_list)) { 1107 ASSERT(zh->zh_claim_txg == 0); 1108 VERIFY(!keep_first); 1109 while ((lwb = list_remove_head(&zilog->zl_lwb_list)) != NULL) { 1110 if (lwb->lwb_buf != NULL) 1111 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 1112 if (!BP_IS_HOLE(&lwb->lwb_blk)) 1113 zio_free(zilog->zl_spa, txg, &lwb->lwb_blk); 1114 zil_free_lwb(zilog, lwb); 1115 } 1116 } else if (!keep_first) { 1117 zil_destroy_sync(zilog, tx); 1118 } 1119 mutex_exit(&zilog->zl_lock); 1120 1121 dmu_tx_commit(tx); 1122 1123 return (B_TRUE); 1124 } 1125 1126 void 1127 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx) 1128 { 1129 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 1130 (void) zil_parse(zilog, zil_free_log_block, 1131 zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE); 1132 } 1133 1134 int 1135 zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg) 1136 { 1137 dmu_tx_t *tx = txarg; 1138 zilog_t *zilog; 1139 uint64_t first_txg; 1140 zil_header_t *zh; 1141 objset_t *os; 1142 int error; 1143 1144 error = dmu_objset_own_obj(dp, ds->ds_object, 1145 DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os); 1146 if (error != 0) { 1147 /* 1148 * EBUSY indicates that the objset is inconsistent, in which 1149 * case it can not have a ZIL. 1150 */ 1151 if (error != EBUSY) { 1152 cmn_err(CE_WARN, "can't open objset for %llu, error %u", 1153 (unsigned long long)ds->ds_object, error); 1154 } 1155 1156 return (0); 1157 } 1158 1159 zilog = dmu_objset_zil(os); 1160 zh = zil_header_in_syncing_context(zilog); 1161 ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa)); 1162 first_txg = spa_min_claim_txg(zilog->zl_spa); 1163 1164 /* 1165 * If the spa_log_state is not set to be cleared, check whether 1166 * the current uberblock is a checkpoint one and if the current 1167 * header has been claimed before moving on. 1168 * 1169 * If the current uberblock is a checkpointed uberblock then 1170 * one of the following scenarios took place: 1171 * 1172 * 1] We are currently rewinding to the checkpoint of the pool. 1173 * 2] We crashed in the middle of a checkpoint rewind but we 1174 * did manage to write the checkpointed uberblock to the 1175 * vdev labels, so when we tried to import the pool again 1176 * the checkpointed uberblock was selected from the import 1177 * procedure. 1178 * 1179 * In both cases we want to zero out all the ZIL blocks, except 1180 * the ones that have been claimed at the time of the checkpoint 1181 * (their zh_claim_txg != 0). The reason is that these blocks 1182 * may be corrupted since we may have reused their locations on 1183 * disk after we took the checkpoint. 1184 * 1185 * We could try to set spa_log_state to SPA_LOG_CLEAR earlier 1186 * when we first figure out whether the current uberblock is 1187 * checkpointed or not. Unfortunately, that would discard all 1188 * the logs, including the ones that are claimed, and we would 1189 * leak space. 
1190 */ 1191 if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR || 1192 (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 && 1193 zh->zh_claim_txg == 0)) { 1194 if (!BP_IS_HOLE(&zh->zh_log)) { 1195 (void) zil_parse(zilog, zil_clear_log_block, 1196 zil_noop_log_record, tx, first_txg, B_FALSE); 1197 } 1198 BP_ZERO(&zh->zh_log); 1199 if (os->os_encrypted) 1200 os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE; 1201 dsl_dataset_dirty(dmu_objset_ds(os), tx); 1202 dmu_objset_disown(os, B_FALSE, FTAG); 1203 return (0); 1204 } 1205 1206 /* 1207 * If we are not rewinding and opening the pool normally, then 1208 * the min_claim_txg should be equal to the first txg of the pool. 1209 */ 1210 ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa)); 1211 1212 /* 1213 * Claim all log blocks if we haven't already done so, and remember 1214 * the highest claimed sequence number. This ensures that if we can 1215 * read only part of the log now (e.g. due to a missing device), 1216 * but we can read the entire log later, we will not try to replay 1217 * or destroy beyond the last block we successfully claimed. 1218 */ 1219 ASSERT3U(zh->zh_claim_txg, <=, first_txg); 1220 if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) { 1221 (void) zil_parse(zilog, zil_claim_log_block, 1222 zil_claim_log_record, tx, first_txg, B_FALSE); 1223 zh->zh_claim_txg = first_txg; 1224 zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq; 1225 zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq; 1226 if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1) 1227 zh->zh_flags |= ZIL_REPLAY_NEEDED; 1228 zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID; 1229 if (os->os_encrypted) 1230 os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE; 1231 dsl_dataset_dirty(dmu_objset_ds(os), tx); 1232 } 1233 1234 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1)); 1235 dmu_objset_disown(os, B_FALSE, FTAG); 1236 return (0); 1237 } 1238 1239 /* 1240 * Check the log by walking the log chain. 1241 * Checksum errors are ok as they indicate the end of the chain. 1242 * Any other error (no device or read failure) returns an error. 1243 */ 1244 int 1245 zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx) 1246 { 1247 (void) dp; 1248 zilog_t *zilog; 1249 objset_t *os; 1250 blkptr_t *bp; 1251 int error; 1252 1253 ASSERT(tx == NULL); 1254 1255 error = dmu_objset_from_ds(ds, &os); 1256 if (error != 0) { 1257 cmn_err(CE_WARN, "can't open objset %llu, error %d", 1258 (unsigned long long)ds->ds_object, error); 1259 return (0); 1260 } 1261 1262 zilog = dmu_objset_zil(os); 1263 bp = (blkptr_t *)&zilog->zl_header->zh_log; 1264 1265 if (!BP_IS_HOLE(bp)) { 1266 vdev_t *vd; 1267 boolean_t valid = B_TRUE; 1268 1269 /* 1270 * Check the first block and determine if it's on a log device 1271 * which may have been removed or faulted prior to loading this 1272 * pool. If so, there's no point in checking the rest of the 1273 * log as its content should have already been synced to the 1274 * pool. 1275 */ 1276 spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER); 1277 vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0])); 1278 if (vd->vdev_islog && vdev_is_dead(vd)) 1279 valid = vdev_log_state_valid(vd); 1280 spa_config_exit(os->os_spa, SCL_STATE, FTAG); 1281 1282 if (!valid) 1283 return (0); 1284 1285 /* 1286 * Check whether the current uberblock is checkpointed (e.g. 1287 * we are rewinding) and whether the current header has been 1288 * claimed or not. If it hasn't then skip verifying it. 
We 1289 * do this because its ZIL blocks may be part of the pool's 1290 * state before the rewind, which is no longer valid. 1291 */ 1292 zil_header_t *zh = zil_header_in_syncing_context(zilog); 1293 if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 && 1294 zh->zh_claim_txg == 0) 1295 return (0); 1296 } 1297 1298 /* 1299 * Because tx == NULL, zil_claim_log_block() will not actually claim 1300 * any blocks, but just determine whether it is possible to do so. 1301 * In addition to checking the log chain, zil_claim_log_block() 1302 * will invoke zio_claim() with a done func of spa_claim_notify(), 1303 * which will update spa_max_claim_txg. See spa_load() for details. 1304 */ 1305 error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx, 1306 zilog->zl_header->zh_claim_txg ? -1ULL : 1307 spa_min_claim_txg(os->os_spa), B_FALSE); 1308 1309 return ((error == ECKSUM || error == ENOENT) ? 0 : error); 1310 } 1311 1312 /* 1313 * When an itx is "skipped", this function is used to properly mark the 1314 * waiter as "done, and signal any thread(s) waiting on it. An itx can 1315 * be skipped (and not committed to an lwb) for a variety of reasons, 1316 * one of them being that the itx was committed via spa_sync(), prior to 1317 * it being committed to an lwb; this can happen if a thread calling 1318 * zil_commit() is racing with spa_sync(). 1319 */ 1320 static void 1321 zil_commit_waiter_skip(zil_commit_waiter_t *zcw) 1322 { 1323 mutex_enter(&zcw->zcw_lock); 1324 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 1325 zcw->zcw_done = B_TRUE; 1326 cv_broadcast(&zcw->zcw_cv); 1327 mutex_exit(&zcw->zcw_lock); 1328 } 1329 1330 /* 1331 * This function is used when the given waiter is to be linked into an 1332 * lwb's "lwb_waiter" list; i.e. when the itx is committed to the lwb. 1333 * At this point, the waiter will no longer be referenced by the itx, 1334 * and instead, will be referenced by the lwb. 1335 */ 1336 static void 1337 zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb) 1338 { 1339 /* 1340 * The lwb_waiters field of the lwb is protected by the zilog's 1341 * zl_issuer_lock while the lwb is open and zl_lock otherwise. 1342 * zl_issuer_lock also protects leaving the open state. 1343 * zcw_lwb setting is protected by zl_issuer_lock and state != 1344 * flush_done, which transition is protected by zl_lock. 1345 */ 1346 ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_issuer_lock)); 1347 IMPLY(lwb->lwb_state != LWB_STATE_OPENED, 1348 MUTEX_HELD(&lwb->lwb_zilog->zl_lock)); 1349 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_NEW); 1350 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); 1351 1352 ASSERT(!list_link_active(&zcw->zcw_node)); 1353 list_insert_tail(&lwb->lwb_waiters, zcw); 1354 ASSERT3P(zcw->zcw_lwb, ==, NULL); 1355 zcw->zcw_lwb = lwb; 1356 } 1357 1358 /* 1359 * This function is used when zio_alloc_zil() fails to allocate a ZIL 1360 * block, and the given waiter must be linked to the "nolwb waiters" 1361 * list inside of zil_process_commit_list(). 
1362 */ 1363 static void 1364 zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb) 1365 { 1366 ASSERT(!list_link_active(&zcw->zcw_node)); 1367 list_insert_tail(nolwb, zcw); 1368 ASSERT3P(zcw->zcw_lwb, ==, NULL); 1369 } 1370 1371 void 1372 zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp) 1373 { 1374 avl_tree_t *t = &lwb->lwb_vdev_tree; 1375 avl_index_t where; 1376 zil_vdev_node_t *zv, zvsearch; 1377 int ndvas = BP_GET_NDVAS(bp); 1378 int i; 1379 1380 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE); 1381 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); 1382 1383 if (zil_nocacheflush) 1384 return; 1385 1386 mutex_enter(&lwb->lwb_vdev_lock); 1387 for (i = 0; i < ndvas; i++) { 1388 zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 1389 if (avl_find(t, &zvsearch, &where) == NULL) { 1390 zv = kmem_alloc(sizeof (*zv), KM_SLEEP); 1391 zv->zv_vdev = zvsearch.zv_vdev; 1392 avl_insert(t, zv, where); 1393 } 1394 } 1395 mutex_exit(&lwb->lwb_vdev_lock); 1396 } 1397 1398 static void 1399 zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb) 1400 { 1401 avl_tree_t *src = &lwb->lwb_vdev_tree; 1402 avl_tree_t *dst = &nlwb->lwb_vdev_tree; 1403 void *cookie = NULL; 1404 zil_vdev_node_t *zv; 1405 1406 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE); 1407 ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE); 1408 ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); 1409 1410 /* 1411 * While 'lwb' is at a point in its lifetime where lwb_vdev_tree does 1412 * not need the protection of lwb_vdev_lock (it will only be modified 1413 * while holding zilog->zl_lock) as its writes and those of its 1414 * children have all completed. The younger 'nlwb' may be waiting on 1415 * future writes to additional vdevs. 1416 */ 1417 mutex_enter(&nlwb->lwb_vdev_lock); 1418 /* 1419 * Tear down the 'lwb' vdev tree, ensuring that entries which do not 1420 * exist in 'nlwb' are moved to it, freeing any would-be duplicates. 1421 */ 1422 while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) { 1423 avl_index_t where; 1424 1425 if (avl_find(dst, zv, &where) == NULL) { 1426 avl_insert(dst, zv, where); 1427 } else { 1428 kmem_free(zv, sizeof (*zv)); 1429 } 1430 } 1431 mutex_exit(&nlwb->lwb_vdev_lock); 1432 } 1433 1434 void 1435 zil_lwb_add_txg(lwb_t *lwb, uint64_t txg) 1436 { 1437 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg); 1438 } 1439 1440 /* 1441 * This function is a called after all vdevs associated with a given lwb write 1442 * have completed their flush command; or as soon as the lwb write completes, 1443 * if "zil_nocacheflush" is set. Further, all "previous" lwb's will have 1444 * completed before this function is called; i.e. this function is called for 1445 * all previous lwbs before it's called for "this" lwb (enforced via zio the 1446 * dependencies configured in zil_lwb_set_zio_dependency()). 1447 * 1448 * The intention is for this function to be called as soon as the contents of 1449 * an lwb are considered "stable" on disk, and will survive any sudden loss of 1450 * power. At this point, any threads waiting for the lwb to reach this state 1451 * are signalled, and the "waiter" structures are marked "done". 
1452 */ 1453 static void 1454 zil_lwb_flush_vdevs_done(zio_t *zio) 1455 { 1456 lwb_t *lwb = zio->io_private; 1457 zilog_t *zilog = lwb->lwb_zilog; 1458 zil_commit_waiter_t *zcw; 1459 itx_t *itx; 1460 1461 spa_config_exit(zilog->zl_spa, SCL_STATE, lwb); 1462 1463 hrtime_t t = gethrtime() - lwb->lwb_issued_timestamp; 1464 1465 mutex_enter(&zilog->zl_lock); 1466 1467 zilog->zl_last_lwb_latency = (zilog->zl_last_lwb_latency * 7 + t) / 8; 1468 1469 lwb->lwb_root_zio = NULL; 1470 1471 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE); 1472 lwb->lwb_state = LWB_STATE_FLUSH_DONE; 1473 1474 if (zilog->zl_last_lwb_opened == lwb) { 1475 /* 1476 * Remember the highest committed log sequence number 1477 * for ztest. We only update this value when all the log 1478 * writes succeeded, because ztest wants to ASSERT that 1479 * it got the whole log chain. 1480 */ 1481 zilog->zl_commit_lr_seq = zilog->zl_lr_seq; 1482 } 1483 1484 while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL) 1485 zil_itx_destroy(itx); 1486 1487 while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) { 1488 mutex_enter(&zcw->zcw_lock); 1489 1490 ASSERT3P(zcw->zcw_lwb, ==, lwb); 1491 zcw->zcw_lwb = NULL; 1492 /* 1493 * We expect any ZIO errors from child ZIOs to have been 1494 * propagated "up" to this specific LWB's root ZIO, in 1495 * order for this error handling to work correctly. This 1496 * includes ZIO errors from either this LWB's write or 1497 * flush, as well as any errors from other dependent LWBs 1498 * (e.g. a root LWB ZIO that might be a child of this LWB). 1499 * 1500 * With that said, it's important to note that LWB flush 1501 * errors are not propagated up to the LWB root ZIO. 1502 * This is incorrect behavior, and results in VDEV flush 1503 * errors not being handled correctly here. See the 1504 * comment above the call to "zio_flush" for details. 1505 */ 1506 1507 zcw->zcw_zio_error = zio->io_error; 1508 1509 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 1510 zcw->zcw_done = B_TRUE; 1511 cv_broadcast(&zcw->zcw_cv); 1512 1513 mutex_exit(&zcw->zcw_lock); 1514 } 1515 1516 uint64_t txg = lwb->lwb_issued_txg; 1517 1518 /* Once we drop the lock, lwb may be freed by zil_sync(). */ 1519 mutex_exit(&zilog->zl_lock); 1520 1521 mutex_enter(&zilog->zl_lwb_io_lock); 1522 ASSERT3U(zilog->zl_lwb_inflight[txg & TXG_MASK], >, 0); 1523 zilog->zl_lwb_inflight[txg & TXG_MASK]--; 1524 if (zilog->zl_lwb_inflight[txg & TXG_MASK] == 0) 1525 cv_broadcast(&zilog->zl_lwb_io_cv); 1526 mutex_exit(&zilog->zl_lwb_io_lock); 1527 } 1528 1529 /* 1530 * Wait for the completion of all issued write/flush of that txg provided. 1531 * It guarantees zil_lwb_flush_vdevs_done() is called and returned. 
1532 */ 1533 static void 1534 zil_lwb_flush_wait_all(zilog_t *zilog, uint64_t txg) 1535 { 1536 ASSERT3U(txg, ==, spa_syncing_txg(zilog->zl_spa)); 1537 1538 mutex_enter(&zilog->zl_lwb_io_lock); 1539 while (zilog->zl_lwb_inflight[txg & TXG_MASK] > 0) 1540 cv_wait(&zilog->zl_lwb_io_cv, &zilog->zl_lwb_io_lock); 1541 mutex_exit(&zilog->zl_lwb_io_lock); 1542 1543 #ifdef ZFS_DEBUG 1544 mutex_enter(&zilog->zl_lock); 1545 mutex_enter(&zilog->zl_lwb_io_lock); 1546 lwb_t *lwb = list_head(&zilog->zl_lwb_list); 1547 while (lwb != NULL) { 1548 if (lwb->lwb_issued_txg <= txg) { 1549 ASSERT(lwb->lwb_state != LWB_STATE_ISSUED); 1550 ASSERT(lwb->lwb_state != LWB_STATE_WRITE_DONE); 1551 IMPLY(lwb->lwb_issued_txg > 0, 1552 lwb->lwb_state == LWB_STATE_FLUSH_DONE); 1553 } 1554 IMPLY(lwb->lwb_state == LWB_STATE_WRITE_DONE || 1555 lwb->lwb_state == LWB_STATE_FLUSH_DONE, 1556 lwb->lwb_buf == NULL); 1557 lwb = list_next(&zilog->zl_lwb_list, lwb); 1558 } 1559 mutex_exit(&zilog->zl_lwb_io_lock); 1560 mutex_exit(&zilog->zl_lock); 1561 #endif 1562 } 1563 1564 /* 1565 * This is called when an lwb's write zio completes. The callback's purpose is 1566 * to issue the flush commands for the vdevs in the lwb's lwb_vdev_tree. The 1567 * tree will contain the vdevs involved in writing out this specific lwb's 1568 * data, and in the case that cache flushes have been deferred, vdevs involved 1569 * in writing the data for previous lwbs. The writes corresponding to all the 1570 * vdevs in the lwb_vdev_tree will have completed by the time this is called, 1571 * due to the zio dependencies configured in zil_lwb_set_zio_dependency(), 1572 * which takes deferred flushes into account. The lwb will be "done" once 1573 * zil_lwb_flush_vdevs_done() is called, which occurs in the zio completion 1574 * callback for the lwb's root zio. 1575 */ 1576 static void 1577 zil_lwb_write_done(zio_t *zio) 1578 { 1579 lwb_t *lwb = zio->io_private; 1580 spa_t *spa = zio->io_spa; 1581 zilog_t *zilog = lwb->lwb_zilog; 1582 avl_tree_t *t = &lwb->lwb_vdev_tree; 1583 void *cookie = NULL; 1584 zil_vdev_node_t *zv; 1585 lwb_t *nlwb; 1586 1587 ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0); 1588 1589 abd_free(zio->io_abd); 1590 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 1591 lwb->lwb_buf = NULL; 1592 1593 mutex_enter(&zilog->zl_lock); 1594 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED); 1595 lwb->lwb_state = LWB_STATE_WRITE_DONE; 1596 lwb->lwb_child_zio = NULL; 1597 lwb->lwb_write_zio = NULL; 1598 1599 /* 1600 * If nlwb is not yet issued, zil_lwb_set_zio_dependency() is not 1601 * called for it yet, and when it will be, it won't be able to make 1602 * its write ZIO a parent this ZIO. In such case we can not defer 1603 * our flushes or below may be a race between the done callbacks. 1604 */ 1605 nlwb = list_next(&zilog->zl_lwb_list, lwb); 1606 if (nlwb && nlwb->lwb_state != LWB_STATE_ISSUED) 1607 nlwb = NULL; 1608 mutex_exit(&zilog->zl_lock); 1609 1610 if (avl_numnodes(t) == 0) 1611 return; 1612 1613 /* 1614 * If there was an IO error, we're not going to call zio_flush() 1615 * on these vdevs, so we simply empty the tree and free the 1616 * nodes. We avoid calling zio_flush() since there isn't any 1617 * good reason for doing so, after the lwb block failed to be 1618 * written out. 1619 * 1620 * Additionally, we don't perform any further error handling at 1621 * this point (e.g. 
setting "zcw_zio_error" appropriately), as 1622 * we expect that to occur in "zil_lwb_flush_vdevs_done" (thus, 1623 * we expect any error seen here, to have been propagated to 1624 * that function). 1625 */ 1626 if (zio->io_error != 0) { 1627 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) 1628 kmem_free(zv, sizeof (*zv)); 1629 return; 1630 } 1631 1632 /* 1633 * If this lwb does not have any threads waiting for it to complete, we 1634 * want to defer issuing the flush command to the vdevs written to by 1635 * "this" lwb, and instead rely on the "next" lwb to handle the flush 1636 * command for those vdevs. Thus, we merge the vdev tree of "this" lwb 1637 * with the vdev tree of the "next" lwb in the list, and assume the 1638 * "next" lwb will handle flushing the vdevs (or deferring the flush(s) 1639 * again). 1640 * 1641 * This is a useful performance optimization, especially for workloads 1642 * with lots of async write activity and few sync write and/or fsync 1643 * activity, as it has the potential to coalesce multiple flush 1644 * commands to a vdev into one. 1645 */ 1646 if (list_is_empty(&lwb->lwb_waiters) && nlwb != NULL) { 1647 zil_lwb_flush_defer(lwb, nlwb); 1648 ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); 1649 return; 1650 } 1651 1652 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) { 1653 vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev); 1654 if (vd != NULL) { 1655 /* 1656 * The "ZIO_FLAG_DONT_PROPAGATE" is currently 1657 * always used within "zio_flush". This means, 1658 * any errors when flushing the vdev(s), will 1659 * (unfortunately) not be handled correctly, 1660 * since these "zio_flush" errors will not be 1661 * propagated up to "zil_lwb_flush_vdevs_done". 1662 */ 1663 zio_flush(lwb->lwb_root_zio, vd); 1664 } 1665 kmem_free(zv, sizeof (*zv)); 1666 } 1667 } 1668 1669 /* 1670 * Build the zio dependency chain, which is used to preserve the ordering of 1671 * lwb completions that is required by the semantics of the ZIL. Each new lwb 1672 * zio becomes a parent of the previous lwb zio, such that the new lwb's zio 1673 * cannot complete until the previous lwb's zio completes. 1674 * 1675 * This is required by the semantics of zil_commit(): the commit waiters 1676 * attached to the lwbs will be woken in the lwb zio's completion callback, 1677 * so this zio dependency graph ensures the waiters are woken in the correct 1678 * order (the same order the lwbs were created). 1679 */ 1680 static void 1681 zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb) 1682 { 1683 ASSERT(MUTEX_HELD(&zilog->zl_lock)); 1684 1685 lwb_t *prev_lwb = list_prev(&zilog->zl_lwb_list, lwb); 1686 if (prev_lwb == NULL || 1687 prev_lwb->lwb_state == LWB_STATE_FLUSH_DONE) 1688 return; 1689 1690 /* 1691 * If the previous lwb's write hasn't already completed, we also want 1692 * to order the completion of the lwb write zios (above, we only order 1693 * the completion of the lwb root zios). This is required because of 1694 * how we can defer the flush commands for each lwb. 1695 * 1696 * When the flush commands are deferred, the previous lwb will rely on 1697 * this lwb to flush the vdevs written to by that previous lwb. Thus, 1698 * we need to ensure this lwb doesn't issue the flush until after the 1699 * previous lwb's write completes. We ensure this ordering by setting 1700 * the zio parent/child relationship here. 
1701 * 1702 * Without this relationship on the lwb's write zio, it's possible for 1703 * this lwb's write to complete prior to the previous lwb's write 1704 * completing; and thus, the vdevs for the previous lwb would be 1705 * flushed prior to that lwb's data being written to those vdevs (the 1706 * vdevs are flushed in the lwb write zio's completion handler, 1707 * zil_lwb_write_done()). 1708 */ 1709 if (prev_lwb->lwb_state == LWB_STATE_ISSUED) { 1710 ASSERT3P(prev_lwb->lwb_write_zio, !=, NULL); 1711 zio_add_child(lwb->lwb_write_zio, prev_lwb->lwb_write_zio); 1712 } else { 1713 ASSERT3S(prev_lwb->lwb_state, ==, LWB_STATE_WRITE_DONE); 1714 } 1715 1716 ASSERT3P(prev_lwb->lwb_root_zio, !=, NULL); 1717 zio_add_child(lwb->lwb_root_zio, prev_lwb->lwb_root_zio); 1718 } 1719 1720 1721 /* 1722 * This function's purpose is to "open" an lwb such that it is ready to 1723 * accept new itxs being committed to it. This function is idempotent; if 1724 * the passed in lwb has already been opened, it is essentially a no-op. 1725 */ 1726 static void 1727 zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb) 1728 { 1729 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1730 1731 if (lwb->lwb_state != LWB_STATE_NEW) { 1732 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 1733 return; 1734 } 1735 1736 mutex_enter(&zilog->zl_lock); 1737 lwb->lwb_state = LWB_STATE_OPENED; 1738 zilog->zl_last_lwb_opened = lwb; 1739 mutex_exit(&zilog->zl_lock); 1740 } 1741 1742 /* 1743 * Maximum block size used by the ZIL. This is picked up when the ZIL is 1744 * initialized. Otherwise this should not be used directly; see 1745 * zl_max_block_size instead. 1746 */ 1747 static uint_t zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE; 1748 1749 /* 1750 * Plan splitting of the provided burst size between several blocks. 1751 */ 1752 static uint_t 1753 zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize) 1754 { 1755 uint_t md = zilog->zl_max_block_size - sizeof (zil_chain_t); 1756 1757 if (size <= md) { 1758 /* 1759 * Small bursts are written as-is in one block. 1760 */ 1761 *minsize = size; 1762 return (size); 1763 } else if (size > 8 * md) { 1764 /* 1765 * Big bursts use maximum blocks. The first block size 1766 * is hard to predict, but it does not really matter. 1767 */ 1768 *minsize = 0; 1769 return (md); 1770 } 1771 1772 /* 1773 * Medium bursts try to divide evenly to better utilize several SLOG 1774 * VDEVs. The first block size we predict assuming the worst case of 1775 * maxing out others. Fall back to using maximum blocks if due to 1776 * large records or wasted space we can not predict anything better. 1777 */ 1778 uint_t s = size; 1779 uint_t n = DIV_ROUND_UP(s, md - sizeof (lr_write_t)); 1780 uint_t chunk = DIV_ROUND_UP(s, n); 1781 uint_t waste = zil_max_waste_space(zilog); 1782 waste = MAX(waste, zilog->zl_cur_max); 1783 if (chunk <= md - waste) { 1784 *minsize = MAX(s - (md - waste) * (n - 1), waste); 1785 return (chunk); 1786 } else { 1787 *minsize = 0; 1788 return (md); 1789 } 1790 } 1791 1792 /* 1793 * Try to predict next block size based on previous history. Make prediction 1794 * sufficient for 7 of 8 previous bursts. Don't try to save if the saving is 1795 * less then 50%, extra writes may cost more, but we don't want single spike 1796 * to badly affect our predictions. 1797 */ 1798 static uint_t 1799 zil_lwb_predict(zilog_t *zilog) 1800 { 1801 uint_t m, o; 1802 1803 /* If we are in the middle of a burst, take it into account also. 
*/ 1804 if (zilog->zl_cur_size > 0) { 1805 o = zil_lwb_plan(zilog, zilog->zl_cur_size, &m); 1806 } else { 1807 o = UINT_MAX; 1808 m = 0; 1809 } 1810 1811 /* Find minimum optimal size. We don't need to go below that. */ 1812 for (int i = 0; i < ZIL_BURSTS; i++) 1813 o = MIN(o, zilog->zl_prev_opt[i]); 1814 1815 /* Find two biggest minimal first block sizes above the optimal. */ 1816 uint_t m1 = MAX(m, o), m2 = o; 1817 for (int i = 0; i < ZIL_BURSTS; i++) { 1818 m = zilog->zl_prev_min[i]; 1819 if (m >= m1) { 1820 m2 = m1; 1821 m1 = m; 1822 } else if (m > m2) { 1823 m2 = m; 1824 } 1825 } 1826 1827 /* 1828 * If second minimum size gives 50% saving -- use it. It may cost us 1829 * one additional write later, but the space saving is just too big. 1830 */ 1831 return ((m1 < m2 * 2) ? m1 : m2); 1832 } 1833 1834 /* 1835 * Close the log block for being issued and allocate the next one. 1836 * Has to be called under zl_issuer_lock to chain more lwbs. 1837 */ 1838 static lwb_t * 1839 zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state) 1840 { 1841 uint64_t blksz, plan, plan2; 1842 1843 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 1844 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 1845 lwb->lwb_state = LWB_STATE_CLOSED; 1846 1847 /* 1848 * If there was an allocation failure then returned NULL will trigger 1849 * zil_commit_writer_stall() at the caller. This is inherently racy, 1850 * since allocation may not have happened yet. 1851 */ 1852 if (lwb->lwb_error != 0) 1853 return (NULL); 1854 1855 /* 1856 * Log blocks are pre-allocated. Here we select the size of the next 1857 * block, based on what's left of this burst and the previous history. 1858 * While we try to only write used part of the block, we can't just 1859 * always allocate the maximum block size because we can exhaust all 1860 * available pool log space, so we try to be reasonable. 1861 */ 1862 if (zilog->zl_cur_left > 0) { 1863 /* 1864 * We are in the middle of a burst and know how much is left. 1865 * But if workload is multi-threaded there may be more soon. 1866 * Try to predict what can it be and plan for the worst case. 1867 */ 1868 uint_t m; 1869 plan = zil_lwb_plan(zilog, zilog->zl_cur_left, &m); 1870 if (zilog->zl_parallel) { 1871 plan2 = zil_lwb_plan(zilog, zilog->zl_cur_left + 1872 zil_lwb_predict(zilog), &m); 1873 if (plan < plan2) 1874 plan = plan2; 1875 } 1876 } else { 1877 /* 1878 * The previous burst is done and we can only predict what 1879 * will come next. 1880 */ 1881 plan = zil_lwb_predict(zilog); 1882 } 1883 blksz = plan + sizeof (zil_chain_t); 1884 blksz = P2ROUNDUP_TYPED(blksz, ZIL_MIN_BLKSZ, uint64_t); 1885 blksz = MIN(blksz, zilog->zl_max_block_size); 1886 DTRACE_PROBE3(zil__block__size, zilog_t *, zilog, uint64_t, blksz, 1887 uint64_t, plan); 1888 1889 return (zil_alloc_lwb(zilog, blksz, NULL, 0, 0, state)); 1890 } 1891 1892 /* 1893 * Finalize previously closed block and issue the write zio. 1894 */ 1895 static void 1896 zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb) 1897 { 1898 spa_t *spa = zilog->zl_spa; 1899 zil_chain_t *zilc; 1900 boolean_t slog; 1901 zbookmark_phys_t zb; 1902 zio_priority_t prio; 1903 int error; 1904 1905 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED); 1906 1907 /* Actually fill the lwb with the data. 
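 *
 * For orientation, the lwb states touched by this function and its
 * neighbors form (roughly) the following progression; see the comment
 * above the lwb_state_t definition for the authoritative description:
 *
 *	NEW -> OPENED -> CLOSED -> READY -> ISSUED ->
 *	    WRITE_DONE -> FLUSH_DONE
 *
 * zil_lwb_write_open() performs NEW -> OPENED, zil_lwb_write_close()
 * performs OPENED -> CLOSED, and this function moves the lwb through
 * READY and, once its block pointer is known, ISSUED.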
*/ 1908 for (itx_t *itx = list_head(&lwb->lwb_itxs); itx; 1909 itx = list_next(&lwb->lwb_itxs, itx)) 1910 zil_lwb_commit(zilog, lwb, itx); 1911 lwb->lwb_nused = lwb->lwb_nfilled; 1912 ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_nmax); 1913 1914 lwb->lwb_root_zio = zio_root(spa, zil_lwb_flush_vdevs_done, lwb, 1915 ZIO_FLAG_CANFAIL); 1916 1917 /* 1918 * The lwb is now ready to be issued, but it can be only if it already 1919 * got its block pointer allocated or the allocation has failed. 1920 * Otherwise leave it as-is, relying on some other thread to issue it 1921 * after allocating its block pointer via calling zil_lwb_write_issue() 1922 * for the previous lwb(s) in the chain. 1923 */ 1924 mutex_enter(&zilog->zl_lock); 1925 lwb->lwb_state = LWB_STATE_READY; 1926 if (BP_IS_HOLE(&lwb->lwb_blk) && lwb->lwb_error == 0) { 1927 mutex_exit(&zilog->zl_lock); 1928 return; 1929 } 1930 mutex_exit(&zilog->zl_lock); 1931 1932 next_lwb: 1933 if (lwb->lwb_slim) 1934 zilc = (zil_chain_t *)lwb->lwb_buf; 1935 else 1936 zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_nmax); 1937 int wsz = lwb->lwb_sz; 1938 if (lwb->lwb_error == 0) { 1939 abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, lwb->lwb_sz); 1940 if (!lwb->lwb_slog || zilog->zl_cur_size <= zil_slog_bulk) 1941 prio = ZIO_PRIORITY_SYNC_WRITE; 1942 else 1943 prio = ZIO_PRIORITY_ASYNC_WRITE; 1944 SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET], 1945 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, 1946 lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]); 1947 lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, spa, 0, 1948 &lwb->lwb_blk, lwb_abd, lwb->lwb_sz, zil_lwb_write_done, 1949 lwb, prio, ZIO_FLAG_CANFAIL, &zb); 1950 zil_lwb_add_block(lwb, &lwb->lwb_blk); 1951 1952 if (lwb->lwb_slim) { 1953 /* For Slim ZIL only write what is used. */ 1954 wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, 1955 int); 1956 ASSERT3S(wsz, <=, lwb->lwb_sz); 1957 zio_shrink(lwb->lwb_write_zio, wsz); 1958 wsz = lwb->lwb_write_zio->io_size; 1959 } 1960 memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused); 1961 zilc->zc_pad = 0; 1962 zilc->zc_nused = lwb->lwb_nused; 1963 zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum; 1964 } else { 1965 /* 1966 * We can't write the lwb if there was an allocation failure, 1967 * so create a null zio instead just to maintain dependencies. 1968 */ 1969 lwb->lwb_write_zio = zio_null(lwb->lwb_root_zio, spa, NULL, 1970 zil_lwb_write_done, lwb, ZIO_FLAG_CANFAIL); 1971 lwb->lwb_write_zio->io_error = lwb->lwb_error; 1972 } 1973 if (lwb->lwb_child_zio) 1974 zio_add_child(lwb->lwb_write_zio, lwb->lwb_child_zio); 1975 1976 /* 1977 * Open transaction to allocate the next block pointer. 1978 */ 1979 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); 1980 VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_NOTHROTTLE)); 1981 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 1982 uint64_t txg = dmu_tx_get_txg(tx); 1983 1984 /* 1985 * Allocate next the block pointer unless we are already in error. 1986 */ 1987 lwb_t *nlwb = list_next(&zilog->zl_lwb_list, lwb); 1988 blkptr_t *bp = &zilc->zc_next_blk; 1989 BP_ZERO(bp); 1990 error = lwb->lwb_error; 1991 if (error == 0) { 1992 error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, nlwb->lwb_sz, 1993 &slog); 1994 } 1995 if (error == 0) { 1996 ASSERT3U(BP_GET_LOGICAL_BIRTH(bp), ==, txg); 1997 BP_SET_CHECKSUM(bp, nlwb->lwb_slim ? 
ZIO_CHECKSUM_ZILOG2 : 1998 ZIO_CHECKSUM_ZILOG); 1999 bp->blk_cksum = lwb->lwb_blk.blk_cksum; 2000 bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++; 2001 } 2002 2003 /* 2004 * Reduce TXG open time by incrementing inflight counter and committing 2005 * the transaciton. zil_sync() will wait for it to return to zero. 2006 */ 2007 mutex_enter(&zilog->zl_lwb_io_lock); 2008 lwb->lwb_issued_txg = txg; 2009 zilog->zl_lwb_inflight[txg & TXG_MASK]++; 2010 zilog->zl_lwb_max_issued_txg = MAX(txg, zilog->zl_lwb_max_issued_txg); 2011 mutex_exit(&zilog->zl_lwb_io_lock); 2012 dmu_tx_commit(tx); 2013 2014 spa_config_enter(spa, SCL_STATE, lwb, RW_READER); 2015 2016 /* 2017 * We've completed all potentially blocking operations. Update the 2018 * nlwb and allow it proceed without possible lock order reversals. 2019 */ 2020 mutex_enter(&zilog->zl_lock); 2021 zil_lwb_set_zio_dependency(zilog, lwb); 2022 lwb->lwb_state = LWB_STATE_ISSUED; 2023 2024 if (nlwb) { 2025 nlwb->lwb_blk = *bp; 2026 nlwb->lwb_error = error; 2027 nlwb->lwb_slog = slog; 2028 nlwb->lwb_alloc_txg = txg; 2029 if (nlwb->lwb_state != LWB_STATE_READY) 2030 nlwb = NULL; 2031 } 2032 mutex_exit(&zilog->zl_lock); 2033 2034 if (lwb->lwb_slog) { 2035 ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count); 2036 ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes, 2037 lwb->lwb_nused); 2038 ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_write, 2039 wsz); 2040 ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_alloc, 2041 BP_GET_LSIZE(&lwb->lwb_blk)); 2042 } else { 2043 ZIL_STAT_BUMP(zilog, zil_itx_metaslab_normal_count); 2044 ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_bytes, 2045 lwb->lwb_nused); 2046 ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_write, 2047 wsz); 2048 ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_alloc, 2049 BP_GET_LSIZE(&lwb->lwb_blk)); 2050 } 2051 lwb->lwb_issued_timestamp = gethrtime(); 2052 if (lwb->lwb_child_zio) 2053 zio_nowait(lwb->lwb_child_zio); 2054 zio_nowait(lwb->lwb_write_zio); 2055 zio_nowait(lwb->lwb_root_zio); 2056 2057 /* 2058 * If nlwb was ready when we gave it the block pointer, 2059 * it is on us to issue it and possibly following ones. 2060 */ 2061 lwb = nlwb; 2062 if (lwb) 2063 goto next_lwb; 2064 } 2065 2066 /* 2067 * Maximum amount of data that can be put into single log block. 2068 */ 2069 uint64_t 2070 zil_max_log_data(zilog_t *zilog, size_t hdrsize) 2071 { 2072 return (zilog->zl_max_block_size - sizeof (zil_chain_t) - hdrsize); 2073 } 2074 2075 /* 2076 * Maximum amount of log space we agree to waste to reduce number of 2077 * WR_NEED_COPY chunks to reduce zl_get_data() overhead (~6%). 2078 */ 2079 static inline uint64_t 2080 zil_max_waste_space(zilog_t *zilog) 2081 { 2082 return (zil_max_log_data(zilog, sizeof (lr_write_t)) / 16); 2083 } 2084 2085 /* 2086 * Maximum amount of write data for WR_COPIED. For correctness, consumers 2087 * must fall back to WR_NEED_COPY if we can't fit the entire record into one 2088 * maximum sized log block, because each WR_COPIED record must fit in a 2089 * single log block. Below that it is a tradeoff of additional memory copy 2090 * and possibly worse log space efficiency vs additional range lock/unlock. 
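 *
 * As a simplified, illustrative sketch of how a log-writing consumer
 * might use this limit (real callers also weigh log bias, slog
 * presence and related tunables; "resid" is just a placeholder for
 * the write's residual length):
 *
 *	itx_wr_state_t wr_state;
 *	if (resid > zil_max_copied_data(zilog))
 *		wr_state = WR_NEED_COPY;  /* copy into the lwb later */
 *	else
 *		wr_state = WR_COPIED;     /* copy into the itx now */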
2091 */ 2092 static uint_t zil_maxcopied = 7680; 2093 2094 uint64_t 2095 zil_max_copied_data(zilog_t *zilog) 2096 { 2097 uint64_t max_data = zil_max_log_data(zilog, sizeof (lr_write_t)); 2098 return (MIN(max_data, zil_maxcopied)); 2099 } 2100 2101 static uint64_t 2102 zil_itx_record_size(itx_t *itx) 2103 { 2104 lr_t *lr = &itx->itx_lr; 2105 2106 if (lr->lrc_txtype == TX_COMMIT) 2107 return (0); 2108 ASSERT3U(lr->lrc_reclen, >=, sizeof (lr_t)); 2109 return (lr->lrc_reclen); 2110 } 2111 2112 static uint64_t 2113 zil_itx_data_size(itx_t *itx) 2114 { 2115 lr_t *lr = &itx->itx_lr; 2116 lr_write_t *lrw = (lr_write_t *)lr; 2117 2118 if (lr->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) { 2119 ASSERT3U(lr->lrc_reclen, ==, sizeof (lr_write_t)); 2120 return (P2ROUNDUP_TYPED(lrw->lr_length, sizeof (uint64_t), 2121 uint64_t)); 2122 } 2123 return (0); 2124 } 2125 2126 static uint64_t 2127 zil_itx_full_size(itx_t *itx) 2128 { 2129 lr_t *lr = &itx->itx_lr; 2130 2131 if (lr->lrc_txtype == TX_COMMIT) 2132 return (0); 2133 ASSERT3U(lr->lrc_reclen, >=, sizeof (lr_t)); 2134 return (lr->lrc_reclen + zil_itx_data_size(itx)); 2135 } 2136 2137 /* 2138 * Estimate space needed in the lwb for the itx. Allocate more lwbs or 2139 * split the itx as needed, but don't touch the actual transaction data. 2140 * Has to be called under zl_issuer_lock to call zil_lwb_write_close() 2141 * to chain more lwbs. 2142 */ 2143 static lwb_t * 2144 zil_lwb_assign(zilog_t *zilog, lwb_t *lwb, itx_t *itx, list_t *ilwbs) 2145 { 2146 itx_t *citx; 2147 lr_t *lr, *clr; 2148 lr_write_t *lrw; 2149 uint64_t dlen, dnow, lwb_sp, reclen, max_log_data; 2150 2151 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2152 ASSERT3P(lwb, !=, NULL); 2153 ASSERT3P(lwb->lwb_buf, !=, NULL); 2154 2155 zil_lwb_write_open(zilog, lwb); 2156 2157 lr = &itx->itx_lr; 2158 lrw = (lr_write_t *)lr; 2159 2160 /* 2161 * A commit itx doesn't represent any on-disk state; instead 2162 * it's simply used as a place holder on the commit list, and 2163 * provides a mechanism for attaching a "commit waiter" onto the 2164 * correct lwb (such that the waiter can be signalled upon 2165 * completion of that lwb). Thus, we don't process this itx's 2166 * log record if it's a commit itx (these itx's don't have log 2167 * records), and instead link the itx's waiter onto the lwb's 2168 * list of waiters. 2169 * 2170 * For more details, see the comment above zil_commit(). 2171 */ 2172 if (lr->lrc_txtype == TX_COMMIT) { 2173 zil_commit_waiter_link_lwb(itx->itx_private, lwb); 2174 list_insert_tail(&lwb->lwb_itxs, itx); 2175 return (lwb); 2176 } 2177 2178 reclen = lr->lrc_reclen; 2179 ASSERT3U(reclen, >=, sizeof (lr_t)); 2180 ASSERT3U(reclen, <=, zil_max_log_data(zilog, 0)); 2181 dlen = zil_itx_data_size(itx); 2182 2183 cont: 2184 /* 2185 * If this record won't fit in the current log block, start a new one. 2186 * For WR_NEED_COPY optimize layout for minimal number of chunks. 2187 */ 2188 lwb_sp = lwb->lwb_nmax - lwb->lwb_nused; 2189 max_log_data = zil_max_log_data(zilog, sizeof (lr_write_t)); 2190 if (reclen > lwb_sp || (reclen + dlen > lwb_sp && 2191 lwb_sp < zil_max_waste_space(zilog) && 2192 (dlen % max_log_data == 0 || 2193 lwb_sp < reclen + dlen % max_log_data))) { 2194 list_insert_tail(ilwbs, lwb); 2195 lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_OPENED); 2196 if (lwb == NULL) 2197 return (NULL); 2198 lwb_sp = lwb->lwb_nmax - lwb->lwb_nused; 2199 } 2200 2201 /* 2202 * There must be enough space in the log block to hold reclen. 
2203 * For WR_COPIED, we need to fit the whole record in one block, 2204 * and reclen is the write record header size + the data size. 2205 * For WR_NEED_COPY, we can create multiple records, splitting 2206 * the data into multiple blocks, so we only need to fit one 2207 * word of data per block; in this case reclen is just the header 2208 * size (no data). 2209 */ 2210 ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp); 2211 2212 dnow = MIN(dlen, lwb_sp - reclen); 2213 if (dlen > dnow) { 2214 ASSERT3U(lr->lrc_txtype, ==, TX_WRITE); 2215 ASSERT3U(itx->itx_wr_state, ==, WR_NEED_COPY); 2216 citx = zil_itx_clone(itx); 2217 clr = &citx->itx_lr; 2218 lr_write_t *clrw = (lr_write_t *)clr; 2219 clrw->lr_length = dnow; 2220 lrw->lr_offset += dnow; 2221 lrw->lr_length -= dnow; 2222 zilog->zl_cur_left -= dnow; 2223 } else { 2224 citx = itx; 2225 clr = lr; 2226 } 2227 2228 /* 2229 * We're actually making an entry, so update lrc_seq to be the 2230 * log record sequence number. Note that this is generally not 2231 * equal to the itx sequence number because not all transactions 2232 * are synchronous, and sometimes spa_sync() gets there first. 2233 */ 2234 clr->lrc_seq = ++zilog->zl_lr_seq; 2235 2236 lwb->lwb_nused += reclen + dnow; 2237 ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_nmax); 2238 ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t))); 2239 2240 zil_lwb_add_txg(lwb, lr->lrc_txg); 2241 list_insert_tail(&lwb->lwb_itxs, citx); 2242 2243 dlen -= dnow; 2244 if (dlen > 0) 2245 goto cont; 2246 2247 if (lr->lrc_txtype == TX_WRITE && 2248 lr->lrc_txg > spa_freeze_txg(zilog->zl_spa)) 2249 txg_wait_synced(zilog->zl_dmu_pool, lr->lrc_txg); 2250 2251 return (lwb); 2252 } 2253 2254 /* 2255 * Fill the actual transaction data into the lwb, following zil_lwb_assign(). 2256 * Does not require locking. 2257 */ 2258 static void 2259 zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx) 2260 { 2261 lr_t *lr, *lrb; 2262 lr_write_t *lrw, *lrwb; 2263 char *lr_buf; 2264 uint64_t dlen, reclen; 2265 2266 lr = &itx->itx_lr; 2267 lrw = (lr_write_t *)lr; 2268 2269 if (lr->lrc_txtype == TX_COMMIT) 2270 return; 2271 2272 reclen = lr->lrc_reclen; 2273 dlen = zil_itx_data_size(itx); 2274 ASSERT3U(reclen + dlen, <=, lwb->lwb_nused - lwb->lwb_nfilled); 2275 2276 lr_buf = lwb->lwb_buf + lwb->lwb_nfilled; 2277 memcpy(lr_buf, lr, reclen); 2278 lrb = (lr_t *)lr_buf; /* Like lr, but inside lwb. */ 2279 lrwb = (lr_write_t *)lrb; /* Like lrw, but inside lwb. */ 2280 2281 ZIL_STAT_BUMP(zilog, zil_itx_count); 2282 2283 /* 2284 * If it's a write, fetch the data or get its blkptr as appropriate. 
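 *
 * In short, the three TX_WRITE cases handled below are:
 *
 *	WR_COPIED:	the data was already copied into the itx when it
 *			was created, so only statistics are updated here.
 *	WR_NEED_COPY:	the data is copied into this lwb now, via the
 *			zl_get_data callback, directly after the record
 *			header.
 *	WR_INDIRECT:	the data is written to its final location in the
 *			pool rather than into the log; zl_get_data
 *			arranges that write and provides the record's
 *			block pointer, so no data lands in the lwb.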
2285 */ 2286 if (lr->lrc_txtype == TX_WRITE) { 2287 if (itx->itx_wr_state == WR_COPIED) { 2288 ZIL_STAT_BUMP(zilog, zil_itx_copied_count); 2289 ZIL_STAT_INCR(zilog, zil_itx_copied_bytes, 2290 lrw->lr_length); 2291 } else { 2292 char *dbuf; 2293 int error; 2294 2295 if (itx->itx_wr_state == WR_NEED_COPY) { 2296 dbuf = lr_buf + reclen; 2297 lrb->lrc_reclen += dlen; 2298 ZIL_STAT_BUMP(zilog, zil_itx_needcopy_count); 2299 ZIL_STAT_INCR(zilog, zil_itx_needcopy_bytes, 2300 dlen); 2301 } else { 2302 ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT); 2303 dbuf = NULL; 2304 ZIL_STAT_BUMP(zilog, zil_itx_indirect_count); 2305 ZIL_STAT_INCR(zilog, zil_itx_indirect_bytes, 2306 lrw->lr_length); 2307 if (lwb->lwb_child_zio == NULL) { 2308 lwb->lwb_child_zio = zio_null(NULL, 2309 zilog->zl_spa, NULL, NULL, NULL, 2310 ZIO_FLAG_CANFAIL); 2311 } 2312 } 2313 2314 /* 2315 * The "lwb_child_zio" we pass in will become a child of 2316 * "lwb_write_zio", when one is created, so one will be 2317 * a parent of any zio's created by the "zl_get_data". 2318 * This way "lwb_write_zio" will first wait for children 2319 * block pointers before own writing, and then for their 2320 * writing completion before the vdev cache flushing. 2321 */ 2322 error = zilog->zl_get_data(itx->itx_private, 2323 itx->itx_gen, lrwb, dbuf, lwb, 2324 lwb->lwb_child_zio); 2325 if (dbuf != NULL && error == 0) { 2326 /* Zero any padding bytes in the last block. */ 2327 memset((char *)dbuf + lrwb->lr_length, 0, 2328 dlen - lrwb->lr_length); 2329 } 2330 2331 /* 2332 * Typically, the only return values we should see from 2333 * ->zl_get_data() are 0, EIO, ENOENT, EEXIST or 2334 * EALREADY. However, it is also possible to see other 2335 * error values such as ENOSPC or EINVAL from 2336 * dmu_read() -> dnode_hold() -> dnode_hold_impl() or 2337 * ENXIO as well as a multitude of others from the 2338 * block layer through dmu_buf_hold() -> dbuf_read() 2339 * -> zio_wait(), as well as through dmu_read() -> 2340 * dnode_hold() -> dnode_hold_impl() -> dbuf_read() -> 2341 * zio_wait(). When these errors happen, we can assume 2342 * that neither an immediate write nor an indirect 2343 * write occurred, so we need to fall back to 2344 * txg_wait_synced(). This is unusual, so we print to 2345 * dmesg whenever one of these errors occurs. 2346 */ 2347 switch (error) { 2348 case 0: 2349 break; 2350 default: 2351 cmn_err(CE_WARN, "zil_lwb_commit() received " 2352 "unexpected error %d from ->zl_get_data()" 2353 ". 
Falling back to txg_wait_synced().", 2354 error); 2355 zfs_fallthrough; 2356 case EIO: 2357 txg_wait_synced(zilog->zl_dmu_pool, 2358 lr->lrc_txg); 2359 zfs_fallthrough; 2360 case ENOENT: 2361 zfs_fallthrough; 2362 case EEXIST: 2363 zfs_fallthrough; 2364 case EALREADY: 2365 return; 2366 } 2367 } 2368 } 2369 2370 lwb->lwb_nfilled += reclen + dlen; 2371 ASSERT3S(lwb->lwb_nfilled, <=, lwb->lwb_nused); 2372 ASSERT0(P2PHASE(lwb->lwb_nfilled, sizeof (uint64_t))); 2373 } 2374 2375 itx_t * 2376 zil_itx_create(uint64_t txtype, size_t olrsize) 2377 { 2378 size_t itxsize, lrsize; 2379 itx_t *itx; 2380 2381 ASSERT3U(olrsize, >=, sizeof (lr_t)); 2382 lrsize = P2ROUNDUP_TYPED(olrsize, sizeof (uint64_t), size_t); 2383 ASSERT3U(lrsize, >=, olrsize); 2384 itxsize = offsetof(itx_t, itx_lr) + lrsize; 2385 2386 itx = zio_data_buf_alloc(itxsize); 2387 itx->itx_lr.lrc_txtype = txtype; 2388 itx->itx_lr.lrc_reclen = lrsize; 2389 itx->itx_lr.lrc_seq = 0; /* defensive */ 2390 memset((char *)&itx->itx_lr + olrsize, 0, lrsize - olrsize); 2391 itx->itx_sync = B_TRUE; /* default is synchronous */ 2392 itx->itx_callback = NULL; 2393 itx->itx_callback_data = NULL; 2394 itx->itx_size = itxsize; 2395 2396 return (itx); 2397 } 2398 2399 static itx_t * 2400 zil_itx_clone(itx_t *oitx) 2401 { 2402 ASSERT3U(oitx->itx_size, >=, sizeof (itx_t)); 2403 ASSERT3U(oitx->itx_size, ==, 2404 offsetof(itx_t, itx_lr) + oitx->itx_lr.lrc_reclen); 2405 2406 itx_t *itx = zio_data_buf_alloc(oitx->itx_size); 2407 memcpy(itx, oitx, oitx->itx_size); 2408 itx->itx_callback = NULL; 2409 itx->itx_callback_data = NULL; 2410 return (itx); 2411 } 2412 2413 void 2414 zil_itx_destroy(itx_t *itx) 2415 { 2416 ASSERT3U(itx->itx_size, >=, sizeof (itx_t)); 2417 ASSERT3U(itx->itx_lr.lrc_reclen, ==, 2418 itx->itx_size - offsetof(itx_t, itx_lr)); 2419 IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL); 2420 IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); 2421 2422 if (itx->itx_callback != NULL) 2423 itx->itx_callback(itx->itx_callback_data); 2424 2425 zio_data_buf_free(itx, itx->itx_size); 2426 } 2427 2428 /* 2429 * Free up the sync and async itxs. The itxs_t has already been detached 2430 * so no locks are needed. 2431 */ 2432 static void 2433 zil_itxg_clean(void *arg) 2434 { 2435 itx_t *itx; 2436 list_t *list; 2437 avl_tree_t *t; 2438 void *cookie; 2439 itxs_t *itxs = arg; 2440 itx_async_node_t *ian; 2441 2442 list = &itxs->i_sync_list; 2443 while ((itx = list_remove_head(list)) != NULL) { 2444 /* 2445 * In the general case, commit itxs will not be found 2446 * here, as they'll be committed to an lwb via 2447 * zil_lwb_assign(), and free'd in that function. Having 2448 * said that, it is still possible for commit itxs to be 2449 * found here, due to the following race: 2450 * 2451 * - a thread calls zil_commit() which assigns the 2452 * commit itx to a per-txg i_sync_list 2453 * - zil_itxg_clean() is called (e.g. via spa_sync()) 2454 * while the waiter is still on the i_sync_list 2455 * 2456 * There's nothing to prevent syncing the txg while the 2457 * waiter is on the i_sync_list. This normally doesn't 2458 * happen because spa_sync() is slower than zil_commit(), 2459 * but if zil_commit() calls txg_wait_synced() (e.g. 2460 * because zil_create() or zil_commit_writer_stall() is 2461 * called) we will hit this case. 
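 *
 * An illustrative interleaving of that race (thread labels are
 * arbitrary):
 *
 *	T1: zil_commit() assigns a TX_COMMIT itx to txg N's
 *	    i_sync_list, then blocks in txg_wait_synced()
 *	T2: spa_sync() finishes txg N and zil_clean() dispatches
 *	    zil_itxg_clean() for that itxg
 *	T2: the TX_COMMIT itx is found here, so its waiter is
 *	    skipped (marked "done") rather than linked to an lwb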
2462 */ 2463 if (itx->itx_lr.lrc_txtype == TX_COMMIT) 2464 zil_commit_waiter_skip(itx->itx_private); 2465 2466 zil_itx_destroy(itx); 2467 } 2468 2469 cookie = NULL; 2470 t = &itxs->i_async_tree; 2471 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 2472 list = &ian->ia_list; 2473 while ((itx = list_remove_head(list)) != NULL) { 2474 /* commit itxs should never be on the async lists. */ 2475 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 2476 zil_itx_destroy(itx); 2477 } 2478 list_destroy(list); 2479 kmem_free(ian, sizeof (itx_async_node_t)); 2480 } 2481 avl_destroy(t); 2482 2483 kmem_free(itxs, sizeof (itxs_t)); 2484 } 2485 2486 static int 2487 zil_aitx_compare(const void *x1, const void *x2) 2488 { 2489 const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid; 2490 const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid; 2491 2492 return (TREE_CMP(o1, o2)); 2493 } 2494 2495 /* 2496 * Remove all async itx with the given oid. 2497 */ 2498 void 2499 zil_remove_async(zilog_t *zilog, uint64_t oid) 2500 { 2501 uint64_t otxg, txg; 2502 itx_async_node_t *ian, ian_search; 2503 avl_tree_t *t; 2504 avl_index_t where; 2505 list_t clean_list; 2506 itx_t *itx; 2507 2508 ASSERT(oid != 0); 2509 list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); 2510 2511 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 2512 otxg = ZILTEST_TXG; 2513 else 2514 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 2515 2516 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 2517 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2518 2519 mutex_enter(&itxg->itxg_lock); 2520 if (itxg->itxg_txg != txg) { 2521 mutex_exit(&itxg->itxg_lock); 2522 continue; 2523 } 2524 2525 /* 2526 * Locate the object node and append its list. 2527 */ 2528 t = &itxg->itxg_itxs->i_async_tree; 2529 ian_search.ia_foid = oid; 2530 ian = avl_find(t, &ian_search, &where); 2531 if (ian != NULL) 2532 list_move_tail(&clean_list, &ian->ia_list); 2533 mutex_exit(&itxg->itxg_lock); 2534 } 2535 while ((itx = list_remove_head(&clean_list)) != NULL) { 2536 /* commit itxs should never be on the async lists. */ 2537 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 2538 zil_itx_destroy(itx); 2539 } 2540 list_destroy(&clean_list); 2541 } 2542 2543 void 2544 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) 2545 { 2546 uint64_t txg; 2547 itxg_t *itxg; 2548 itxs_t *itxs, *clean = NULL; 2549 2550 /* 2551 * Ensure the data of a renamed file is committed before the rename. 2552 */ 2553 if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME) 2554 zil_async_to_sync(zilog, itx->itx_oid); 2555 2556 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) 2557 txg = ZILTEST_TXG; 2558 else 2559 txg = dmu_tx_get_txg(tx); 2560 2561 itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2562 mutex_enter(&itxg->itxg_lock); 2563 itxs = itxg->itxg_itxs; 2564 if (itxg->itxg_txg != txg) { 2565 if (itxs != NULL) { 2566 /* 2567 * The zil_clean callback hasn't got around to cleaning 2568 * this itxg. Save the itxs for release below. 2569 * This should be rare. 
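 *
 * For example (sizes assumed from the txg machinery): the zl_itxg
 * ring has TXG_SIZE slots indexed by "txg & TXG_MASK", so txg N and
 * txg N + TXG_SIZE share a slot.  If the clean callback for txg N is
 * still pending when an itx is assigned in txg N + TXG_SIZE, the
 * stale itxs are detected here and released once the lock is dropped.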
2570 */ 2571 zfs_dbgmsg("zil_itx_assign: missed itx cleanup for " 2572 "txg %llu", (u_longlong_t)itxg->itxg_txg); 2573 clean = itxg->itxg_itxs; 2574 } 2575 itxg->itxg_txg = txg; 2576 itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), 2577 KM_SLEEP); 2578 2579 list_create(&itxs->i_sync_list, sizeof (itx_t), 2580 offsetof(itx_t, itx_node)); 2581 avl_create(&itxs->i_async_tree, zil_aitx_compare, 2582 sizeof (itx_async_node_t), 2583 offsetof(itx_async_node_t, ia_node)); 2584 } 2585 if (itx->itx_sync) { 2586 list_insert_tail(&itxs->i_sync_list, itx); 2587 } else { 2588 avl_tree_t *t = &itxs->i_async_tree; 2589 uint64_t foid = 2590 LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid); 2591 itx_async_node_t *ian; 2592 avl_index_t where; 2593 2594 ian = avl_find(t, &foid, &where); 2595 if (ian == NULL) { 2596 ian = kmem_alloc(sizeof (itx_async_node_t), 2597 KM_SLEEP); 2598 list_create(&ian->ia_list, sizeof (itx_t), 2599 offsetof(itx_t, itx_node)); 2600 ian->ia_foid = foid; 2601 avl_insert(t, ian, where); 2602 } 2603 list_insert_tail(&ian->ia_list, itx); 2604 } 2605 2606 itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx); 2607 2608 /* 2609 * We don't want to dirty the ZIL using ZILTEST_TXG, because 2610 * zil_clean() will never be called using ZILTEST_TXG. Thus, we 2611 * need to be careful to always dirty the ZIL using the "real" 2612 * TXG (not itxg_txg) even when the SPA is frozen. 2613 */ 2614 zilog_dirty(zilog, dmu_tx_get_txg(tx)); 2615 mutex_exit(&itxg->itxg_lock); 2616 2617 /* Release the old itxs now we've dropped the lock */ 2618 if (clean != NULL) 2619 zil_itxg_clean(clean); 2620 } 2621 2622 /* 2623 * If there are any in-memory intent log transactions which have now been 2624 * synced then start up a taskq to free them. We should only do this after we 2625 * have written out the uberblocks (i.e. txg has been committed) so that 2626 * don't inadvertently clean out in-memory log records that would be required 2627 * by zil_commit(). 2628 */ 2629 void 2630 zil_clean(zilog_t *zilog, uint64_t synced_txg) 2631 { 2632 itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK]; 2633 itxs_t *clean_me; 2634 2635 ASSERT3U(synced_txg, <, ZILTEST_TXG); 2636 2637 mutex_enter(&itxg->itxg_lock); 2638 if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) { 2639 mutex_exit(&itxg->itxg_lock); 2640 return; 2641 } 2642 ASSERT3U(itxg->itxg_txg, <=, synced_txg); 2643 ASSERT3U(itxg->itxg_txg, !=, 0); 2644 clean_me = itxg->itxg_itxs; 2645 itxg->itxg_itxs = NULL; 2646 itxg->itxg_txg = 0; 2647 mutex_exit(&itxg->itxg_lock); 2648 /* 2649 * Preferably start a task queue to free up the old itxs but 2650 * if taskq_dispatch can't allocate resources to do that then 2651 * free it in-line. This should be rare. Note, using TQ_SLEEP 2652 * created a bad performance problem. 2653 */ 2654 ASSERT3P(zilog->zl_dmu_pool, !=, NULL); 2655 ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL); 2656 taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq, 2657 zil_itxg_clean, clean_me, TQ_NOSLEEP); 2658 if (id == TASKQID_INVALID) 2659 zil_itxg_clean(clean_me); 2660 } 2661 2662 /* 2663 * This function will traverse the queue of itxs that need to be 2664 * committed, and move them onto the ZIL's zl_itx_commit_list. 
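 *
 * While doing so, it also updates the running "burst" accounting used
 * by the block size planning code above: zl_cur_size and zl_cur_left
 * grow by each itx's full size (record plus any WR_NEED_COPY data),
 * and zl_cur_max tracks the largest single record seen, so that
 * zil_lwb_plan() and zil_lwb_predict() can size upcoming lwbs.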
2665 */ 2666 static uint64_t 2667 zil_get_commit_list(zilog_t *zilog) 2668 { 2669 uint64_t otxg, txg, wtxg = 0; 2670 list_t *commit_list = &zilog->zl_itx_commit_list; 2671 2672 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2673 2674 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 2675 otxg = ZILTEST_TXG; 2676 else 2677 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 2678 2679 /* 2680 * This is inherently racy, since there is nothing to prevent 2681 * the last synced txg from changing. That's okay since we'll 2682 * only commit things in the future. 2683 */ 2684 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 2685 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2686 2687 mutex_enter(&itxg->itxg_lock); 2688 if (itxg->itxg_txg != txg) { 2689 mutex_exit(&itxg->itxg_lock); 2690 continue; 2691 } 2692 2693 /* 2694 * If we're adding itx records to the zl_itx_commit_list, 2695 * then the zil better be dirty in this "txg". We can assert 2696 * that here since we're holding the itxg_lock which will 2697 * prevent spa_sync from cleaning it. Once we add the itxs 2698 * to the zl_itx_commit_list we must commit it to disk even 2699 * if it's unnecessary (i.e. the txg was synced). 2700 */ 2701 ASSERT(zilog_is_dirty_in_txg(zilog, txg) || 2702 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX); 2703 list_t *sync_list = &itxg->itxg_itxs->i_sync_list; 2704 itx_t *itx = NULL; 2705 if (unlikely(zilog->zl_suspend > 0)) { 2706 /* 2707 * ZIL was just suspended, but we lost the race. 2708 * Allow all earlier itxs to be committed, but ask 2709 * caller to do txg_wait_synced(txg) for any new. 2710 */ 2711 if (!list_is_empty(sync_list)) 2712 wtxg = MAX(wtxg, txg); 2713 } else { 2714 itx = list_head(sync_list); 2715 list_move_tail(commit_list, sync_list); 2716 } 2717 2718 mutex_exit(&itxg->itxg_lock); 2719 2720 while (itx != NULL) { 2721 uint64_t s = zil_itx_full_size(itx); 2722 zilog->zl_cur_size += s; 2723 zilog->zl_cur_left += s; 2724 s = zil_itx_record_size(itx); 2725 zilog->zl_cur_max = MAX(zilog->zl_cur_max, s); 2726 itx = list_next(commit_list, itx); 2727 } 2728 } 2729 return (wtxg); 2730 } 2731 2732 /* 2733 * Move the async itxs for a specified object to commit into sync lists. 2734 */ 2735 void 2736 zil_async_to_sync(zilog_t *zilog, uint64_t foid) 2737 { 2738 uint64_t otxg, txg; 2739 itx_async_node_t *ian, ian_search; 2740 avl_tree_t *t; 2741 avl_index_t where; 2742 2743 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 2744 otxg = ZILTEST_TXG; 2745 else 2746 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 2747 2748 /* 2749 * This is inherently racy, since there is nothing to prevent 2750 * the last synced txg from changing. 2751 */ 2752 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 2753 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2754 2755 mutex_enter(&itxg->itxg_lock); 2756 if (itxg->itxg_txg != txg) { 2757 mutex_exit(&itxg->itxg_lock); 2758 continue; 2759 } 2760 2761 /* 2762 * If a foid is specified then find that node and append its 2763 * list. Otherwise walk the tree appending all the lists 2764 * to the sync list. We add to the end rather than the 2765 * beginning to ensure the create has happened. 
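 *
 * As a usage sketch (the actual call sites live outside this file):
 * an fsync of a single file typically reaches zil_commit(zilog, foid)
 * with that file's object number, which in turn calls this function
 * so the file's queued "async" writes are committed together with the
 * "sync" itxs, while zil_commit(zilog, 0) converts the async itxs of
 * every object.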
2766 */ 2767 t = &itxg->itxg_itxs->i_async_tree; 2768 if (foid != 0) { 2769 ian_search.ia_foid = foid; 2770 ian = avl_find(t, &ian_search, &where); 2771 if (ian != NULL) { 2772 list_move_tail(&itxg->itxg_itxs->i_sync_list, 2773 &ian->ia_list); 2774 } 2775 } else { 2776 void *cookie = NULL; 2777 2778 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 2779 list_move_tail(&itxg->itxg_itxs->i_sync_list, 2780 &ian->ia_list); 2781 list_destroy(&ian->ia_list); 2782 kmem_free(ian, sizeof (itx_async_node_t)); 2783 } 2784 } 2785 mutex_exit(&itxg->itxg_lock); 2786 } 2787 } 2788 2789 /* 2790 * This function will prune commit itxs that are at the head of the 2791 * commit list (it won't prune past the first non-commit itx), and 2792 * either: a) attach them to the last lwb that's still pending 2793 * completion, or b) skip them altogether. 2794 * 2795 * This is used as a performance optimization to prevent commit itxs 2796 * from generating new lwbs when it's unnecessary to do so. 2797 */ 2798 static void 2799 zil_prune_commit_list(zilog_t *zilog) 2800 { 2801 itx_t *itx; 2802 2803 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2804 2805 while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) { 2806 lr_t *lrc = &itx->itx_lr; 2807 if (lrc->lrc_txtype != TX_COMMIT) 2808 break; 2809 2810 mutex_enter(&zilog->zl_lock); 2811 2812 lwb_t *last_lwb = zilog->zl_last_lwb_opened; 2813 if (last_lwb == NULL || 2814 last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) { 2815 /* 2816 * All of the itxs this waiter was waiting on 2817 * must have already completed (or there were 2818 * never any itx's for it to wait on), so it's 2819 * safe to skip this waiter and mark it done. 2820 */ 2821 zil_commit_waiter_skip(itx->itx_private); 2822 } else { 2823 zil_commit_waiter_link_lwb(itx->itx_private, last_lwb); 2824 } 2825 2826 mutex_exit(&zilog->zl_lock); 2827 2828 list_remove(&zilog->zl_itx_commit_list, itx); 2829 zil_itx_destroy(itx); 2830 } 2831 2832 IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); 2833 } 2834 2835 static void 2836 zil_commit_writer_stall(zilog_t *zilog) 2837 { 2838 /* 2839 * When zio_alloc_zil() fails to allocate the next lwb block on 2840 * disk, we must call txg_wait_synced() to ensure all of the 2841 * lwbs in the zilog's zl_lwb_list are synced and then freed (in 2842 * zil_sync()), such that any subsequent ZIL writer (i.e. a call 2843 * to zil_process_commit_list()) will have to call zil_create(), 2844 * and start a new ZIL chain. 2845 * 2846 * Since zil_alloc_zil() failed, the lwb that was previously 2847 * issued does not have a pointer to the "next" lwb on disk. 2848 * Thus, if another ZIL writer thread was to allocate the "next" 2849 * on-disk lwb, that block could be leaked in the event of a 2850 * crash (because the previous lwb on-disk would not point to 2851 * it). 2852 * 2853 * We must hold the zilog's zl_issuer_lock while we do this, to 2854 * ensure no new threads enter zil_process_commit_list() until 2855 * all lwb's in the zl_lwb_list have been synced and freed 2856 * (which is achieved via the txg_wait_synced() call). 
2857 */ 2858 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2859 ZIL_STAT_BUMP(zilog, zil_commit_stall_count); 2860 txg_wait_synced(zilog->zl_dmu_pool, 0); 2861 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 2862 } 2863 2864 static void 2865 zil_burst_done(zilog_t *zilog) 2866 { 2867 if (!list_is_empty(&zilog->zl_itx_commit_list) || 2868 zilog->zl_cur_size == 0) 2869 return; 2870 2871 if (zilog->zl_parallel) 2872 zilog->zl_parallel--; 2873 2874 uint_t r = (zilog->zl_prev_rotor + 1) & (ZIL_BURSTS - 1); 2875 zilog->zl_prev_rotor = r; 2876 zilog->zl_prev_opt[r] = zil_lwb_plan(zilog, zilog->zl_cur_size, 2877 &zilog->zl_prev_min[r]); 2878 2879 zilog->zl_cur_size = 0; 2880 zilog->zl_cur_max = 0; 2881 zilog->zl_cur_left = 0; 2882 } 2883 2884 /* 2885 * This function will traverse the commit list, creating new lwbs as 2886 * needed, and committing the itxs from the commit list to these newly 2887 * created lwbs. Additionally, as a new lwb is created, the previous 2888 * lwb will be issued to the zio layer to be written to disk. 2889 */ 2890 static void 2891 zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs) 2892 { 2893 spa_t *spa = zilog->zl_spa; 2894 list_t nolwb_itxs; 2895 list_t nolwb_waiters; 2896 lwb_t *lwb, *plwb; 2897 itx_t *itx; 2898 2899 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2900 2901 /* 2902 * Return if there's nothing to commit before we dirty the fs by 2903 * calling zil_create(). 2904 */ 2905 if (list_is_empty(&zilog->zl_itx_commit_list)) 2906 return; 2907 2908 list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node)); 2909 list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t), 2910 offsetof(zil_commit_waiter_t, zcw_node)); 2911 2912 lwb = list_tail(&zilog->zl_lwb_list); 2913 if (lwb == NULL) { 2914 lwb = zil_create(zilog); 2915 } else { 2916 /* 2917 * Activate SPA_FEATURE_ZILSAXATTR for the cases where ZIL will 2918 * have already been created (zl_lwb_list not empty). 2919 */ 2920 zil_commit_activate_saxattr_feature(zilog); 2921 ASSERT(lwb->lwb_state == LWB_STATE_NEW || 2922 lwb->lwb_state == LWB_STATE_OPENED); 2923 2924 /* 2925 * If the lwb is still opened, it means the workload is really 2926 * multi-threaded and we won the chance of write aggregation. 2927 * If it is not opened yet, but previous lwb is still not 2928 * flushed, it still means the workload is multi-threaded, but 2929 * there was too much time between the commits to aggregate, so 2930 * we try aggregation next times, but without too much hopes. 2931 */ 2932 if (lwb->lwb_state == LWB_STATE_OPENED) { 2933 zilog->zl_parallel = ZIL_BURSTS; 2934 } else if ((plwb = list_prev(&zilog->zl_lwb_list, lwb)) 2935 != NULL && plwb->lwb_state != LWB_STATE_FLUSH_DONE) { 2936 zilog->zl_parallel = MAX(zilog->zl_parallel, 2937 ZIL_BURSTS / 2); 2938 } 2939 } 2940 2941 while ((itx = list_remove_head(&zilog->zl_itx_commit_list)) != NULL) { 2942 lr_t *lrc = &itx->itx_lr; 2943 uint64_t txg = lrc->lrc_txg; 2944 2945 ASSERT3U(txg, !=, 0); 2946 2947 if (lrc->lrc_txtype == TX_COMMIT) { 2948 DTRACE_PROBE2(zil__process__commit__itx, 2949 zilog_t *, zilog, itx_t *, itx); 2950 } else { 2951 DTRACE_PROBE2(zil__process__normal__itx, 2952 zilog_t *, zilog, itx_t *, itx); 2953 } 2954 2955 boolean_t synced = txg <= spa_last_synced_txg(spa); 2956 boolean_t frozen = txg > spa_freeze_txg(spa); 2957 2958 /* 2959 * If the txg of this itx has already been synced out, then 2960 * we don't need to commit this itx to an lwb. 
This is 2961 * because the data of this itx will have already been 2962 * written to the main pool. This is inherently racy, and 2963 * it's still ok to commit an itx whose txg has already 2964 * been synced; this will result in a write that's 2965 * unnecessary, but will do no harm. 2966 * 2967 * With that said, we always want to commit TX_COMMIT itxs 2968 * to an lwb, regardless of whether or not that itx's txg 2969 * has been synced out. We do this to ensure any OPENED lwb 2970 * will always have at least one zil_commit_waiter_t linked 2971 * to the lwb. 2972 * 2973 * As a counter-example, if we skipped TX_COMMIT itx's 2974 * whose txg had already been synced, the following 2975 * situation could occur if we happened to be racing with 2976 * spa_sync: 2977 * 2978 * 1. We commit a non-TX_COMMIT itx to an lwb, where the 2979 * itx's txg is 10 and the last synced txg is 9. 2980 * 2. spa_sync finishes syncing out txg 10. 2981 * 3. We move to the next itx in the list, it's a TX_COMMIT 2982 * whose txg is 10, so we skip it rather than committing 2983 * it to the lwb used in (1). 2984 * 2985 * If the itx that is skipped in (3) is the last TX_COMMIT 2986 * itx in the commit list, than it's possible for the lwb 2987 * used in (1) to remain in the OPENED state indefinitely. 2988 * 2989 * To prevent the above scenario from occurring, ensuring 2990 * that once an lwb is OPENED it will transition to ISSUED 2991 * and eventually DONE, we always commit TX_COMMIT itx's to 2992 * an lwb here, even if that itx's txg has already been 2993 * synced. 2994 * 2995 * Finally, if the pool is frozen, we _always_ commit the 2996 * itx. The point of freezing the pool is to prevent data 2997 * from being written to the main pool via spa_sync, and 2998 * instead rely solely on the ZIL to persistently store the 2999 * data; i.e. when the pool is frozen, the last synced txg 3000 * value can't be trusted. 3001 */ 3002 if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) { 3003 if (lwb != NULL) { 3004 lwb = zil_lwb_assign(zilog, lwb, itx, ilwbs); 3005 if (lwb == NULL) { 3006 list_insert_tail(&nolwb_itxs, itx); 3007 } else if ((zcw->zcw_lwb != NULL && 3008 zcw->zcw_lwb != lwb) || zcw->zcw_done) { 3009 /* 3010 * Our lwb is done, leave the rest of 3011 * itx list to somebody else who care. 3012 */ 3013 zilog->zl_parallel = ZIL_BURSTS; 3014 zilog->zl_cur_left -= 3015 zil_itx_full_size(itx); 3016 break; 3017 } 3018 } else { 3019 if (lrc->lrc_txtype == TX_COMMIT) { 3020 zil_commit_waiter_link_nolwb( 3021 itx->itx_private, &nolwb_waiters); 3022 } 3023 list_insert_tail(&nolwb_itxs, itx); 3024 } 3025 zilog->zl_cur_left -= zil_itx_full_size(itx); 3026 } else { 3027 ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT); 3028 zilog->zl_cur_left -= zil_itx_full_size(itx); 3029 zil_itx_destroy(itx); 3030 } 3031 } 3032 3033 if (lwb == NULL) { 3034 /* 3035 * This indicates zio_alloc_zil() failed to allocate the 3036 * "next" lwb on-disk. When this happens, we must stall 3037 * the ZIL write pipeline; see the comment within 3038 * zil_commit_writer_stall() for more details. 3039 */ 3040 while ((lwb = list_remove_head(ilwbs)) != NULL) 3041 zil_lwb_write_issue(zilog, lwb); 3042 zil_commit_writer_stall(zilog); 3043 3044 /* 3045 * Additionally, we have to signal and mark the "nolwb" 3046 * waiters as "done" here, since without an lwb, we 3047 * can't do this via zil_lwb_flush_vdevs_done() like 3048 * normal. 
3049 */
3050 zil_commit_waiter_t *zcw;
3051 while ((zcw = list_remove_head(&nolwb_waiters)) != NULL)
3052 zil_commit_waiter_skip(zcw);
3053
3054 /*
3055 * And finally, we have to destroy the itx's that
3056 * couldn't be committed to an lwb; this will also call
3057 * the itx's callback if one exists for the itx.
3058 */
3059 while ((itx = list_remove_head(&nolwb_itxs)) != NULL)
3060 zil_itx_destroy(itx);
3061 } else {
3062 ASSERT(list_is_empty(&nolwb_waiters));
3063 ASSERT3P(lwb, !=, NULL);
3064 ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
3065 lwb->lwb_state == LWB_STATE_OPENED);
3066
3067 /*
3068 * At this point, the ZIL block pointed at by the "lwb"
3069 * variable is in "new" or "opened" state.
3070 *
3071 * If it's "new", then no itxs have been committed to it, so
3072 * there's no point in issuing its zio (i.e. it's "empty").
3073 *
3074 * If it's "opened", then it contains one or more itxs that
3075 * eventually need to be committed to stable storage. In
3076 * this case we intentionally do not issue the lwb's zio
3077 * to disk yet, and instead rely on one of the following
3078 * two mechanisms for issuing the zio:
3079 *
3080 * 1. Ideally, there will be more ZIL activity occurring on
3081 * the system, such that this function will be immediately
3082 * called again by a different thread and this lwb will be
3083 * closed by zil_lwb_assign(). This way, the lwb will be
3084 * "full" when it is issued to disk, and we'll make use of
3085 * the lwb's size the best we can.
3086 *
3087 * 2. If there isn't sufficient ZIL activity occurring on
3088 * the system, zil_commit_waiter() will close it and issue
3089 * the zio. If this occurs, the lwb is not guaranteed
3090 * to be "full" by the time its zio is issued, which means
3091 * the size of the lwb was "too large" given the amount
3092 * of ZIL activity occurring on the system at that time.
3093 *
3094 * We do this for a couple of reasons:
3095 *
3096 * 1. To try and reduce the number of IOPs needed to
3097 * write the same number of itxs. If an lwb has space
3098 * available in its buffer for more itxs, and more itxs
3099 * will be committed relatively soon (relative to the
3100 * latency of performing a write), then it's beneficial
3101 * to wait for these "next" itxs. This way, more itxs
3102 * can be committed to stable storage with fewer writes.
3103 *
3104 * 2. To try and use the largest lwb block size that the
3105 * incoming rate of itxs can support. Again, this is to
3106 * try and pack as many itxs into as few lwbs as
3107 * possible, without significantly impacting the latency
3108 * of each individual itx.
3109 */
3110 if (lwb->lwb_state == LWB_STATE_OPENED && !zilog->zl_parallel) {
3111 zil_burst_done(zilog);
3112 list_insert_tail(ilwbs, lwb);
3113 lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
3114 if (lwb == NULL) {
3115 while ((lwb = list_remove_head(ilwbs)) != NULL)
3116 zil_lwb_write_issue(zilog, lwb);
3117 zil_commit_writer_stall(zilog);
3118 }
3119 }
3120 }
3121 }
3122
3123 /*
3124 * This function is responsible for ensuring the passed in commit waiter
3125 * (and associated commit itx) is committed to an lwb. If the waiter is
3126 * not already committed to an lwb, all itxs in the zilog's queue of
3127 * itxs will be processed. The assumption is the passed in waiter's
3128 * commit itx will be found in the queue just like the other non-commit
3129 * itxs, such that when the entire queue is processed, the waiter will
3130 * have been committed to an lwb.
3131 * 3132 * The lwb associated with the passed in waiter is not guaranteed to 3133 * have been issued by the time this function completes. If the lwb is 3134 * not issued, we rely on future calls to zil_commit_writer() to issue 3135 * the lwb, or the timeout mechanism found in zil_commit_waiter(). 3136 */ 3137 static uint64_t 3138 zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw) 3139 { 3140 list_t ilwbs; 3141 lwb_t *lwb; 3142 uint64_t wtxg = 0; 3143 3144 ASSERT(!MUTEX_HELD(&zilog->zl_lock)); 3145 ASSERT(spa_writeable(zilog->zl_spa)); 3146 3147 list_create(&ilwbs, sizeof (lwb_t), offsetof(lwb_t, lwb_issue_node)); 3148 mutex_enter(&zilog->zl_issuer_lock); 3149 3150 if (zcw->zcw_lwb != NULL || zcw->zcw_done) { 3151 /* 3152 * It's possible that, while we were waiting to acquire 3153 * the "zl_issuer_lock", another thread committed this 3154 * waiter to an lwb. If that occurs, we bail out early, 3155 * without processing any of the zilog's queue of itxs. 3156 * 3157 * On certain workloads and system configurations, the 3158 * "zl_issuer_lock" can become highly contended. In an 3159 * attempt to reduce this contention, we immediately drop 3160 * the lock if the waiter has already been processed. 3161 * 3162 * We've measured this optimization to reduce CPU spent 3163 * contending on this lock by up to 5%, using a system 3164 * with 32 CPUs, low latency storage (~50 usec writes), 3165 * and 1024 threads performing sync writes. 3166 */ 3167 goto out; 3168 } 3169 3170 ZIL_STAT_BUMP(zilog, zil_commit_writer_count); 3171 3172 wtxg = zil_get_commit_list(zilog); 3173 zil_prune_commit_list(zilog); 3174 zil_process_commit_list(zilog, zcw, &ilwbs); 3175 3176 out: 3177 mutex_exit(&zilog->zl_issuer_lock); 3178 while ((lwb = list_remove_head(&ilwbs)) != NULL) 3179 zil_lwb_write_issue(zilog, lwb); 3180 list_destroy(&ilwbs); 3181 return (wtxg); 3182 } 3183 3184 static void 3185 zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw) 3186 { 3187 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); 3188 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 3189 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 3190 3191 lwb_t *lwb = zcw->zcw_lwb; 3192 ASSERT3P(lwb, !=, NULL); 3193 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_NEW); 3194 3195 /* 3196 * If the lwb has already been issued by another thread, we can 3197 * immediately return since there's no work to be done (the 3198 * point of this function is to issue the lwb). Additionally, we 3199 * do this prior to acquiring the zl_issuer_lock, to avoid 3200 * acquiring it when it's not necessary to do so. 3201 */ 3202 if (lwb->lwb_state != LWB_STATE_OPENED) 3203 return; 3204 3205 /* 3206 * In order to call zil_lwb_write_close() we must hold the 3207 * zilog's "zl_issuer_lock". We can't simply acquire that lock, 3208 * since we're already holding the commit waiter's "zcw_lock", 3209 * and those two locks are acquired in the opposite order 3210 * elsewhere. 3211 */ 3212 mutex_exit(&zcw->zcw_lock); 3213 mutex_enter(&zilog->zl_issuer_lock); 3214 mutex_enter(&zcw->zcw_lock); 3215 3216 /* 3217 * Since we just dropped and re-acquired the commit waiter's 3218 * lock, we have to re-check to see if the waiter was marked 3219 * "done" during that process. If the waiter was marked "done", 3220 * the "lwb" pointer is no longer valid (it can be free'd after 3221 * the waiter is marked "done"), so without this check we could 3222 * wind up with a use-after-free error below. 
3223 */ 3224 if (zcw->zcw_done) { 3225 mutex_exit(&zilog->zl_issuer_lock); 3226 return; 3227 } 3228 3229 ASSERT3P(lwb, ==, zcw->zcw_lwb); 3230 3231 /* 3232 * We've already checked this above, but since we hadn't acquired 3233 * the zilog's zl_issuer_lock, we have to perform this check a 3234 * second time while holding the lock. 3235 * 3236 * We don't need to hold the zl_lock since the lwb cannot transition 3237 * from OPENED to CLOSED while we hold the zl_issuer_lock. The lwb 3238 * _can_ transition from CLOSED to DONE, but it's OK to race with 3239 * that transition since we treat the lwb the same, whether it's in 3240 * the CLOSED, ISSUED or DONE states. 3241 * 3242 * The important thing, is we treat the lwb differently depending on 3243 * if it's OPENED or CLOSED, and block any other threads that might 3244 * attempt to close/issue this lwb. For that reason we hold the 3245 * zl_issuer_lock when checking the lwb_state; we must not call 3246 * zil_lwb_write_close() if the lwb had already been closed/issued. 3247 * 3248 * See the comment above the lwb_state_t structure definition for 3249 * more details on the lwb states, and locking requirements. 3250 */ 3251 if (lwb->lwb_state != LWB_STATE_OPENED) { 3252 mutex_exit(&zilog->zl_issuer_lock); 3253 return; 3254 } 3255 3256 /* 3257 * We do not need zcw_lock once we hold zl_issuer_lock and know lwb 3258 * is still open. But we have to drop it to avoid a deadlock in case 3259 * callback of zio issued by zil_lwb_write_issue() try to get it, 3260 * while zil_lwb_write_issue() is blocked on attempt to issue next 3261 * lwb it found in LWB_STATE_READY state. 3262 */ 3263 mutex_exit(&zcw->zcw_lock); 3264 3265 /* 3266 * As described in the comments above zil_commit_waiter() and 3267 * zil_process_commit_list(), we need to issue this lwb's zio 3268 * since we've reached the commit waiter's timeout and it still 3269 * hasn't been issued. 3270 */ 3271 zil_burst_done(zilog); 3272 lwb_t *nlwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW); 3273 3274 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED); 3275 3276 if (nlwb == NULL) { 3277 /* 3278 * When zil_lwb_write_close() returns NULL, this 3279 * indicates zio_alloc_zil() failed to allocate the 3280 * "next" lwb on-disk. When this occurs, the ZIL write 3281 * pipeline must be stalled; see the comment within the 3282 * zil_commit_writer_stall() function for more details. 3283 */ 3284 zil_lwb_write_issue(zilog, lwb); 3285 zil_commit_writer_stall(zilog); 3286 mutex_exit(&zilog->zl_issuer_lock); 3287 } else { 3288 mutex_exit(&zilog->zl_issuer_lock); 3289 zil_lwb_write_issue(zilog, lwb); 3290 } 3291 mutex_enter(&zcw->zcw_lock); 3292 } 3293 3294 /* 3295 * This function is responsible for performing the following two tasks: 3296 * 3297 * 1. its primary responsibility is to block until the given "commit 3298 * waiter" is considered "done". 3299 * 3300 * 2. its secondary responsibility is to issue the zio for the lwb that 3301 * the given "commit waiter" is waiting on, if this function has 3302 * waited "long enough" and the lwb is still in the "open" state. 3303 * 3304 * Given a sufficient amount of itxs being generated and written using 3305 * the ZIL, the lwb's zio will be issued via the zil_lwb_assign() 3306 * function. If this does not occur, this secondary responsibility will 3307 * ensure the lwb is issued even if there is not other synchronous 3308 * activity on the system. 3309 * 3310 * For more details, see zil_process_commit_list(); more specifically, 3311 * the comment at the bottom of that function. 
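 *
 * As a rough worked example (values purely illustrative): with
 * zfs_commit_timeout_pct at 10 and a previously observed lwb latency
 * (zl_last_lwb_latency) of 1 ms, the waiter sleeps for at most about
 * 100 usec before taking on responsibility (2) and issuing the still
 * open lwb itself.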
3312 */ 3313 static void 3314 zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw) 3315 { 3316 ASSERT(!MUTEX_HELD(&zilog->zl_lock)); 3317 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); 3318 ASSERT(spa_writeable(zilog->zl_spa)); 3319 3320 mutex_enter(&zcw->zcw_lock); 3321 3322 /* 3323 * The timeout is scaled based on the lwb latency to avoid 3324 * significantly impacting the latency of each individual itx. 3325 * For more details, see the comment at the bottom of the 3326 * zil_process_commit_list() function. 3327 */ 3328 int pct = MAX(zfs_commit_timeout_pct, 1); 3329 hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100; 3330 hrtime_t wakeup = gethrtime() + sleep; 3331 boolean_t timedout = B_FALSE; 3332 3333 while (!zcw->zcw_done) { 3334 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 3335 3336 lwb_t *lwb = zcw->zcw_lwb; 3337 3338 /* 3339 * Usually, the waiter will have a non-NULL lwb field here, 3340 * but it's possible for it to be NULL as a result of 3341 * zil_commit() racing with spa_sync(). 3342 * 3343 * When zil_clean() is called, it's possible for the itxg 3344 * list (which may be cleaned via a taskq) to contain 3345 * commit itxs. When this occurs, the commit waiters linked 3346 * off of these commit itxs will not be committed to an 3347 * lwb. Additionally, these commit waiters will not be 3348 * marked done until zil_commit_waiter_skip() is called via 3349 * zil_itxg_clean(). 3350 * 3351 * Thus, it's possible for this commit waiter (i.e. the 3352 * "zcw" variable) to be found in this "in between" state; 3353 * where it's "zcw_lwb" field is NULL, and it hasn't yet 3354 * been skipped, so it's "zcw_done" field is still B_FALSE. 3355 */ 3356 IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_NEW); 3357 3358 if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) { 3359 ASSERT3B(timedout, ==, B_FALSE); 3360 3361 /* 3362 * If the lwb hasn't been issued yet, then we 3363 * need to wait with a timeout, in case this 3364 * function needs to issue the lwb after the 3365 * timeout is reached; responsibility (2) from 3366 * the comment above this function. 3367 */ 3368 int rc = cv_timedwait_hires(&zcw->zcw_cv, 3369 &zcw->zcw_lock, wakeup, USEC2NSEC(1), 3370 CALLOUT_FLAG_ABSOLUTE); 3371 3372 if (rc != -1 || zcw->zcw_done) 3373 continue; 3374 3375 timedout = B_TRUE; 3376 zil_commit_waiter_timeout(zilog, zcw); 3377 3378 if (!zcw->zcw_done) { 3379 /* 3380 * If the commit waiter has already been 3381 * marked "done", it's possible for the 3382 * waiter's lwb structure to have already 3383 * been freed. Thus, we can only reliably 3384 * make these assertions if the waiter 3385 * isn't done. 3386 */ 3387 ASSERT3P(lwb, ==, zcw->zcw_lwb); 3388 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED); 3389 } 3390 } else { 3391 /* 3392 * If the lwb isn't open, then it must have already 3393 * been issued. In that case, there's no need to 3394 * use a timeout when waiting for the lwb to 3395 * complete. 3396 * 3397 * Additionally, if the lwb is NULL, the waiter 3398 * will soon be signaled and marked done via 3399 * zil_clean() and zil_itxg_clean(), so no timeout 3400 * is required. 
3401 */ 3402 3403 IMPLY(lwb != NULL, 3404 lwb->lwb_state == LWB_STATE_CLOSED || 3405 lwb->lwb_state == LWB_STATE_READY || 3406 lwb->lwb_state == LWB_STATE_ISSUED || 3407 lwb->lwb_state == LWB_STATE_WRITE_DONE || 3408 lwb->lwb_state == LWB_STATE_FLUSH_DONE); 3409 cv_wait(&zcw->zcw_cv, &zcw->zcw_lock); 3410 } 3411 } 3412 3413 mutex_exit(&zcw->zcw_lock); 3414 } 3415 3416 static zil_commit_waiter_t * 3417 zil_alloc_commit_waiter(void) 3418 { 3419 zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP); 3420 3421 cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL); 3422 mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL); 3423 list_link_init(&zcw->zcw_node); 3424 zcw->zcw_lwb = NULL; 3425 zcw->zcw_done = B_FALSE; 3426 zcw->zcw_zio_error = 0; 3427 3428 return (zcw); 3429 } 3430 3431 static void 3432 zil_free_commit_waiter(zil_commit_waiter_t *zcw) 3433 { 3434 ASSERT(!list_link_active(&zcw->zcw_node)); 3435 ASSERT3P(zcw->zcw_lwb, ==, NULL); 3436 ASSERT3B(zcw->zcw_done, ==, B_TRUE); 3437 mutex_destroy(&zcw->zcw_lock); 3438 cv_destroy(&zcw->zcw_cv); 3439 kmem_cache_free(zil_zcw_cache, zcw); 3440 } 3441 3442 /* 3443 * This function is used to create a TX_COMMIT itx and assign it. This 3444 * way, it will be linked into the ZIL's list of synchronous itxs, and 3445 * then later committed to an lwb (or skipped) when 3446 * zil_process_commit_list() is called. 3447 */ 3448 static void 3449 zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw) 3450 { 3451 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); 3452 3453 /* 3454 * Since we are not going to create any new dirty data, and we 3455 * can even help with clearing the existing dirty data, we 3456 * should not be subject to the dirty data based delays. We 3457 * use DMU_TX_NOTHROTTLE to bypass the delay mechanism. 3458 */ 3459 VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_NOTHROTTLE)); 3460 3461 itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t)); 3462 itx->itx_sync = B_TRUE; 3463 itx->itx_private = zcw; 3464 3465 zil_itx_assign(zilog, itx, tx); 3466 3467 dmu_tx_commit(tx); 3468 } 3469 3470 /* 3471 * Commit ZFS Intent Log transactions (itxs) to stable storage. 3472 * 3473 * When writing ZIL transactions to the on-disk representation of the 3474 * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple 3475 * itxs can be committed to a single lwb. Once a lwb is written and 3476 * committed to stable storage (i.e. the lwb is written, and vdevs have 3477 * been flushed), each itx that was committed to that lwb is also 3478 * considered to be committed to stable storage. 3479 * 3480 * When an itx is committed to an lwb, the log record (lr_t) contained 3481 * by the itx is copied into the lwb's zio buffer, and once this buffer 3482 * is written to disk, it becomes an on-disk ZIL block. 3483 * 3484 * As itxs are generated, they're inserted into the ZIL's queue of 3485 * uncommitted itxs. The semantics of zil_commit() are such that it will 3486 * block until all itxs that were in the queue when it was called, are 3487 * committed to stable storage. 3488 * 3489 * If "foid" is zero, this means all "synchronous" and "asynchronous" 3490 * itxs, for all objects in the dataset, will be committed to stable 3491 * storage prior to zil_commit() returning. If "foid" is non-zero, all 3492 * "synchronous" itxs for all objects, but only "asynchronous" itxs 3493 * that correspond to the foid passed in, will be committed to stable 3494 * storage prior to zil_commit() returning. 
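 *
 * As an illustrative sketch of typical callers (the exact call sites
 * live outside this file, and "zp->z_id" is simply the object number
 * of an open file): an fsync-style operation commits a single object,
 * while sync-everything paths pass zero:
 *
 *	zil_commit(zilog, zp->z_id);	/* one object, e.g. fsync */
 *	zil_commit(zilog, 0);		/* the whole dataset */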
 *
 * Generally speaking, when zil_commit() is called, the consumer doesn't
 * actually care about _all_ of the uncommitted itxs. Instead, they're
 * simply trying to wait for a specific itx to be committed to disk,
 * but the interface(s) for interacting with the ZIL don't allow such
 * fine-grained communication. A better interface would allow a consumer
 * to create and assign an itx, and then pass a reference to this itx to
 * zil_commit(), such that zil_commit() would return as soon as that
 * specific itx was committed to disk (instead of waiting for _all_
 * itxs to be committed).
 *
 * When a thread calls zil_commit() a special "commit itx" will be
 * generated, along with a corresponding "waiter" for this commit itx.
 * zil_commit() will wait on this waiter's CV, such that when the waiter
 * is marked done, and signaled, zil_commit() will return.
 *
 * This commit itx is inserted into the queue of uncommitted itxs. This
 * provides an easy mechanism for determining which itxs were in the
 * queue prior to zil_commit() having been called, and which itxs were
 * added after zil_commit() was called.
 *
 * The commit itx is special; it doesn't have any on-disk representation.
 * When a commit itx is "committed" to an lwb, the waiter associated
 * with it is linked onto the lwb's list of waiters. Then, when that lwb
 * completes, each waiter on the lwb's list is marked done and signaled
 * -- allowing the thread waiting on the waiter to return from zil_commit().
 *
 * It's important to point out a few critical factors that allow us
 * to make use of the commit itxs, commit waiters, per-lwb lists of
 * commit waiters, and zio completion callbacks like we're doing:
 *
 *   1. The list of waiters for each lwb is traversed, and each commit
 *      waiter is marked "done" and signaled, in the zio completion
 *      callback of the lwb's zio[*].
 *
 *      * Actually, the waiters are signaled in the zio completion
 *        callback of the root zio for the flush commands that are sent
 *        to the vdevs upon completion of the lwb zio.
 *
 *   2. When the itxs are inserted into the ZIL's queue of uncommitted
 *      itxs, the order in which they are inserted is preserved[*]; as
 *      itxs are added to the queue, they are added to the tail of
 *      in-memory linked lists.
 *
 *      When committing the itxs to lwbs (to be written to disk), they
 *      are committed in the same order in which the itxs were added to
 *      the uncommitted queue's linked list(s); i.e. the linked list of
 *      itxs to commit is traversed from head to tail, and each itx is
 *      committed to an lwb in that order.
 *
 *      * To clarify:
 *
 *        - the order of "sync" itxs is preserved w.r.t. other
 *          "sync" itxs, regardless of the corresponding objects.
 *        - the order of "async" itxs is preserved w.r.t. other
 *          "async" itxs corresponding to the same object.
 *        - the order of "async" itxs is *not* preserved w.r.t. other
 *          "async" itxs corresponding to different objects.
 *        - the order of "sync" itxs w.r.t. "async" itxs (or vice
 *          versa) is *not* preserved, even for itxs that correspond
 *          to the same object.
 *
 *      For more details, see: zil_itx_assign(), zil_async_to_sync(),
 *      zil_get_commit_list(), and zil_process_commit_list().
 *
 *   3. The lwbs represent a linked list of blocks on disk. Thus, no
 *      lwb can be considered committed to stable storage until its
 *      "previous" lwb is also committed to stable storage. This fact,
 *      coupled with the fact described above, means that itxs are
 *      committed in (roughly) the order in which they were generated.
 *      This is essential because itxs are dependent on prior itxs.
 *      Thus, we *must not* deem an itx as being committed to stable
 *      storage, until *all* prior itxs have also been committed to
 *      stable storage.
 *
 *      To enforce this ordering of lwb zio's, while still leveraging as
 *      much of the underlying storage performance as possible, we rely
 *      on two fundamental concepts:
 *
 *          1. The creation and issuance of lwb zio's is protected by
 *             the zilog's "zl_issuer_lock", which ensures only a single
 *             thread is creating and/or issuing lwb's at a time
 *          2. The "previous" lwb is a child of the "current" lwb
 *             (leveraging the zio parent-child dependency graph)
 *
 *      By relying on this parent-child zio relationship, we can have
 *      many lwb zio's concurrently issued to the underlying storage,
 *      but the order in which they complete will be the same order in
 *      which they were created.
 */
void
zil_commit(zilog_t *zilog, uint64_t foid)
{
	/*
	 * We should never attempt to call zil_commit on a snapshot for
	 * a couple of reasons:
	 *
	 * 1. A snapshot may never be modified, thus it cannot have any
	 *    in-flight itxs that would have modified the dataset.
	 *
	 * 2. By design, when zil_commit() is called, a commit itx will
	 *    be assigned to this zilog; as a result, the zilog will be
	 *    dirtied. We must not dirty the zilog of a snapshot; there
	 *    are checks in the code that enforce this invariant, and
	 *    they will cause a panic if it's not upheld.
	 */
	ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);

	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
		return;

	if (!spa_writeable(zilog->zl_spa)) {
		/*
		 * If the SPA is not writable, there should never be any
		 * pending itxs waiting to be committed to disk. If that
		 * weren't true, we'd skip writing those itxs out, and
		 * would break the semantics of zil_commit(); thus, we're
		 * verifying that truth before we return to the caller.
		 */
		ASSERT(list_is_empty(&zilog->zl_lwb_list));
		ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
		for (int i = 0; i < TXG_SIZE; i++)
			ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
		return;
	}

	/*
	 * If the ZIL is suspended, we don't want to dirty it by calling
	 * zil_commit_itx_assign() below, nor can we write out
	 * lwbs as would be done in zil_commit_write(). Thus, we
	 * simply rely on txg_wait_synced() to maintain the necessary
	 * semantics, and avoid calling those functions altogether.
	 */
	if (zilog->zl_suspend > 0) {
		ZIL_STAT_BUMP(zilog, zil_commit_suspend_count);
		txg_wait_synced(zilog->zl_dmu_pool, 0);
		return;
	}

	zil_commit_impl(zilog, foid);
}

void
zil_commit_impl(zilog_t *zilog, uint64_t foid)
{
	ZIL_STAT_BUMP(zilog, zil_commit_count);

	/*
	 * Move the "async" itxs for the specified foid to the "sync"
	 * queues, such that they will be later committed (or skipped)
	 * to an lwb when zil_process_commit_list() is called.
	 *
	 * Since these "async" itxs must be committed prior to this
	 * call to zil_commit returning, we must perform this operation
	 * before we call zil_commit_itx_assign().
	 */
	zil_async_to_sync(zilog, foid);

	/*
	 * We allocate a new "waiter" structure which will initially be
	 * linked to the commit itx using the itx's "itx_private" field.
	 * Since the commit itx doesn't represent any on-disk state,
	 * when it's committed to an lwb, rather than copying its
	 * lr_t into the lwb's buffer, the commit itx's "waiter" will be
	 * added to the lwb's list of waiters. Then, when the lwb is
	 * committed to stable storage, each waiter in the lwb's list of
	 * waiters will be marked "done", and signaled.
	 *
	 * We must create the waiter and assign the commit itx prior to
	 * calling zil_commit_writer(), or else our specific commit itx
	 * is not guaranteed to be committed to an lwb prior to calling
	 * zil_commit_waiter().
	 */
	zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
	zil_commit_itx_assign(zilog, zcw);

	uint64_t wtxg = zil_commit_writer(zilog, zcw);
	zil_commit_waiter(zilog, zcw);

	if (zcw->zcw_zio_error != 0) {
		/*
		 * If there was an error writing out the ZIL blocks that
		 * this thread is waiting on, then we fall back to
		 * relying on spa_sync() to write out the data this
		 * thread is waiting on. Obviously this has performance
		 * implications, but the expectation is for this to be
		 * an exceptional case, and shouldn't occur often.
		 */
		ZIL_STAT_BUMP(zilog, zil_commit_error_count);
		DTRACE_PROBE2(zil__commit__io__error,
		    zilog_t *, zilog, zil_commit_waiter_t *, zcw);
		txg_wait_synced(zilog->zl_dmu_pool, 0);
	} else if (wtxg != 0) {
		ZIL_STAT_BUMP(zilog, zil_commit_suspend_count);
		txg_wait_synced(zilog->zl_dmu_pool, wtxg);
	}

	zil_free_commit_waiter(zcw);
}

/*
 * Called in syncing context to free committed log blocks and update the
 * log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
	lwb_t *lwb;

	/*
	 * We don't zero out zl_destroy_txg, so make sure we don't try
	 * to destroy it twice.
	 */
	if (spa_sync_pass(spa) != 1)
		return;

	zil_lwb_flush_wait_all(zilog, txg);

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	if (*replayed_seq != 0) {
		ASSERT(zh->zh_replay_seq < *replayed_seq);
		zh->zh_replay_seq = *replayed_seq;
		*replayed_seq = 0;
	}

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

		ASSERT(list_is_empty(&zilog->zl_lwb_list));

		memset(zh, 0, sizeof (zil_header_t));
		memset(zilog->zl_replayed_seq, 0,
		    sizeof (zilog->zl_replayed_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of a log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
3744 */ 3745 zil_init_log_chain(zilog, &blk); 3746 zh->zh_log = blk; 3747 } else { 3748 /* 3749 * A destroyed ZIL chain can't contain any TX_SETSAXATTR 3750 * records. So, deactivate the feature for this dataset. 3751 * We activate it again when we start a new ZIL chain. 3752 */ 3753 if (dsl_dataset_feature_is_active(ds, 3754 SPA_FEATURE_ZILSAXATTR)) 3755 dsl_dataset_deactivate_feature(ds, 3756 SPA_FEATURE_ZILSAXATTR, tx); 3757 } 3758 } 3759 3760 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 3761 zh->zh_log = lwb->lwb_blk; 3762 if (lwb->lwb_state != LWB_STATE_FLUSH_DONE || 3763 lwb->lwb_alloc_txg > txg || lwb->lwb_max_txg > txg) 3764 break; 3765 list_remove(&zilog->zl_lwb_list, lwb); 3766 if (!BP_IS_HOLE(&lwb->lwb_blk)) 3767 zio_free(spa, txg, &lwb->lwb_blk); 3768 zil_free_lwb(zilog, lwb); 3769 3770 /* 3771 * If we don't have anything left in the lwb list then 3772 * we've had an allocation failure and we need to zero 3773 * out the zil_header blkptr so that we don't end 3774 * up freeing the same block twice. 3775 */ 3776 if (list_is_empty(&zilog->zl_lwb_list)) 3777 BP_ZERO(&zh->zh_log); 3778 } 3779 3780 mutex_exit(&zilog->zl_lock); 3781 } 3782 3783 static int 3784 zil_lwb_cons(void *vbuf, void *unused, int kmflag) 3785 { 3786 (void) unused, (void) kmflag; 3787 lwb_t *lwb = vbuf; 3788 list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node)); 3789 list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t), 3790 offsetof(zil_commit_waiter_t, zcw_node)); 3791 avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare, 3792 sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); 3793 mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 3794 return (0); 3795 } 3796 3797 static void 3798 zil_lwb_dest(void *vbuf, void *unused) 3799 { 3800 (void) unused; 3801 lwb_t *lwb = vbuf; 3802 mutex_destroy(&lwb->lwb_vdev_lock); 3803 avl_destroy(&lwb->lwb_vdev_tree); 3804 list_destroy(&lwb->lwb_waiters); 3805 list_destroy(&lwb->lwb_itxs); 3806 } 3807 3808 void 3809 zil_init(void) 3810 { 3811 zil_lwb_cache = kmem_cache_create("zil_lwb_cache", 3812 sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0); 3813 3814 zil_zcw_cache = kmem_cache_create("zil_zcw_cache", 3815 sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 3816 3817 zil_sums_init(&zil_sums_global); 3818 zil_kstats_global = kstat_create("zfs", 0, "zil", "misc", 3819 KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t), 3820 KSTAT_FLAG_VIRTUAL); 3821 3822 if (zil_kstats_global != NULL) { 3823 zil_kstats_global->ks_data = &zil_stats; 3824 zil_kstats_global->ks_update = zil_kstats_global_update; 3825 zil_kstats_global->ks_private = NULL; 3826 kstat_install(zil_kstats_global); 3827 } 3828 } 3829 3830 void 3831 zil_fini(void) 3832 { 3833 kmem_cache_destroy(zil_zcw_cache); 3834 kmem_cache_destroy(zil_lwb_cache); 3835 3836 if (zil_kstats_global != NULL) { 3837 kstat_delete(zil_kstats_global); 3838 zil_kstats_global = NULL; 3839 } 3840 3841 zil_sums_fini(&zil_sums_global); 3842 } 3843 3844 void 3845 zil_set_sync(zilog_t *zilog, uint64_t sync) 3846 { 3847 zilog->zl_sync = sync; 3848 } 3849 3850 void 3851 zil_set_logbias(zilog_t *zilog, uint64_t logbias) 3852 { 3853 zilog->zl_logbias = logbias; 3854 } 3855 3856 zilog_t * 3857 zil_alloc(objset_t *os, zil_header_t *zh_phys) 3858 { 3859 zilog_t *zilog; 3860 3861 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); 3862 3863 zilog->zl_header = zh_phys; 3864 zilog->zl_os = os; 3865 zilog->zl_spa = dmu_objset_spa(os); 3866 zilog->zl_dmu_pool = 
dmu_objset_pool(os); 3867 zilog->zl_destroy_txg = TXG_INITIAL - 1; 3868 zilog->zl_logbias = dmu_objset_logbias(os); 3869 zilog->zl_sync = dmu_objset_syncprop(os); 3870 zilog->zl_dirty_max_txg = 0; 3871 zilog->zl_last_lwb_opened = NULL; 3872 zilog->zl_last_lwb_latency = 0; 3873 zilog->zl_max_block_size = MIN(MAX(P2ALIGN_TYPED(zil_maxblocksize, 3874 ZIL_MIN_BLKSZ, uint64_t), ZIL_MIN_BLKSZ), 3875 spa_maxblocksize(dmu_objset_spa(os))); 3876 3877 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); 3878 mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL); 3879 mutex_init(&zilog->zl_lwb_io_lock, NULL, MUTEX_DEFAULT, NULL); 3880 3881 for (int i = 0; i < TXG_SIZE; i++) { 3882 mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, 3883 MUTEX_DEFAULT, NULL); 3884 } 3885 3886 list_create(&zilog->zl_lwb_list, sizeof (lwb_t), 3887 offsetof(lwb_t, lwb_node)); 3888 3889 list_create(&zilog->zl_itx_commit_list, sizeof (itx_t), 3890 offsetof(itx_t, itx_node)); 3891 3892 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); 3893 cv_init(&zilog->zl_lwb_io_cv, NULL, CV_DEFAULT, NULL); 3894 3895 for (int i = 0; i < ZIL_BURSTS; i++) { 3896 zilog->zl_prev_opt[i] = zilog->zl_max_block_size - 3897 sizeof (zil_chain_t); 3898 } 3899 3900 return (zilog); 3901 } 3902 3903 void 3904 zil_free(zilog_t *zilog) 3905 { 3906 int i; 3907 3908 zilog->zl_stop_sync = 1; 3909 3910 ASSERT0(zilog->zl_suspend); 3911 ASSERT0(zilog->zl_suspending); 3912 3913 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 3914 list_destroy(&zilog->zl_lwb_list); 3915 3916 ASSERT(list_is_empty(&zilog->zl_itx_commit_list)); 3917 list_destroy(&zilog->zl_itx_commit_list); 3918 3919 for (i = 0; i < TXG_SIZE; i++) { 3920 /* 3921 * It's possible for an itx to be generated that doesn't dirty 3922 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean() 3923 * callback to remove the entry. We remove those here. 3924 * 3925 * Also free up the ziltest itxs. 3926 */ 3927 if (zilog->zl_itxg[i].itxg_itxs) 3928 zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs); 3929 mutex_destroy(&zilog->zl_itxg[i].itxg_lock); 3930 } 3931 3932 mutex_destroy(&zilog->zl_issuer_lock); 3933 mutex_destroy(&zilog->zl_lock); 3934 mutex_destroy(&zilog->zl_lwb_io_lock); 3935 3936 cv_destroy(&zilog->zl_cv_suspend); 3937 cv_destroy(&zilog->zl_lwb_io_cv); 3938 3939 kmem_free(zilog, sizeof (zilog_t)); 3940 } 3941 3942 /* 3943 * Open an intent log. 3944 */ 3945 zilog_t * 3946 zil_open(objset_t *os, zil_get_data_t *get_data, zil_sums_t *zil_sums) 3947 { 3948 zilog_t *zilog = dmu_objset_zil(os); 3949 3950 ASSERT3P(zilog->zl_get_data, ==, NULL); 3951 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); 3952 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 3953 3954 zilog->zl_get_data = get_data; 3955 zilog->zl_sums = zil_sums; 3956 3957 return (zilog); 3958 } 3959 3960 /* 3961 * Close an intent log. 3962 */ 3963 void 3964 zil_close(zilog_t *zilog) 3965 { 3966 lwb_t *lwb; 3967 uint64_t txg; 3968 3969 if (!dmu_objset_is_snapshot(zilog->zl_os)) { 3970 zil_commit(zilog, 0); 3971 } else { 3972 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 3973 ASSERT0(zilog->zl_dirty_max_txg); 3974 ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE); 3975 } 3976 3977 mutex_enter(&zilog->zl_lock); 3978 txg = zilog->zl_dirty_max_txg; 3979 lwb = list_tail(&zilog->zl_lwb_list); 3980 if (lwb != NULL) { 3981 txg = MAX(txg, lwb->lwb_alloc_txg); 3982 txg = MAX(txg, lwb->lwb_max_txg); 3983 } 3984 mutex_exit(&zilog->zl_lock); 3985 3986 /* 3987 * zl_lwb_max_issued_txg may be larger than lwb_max_txg. 
It depends 3988 * on the time when the dmu_tx transaction is assigned in 3989 * zil_lwb_write_issue(). 3990 */ 3991 mutex_enter(&zilog->zl_lwb_io_lock); 3992 txg = MAX(zilog->zl_lwb_max_issued_txg, txg); 3993 mutex_exit(&zilog->zl_lwb_io_lock); 3994 3995 /* 3996 * We need to use txg_wait_synced() to wait until that txg is synced. 3997 * zil_sync() will guarantee all lwbs up to that txg have been 3998 * written out, flushed, and cleaned. 3999 */ 4000 if (txg != 0) 4001 txg_wait_synced(zilog->zl_dmu_pool, txg); 4002 4003 if (zilog_is_dirty(zilog)) 4004 zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog, 4005 (u_longlong_t)txg); 4006 if (txg < spa_freeze_txg(zilog->zl_spa)) 4007 VERIFY(!zilog_is_dirty(zilog)); 4008 4009 zilog->zl_get_data = NULL; 4010 4011 /* 4012 * We should have only one lwb left on the list; remove it now. 4013 */ 4014 mutex_enter(&zilog->zl_lock); 4015 lwb = list_remove_head(&zilog->zl_lwb_list); 4016 if (lwb != NULL) { 4017 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 4018 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_NEW); 4019 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 4020 zil_free_lwb(zilog, lwb); 4021 } 4022 mutex_exit(&zilog->zl_lock); 4023 } 4024 4025 static const char *suspend_tag = "zil suspending"; 4026 4027 /* 4028 * Suspend an intent log. While in suspended mode, we still honor 4029 * synchronous semantics, but we rely on txg_wait_synced() to do it. 4030 * On old version pools, we suspend the log briefly when taking a 4031 * snapshot so that it will have an empty intent log. 4032 * 4033 * Long holds are not really intended to be used the way we do here -- 4034 * held for such a short time. A concurrent caller of dsl_dataset_long_held() 4035 * could fail. Therefore we take pains to only put a long hold if it is 4036 * actually necessary. Fortunately, it will only be necessary if the 4037 * objset is currently mounted (or the ZVOL equivalent). In that case it 4038 * will already have a long hold, so we are not really making things any worse. 4039 * 4040 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or 4041 * zvol_state_t), and use their mechanism to prevent their hold from being 4042 * dropped (e.g. VFS_HOLD()). However, that would be even more pain for 4043 * very little gain. 4044 * 4045 * if cookiep == NULL, this does both the suspend & resume. 4046 * Otherwise, it returns with the dataset "long held", and the cookie 4047 * should be passed into zil_resume(). 4048 */ 4049 int 4050 zil_suspend(const char *osname, void **cookiep) 4051 { 4052 objset_t *os; 4053 zilog_t *zilog; 4054 const zil_header_t *zh; 4055 int error; 4056 4057 error = dmu_objset_hold(osname, suspend_tag, &os); 4058 if (error != 0) 4059 return (error); 4060 zilog = dmu_objset_zil(os); 4061 4062 mutex_enter(&zilog->zl_lock); 4063 zh = zilog->zl_header; 4064 4065 if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ 4066 mutex_exit(&zilog->zl_lock); 4067 dmu_objset_rele(os, suspend_tag); 4068 return (SET_ERROR(EBUSY)); 4069 } 4070 4071 /* 4072 * Don't put a long hold in the cases where we can avoid it. This 4073 * is when there is no cookie so we are doing a suspend & resume 4074 * (i.e. called from zil_vdev_offline()), and there's nothing to do 4075 * for the suspend because it's already suspended, or there's no ZIL. 
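	 *
	 * For reference, the two calling modes look roughly like this
	 * (illustrative sketch only):
	 *
	 *	(void) zil_suspend(osname, NULL);	suspend & resume
	 *
	 *	void *cookie;
	 *	error = zil_suspend(osname, &cookie);	long-term suspend
	 *	if (error == 0) {
	 *		... operate with the ZIL empty and quiesced ...
	 *		zil_resume(cookie);
	 *	}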
4076 */ 4077 if (cookiep == NULL && !zilog->zl_suspending && 4078 (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) { 4079 mutex_exit(&zilog->zl_lock); 4080 dmu_objset_rele(os, suspend_tag); 4081 return (0); 4082 } 4083 4084 dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag); 4085 dsl_pool_rele(dmu_objset_pool(os), suspend_tag); 4086 4087 zilog->zl_suspend++; 4088 4089 if (zilog->zl_suspend > 1) { 4090 /* 4091 * Someone else is already suspending it. 4092 * Just wait for them to finish. 4093 */ 4094 4095 while (zilog->zl_suspending) 4096 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); 4097 mutex_exit(&zilog->zl_lock); 4098 4099 if (cookiep == NULL) 4100 zil_resume(os); 4101 else 4102 *cookiep = os; 4103 return (0); 4104 } 4105 4106 /* 4107 * If there is no pointer to an on-disk block, this ZIL must not 4108 * be active (e.g. filesystem not mounted), so there's nothing 4109 * to clean up. 4110 */ 4111 if (BP_IS_HOLE(&zh->zh_log)) { 4112 ASSERT(cookiep != NULL); /* fast path already handled */ 4113 4114 *cookiep = os; 4115 mutex_exit(&zilog->zl_lock); 4116 return (0); 4117 } 4118 4119 /* 4120 * The ZIL has work to do. Ensure that the associated encryption 4121 * key will remain mapped while we are committing the log by 4122 * grabbing a reference to it. If the key isn't loaded we have no 4123 * choice but to return an error until the wrapping key is loaded. 4124 */ 4125 if (os->os_encrypted && 4126 dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) { 4127 zilog->zl_suspend--; 4128 mutex_exit(&zilog->zl_lock); 4129 dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); 4130 dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); 4131 return (SET_ERROR(EACCES)); 4132 } 4133 4134 zilog->zl_suspending = B_TRUE; 4135 mutex_exit(&zilog->zl_lock); 4136 4137 /* 4138 * We need to use zil_commit_impl to ensure we wait for all 4139 * LWB_STATE_OPENED, _CLOSED and _READY lwbs to be committed 4140 * to disk before proceeding. If we used zil_commit instead, it 4141 * would just call txg_wait_synced(), because zl_suspend is set. 4142 * txg_wait_synced() doesn't wait for these lwb's to be 4143 * LWB_STATE_FLUSH_DONE before returning. 4144 */ 4145 zil_commit_impl(zilog, 0); 4146 4147 /* 4148 * Now that we've ensured all lwb's are LWB_STATE_FLUSH_DONE, we 4149 * use txg_wait_synced() to ensure the data from the zilog has 4150 * migrated to the main pool before calling zil_destroy(). 
4151 */ 4152 txg_wait_synced(zilog->zl_dmu_pool, 0); 4153 4154 zil_destroy(zilog, B_FALSE); 4155 4156 mutex_enter(&zilog->zl_lock); 4157 zilog->zl_suspending = B_FALSE; 4158 cv_broadcast(&zilog->zl_cv_suspend); 4159 mutex_exit(&zilog->zl_lock); 4160 4161 if (os->os_encrypted) 4162 dsl_dataset_remove_key_mapping(dmu_objset_ds(os)); 4163 4164 if (cookiep == NULL) 4165 zil_resume(os); 4166 else 4167 *cookiep = os; 4168 return (0); 4169 } 4170 4171 void 4172 zil_resume(void *cookie) 4173 { 4174 objset_t *os = cookie; 4175 zilog_t *zilog = dmu_objset_zil(os); 4176 4177 mutex_enter(&zilog->zl_lock); 4178 ASSERT(zilog->zl_suspend != 0); 4179 zilog->zl_suspend--; 4180 mutex_exit(&zilog->zl_lock); 4181 dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); 4182 dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); 4183 } 4184 4185 typedef struct zil_replay_arg { 4186 zil_replay_func_t *const *zr_replay; 4187 void *zr_arg; 4188 boolean_t zr_byteswap; 4189 char *zr_lr; 4190 } zil_replay_arg_t; 4191 4192 static int 4193 zil_replay_error(zilog_t *zilog, const lr_t *lr, int error) 4194 { 4195 char name[ZFS_MAX_DATASET_NAME_LEN]; 4196 4197 zilog->zl_replaying_seq--; /* didn't actually replay this one */ 4198 4199 dmu_objset_name(zilog->zl_os, name); 4200 4201 cmn_err(CE_WARN, "ZFS replay transaction error %d, " 4202 "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name, 4203 (u_longlong_t)lr->lrc_seq, 4204 (u_longlong_t)(lr->lrc_txtype & ~TX_CI), 4205 (lr->lrc_txtype & TX_CI) ? "CI" : ""); 4206 4207 return (error); 4208 } 4209 4210 static int 4211 zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra, 4212 uint64_t claim_txg) 4213 { 4214 zil_replay_arg_t *zr = zra; 4215 const zil_header_t *zh = zilog->zl_header; 4216 uint64_t reclen = lr->lrc_reclen; 4217 uint64_t txtype = lr->lrc_txtype; 4218 int error = 0; 4219 4220 zilog->zl_replaying_seq = lr->lrc_seq; 4221 4222 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ 4223 return (0); 4224 4225 if (lr->lrc_txg < claim_txg) /* already committed */ 4226 return (0); 4227 4228 /* Strip case-insensitive bit, still present in log record */ 4229 txtype &= ~TX_CI; 4230 4231 if (txtype == 0 || txtype >= TX_MAX_TYPE) 4232 return (zil_replay_error(zilog, lr, EINVAL)); 4233 4234 /* 4235 * If this record type can be logged out of order, the object 4236 * (lr_foid) may no longer exist. That's legitimate, not an error. 4237 */ 4238 if (TX_OOO(txtype)) { 4239 error = dmu_object_info(zilog->zl_os, 4240 LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL); 4241 if (error == ENOENT || error == EEXIST) 4242 return (0); 4243 } 4244 4245 /* 4246 * Make a copy of the data so we can revise and extend it. 4247 */ 4248 memcpy(zr->zr_lr, lr, reclen); 4249 4250 /* 4251 * If this is a TX_WRITE with a blkptr, suck in the data. 4252 */ 4253 if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { 4254 error = zil_read_log_data(zilog, (lr_write_t *)lr, 4255 zr->zr_lr + reclen); 4256 if (error != 0) 4257 return (zil_replay_error(zilog, lr, error)); 4258 } 4259 4260 /* 4261 * The log block containing this lr may have been byteswapped 4262 * so that we can easily examine common fields like lrc_txtype. 4263 * However, the log is a mix of different record types, and only the 4264 * replay vectors know how to byteswap their records. Therefore, if 4265 * the lr was byteswapped, undo it before invoking the replay vector. 
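	 *
	 * (The byteswap case arises when a log written on a host of one
	 * endianness is replayed on a host of the other; zr_byteswap is
	 * derived from BP_SHOULD_BYTESWAP() on the log's block pointer
	 * in zil_replay() below.)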
4266 */ 4267 if (zr->zr_byteswap) 4268 byteswap_uint64_array(zr->zr_lr, reclen); 4269 4270 /* 4271 * We must now do two things atomically: replay this log record, 4272 * and update the log header sequence number to reflect the fact that 4273 * we did so. At the end of each replay function the sequence number 4274 * is updated if we are in replay mode. 4275 */ 4276 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); 4277 if (error != 0) { 4278 /* 4279 * The DMU's dnode layer doesn't see removes until the txg 4280 * commits, so a subsequent claim can spuriously fail with 4281 * EEXIST. So if we receive any error we try syncing out 4282 * any removes then retry the transaction. Note that we 4283 * specify B_FALSE for byteswap now, so we don't do it twice. 4284 */ 4285 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); 4286 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); 4287 if (error != 0) 4288 return (zil_replay_error(zilog, lr, error)); 4289 } 4290 return (0); 4291 } 4292 4293 static int 4294 zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg) 4295 { 4296 (void) bp, (void) arg, (void) claim_txg; 4297 4298 zilog->zl_replay_blks++; 4299 4300 return (0); 4301 } 4302 4303 /* 4304 * If this dataset has a non-empty intent log, replay it and destroy it. 4305 * Return B_TRUE if there were any entries to replay. 4306 */ 4307 boolean_t 4308 zil_replay(objset_t *os, void *arg, 4309 zil_replay_func_t *const replay_func[TX_MAX_TYPE]) 4310 { 4311 zilog_t *zilog = dmu_objset_zil(os); 4312 const zil_header_t *zh = zilog->zl_header; 4313 zil_replay_arg_t zr; 4314 4315 if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) { 4316 return (zil_destroy(zilog, B_TRUE)); 4317 } 4318 4319 zr.zr_replay = replay_func; 4320 zr.zr_arg = arg; 4321 zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log); 4322 zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP); 4323 4324 /* 4325 * Wait for in-progress removes to sync before starting replay. 
4326 */ 4327 txg_wait_synced(zilog->zl_dmu_pool, 0); 4328 4329 zilog->zl_replay = B_TRUE; 4330 zilog->zl_replay_time = ddi_get_lbolt(); 4331 ASSERT(zilog->zl_replay_blks == 0); 4332 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr, 4333 zh->zh_claim_txg, B_TRUE); 4334 vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE); 4335 4336 zil_destroy(zilog, B_FALSE); 4337 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 4338 zilog->zl_replay = B_FALSE; 4339 4340 return (B_TRUE); 4341 } 4342 4343 boolean_t 4344 zil_replaying(zilog_t *zilog, dmu_tx_t *tx) 4345 { 4346 if (zilog->zl_sync == ZFS_SYNC_DISABLED) 4347 return (B_TRUE); 4348 4349 if (zilog->zl_replay) { 4350 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 4351 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = 4352 zilog->zl_replaying_seq; 4353 return (B_TRUE); 4354 } 4355 4356 return (B_FALSE); 4357 } 4358 4359 int 4360 zil_reset(const char *osname, void *arg) 4361 { 4362 (void) arg; 4363 4364 int error = zil_suspend(osname, NULL); 4365 /* EACCES means crypto key not loaded */ 4366 if ((error == EACCES) || (error == EBUSY)) 4367 return (SET_ERROR(error)); 4368 if (error != 0) 4369 return (SET_ERROR(EEXIST)); 4370 return (0); 4371 } 4372 4373 EXPORT_SYMBOL(zil_alloc); 4374 EXPORT_SYMBOL(zil_free); 4375 EXPORT_SYMBOL(zil_open); 4376 EXPORT_SYMBOL(zil_close); 4377 EXPORT_SYMBOL(zil_replay); 4378 EXPORT_SYMBOL(zil_replaying); 4379 EXPORT_SYMBOL(zil_destroy); 4380 EXPORT_SYMBOL(zil_destroy_sync); 4381 EXPORT_SYMBOL(zil_itx_create); 4382 EXPORT_SYMBOL(zil_itx_destroy); 4383 EXPORT_SYMBOL(zil_itx_assign); 4384 EXPORT_SYMBOL(zil_commit); 4385 EXPORT_SYMBOL(zil_claim); 4386 EXPORT_SYMBOL(zil_check_log_chain); 4387 EXPORT_SYMBOL(zil_sync); 4388 EXPORT_SYMBOL(zil_clean); 4389 EXPORT_SYMBOL(zil_suspend); 4390 EXPORT_SYMBOL(zil_resume); 4391 EXPORT_SYMBOL(zil_lwb_add_block); 4392 EXPORT_SYMBOL(zil_bp_tree_add); 4393 EXPORT_SYMBOL(zil_set_sync); 4394 EXPORT_SYMBOL(zil_set_logbias); 4395 EXPORT_SYMBOL(zil_sums_init); 4396 EXPORT_SYMBOL(zil_sums_fini); 4397 EXPORT_SYMBOL(zil_kstat_values_update); 4398 4399 ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, UINT, ZMOD_RW, 4400 "ZIL block open timeout percentage"); 4401 4402 ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW, 4403 "Disable intent logging replay"); 4404 4405 ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW, 4406 "Disable ZIL cache flushes"); 4407 4408 ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, U64, ZMOD_RW, 4409 "Limit in bytes slog sync writes per commit"); 4410 4411 ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, UINT, ZMOD_RW, 4412 "Limit in bytes of ZIL log block size"); 4413 4414 ZFS_MODULE_PARAM(zfs_zil, zil_, maxcopied, UINT, ZMOD_RW, 4415 "Limit in bytes WR_COPIED size"); 4416
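
/*
 * Note (illustrative): on Linux, the tunables declared above are exposed
 * as module parameters under /sys/module/zfs/parameters/ (e.g.
 * zfs_commit_timeout_pct, zil_replay_disable, zil_nocacheflush), and the
 * ZMOD_RW ones may be adjusted at runtime, for example:
 *
 *	echo 20 > /sys/module/zfs/parameters/zfs_commit_timeout_pct
 */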