// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2018 Datto Inc.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/wmsum.h>

/*
 * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
 * calls that change the file system. Each itx has enough information to
 * be able to replay them after a system crash, power loss, or
 * equivalent failure mode. These are stored in memory until either:
 *
 *   1. they are committed to the pool by the DMU transaction group
 *      (txg), at which point they can be discarded; or
 *   2. they are committed to the on-disk ZIL for the dataset being
 *      modified (e.g. due to an fsync, O_DSYNC, or other synchronous
 *      requirement).
 *
 * In the event of a crash or power loss, the itxs contained by each
 * dataset's on-disk ZIL will be replayed when that dataset is first
 * instantiated (e.g. if the dataset is a normal filesystem, when it is
 * first mounted).
 *
 * As hinted at above, there is one ZIL per dataset (both the in-memory
 * representation, and the on-disk representation). The on-disk format
 * consists of 3 parts:
 *
 * 	- a single, per-dataset, ZIL header; which points to a chain of
 * 	- zero or more ZIL blocks; each of which contains
 * 	- zero or more ZIL records
 *
 * A ZIL record holds the information necessary to replay a single
 * system call transaction. A ZIL block can hold many ZIL records, and
 * the blocks are chained together, similarly to a singly linked list.
 *
 * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
 * block in the chain, and the ZIL header points to the first block in
 * the chain.
 *
 * Note, there is not a fixed place in the pool to hold these ZIL
 * blocks; they are dynamically allocated and freed as needed from the
 * blocks available on the pool, though they can be preferentially
 * allocated from a dedicated "log" vdev.
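 *
 * Roughly, the resulting on-disk structure can be pictured as:
 *
 *      zil_header_t
 *      +----------+
 *      |  zh_log  |--->+------------+    +------------+    +---+
 *      +----------+    | ZIL block  |--->| ZIL block  |--->|...|
 *                      | lr, lr, lr |    | lr, lr     |    +---+
 *                      +------------+    +------------+
 *
 * where each arrow out of a ZIL block is the blkptr_t embedded in that
 * block, and each "lr" is one ZIL record.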
 */

/*
 * This controls the amount of time that a ZIL block (lwb) will remain
 * "open" when it isn't "full", and it has a thread waiting for it to be
 * committed to stable storage. Please refer to the zil_commit_waiter()
 * function (and the comments within it) for more details.
 */
static uint_t zfs_commit_timeout_pct = 10;

/*
 * See zil.h for more information about these fields.
 */
static zil_kstat_values_t zil_stats = {
	{ "zil_commit_count", KSTAT_DATA_UINT64 },
	{ "zil_commit_writer_count", KSTAT_DATA_UINT64 },
	{ "zil_commit_error_count", KSTAT_DATA_UINT64 },
	{ "zil_commit_stall_count", KSTAT_DATA_UINT64 },
	{ "zil_commit_suspend_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_write", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_alloc", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_write", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_alloc", KSTAT_DATA_UINT64 },
};

static zil_sums_t zil_sums_global;
static kstat_t *zil_kstats_global;

/*
 * Disable intent logging replay. This global ZIL switch affects all pools.
 */
int zil_replay_disable = 0;

/*
 * Disable the flush commands that are normally sent to the disk(s) by the ZIL
 * after an LWB write has completed. Setting this will cause ZIL corruption on
 * power loss if a volatile out-of-order write cache is enabled.
 */
static int zil_nocacheflush = 0;

/*
 * Limit SLOG write size per commit executed with synchronous priority.
 * Any writes above that will be executed with lower (asynchronous) priority
 * to limit potential SLOG device abuse by a single active ZIL writer.
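 *
 * For example, with the default limit of 64 MiB below, once a single
 * commit burst has exceeded 64 MiB, the remaining lwb writes of that
 * burst to the SLOG are issued at ZIO_PRIORITY_ASYNC_WRITE rather than
 * ZIO_PRIORITY_SYNC_WRITE (see the priority selection in
 * zil_lwb_write_issue()).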
 */
static uint64_t zil_slog_bulk = 64 * 1024 * 1024;

static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;

static void zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx);
static itx_t *zil_itx_clone(itx_t *oitx);
static uint64_t zil_max_waste_space(zilog_t *zilog);

/*
 * AVL comparator for the ZIL block pointer tree; orders nodes by the
 * vdev and offset of their DVAs.
 */
static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
	if (likely(cmp))
		return (cmp);

	return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}

static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}

/*
 * Remember bp's DVA in the zl_bp_tree, returning EEXIST if it was
 * already there, so that each block is processed only once.
 */
int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva;
	zil_bp_node_t *zn;
	avl_index_t where;

	if (BP_IS_EMBEDDED(bp))
		return (0);

	dva = BP_IDENTITY(bp);

	if (avl_find(t, dva, &where) != NULL)
		return (SET_ERROR(EEXIST));

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

/*
 * Initialize the checksum template for a new log chain: a random GUID,
 * the objset ID, and a starting block sequence number of 1.
 */
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0],
	    sizeof (zc->zc_word[ZIL_ZC_GUID_0]));
	(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1],
	    sizeof (zc->zc_word[ZIL_ZC_GUID_1]));
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

static int
zil_kstats_global_update(kstat_t *ksp, int rw)
{
	zil_kstat_values_t *zs = ksp->ks_data;
	ASSERT3P(&zil_stats, ==, zs);

	if (rw == KSTAT_WRITE) {
		return (SET_ERROR(EACCES));
	}

	zil_kstat_values_update(zs, &zil_sums_global);

	return (0);
}

/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
    blkptr_t *nbp, char **begin, char **end, arc_buf_t **abuf)
{
	zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	zbookmark_phys_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	if (!decrypt)
		zio_flags |= ZIO_FLAG_RAW;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
	    abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		uint64_t size = BP_GET_LSIZE(bp);
		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = (*abuf)->b_data;
			char *lr = (char *)(zilc + 1);

			if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) ||
			    zilc->zc_nused < sizeof (*zilc) ||
			    zilc->zc_nused > size) {
				error = SET_ERROR(ECKSUM);
			} else {
				*begin = lr;
				*end = lr + zilc->zc_nused - sizeof (*zilc);
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = (*abuf)->b_data;
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = SET_ERROR(ECKSUM);
			} else {
				*begin = lr;
				*end = lr + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}
	}

	return (error);
}

/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			memset(wbuf, 0, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	/*
	 * If we are not using the resulting data, we are just checking that
	 * it hasn't been corrupted so we don't need to waste CPU time
	 * decompressing and decrypting it.
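	 * A raw (ZIO_FLAG_RAW) read returns the on-disk bytes untransformed,
	 * which is still sufficient for the block checksum to be verified.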
	 */
	if (wbuf == NULL)
		zio_flags |= ZIO_FLAG_RAW;

	ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			memcpy(wbuf, abuf->b_data, arc_buf_size(abuf));
		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}

void
zil_sums_init(zil_sums_t *zs)
{
	wmsum_init(&zs->zil_commit_count, 0);
	wmsum_init(&zs->zil_commit_writer_count, 0);
	wmsum_init(&zs->zil_commit_error_count, 0);
	wmsum_init(&zs->zil_commit_stall_count, 0);
	wmsum_init(&zs->zil_commit_suspend_count, 0);
	wmsum_init(&zs->zil_itx_count, 0);
	wmsum_init(&zs->zil_itx_indirect_count, 0);
	wmsum_init(&zs->zil_itx_indirect_bytes, 0);
	wmsum_init(&zs->zil_itx_copied_count, 0);
	wmsum_init(&zs->zil_itx_copied_bytes, 0);
	wmsum_init(&zs->zil_itx_needcopy_count, 0);
	wmsum_init(&zs->zil_itx_needcopy_bytes, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_count, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_bytes, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_write, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_alloc, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_count, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_bytes, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_write, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_alloc, 0);
}

void
zil_sums_fini(zil_sums_t *zs)
{
	wmsum_fini(&zs->zil_commit_count);
	wmsum_fini(&zs->zil_commit_writer_count);
	wmsum_fini(&zs->zil_commit_error_count);
	wmsum_fini(&zs->zil_commit_stall_count);
	wmsum_fini(&zs->zil_commit_suspend_count);
	wmsum_fini(&zs->zil_itx_count);
	wmsum_fini(&zs->zil_itx_indirect_count);
	wmsum_fini(&zs->zil_itx_indirect_bytes);
	wmsum_fini(&zs->zil_itx_copied_count);
	wmsum_fini(&zs->zil_itx_copied_bytes);
	wmsum_fini(&zs->zil_itx_needcopy_count);
	wmsum_fini(&zs->zil_itx_needcopy_bytes);
	wmsum_fini(&zs->zil_itx_metaslab_normal_count);
	wmsum_fini(&zs->zil_itx_metaslab_normal_bytes);
	wmsum_fini(&zs->zil_itx_metaslab_normal_write);
	wmsum_fini(&zs->zil_itx_metaslab_normal_alloc);
	wmsum_fini(&zs->zil_itx_metaslab_slog_count);
	wmsum_fini(&zs->zil_itx_metaslab_slog_bytes);
	wmsum_fini(&zs->zil_itx_metaslab_slog_write);
	wmsum_fini(&zs->zil_itx_metaslab_slog_alloc);
}

void
zil_kstat_values_update(zil_kstat_values_t *zs, zil_sums_t *zil_sums)
{
	zs->zil_commit_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_commit_count);
	zs->zil_commit_writer_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_commit_writer_count);
	zs->zil_commit_error_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_commit_error_count);
	zs->zil_commit_stall_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_commit_stall_count);
	zs->zil_commit_suspend_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_commit_suspend_count);
	zs->zil_itx_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_count);
	zs->zil_itx_indirect_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_indirect_count);
	zs->zil_itx_indirect_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_indirect_bytes);
	zs->zil_itx_copied_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_copied_count);
	zs->zil_itx_copied_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_copied_bytes);
	zs->zil_itx_needcopy_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_needcopy_count);
	zs->zil_itx_needcopy_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_needcopy_bytes);
	zs->zil_itx_metaslab_normal_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_count);
	zs->zil_itx_metaslab_normal_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_bytes);
	zs->zil_itx_metaslab_normal_write.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_write);
	zs->zil_itx_metaslab_normal_alloc.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_alloc);
	zs->zil_itx_metaslab_slog_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_count);
	zs->zil_itx_metaslab_slog_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_bytes);
	zs->zil_itx_metaslab_slog_write.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_write);
	zs->zil_itx_metaslab_slog_alloc.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_alloc);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
    boolean_t decrypt)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk = {{{{0}}}};
	int error = 0;

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *lrp, *end;
		arc_buf_t *abuf = NULL;

		if (blk_seq > claim_blk_seq)
			break;

		error = parse_blk_func(zilog, &blk, arg, txg);
		if (error != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
		    &lrp, &end, &abuf);
		if (error != 0) {
			if (abuf)
				arc_buf_destroy(abuf, &abuf);
			if (claimed) {
				char name[ZFS_MAX_DATASET_NAME_LEN];

				dmu_objset_name(zilog->zl_os, name);

				cmn_err(CE_WARN, "ZFS read log block error %d, "
				    "dataset %s, seq 0x%llx\n", error, name,
				    (u_longlong_t)blk_seq);
			}
			break;
		}

		for (; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;

			/*
			 * Are the remaining bytes large enough to hold a
			 * log record?
			 */
			if ((char *)(lr + 1) > end) {
				cmn_err(CE_WARN, "zil_parse: lr_t overrun");
				error = SET_ERROR(ECKSUM);
				arc_buf_destroy(abuf, &abuf);
				goto done;
			}
			reclen = lr->lrc_reclen;
			if (reclen < sizeof (lr_t) || reclen > end - lrp) {
				cmn_err(CE_WARN,
				    "zil_parse: lr_t has an invalid reclen");
				error = SET_ERROR(ECKSUM);
				arc_buf_destroy(abuf, &abuf);
				goto done;
			}

			if (lr->lrc_seq > claim_lr_seq) {
				arc_buf_destroy(abuf, &abuf);
				goto done;
			}

			error = parse_lr_func(zilog, lr, arg, txg);
			if (error != 0) {
				arc_buf_destroy(abuf, &abuf);
				goto done;
			}
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
		arc_buf_destroy(abuf, &abuf);
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	zil_bp_tree_fini(zilog);

	return (error);
}

static int
zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t first_txg)
{
	(void) tx;
	ASSERT(!BP_IS_HOLE(bp));

	/*
	 * As we call this function from the context of a rewind to a
	 * checkpoint, each ZIL block whose txg is later than the txg
	 * that we rewind to is invalid. Thus, we return -1 so
	 * zil_parse() doesn't attempt to read it.
	 */
	if (BP_GET_LOGICAL_BIRTH(bp) >= first_txg)
		return (-1);

	if (zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	zio_free(zilog->zl_spa, first_txg, bp);
	return (0);
}

static int
zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t first_txg)
{
	(void) zilog, (void) lrc, (void) tx, (void) first_txg;
	return (0);
}

static int
zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (BP_IS_HOLE(bp) || BP_GET_LOGICAL_BIRTH(bp) < first_txg ||
	    zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}

static int
zil_claim_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));

	/*
	 * If the block is not readable, don't claim it. This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to. In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (BP_GET_LOGICAL_BIRTH(&lr->lr_blkptr) >= first_txg) {
		error = zil_read_log_data(zilog, lr, NULL);
		if (error != 0)
			return (error);
	}

	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}

static int
zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t first_txg)
{
	const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
	const blkptr_t *bp;
	spa_t *spa = zilog->zl_spa;
	uint_t ii;

	ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));
	ASSERT3U(lrc->lrc_reclen, >=, offsetof(lr_clone_range_t,
	    lr_bps[lr->lr_nbps]));

	if (tx == NULL) {
		return (0);
	}

	/*
	 * XXX: Do we need to byteswap lr?
	 */

	for (ii = 0; ii < lr->lr_nbps; ii++) {
		bp = &lr->lr_bps[ii];

		/*
		 * When data is embedded into the BP there is no need to
		 * create a BRT entry as there is no data block. Just copy
		 * the BP as it contains the data.
		 */
		if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
			continue;

		/*
		 * We can not handle block pointers from the future, since
		 * they are not yet allocated. It should not normally happen,
		 * but just in case let's be safe and stop here now instead
		 * of corrupting the pool.
		 */
		if (BP_GET_BIRTH(bp) >= first_txg)
			return (SET_ERROR(ENOENT));

		/*
		 * Assert the block is really allocated before we reference it.
		 */
		metaslab_check_free(spa, bp);
	}

	for (ii = 0; ii < lr->lr_nbps; ii++) {
		bp = &lr->lr_bps[ii];
		if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp))
			brt_pending_add(spa, bp, tx);
	}

	return (0);
}

static int
zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t first_txg)
{

	switch (lrc->lrc_txtype) {
	case TX_WRITE:
		return (zil_claim_write(zilog, lrc, tx, first_txg));
	case TX_CLONE_RANGE:
		return (zil_claim_clone_range(zilog, lrc, tx, first_txg));
	default:
		return (0);
	}
}

static int
zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t claim_txg)
{
	(void) claim_txg;

	zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static int
zil_free_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (BP_GET_LOGICAL_BIRTH(bp) >= claim_txg &&
	    zil_bp_tree_add(zilog, bp) == 0 && !BP_IS_HOLE(bp)) {
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
	}

	return (0);
}

static int
zil_free_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
{
	const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
	const blkptr_t *bp;
	spa_t *spa;
	uint_t ii;

	ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));
	ASSERT3U(lrc->lrc_reclen, >=, offsetof(lr_clone_range_t,
	    lr_bps[lr->lr_nbps]));

	if (tx == NULL) {
		return (0);
	}

	spa = zilog->zl_spa;

	for (ii = 0; ii < lr->lr_nbps; ii++) {
		bp = &lr->lr_bps[ii];

		if (!BP_IS_HOLE(bp)) {
			zio_free(spa, dmu_tx_get_txg(tx), bp);
		}
	}

	return (0);
}

static int
zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t claim_txg)
{

	if (claim_txg == 0) {
		return (0);
	}

	switch (lrc->lrc_txtype) {
	case TX_WRITE:
		return (zil_free_write(zilog, lrc, tx, claim_txg));
	case TX_CLONE_RANGE:
		return (zil_free_clone_range(zilog, lrc, tx));
	default:
		return (0);
	}
}

/*
 * AVL comparator for lwb_vdev_tree; orders nodes by vdev id.
 */
static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	return (TREE_CMP(v1, v2));
}

/*
 * Allocate a new lwb. We may already have a block pointer for it, in which
 * case we get the size and version from there. Or we may not yet, in which
 * case we choose them here and later make the block allocation match.
 */
static lwb_t *
zil_alloc_lwb(zilog_t *zilog, int sz, blkptr_t *bp, boolean_t slog,
    uint64_t txg, lwb_state_t state)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	if (bp) {
		lwb->lwb_blk = *bp;
		lwb->lwb_slim = (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2);
		sz = BP_GET_LSIZE(bp);
	} else {
		BP_ZERO(&lwb->lwb_blk);
		lwb->lwb_slim = (spa_version(zilog->zl_spa) >=
		    SPA_VERSION_SLIM_ZIL);
	}
	lwb->lwb_slog = slog;
	lwb->lwb_error = 0;
	if (lwb->lwb_slim) {
		lwb->lwb_nmax = sz;
		lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t);
	} else {
		lwb->lwb_nmax = sz - sizeof (zil_chain_t);
		lwb->lwb_nused = lwb->lwb_nfilled = 0;
	}
	lwb->lwb_sz = sz;
	lwb->lwb_state = state;
	lwb->lwb_buf = zio_buf_alloc(sz);
	lwb->lwb_child_zio = NULL;
	lwb->lwb_write_zio = NULL;
	lwb->lwb_root_zio = NULL;
	lwb->lwb_issued_timestamp = 0;
	lwb->lwb_issued_txg = 0;
	lwb->lwb_alloc_txg = txg;
	lwb->lwb_max_txg = 0;

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	if (state != LWB_STATE_NEW)
		zilog->zl_last_lwb_opened = lwb;
	mutex_exit(&zilog->zl_lock);

	return (lwb);
}

static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
	ASSERT(MUTEX_HELD(&zilog->zl_lock));
	ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
	    lwb->lwb_state == LWB_STATE_FLUSH_DONE);
	ASSERT3P(lwb->lwb_child_zio, ==, NULL);
	ASSERT3P(lwb->lwb_write_zio, ==, NULL);
	ASSERT3P(lwb->lwb_root_zio, ==, NULL);
	ASSERT3U(lwb->lwb_alloc_txg, <=, spa_syncing_txg(zilog->zl_spa));
	ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
	VERIFY(list_is_empty(&lwb->lwb_itxs));
	VERIFY(list_is_empty(&lwb->lwb_waiters));
	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));

	/*
	 * Clear the zilog's field to indicate this lwb is no longer
	 * valid, and prevent use-after-free errors.
	 */
	if (zilog->zl_last_lwb_opened == lwb)
		zilog->zl_last_lwb_opened = NULL;

	kmem_cache_free(zil_lwb_cache, lwb);
}

/*
 * Called when we create in-memory log transactions so that we know
 * to cleanup the itxs at the end of spa_sync().
 */
static void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	ASSERT(spa_writeable(zilog->zl_spa));

	if (ds->ds_is_snapshot)
		panic("dirtying snapshot!");

	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, zilog);

		zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
	}
}

/*
 * Determine if the zil is dirty in the specified txg. Callers wanting to
 * ensure that the dirty state does not change must hold the itxg_lock for
 * the specified txg. Holding the lock will ensure that the zil cannot be
 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
 * state.
 */
static boolean_t __maybe_unused
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
		return (B_TRUE);
	return (B_FALSE);
}

/*
 * Determine if the zil is dirty. The zil is considered dirty if it has
 * any pending itx records that have not been cleaned by zil_clean().
 */
static boolean_t
zilog_is_dirty(zilog_t *zilog)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	for (int t = 0; t < TXG_SIZE; t++) {
		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * This is called in zil_commit() context (zil_process_commit_list()/
 * zil_create()). It activates the SPA_FEATURE_ZILSAXATTR feature, if it
 * is enabled. We check dsl_dataset_feature_is_active() first to avoid a
 * txg_wait_synced() on every zil_commit().
 */
static void
zil_commit_activate_saxattr_feature(zilog_t *zilog)
{
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;

	if (spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
	    dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL &&
	    !dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
		dsl_dataset_dirty(ds, tx);
		txg = dmu_tx_get_txg(tx);

		mutex_enter(&ds->ds_lock);
		ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
		    (void *)B_TRUE;
		mutex_exit(&ds->ds_lock);
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}
}

/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;
	boolean_t slog = FALSE;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
		    ZIL_MIN_BLKSZ, &slog);
		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write block (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, 0, &blk, slog, txg, LWB_STATE_NEW);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		/*
		 * If the "zilsaxattr" feature is enabled on the zpool, then
		 * activate it now when we're creating the ZIL chain. We
		 * can't wait with this until we write the first xattr log
		 * record because we need to wait for the feature activation
		 * to sync out.
		 */
		if (spa_feature_is_enabled(zilog->zl_spa,
		    SPA_FEATURE_ZILSAXATTR) && dmu_objset_type(zilog->zl_os) !=
		    DMU_OST_ZVOL) {
			mutex_enter(&ds->ds_lock);
			ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
			    (void *)B_TRUE;
			mutex_exit(&ds->ds_lock);
		}

		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	} else {
		/*
		 * This branch covers the case where we enable the feature on a
		 * zpool that has existing ZIL headers.
		 */
		zil_commit_activate_saxattr_feature(zilog);
	}
	IMPLY(spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
	    dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL,
	    dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR));

	ASSERT(error != 0 || memcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
	IMPLY(error == 0, lwb != NULL);

	return (lwb);
}

/*
 * In one tx, free all log blocks and clear the log header. If keep_first
 * is set, then we're replaying a log with no content. We want to keep the
 * first block, however, so that the first synchronous transaction doesn't
 * require a txg_wait_synced() in zil_create(). We don't need to
 * txg_wait_synced() here either when keep_first is set, because both
 * zil_create() and zil_destroy() will wait for any in-progress destroys
 * to complete.
 * Return B_TRUE if there were any entries to replay.
 */
boolean_t
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return (B_FALSE);

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		VERIFY(!keep_first);
		while ((lwb = list_remove_head(&zilog->zl_lwb_list)) != NULL) {
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			if (!BP_IS_HOLE(&lwb->lwb_blk))
				zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
			zil_free_lwb(zilog, lwb);
		}
	} else if (!keep_first) {
		zil_destroy_sync(zilog, tx);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);

	return (B_TRUE);
}

void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	(void) zil_parse(zilog, zil_free_log_block,
	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
}

/*
 * Claim the ZIL blocks of the given dataset so that they cannot be
 * reused before the log is replayed or destroyed; called per dataset
 * during pool import.
 */
int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
	dmu_tx_t *tx = txarg;
	zilog_t *zilog;
	uint64_t first_txg;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_own_obj(dp, ds->ds_object,
	    DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
	if (error != 0) {
		/*
		 * EBUSY indicates that the objset is inconsistent, in which
		 * case it cannot have a ZIL.
		 */
		if (error != EBUSY) {
			cmn_err(CE_WARN, "can't open objset for %llu, error %u",
			    (unsigned long long)ds->ds_object, error);
		}

		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);
	ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
	first_txg = spa_min_claim_txg(zilog->zl_spa);

	/*
	 * If the spa_log_state is not set to be cleared, check whether
	 * the current uberblock is a checkpoint one and if the current
	 * header has been claimed before moving on.
	 *
	 * If the current uberblock is a checkpointed uberblock then
	 * one of the following scenarios took place:
	 *
	 * 1] We are currently rewinding to the checkpoint of the pool.
	 * 2] We crashed in the middle of a checkpoint rewind but we
	 *    did manage to write the checkpointed uberblock to the
	 *    vdev labels, so when we tried to import the pool again
	 *    the checkpointed uberblock was selected from the import
	 *    procedure.
	 *
	 * In both cases we want to zero out all the ZIL blocks, except
	 * the ones that have been claimed at the time of the checkpoint
	 * (their zh_claim_txg != 0). The reason is that these blocks
	 * may be corrupted since we may have reused their locations on
	 * disk after we took the checkpoint.
	 *
	 * We could try to set spa_log_state to SPA_LOG_CLEAR earlier
	 * when we first figure out whether the current uberblock is
	 * checkpointed or not. Unfortunately, that would discard all
	 * the logs, including the ones that are claimed, and we would
	 * leak space.
	 */
	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
	    (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
	    zh->zh_claim_txg == 0)) {
		if (!BP_IS_HOLE(&zh->zh_log)) {
			(void) zil_parse(zilog, zil_clear_log_block,
			    zil_noop_log_record, tx, first_txg, B_FALSE);
		}
		BP_ZERO(&zh->zh_log);
		if (os->os_encrypted)
			os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_disown(os, B_FALSE, FTAG);
		return (0);
	}

	/*
	 * If we are not rewinding and opening the pool normally, then
	 * the min_claim_txg should be equal to the first txg of the pool.
	 */
	ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg, B_FALSE);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		if (os->os_encrypted)
			os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_disown(os, B_FALSE, FTAG);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
	(void) dp;
	zilog_t *zilog;
	objset_t *os;
	blkptr_t *bp;
	int error;

	ASSERT(tx == NULL);

	error = dmu_objset_from_ds(ds, &os);
	if (error != 0) {
		cmn_err(CE_WARN, "can't open objset %llu, error %d",
		    (unsigned long long)ds->ds_object, error);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	bp = (blkptr_t *)&zilog->zl_header->zh_log;

	if (!BP_IS_HOLE(bp)) {
		vdev_t *vd;
		boolean_t valid = B_TRUE;

		/*
		 * Check the first block and determine if it's on a log device
		 * which may have been removed or faulted prior to loading this
		 * pool. If so, there's no point in checking the rest of the
		 * log as its content should have already been synced to the
		 * pool.
		 */
		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
		if (vd->vdev_islog && vdev_is_dead(vd))
			valid = vdev_log_state_valid(vd);
		spa_config_exit(os->os_spa, SCL_STATE, FTAG);

		if (!valid)
			return (0);

		/*
		 * Check whether the current uberblock is checkpointed (e.g.
		 * we are rewinding) and whether the current header has been
		 * claimed or not. If it hasn't then skip verifying it. We
		 * do this because its ZIL blocks may be part of the pool's
		 * state before the rewind, which is no longer valid.
		 */
		zil_header_t *zh = zil_header_in_syncing_context(zilog);
		if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
		    zh->zh_claim_txg == 0)
			return (0);
	}

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg. See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL :
	    spa_min_claim_txg(os->os_spa), B_FALSE);

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}

/*
 * When an itx is "skipped", this function is used to properly mark the
 * waiter as "done", and signal any thread(s) waiting on it. An itx can
 * be skipped (and not committed to an lwb) for a variety of reasons,
 * one of them being that the itx was committed via spa_sync(), prior to
 * it being committed to an lwb; this can happen if a thread calling
 * zil_commit() is racing with spa_sync().
 */
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
	mutex_enter(&zcw->zcw_lock);
	ASSERT3B(zcw->zcw_done, ==, B_FALSE);
	zcw->zcw_done = B_TRUE;
	cv_broadcast(&zcw->zcw_cv);
	mutex_exit(&zcw->zcw_lock);
}

/*
 * This function is used when the given waiter is to be linked into an
 * lwb's "lwb_waiters" list; i.e. when the itx is committed to the lwb.
 * At this point, the waiter will no longer be referenced by the itx,
 * and instead, will be referenced by the lwb.
 */
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
	/*
	 * The lwb_waiters field of the lwb is protected by the zilog's
	 * zl_issuer_lock while the lwb is open and by zl_lock otherwise.
	 * zl_issuer_lock also protects leaving the open state.
	 * The zcw_lwb setting is protected by zl_issuer_lock and
	 * state != flush_done, which transition is protected by zl_lock.
	 */
	ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_issuer_lock));
	IMPLY(lwb->lwb_state != LWB_STATE_OPENED,
	    MUTEX_HELD(&lwb->lwb_zilog->zl_lock));
	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_NEW);
	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);

	ASSERT(!list_link_active(&zcw->zcw_node));
	list_insert_tail(&lwb->lwb_waiters, zcw);
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	zcw->zcw_lwb = lwb;
}

/*
 * This function is used when zio_alloc_zil() fails to allocate a ZIL
 * block, and the given waiter must be linked to the "nolwb waiters"
 * list inside of zil_process_commit_list().
 */
static void
zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
	ASSERT(!list_link_active(&zcw->zcw_node));
	list_insert_tail(nolwb, zcw);
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
}

/*
 * Remember each vdev written to by the given block pointer, so that its
 * write cache can be flushed after the lwb's write completes.
 */
void
zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
{
	avl_tree_t *t = &lwb->lwb_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);

	if (zil_nocacheflush)
		return;

	mutex_enter(&lwb->lwb_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&lwb->lwb_vdev_lock);
}

static void
zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
{
	avl_tree_t *src = &lwb->lwb_vdev_tree;
	avl_tree_t *dst = &nlwb->lwb_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;

	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
	ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
	ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);

	/*
	 * The 'lwb' is at a point in its lifetime where lwb_vdev_tree does
	 * not need the protection of lwb_vdev_lock (it will only be modified
	 * while holding zilog->zl_lock), as its writes and those of its
	 * children have all completed. The younger 'nlwb' may be waiting on
	 * future writes to additional vdevs.
	 */
	mutex_enter(&nlwb->lwb_vdev_lock);
	/*
	 * Tear down the 'lwb' vdev tree, ensuring that entries which do not
	 * exist in 'nlwb' are moved to it, freeing any would-be duplicates.
	 */
	while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) {
		avl_index_t where;

		if (avl_find(dst, zv, &where) == NULL) {
			avl_insert(dst, zv, where);
		} else {
			kmem_free(zv, sizeof (*zv));
		}
	}
	mutex_exit(&nlwb->lwb_vdev_lock);
}

/*
 * Track the highest txg of the itxs written into this lwb.
 */
void
zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
{
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}

/*
 * This function is called after all vdevs associated with a given lwb write
 * have completed their flush command; or as soon as the lwb write completes,
 * if "zil_nocacheflush" is set. Further, all "previous" lwbs will have
 * completed before this function is called; i.e. this function is called for
 * all previous lwbs before it's called for "this" lwb (enforced via the zio
 * dependencies configured in zil_lwb_set_zio_dependency()).
 *
 * The intention is for this function to be called as soon as the contents of
 * an lwb are considered "stable" on disk, and will survive any sudden loss of
 * power. At this point, any threads waiting for the lwb to reach this state
 * are signalled, and the "waiter" structures are marked "done".
 */
static void
zil_lwb_flush_vdevs_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	zil_commit_waiter_t *zcw;
	itx_t *itx;

	spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);

	hrtime_t t = gethrtime() - lwb->lwb_issued_timestamp;

	mutex_enter(&zilog->zl_lock);

	zilog->zl_last_lwb_latency = (zilog->zl_last_lwb_latency * 7 + t) / 8;

	lwb->lwb_root_zio = NULL;

	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
	lwb->lwb_state = LWB_STATE_FLUSH_DONE;

	if (zilog->zl_last_lwb_opened == lwb) {
		/*
		 * Remember the highest committed log sequence number
		 * for ztest. We only update this value when all the log
		 * writes succeeded, because ztest wants to ASSERT that
		 * it got the whole log chain.
		 */
		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
	}

	while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL)
		zil_itx_destroy(itx);

	while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) {
		mutex_enter(&zcw->zcw_lock);

		ASSERT3P(zcw->zcw_lwb, ==, lwb);
		zcw->zcw_lwb = NULL;
		/*
		 * We expect any ZIO errors from child ZIOs to have been
		 * propagated "up" to this specific LWB's root ZIO, in
		 * order for this error handling to work correctly. This
		 * includes ZIO errors from either this LWB's write or
		 * flush, as well as any errors from other dependent LWBs
		 * (e.g. a root LWB ZIO that might be a child of this LWB).
		 *
		 * With that said, it's important to note that LWB flush
		 * errors are not propagated up to the LWB root ZIO.
		 * This is incorrect behavior, and results in VDEV flush
		 * errors not being handled correctly here. See the
		 * comment above the call to "zio_flush" for details.
		 */

		zcw->zcw_zio_error = zio->io_error;

		ASSERT3B(zcw->zcw_done, ==, B_FALSE);
		zcw->zcw_done = B_TRUE;
		cv_broadcast(&zcw->zcw_cv);

		mutex_exit(&zcw->zcw_lock);
	}

	uint64_t txg = lwb->lwb_issued_txg;

	/* Once we drop the lock, lwb may be freed by zil_sync(). */
	mutex_exit(&zilog->zl_lock);

	mutex_enter(&zilog->zl_lwb_io_lock);
	ASSERT3U(zilog->zl_lwb_inflight[txg & TXG_MASK], >, 0);
	zilog->zl_lwb_inflight[txg & TXG_MASK]--;
	if (zilog->zl_lwb_inflight[txg & TXG_MASK] == 0)
		cv_broadcast(&zilog->zl_lwb_io_cv);
	mutex_exit(&zilog->zl_lwb_io_lock);
}

/*
 * Wait for the completion of all issued writes and flushes for the given
 * txg. This guarantees zil_lwb_flush_vdevs_done() has been called and has
 * returned for each of them.
 */
static void
zil_lwb_flush_wait_all(zilog_t *zilog, uint64_t txg)
{
	ASSERT3U(txg, ==, spa_syncing_txg(zilog->zl_spa));

	mutex_enter(&zilog->zl_lwb_io_lock);
	while (zilog->zl_lwb_inflight[txg & TXG_MASK] > 0)
		cv_wait(&zilog->zl_lwb_io_cv, &zilog->zl_lwb_io_lock);
	mutex_exit(&zilog->zl_lwb_io_lock);

#ifdef ZFS_DEBUG
	mutex_enter(&zilog->zl_lock);
	mutex_enter(&zilog->zl_lwb_io_lock);
	lwb_t *lwb = list_head(&zilog->zl_lwb_list);
	while (lwb != NULL) {
		if (lwb->lwb_issued_txg <= txg) {
			ASSERT(lwb->lwb_state != LWB_STATE_ISSUED);
			ASSERT(lwb->lwb_state != LWB_STATE_WRITE_DONE);
			IMPLY(lwb->lwb_issued_txg > 0,
			    lwb->lwb_state == LWB_STATE_FLUSH_DONE);
		}
		IMPLY(lwb->lwb_state == LWB_STATE_WRITE_DONE ||
		    lwb->lwb_state == LWB_STATE_FLUSH_DONE,
		    lwb->lwb_buf == NULL);
		lwb = list_next(&zilog->zl_lwb_list, lwb);
	}
	mutex_exit(&zilog->zl_lwb_io_lock);
	mutex_exit(&zilog->zl_lock);
#endif
}

/*
 * This is called when an lwb's write zio completes. The callback's purpose is
 * to issue the flush commands for the vdevs in the lwb's lwb_vdev_tree. The
 * tree will contain the vdevs involved in writing out this specific lwb's
 * data, and in the case that cache flushes have been deferred, vdevs involved
 * in writing the data for previous lwbs. The writes corresponding to all the
 * vdevs in the lwb_vdev_tree will have completed by the time this is called,
 * due to the zio dependencies configured in zil_lwb_set_zio_dependency(),
 * which takes deferred flushes into account. The lwb will be "done" once
 * zil_lwb_flush_vdevs_done() is called, which occurs in the zio completion
 * callback for the lwb's root zio.
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	spa_t *spa = zio->io_spa;
	zilog_t *zilog = lwb->lwb_zilog;
	avl_tree_t *t = &lwb->lwb_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	lwb_t *nlwb;

	ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);

	abd_free(zio->io_abd);
	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	lwb->lwb_buf = NULL;

	mutex_enter(&zilog->zl_lock);
	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
	lwb->lwb_state = LWB_STATE_WRITE_DONE;
	lwb->lwb_child_zio = NULL;
	lwb->lwb_write_zio = NULL;

	/*
	 * If nlwb was not yet issued, zil_lwb_set_zio_dependency() has not
	 * been called for it yet, and when it is, it won't be able to make
	 * its write ZIO a parent of this ZIO. In such a case we cannot defer
	 * our flushes, or there may be a race between the done callbacks.
	 */
	nlwb = list_next(&zilog->zl_lwb_list, lwb);
	if (nlwb && nlwb->lwb_state != LWB_STATE_ISSUED)
		nlwb = NULL;
	mutex_exit(&zilog->zl_lock);

	if (avl_numnodes(t) == 0)
		return;

	/*
	 * If there was an IO error, we're not going to call zio_flush()
	 * on these vdevs, so we simply empty the tree and free the
	 * nodes. We avoid calling zio_flush() since there isn't any
	 * good reason for doing so, after the lwb block failed to be
	 * written out.
	 *
	 * Additionally, we don't perform any further error handling at
	 * this point (e.g. setting "zcw_zio_error" appropriately), as
setting "zcw_zio_error" appropriately), as 1622 * we expect that to occur in "zil_lwb_flush_vdevs_done" (thus, 1623 * we expect any error seen here, to have been propagated to 1624 * that function). 1625 */ 1626 if (zio->io_error != 0) { 1627 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) 1628 kmem_free(zv, sizeof (*zv)); 1629 return; 1630 } 1631 1632 /* 1633 * If this lwb does not have any threads waiting for it to complete, we 1634 * want to defer issuing the flush command to the vdevs written to by 1635 * "this" lwb, and instead rely on the "next" lwb to handle the flush 1636 * command for those vdevs. Thus, we merge the vdev tree of "this" lwb 1637 * with the vdev tree of the "next" lwb in the list, and assume the 1638 * "next" lwb will handle flushing the vdevs (or deferring the flush(s) 1639 * again). 1640 * 1641 * This is a useful performance optimization, especially for workloads 1642 * with lots of async write activity and few sync write and/or fsync 1643 * activity, as it has the potential to coalesce multiple flush 1644 * commands to a vdev into one. 1645 */ 1646 if (list_is_empty(&lwb->lwb_waiters) && nlwb != NULL) { 1647 zil_lwb_flush_defer(lwb, nlwb); 1648 ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); 1649 return; 1650 } 1651 1652 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) { 1653 vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev); 1654 if (vd != NULL) { 1655 /* 1656 * The "ZIO_FLAG_DONT_PROPAGATE" is currently 1657 * always used within "zio_flush". This means, 1658 * any errors when flushing the vdev(s), will 1659 * (unfortunately) not be handled correctly, 1660 * since these "zio_flush" errors will not be 1661 * propagated up to "zil_lwb_flush_vdevs_done". 1662 */ 1663 zio_flush(lwb->lwb_root_zio, vd); 1664 } 1665 kmem_free(zv, sizeof (*zv)); 1666 } 1667 } 1668 1669 /* 1670 * Build the zio dependency chain, which is used to preserve the ordering of 1671 * lwb completions that is required by the semantics of the ZIL. Each new lwb 1672 * zio becomes a parent of the previous lwb zio, such that the new lwb's zio 1673 * cannot complete until the previous lwb's zio completes. 1674 * 1675 * This is required by the semantics of zil_commit(): the commit waiters 1676 * attached to the lwbs will be woken in the lwb zio's completion callback, 1677 * so this zio dependency graph ensures the waiters are woken in the correct 1678 * order (the same order the lwbs were created). 1679 */ 1680 static void 1681 zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb) 1682 { 1683 ASSERT(MUTEX_HELD(&zilog->zl_lock)); 1684 1685 lwb_t *prev_lwb = list_prev(&zilog->zl_lwb_list, lwb); 1686 if (prev_lwb == NULL || 1687 prev_lwb->lwb_state == LWB_STATE_FLUSH_DONE) 1688 return; 1689 1690 /* 1691 * If the previous lwb's write hasn't already completed, we also want 1692 * to order the completion of the lwb write zios (above, we only order 1693 * the completion of the lwb root zios). This is required because of 1694 * how we can defer the flush commands for each lwb. 1695 * 1696 * When the flush commands are deferred, the previous lwb will rely on 1697 * this lwb to flush the vdevs written to by that previous lwb. Thus, 1698 * we need to ensure this lwb doesn't issue the flush until after the 1699 * previous lwb's write completes. We ensure this ordering by setting 1700 * the zio parent/child relationship here. 
	 *
	 * Without this relationship on the lwb's write zio, it's possible for
	 * this lwb's write to complete prior to the previous lwb's write
	 * completing; and thus, the vdevs for the previous lwb would be
	 * flushed prior to that lwb's data being written to those vdevs (the
	 * vdevs are flushed in the lwb write zio's completion handler,
	 * zil_lwb_write_done()).
	 */
	if (prev_lwb->lwb_state == LWB_STATE_ISSUED) {
		ASSERT3P(prev_lwb->lwb_write_zio, !=, NULL);
		zio_add_child(lwb->lwb_write_zio, prev_lwb->lwb_write_zio);
	} else {
		ASSERT3S(prev_lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
	}

	ASSERT3P(prev_lwb->lwb_root_zio, !=, NULL);
	zio_add_child(lwb->lwb_root_zio, prev_lwb->lwb_root_zio);
}

/*
 * This function's purpose is to "open" an lwb such that it is ready to
 * accept new itxs being committed to it. This function is idempotent; if
 * the passed in lwb has already been opened, it is essentially a no-op.
 */
static void
zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
{
	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));

	if (lwb->lwb_state != LWB_STATE_NEW) {
		ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
		return;
	}

	mutex_enter(&zilog->zl_lock);
	lwb->lwb_state = LWB_STATE_OPENED;
	zilog->zl_last_lwb_opened = lwb;
	mutex_exit(&zilog->zl_lock);
}

/*
 * Maximum block size used by the ZIL. This is picked up when the ZIL is
 * initialized. Otherwise this should not be used directly; see
 * zl_max_block_size instead.
 */
static uint_t zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;

/*
 * Plan splitting of the provided burst size between several blocks.
 */
static uint_t
zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize)
{
	uint_t md = zilog->zl_max_block_size - sizeof (zil_chain_t);

	if (size <= md) {
		/*
		 * Small bursts are written as-is in one block.
		 */
		*minsize = size;
		return (size);
	} else if (size > 8 * md) {
		/*
		 * Big bursts use maximum blocks. The first block size
		 * is hard to predict, but it does not really matter.
		 */
		*minsize = 0;
		return (md);
	}

	/*
	 * Medium bursts try to divide evenly to better utilize several SLOG
	 * VDEVs. We predict the first block size assuming the worst case of
	 * maxing out the others. Fall back to using maximum blocks if, due
	 * to large records or wasted space, we cannot predict anything
	 * better.
	 */
	uint_t s = size;
	uint_t n = DIV_ROUND_UP(s, md - sizeof (lr_write_t));
	uint_t chunk = DIV_ROUND_UP(s, n);
	uint_t waste = zil_max_waste_space(zilog);
	waste = MAX(waste, zilog->zl_cur_max);
	if (chunk <= md - waste) {
		*minsize = MAX(s - (md - waste) * (n - 1), waste);
		return (chunk);
	} else {
		*minsize = 0;
		return (md);
	}
}

/*
 * Try to predict the next block size based on previous history. Make the
 * prediction sufficient for 7 of 8 previous bursts. Don't try to save if
 * the saving is less than 50%; extra writes may cost more, but we don't
 * want a single spike to badly affect our predictions.
 */
static uint_t
zil_lwb_predict(zilog_t *zilog)
{
	uint_t m, o;

	/*
	 * If we are in the middle of a burst, take it into account also.
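	 *
	 * As a rough worked example of the selection below: suppose the
	 * recorded minimal first block sizes zl_prev_min[] are
	 * {4K, 4K, 8K, 8K, 8K, 16K, 16K, 128K} and the optimal-size floor
	 * is no larger than 16K. Then m1 = 128K and m2 = 16K; since
	 * 128K >= 2 * 16K, we return 16K, which is sufficient for 7 of
	 * the 8 bursts. If instead the history were {96K x 7, 128K}, then
	 * m1 = 128K < 2 * m2 = 192K, so we return 128K, as the saving
	 * would be under 50%.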
*/
1804 if (zilog->zl_cur_size > 0) {
1805 o = zil_lwb_plan(zilog, zilog->zl_cur_size, &m);
1806 } else {
1807 o = UINT_MAX;
1808 m = 0;
1809 }
1810
1811 /* Find minimum optimal size. We don't need to go below that. */
1812 for (int i = 0; i < ZIL_BURSTS; i++)
1813 o = MIN(o, zilog->zl_prev_opt[i]);
1814
1815 /* Find the two biggest minimal first block sizes above the optimal. */
1816 uint_t m1 = MAX(m, o), m2 = o;
1817 for (int i = 0; i < ZIL_BURSTS; i++) {
1818 m = zilog->zl_prev_min[i];
1819 if (m >= m1) {
1820 m2 = m1;
1821 m1 = m;
1822 } else if (m > m2) {
1823 m2 = m;
1824 }
1825 }
1826
1827 /*
1828 * If the second minimum size gives a 50% saving -- use it. It may cost
1829 * us one additional write later, but the space saving is just too big.
1830 */
1831 return ((m1 < m2 * 2) ? m1 : m2);
1832 }
1833
1834 /*
1835 * Close the log block in preparation for issuing and allocate the next one.
1836 * Has to be called under zl_issuer_lock to chain more lwbs.
1837 */
1838 static lwb_t *
1839 zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state)
1840 {
1841 uint64_t blksz, plan, plan2;
1842
1843 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
1844 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
1845 lwb->lwb_state = LWB_STATE_CLOSED;
1846
1847 /*
1848 * If there was an allocation failure, then the NULL returned here will
1849 * trigger zil_commit_writer_stall() in the caller. This is inherently
1850 * racy, since the allocation may not have happened yet.
1851 */
1852 if (lwb->lwb_error != 0)
1853 return (NULL);
1854
1855 /*
1856 * Log blocks are pre-allocated. Here we select the size of the next
1857 * block, based on what's left of this burst and the previous history.
1858 * While we try to write only the used part of the block, we can't
1859 * just always allocate the maximum block size because we can exhaust
1860 * all available pool log space, so we try to be reasonable.
1861 */
1862 if (zilog->zl_cur_left > 0) {
1863 /*
1864 * We are in the middle of a burst and know how much is left.
1865 * But if the workload is multi-threaded there may be more soon.
1866 * Try to predict what it can be and plan for the worst case.
1867 */
1868 uint_t m;
1869 plan = zil_lwb_plan(zilog, zilog->zl_cur_left, &m);
1870 if (zilog->zl_parallel) {
1871 plan2 = zil_lwb_plan(zilog, zilog->zl_cur_left +
1872 zil_lwb_predict(zilog), &m);
1873 if (plan < plan2)
1874 plan = plan2;
1875 }
1876 } else {
1877 /*
1878 * The previous burst is done and we can only predict what
1879 * will come next.
1880 */
1881 plan = zil_lwb_predict(zilog);
1882 }
1883 blksz = plan + sizeof (zil_chain_t);
1884 blksz = P2ROUNDUP_TYPED(blksz, ZIL_MIN_BLKSZ, uint64_t);
1885 blksz = MIN(blksz, zilog->zl_max_block_size);
1886 DTRACE_PROBE3(zil__block__size, zilog_t *, zilog, uint64_t, blksz,
1887 uint64_t, plan);
1888
1889 return (zil_alloc_lwb(zilog, blksz, NULL, 0, 0, state));
1890 }
1891
1892 /*
1893 * Finalize previously closed block and issue the write zio.
1894 */
1895 static void
1896 zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
1897 {
1898 spa_t *spa = zilog->zl_spa;
1899 zil_chain_t *zilc;
1900 boolean_t slog;
1901 zbookmark_phys_t zb;
1902 zio_priority_t prio;
1903 int error;
1904
1905 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED);
1906
1907 /* Actually fill the lwb with the data.
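 * Space in the lwb was only reserved by zil_lwb_assign(); it is
 * zil_lwb_commit() below that copies each record in, advancing
 * lwb_nfilled. lwb_nused is then trimmed to what was actually
 * filled, since some records may end up smaller than reserved
 * (e.g. when zl_get_data() fails and we fall back to
 * txg_wait_synced()).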
*/
1908 for (itx_t *itx = list_head(&lwb->lwb_itxs); itx;
1909 itx = list_next(&lwb->lwb_itxs, itx))
1910 zil_lwb_commit(zilog, lwb, itx);
1911 lwb->lwb_nused = lwb->lwb_nfilled;
1912 ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_nmax);
1913
1914 lwb->lwb_root_zio = zio_root(spa, zil_lwb_flush_vdevs_done, lwb,
1915 ZIO_FLAG_CANFAIL);
1916
1917 /*
1918 * The lwb is now ready to be issued, but it can be issued only if it
1919 * has already had its block pointer allocated or the allocation has
1920 * failed. Otherwise leave it as-is, relying on some other thread to
1921 * issue it after allocating its block pointer via calling
1922 * zil_lwb_write_issue() for the previous lwb(s) in the chain.
1923 */
1924 mutex_enter(&zilog->zl_lock);
1925 lwb->lwb_state = LWB_STATE_READY;
1926 if (BP_IS_HOLE(&lwb->lwb_blk) && lwb->lwb_error == 0) {
1927 mutex_exit(&zilog->zl_lock);
1928 return;
1929 }
1930 mutex_exit(&zilog->zl_lock);
1931
1932 next_lwb:
1933 if (lwb->lwb_slim)
1934 zilc = (zil_chain_t *)lwb->lwb_buf;
1935 else
1936 zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_nmax);
1937 int wsz = lwb->lwb_sz;
1938 if (lwb->lwb_error == 0) {
1939 abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, lwb->lwb_sz);
1940 if (!lwb->lwb_slog || zilog->zl_cur_size <= zil_slog_bulk)
1941 prio = ZIO_PRIORITY_SYNC_WRITE;
1942 else
1943 prio = ZIO_PRIORITY_ASYNC_WRITE;
1944 SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
1945 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
1946 lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
1947 lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, spa, 0,
1948 &lwb->lwb_blk, lwb_abd, lwb->lwb_sz, zil_lwb_write_done,
1949 lwb, prio, ZIO_FLAG_CANFAIL, &zb);
1950 zil_lwb_add_block(lwb, &lwb->lwb_blk);
1951
1952 if (lwb->lwb_slim) {
1953 /* For Slim ZIL only write what is used. */
1954 wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ,
1955 int);
1956 ASSERT3S(wsz, <=, lwb->lwb_sz);
1957 zio_shrink(lwb->lwb_write_zio, wsz);
1958 wsz = lwb->lwb_write_zio->io_size;
1959 }
1960 memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused);
1961 zilc->zc_pad = 0;
1962 zilc->zc_nused = lwb->lwb_nused;
1963 zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
1964 } else {
1965 /*
1966 * We can't write the lwb if there was an allocation failure,
1967 * so create a null zio instead just to maintain dependencies.
1968 */
1969 lwb->lwb_write_zio = zio_null(lwb->lwb_root_zio, spa, NULL,
1970 zil_lwb_write_done, lwb, ZIO_FLAG_CANFAIL);
1971 lwb->lwb_write_zio->io_error = lwb->lwb_error;
1972 }
1973 if (lwb->lwb_child_zio)
1974 zio_add_child(lwb->lwb_write_zio, lwb->lwb_child_zio);
1975
1976 /*
1977 * Open transaction to allocate the next block pointer.
1978 */
1979 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
1980 VERIFY0(dmu_tx_assign(tx,
1981 DMU_TX_WAIT | DMU_TX_NOTHROTTLE | DMU_TX_SUSPEND));
1982 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1983 uint64_t txg = dmu_tx_get_txg(tx);
1984
1985 /*
1986 * Allocate the next block pointer unless we are already in error.
1987 */
1988 lwb_t *nlwb = list_next(&zilog->zl_lwb_list, lwb);
1989 blkptr_t *bp = &zilc->zc_next_blk;
1990 BP_ZERO(bp);
1991 error = lwb->lwb_error;
1992 if (error == 0) {
1993 error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, nlwb->lwb_sz,
1994 &slog);
1995 }
1996 if (error == 0) {
1997 ASSERT3U(BP_GET_LOGICAL_BIRTH(bp), ==, txg);
1998 BP_SET_CHECKSUM(bp, nlwb->lwb_slim ? ZIO_CHECKSUM_ZILOG2 :
1999 ZIO_CHECKSUM_ZILOG);
2000 bp->blk_cksum = lwb->lwb_blk.blk_cksum;
2001 bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
2002 }
2003
2004 /*
2005 * Reduce TXG open time by incrementing inflight counter and committing
2006 * the transaction. zil_sync() will wait for it to return to zero.
2007 */
2008 mutex_enter(&zilog->zl_lwb_io_lock);
2009 lwb->lwb_issued_txg = txg;
2010 zilog->zl_lwb_inflight[txg & TXG_MASK]++;
2011 zilog->zl_lwb_max_issued_txg = MAX(txg, zilog->zl_lwb_max_issued_txg);
2012 mutex_exit(&zilog->zl_lwb_io_lock);
2013 dmu_tx_commit(tx);
2014
2015 spa_config_enter(spa, SCL_STATE, lwb, RW_READER);
2016
2017 /*
2018 * We've completed all potentially blocking operations. Update the
2019 * nlwb and allow it to proceed without possible lock order reversals.
2020 */
2021 mutex_enter(&zilog->zl_lock);
2022 zil_lwb_set_zio_dependency(zilog, lwb);
2023 lwb->lwb_state = LWB_STATE_ISSUED;
2024
2025 if (nlwb) {
2026 nlwb->lwb_blk = *bp;
2027 nlwb->lwb_error = error;
2028 nlwb->lwb_slog = slog;
2029 nlwb->lwb_alloc_txg = txg;
2030 if (nlwb->lwb_state != LWB_STATE_READY)
2031 nlwb = NULL;
2032 }
2033 mutex_exit(&zilog->zl_lock);
2034
2035 if (lwb->lwb_slog) {
2036 ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count);
2037 ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes,
2038 lwb->lwb_nused);
2039 ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_write,
2040 wsz);
2041 ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_alloc,
2042 BP_GET_LSIZE(&lwb->lwb_blk));
2043 } else {
2044 ZIL_STAT_BUMP(zilog, zil_itx_metaslab_normal_count);
2045 ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_bytes,
2046 lwb->lwb_nused);
2047 ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_write,
2048 wsz);
2049 ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_alloc,
2050 BP_GET_LSIZE(&lwb->lwb_blk));
2051 }
2052 lwb->lwb_issued_timestamp = gethrtime();
2053 if (lwb->lwb_child_zio)
2054 zio_nowait(lwb->lwb_child_zio);
2055 zio_nowait(lwb->lwb_write_zio);
2056 zio_nowait(lwb->lwb_root_zio);
2057
2058 /*
2059 * If nlwb was ready when we gave it the block pointer,
2060 * it is on us to issue it and possibly the following ones.
2061 */
2062 lwb = nlwb;
2063 if (lwb)
2064 goto next_lwb;
2065 }
2066
2067 /*
2068 * Maximum amount of data that can be put into a single log block.
2069 */
2070 uint64_t
2071 zil_max_log_data(zilog_t *zilog, size_t hdrsize)
2072 {
2073 return (zilog->zl_max_block_size - sizeof (zil_chain_t) - hdrsize);
2074 }
2075
2076 /*
2077 * Maximum amount of log space we agree to waste to reduce the number of
2078 * WR_NEED_COPY chunks and thereby reduce zl_get_data() overhead (~6%).
2079 */
2080 static inline uint64_t
2081 zil_max_waste_space(zilog_t *zilog)
2082 {
2083 return (zil_max_log_data(zilog, sizeof (lr_write_t)) / 16);
2084 }
2085
2086 /*
2087 * Maximum amount of write data for WR_COPIED. For correctness, consumers
2088 * must fall back to WR_NEED_COPY if we can't fit the entire record into one
2089 * maximum sized log block, because each WR_COPIED record must fit in a
2090 * single log block. Below that it is a tradeoff between an additional memory
2091 * copy and possibly worse log space efficiency vs. an additional range lock/unlock.
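 *
 * For illustration (the decision itself is made by the consumers,
 * e.g. via zil_max_copied_data()): with the default zil_maxcopied
 * of 7680 bytes, a 4K synchronous write may be stored as WR_COPIED,
 * while a 64K one must use WR_NEED_COPY or WR_INDIRECT instead.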
2092 */ 2093 static uint_t zil_maxcopied = 7680; 2094 2095 uint64_t 2096 zil_max_copied_data(zilog_t *zilog) 2097 { 2098 uint64_t max_data = zil_max_log_data(zilog, sizeof (lr_write_t)); 2099 return (MIN(max_data, zil_maxcopied)); 2100 } 2101 2102 static uint64_t 2103 zil_itx_record_size(itx_t *itx) 2104 { 2105 lr_t *lr = &itx->itx_lr; 2106 2107 if (lr->lrc_txtype == TX_COMMIT) 2108 return (0); 2109 ASSERT3U(lr->lrc_reclen, >=, sizeof (lr_t)); 2110 return (lr->lrc_reclen); 2111 } 2112 2113 static uint64_t 2114 zil_itx_data_size(itx_t *itx) 2115 { 2116 lr_t *lr = &itx->itx_lr; 2117 lr_write_t *lrw = (lr_write_t *)lr; 2118 2119 if (lr->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) { 2120 ASSERT3U(lr->lrc_reclen, ==, sizeof (lr_write_t)); 2121 return (P2ROUNDUP_TYPED(lrw->lr_length, sizeof (uint64_t), 2122 uint64_t)); 2123 } 2124 return (0); 2125 } 2126 2127 static uint64_t 2128 zil_itx_full_size(itx_t *itx) 2129 { 2130 lr_t *lr = &itx->itx_lr; 2131 2132 if (lr->lrc_txtype == TX_COMMIT) 2133 return (0); 2134 ASSERT3U(lr->lrc_reclen, >=, sizeof (lr_t)); 2135 return (lr->lrc_reclen + zil_itx_data_size(itx)); 2136 } 2137 2138 /* 2139 * Estimate space needed in the lwb for the itx. Allocate more lwbs or 2140 * split the itx as needed, but don't touch the actual transaction data. 2141 * Has to be called under zl_issuer_lock to call zil_lwb_write_close() 2142 * to chain more lwbs. 2143 */ 2144 static lwb_t * 2145 zil_lwb_assign(zilog_t *zilog, lwb_t *lwb, itx_t *itx, list_t *ilwbs) 2146 { 2147 itx_t *citx; 2148 lr_t *lr, *clr; 2149 lr_write_t *lrw; 2150 uint64_t dlen, dnow, lwb_sp, reclen, max_log_data; 2151 2152 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2153 ASSERT3P(lwb, !=, NULL); 2154 ASSERT3P(lwb->lwb_buf, !=, NULL); 2155 2156 zil_lwb_write_open(zilog, lwb); 2157 2158 lr = &itx->itx_lr; 2159 lrw = (lr_write_t *)lr; 2160 2161 /* 2162 * A commit itx doesn't represent any on-disk state; instead 2163 * it's simply used as a place holder on the commit list, and 2164 * provides a mechanism for attaching a "commit waiter" onto the 2165 * correct lwb (such that the waiter can be signalled upon 2166 * completion of that lwb). Thus, we don't process this itx's 2167 * log record if it's a commit itx (these itx's don't have log 2168 * records), and instead link the itx's waiter onto the lwb's 2169 * list of waiters. 2170 * 2171 * For more details, see the comment above zil_commit(). 2172 */ 2173 if (lr->lrc_txtype == TX_COMMIT) { 2174 zil_commit_waiter_link_lwb(itx->itx_private, lwb); 2175 list_insert_tail(&lwb->lwb_itxs, itx); 2176 return (lwb); 2177 } 2178 2179 reclen = lr->lrc_reclen; 2180 ASSERT3U(reclen, >=, sizeof (lr_t)); 2181 ASSERT3U(reclen, <=, zil_max_log_data(zilog, 0)); 2182 dlen = zil_itx_data_size(itx); 2183 2184 cont: 2185 /* 2186 * If this record won't fit in the current log block, start a new one. 2187 * For WR_NEED_COPY optimize layout for minimal number of chunks. 2188 */ 2189 lwb_sp = lwb->lwb_nmax - lwb->lwb_nused; 2190 max_log_data = zil_max_log_data(zilog, sizeof (lr_write_t)); 2191 if (reclen > lwb_sp || (reclen + dlen > lwb_sp && 2192 lwb_sp < zil_max_waste_space(zilog) && 2193 (dlen % max_log_data == 0 || 2194 lwb_sp < reclen + dlen % max_log_data))) { 2195 list_insert_tail(ilwbs, lwb); 2196 lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_OPENED); 2197 if (lwb == NULL) 2198 return (NULL); 2199 lwb_sp = lwb->lwb_nmax - lwb->lwb_nused; 2200 } 2201 2202 /* 2203 * There must be enough space in the log block to hold reclen. 
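 * (The block-switch check above guarantees this; it is also
 * asserted just below.)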
2204 * For WR_COPIED, we need to fit the whole record in one block, 2205 * and reclen is the write record header size + the data size. 2206 * For WR_NEED_COPY, we can create multiple records, splitting 2207 * the data into multiple blocks, so we only need to fit one 2208 * word of data per block; in this case reclen is just the header 2209 * size (no data). 2210 */ 2211 ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp); 2212 2213 dnow = MIN(dlen, lwb_sp - reclen); 2214 if (dlen > dnow) { 2215 ASSERT3U(lr->lrc_txtype, ==, TX_WRITE); 2216 ASSERT3U(itx->itx_wr_state, ==, WR_NEED_COPY); 2217 citx = zil_itx_clone(itx); 2218 clr = &citx->itx_lr; 2219 lr_write_t *clrw = (lr_write_t *)clr; 2220 clrw->lr_length = dnow; 2221 lrw->lr_offset += dnow; 2222 lrw->lr_length -= dnow; 2223 zilog->zl_cur_left -= dnow; 2224 } else { 2225 citx = itx; 2226 clr = lr; 2227 } 2228 2229 /* 2230 * We're actually making an entry, so update lrc_seq to be the 2231 * log record sequence number. Note that this is generally not 2232 * equal to the itx sequence number because not all transactions 2233 * are synchronous, and sometimes spa_sync() gets there first. 2234 */ 2235 clr->lrc_seq = ++zilog->zl_lr_seq; 2236 2237 lwb->lwb_nused += reclen + dnow; 2238 ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_nmax); 2239 ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t))); 2240 2241 zil_lwb_add_txg(lwb, lr->lrc_txg); 2242 list_insert_tail(&lwb->lwb_itxs, citx); 2243 2244 dlen -= dnow; 2245 if (dlen > 0) 2246 goto cont; 2247 2248 if (lr->lrc_txtype == TX_WRITE && 2249 lr->lrc_txg > spa_freeze_txg(zilog->zl_spa)) 2250 txg_wait_synced(zilog->zl_dmu_pool, lr->lrc_txg); 2251 2252 return (lwb); 2253 } 2254 2255 /* 2256 * Fill the actual transaction data into the lwb, following zil_lwb_assign(). 2257 * Does not require locking. 2258 */ 2259 static void 2260 zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx) 2261 { 2262 lr_t *lr, *lrb; 2263 lr_write_t *lrw, *lrwb; 2264 char *lr_buf; 2265 uint64_t dlen, reclen; 2266 2267 lr = &itx->itx_lr; 2268 lrw = (lr_write_t *)lr; 2269 2270 if (lr->lrc_txtype == TX_COMMIT) 2271 return; 2272 2273 reclen = lr->lrc_reclen; 2274 dlen = zil_itx_data_size(itx); 2275 ASSERT3U(reclen + dlen, <=, lwb->lwb_nused - lwb->lwb_nfilled); 2276 2277 lr_buf = lwb->lwb_buf + lwb->lwb_nfilled; 2278 memcpy(lr_buf, lr, reclen); 2279 lrb = (lr_t *)lr_buf; /* Like lr, but inside lwb. */ 2280 lrwb = (lr_write_t *)lrb; /* Like lrw, but inside lwb. */ 2281 2282 ZIL_STAT_BUMP(zilog, zil_itx_count); 2283 2284 /* 2285 * If it's a write, fetch the data or get its blkptr as appropriate. 
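 * Descriptive summary of the write states handled below:
 *  - WR_COPIED: the data was already copied into the itx's record,
 *    and so landed in the lwb with the memcpy() above; only the
 *    stats need updating.
 *  - WR_NEED_COPY: zl_get_data() copies the data into the lwb
 *    buffer right after the record header (dbuf).
 *  - WR_INDIRECT: the data is written separately; zl_get_data()
 *    fills in the record's block pointer, with lwb_child_zio as
 *    the parent of any zios it may issue.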
2286 */
2287 if (lr->lrc_txtype == TX_WRITE) {
2288 if (itx->itx_wr_state == WR_COPIED) {
2289 ZIL_STAT_BUMP(zilog, zil_itx_copied_count);
2290 ZIL_STAT_INCR(zilog, zil_itx_copied_bytes,
2291 lrw->lr_length);
2292 } else {
2293 char *dbuf;
2294 int error;
2295
2296 if (itx->itx_wr_state == WR_NEED_COPY) {
2297 dbuf = lr_buf + reclen;
2298 lrb->lrc_reclen += dlen;
2299 ZIL_STAT_BUMP(zilog, zil_itx_needcopy_count);
2300 ZIL_STAT_INCR(zilog, zil_itx_needcopy_bytes,
2301 dlen);
2302 } else {
2303 ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT);
2304 dbuf = NULL;
2305 ZIL_STAT_BUMP(zilog, zil_itx_indirect_count);
2306 ZIL_STAT_INCR(zilog, zil_itx_indirect_bytes,
2307 lrw->lr_length);
2308 if (lwb->lwb_child_zio == NULL) {
2309 lwb->lwb_child_zio = zio_null(NULL,
2310 zilog->zl_spa, NULL, NULL, NULL,
2311 ZIO_FLAG_CANFAIL);
2312 }
2313 }
2314
2315 /*
2316 * The "lwb_child_zio" we pass in will become a child of
2317 * "lwb_write_zio" when one is created, and will itself
2318 * be a parent of any zios created by "zl_get_data".
2319 * This way "lwb_write_zio" will first wait for the
2320 * children block pointers before its own write, and then
2321 * for their write completion before the vdev cache flushing.
2322 */
2323 error = zilog->zl_get_data(itx->itx_private,
2324 itx->itx_gen, lrwb, dbuf, lwb,
2325 lwb->lwb_child_zio);
2326 if (dbuf != NULL && error == 0) {
2327 /* Zero any padding bytes in the last block. */
2328 memset((char *)dbuf + lrwb->lr_length, 0,
2329 dlen - lrwb->lr_length);
2330 }
2331
2332 /*
2333 * Typically, the only return values we should see from
2334 * ->zl_get_data() are 0, EIO, ENOENT, EEXIST or
2335 * EALREADY. However, it is also possible to see other
2336 * error values such as ENOSPC or EINVAL from
2337 * dmu_read() -> dnode_hold() -> dnode_hold_impl() or
2338 * ENXIO as well as a multitude of others from the
2339 * block layer through dmu_buf_hold() -> dbuf_read()
2340 * -> zio_wait(), as well as through dmu_read() ->
2341 * dnode_hold() -> dnode_hold_impl() -> dbuf_read() ->
2342 * zio_wait(). When these errors happen, we can assume
2343 * that neither an immediate write nor an indirect
2344 * write occurred, so we need to fall back to
2345 * txg_wait_synced(). This is unusual, so we print to
2346 * dmesg whenever one of these errors occurs.
2347 */
2348 switch (error) {
2349 case 0:
2350 break;
2351 default:
2352 cmn_err(CE_WARN, "zil_lwb_commit() received "
2353 "unexpected error %d from ->zl_get_data()"
2354 ".
Falling back to txg_wait_synced().", 2355 error); 2356 zfs_fallthrough; 2357 case EIO: 2358 txg_wait_synced(zilog->zl_dmu_pool, 2359 lr->lrc_txg); 2360 zfs_fallthrough; 2361 case ENOENT: 2362 zfs_fallthrough; 2363 case EEXIST: 2364 zfs_fallthrough; 2365 case EALREADY: 2366 return; 2367 } 2368 } 2369 } 2370 2371 lwb->lwb_nfilled += reclen + dlen; 2372 ASSERT3S(lwb->lwb_nfilled, <=, lwb->lwb_nused); 2373 ASSERT0(P2PHASE(lwb->lwb_nfilled, sizeof (uint64_t))); 2374 } 2375 2376 itx_t * 2377 zil_itx_create(uint64_t txtype, size_t olrsize) 2378 { 2379 size_t itxsize, lrsize; 2380 itx_t *itx; 2381 2382 ASSERT3U(olrsize, >=, sizeof (lr_t)); 2383 lrsize = P2ROUNDUP_TYPED(olrsize, sizeof (uint64_t), size_t); 2384 ASSERT3U(lrsize, >=, olrsize); 2385 itxsize = offsetof(itx_t, itx_lr) + lrsize; 2386 2387 itx = zio_data_buf_alloc(itxsize); 2388 itx->itx_lr.lrc_txtype = txtype; 2389 itx->itx_lr.lrc_reclen = lrsize; 2390 itx->itx_lr.lrc_seq = 0; /* defensive */ 2391 memset((char *)&itx->itx_lr + olrsize, 0, lrsize - olrsize); 2392 itx->itx_sync = B_TRUE; /* default is synchronous */ 2393 itx->itx_callback = NULL; 2394 itx->itx_callback_data = NULL; 2395 itx->itx_size = itxsize; 2396 2397 return (itx); 2398 } 2399 2400 static itx_t * 2401 zil_itx_clone(itx_t *oitx) 2402 { 2403 ASSERT3U(oitx->itx_size, >=, sizeof (itx_t)); 2404 ASSERT3U(oitx->itx_size, ==, 2405 offsetof(itx_t, itx_lr) + oitx->itx_lr.lrc_reclen); 2406 2407 itx_t *itx = zio_data_buf_alloc(oitx->itx_size); 2408 memcpy(itx, oitx, oitx->itx_size); 2409 itx->itx_callback = NULL; 2410 itx->itx_callback_data = NULL; 2411 return (itx); 2412 } 2413 2414 void 2415 zil_itx_destroy(itx_t *itx) 2416 { 2417 ASSERT3U(itx->itx_size, >=, sizeof (itx_t)); 2418 ASSERT3U(itx->itx_lr.lrc_reclen, ==, 2419 itx->itx_size - offsetof(itx_t, itx_lr)); 2420 IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL); 2421 IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); 2422 2423 if (itx->itx_callback != NULL) 2424 itx->itx_callback(itx->itx_callback_data); 2425 2426 zio_data_buf_free(itx, itx->itx_size); 2427 } 2428 2429 /* 2430 * Free up the sync and async itxs. The itxs_t has already been detached 2431 * so no locks are needed. 2432 */ 2433 static void 2434 zil_itxg_clean(void *arg) 2435 { 2436 itx_t *itx; 2437 list_t *list; 2438 avl_tree_t *t; 2439 void *cookie; 2440 itxs_t *itxs = arg; 2441 itx_async_node_t *ian; 2442 2443 list = &itxs->i_sync_list; 2444 while ((itx = list_remove_head(list)) != NULL) { 2445 /* 2446 * In the general case, commit itxs will not be found 2447 * here, as they'll be committed to an lwb via 2448 * zil_lwb_assign(), and free'd in that function. Having 2449 * said that, it is still possible for commit itxs to be 2450 * found here, due to the following race: 2451 * 2452 * - a thread calls zil_commit() which assigns the 2453 * commit itx to a per-txg i_sync_list 2454 * - zil_itxg_clean() is called (e.g. via spa_sync()) 2455 * while the waiter is still on the i_sync_list 2456 * 2457 * There's nothing to prevent syncing the txg while the 2458 * waiter is on the i_sync_list. This normally doesn't 2459 * happen because spa_sync() is slower than zil_commit(), 2460 * but if zil_commit() calls txg_wait_synced() (e.g. 2461 * because zil_create() or zil_commit_writer_stall() is 2462 * called) we will hit this case. 
2463 */ 2464 if (itx->itx_lr.lrc_txtype == TX_COMMIT) 2465 zil_commit_waiter_skip(itx->itx_private); 2466 2467 zil_itx_destroy(itx); 2468 } 2469 2470 cookie = NULL; 2471 t = &itxs->i_async_tree; 2472 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 2473 list = &ian->ia_list; 2474 while ((itx = list_remove_head(list)) != NULL) { 2475 /* commit itxs should never be on the async lists. */ 2476 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 2477 zil_itx_destroy(itx); 2478 } 2479 list_destroy(list); 2480 kmem_free(ian, sizeof (itx_async_node_t)); 2481 } 2482 avl_destroy(t); 2483 2484 kmem_free(itxs, sizeof (itxs_t)); 2485 } 2486 2487 static int 2488 zil_aitx_compare(const void *x1, const void *x2) 2489 { 2490 const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid; 2491 const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid; 2492 2493 return (TREE_CMP(o1, o2)); 2494 } 2495 2496 /* 2497 * Remove all async itx with the given oid. 2498 */ 2499 void 2500 zil_remove_async(zilog_t *zilog, uint64_t oid) 2501 { 2502 uint64_t otxg, txg; 2503 itx_async_node_t *ian, ian_search; 2504 avl_tree_t *t; 2505 avl_index_t where; 2506 list_t clean_list; 2507 itx_t *itx; 2508 2509 ASSERT(oid != 0); 2510 list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); 2511 2512 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 2513 otxg = ZILTEST_TXG; 2514 else 2515 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 2516 2517 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 2518 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2519 2520 mutex_enter(&itxg->itxg_lock); 2521 if (itxg->itxg_txg != txg) { 2522 mutex_exit(&itxg->itxg_lock); 2523 continue; 2524 } 2525 2526 /* 2527 * Locate the object node and append its list. 2528 */ 2529 t = &itxg->itxg_itxs->i_async_tree; 2530 ian_search.ia_foid = oid; 2531 ian = avl_find(t, &ian_search, &where); 2532 if (ian != NULL) 2533 list_move_tail(&clean_list, &ian->ia_list); 2534 mutex_exit(&itxg->itxg_lock); 2535 } 2536 while ((itx = list_remove_head(&clean_list)) != NULL) { 2537 /* commit itxs should never be on the async lists. */ 2538 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 2539 zil_itx_destroy(itx); 2540 } 2541 list_destroy(&clean_list); 2542 } 2543 2544 void 2545 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) 2546 { 2547 uint64_t txg; 2548 itxg_t *itxg; 2549 itxs_t *itxs, *clean = NULL; 2550 2551 /* 2552 * Ensure the data of a renamed file is committed before the rename. 2553 */ 2554 if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME) 2555 zil_async_to_sync(zilog, itx->itx_oid); 2556 2557 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) 2558 txg = ZILTEST_TXG; 2559 else 2560 txg = dmu_tx_get_txg(tx); 2561 2562 itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2563 mutex_enter(&itxg->itxg_lock); 2564 itxs = itxg->itxg_itxs; 2565 if (itxg->itxg_txg != txg) { 2566 if (itxs != NULL) { 2567 /* 2568 * The zil_clean callback hasn't got around to cleaning 2569 * this itxg. Save the itxs for release below. 2570 * This should be rare. 
2571 */
2572 zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
2573 "txg %llu", (u_longlong_t)itxg->itxg_txg);
2574 clean = itxg->itxg_itxs;
2575 }
2576 itxg->itxg_txg = txg;
2577 itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t),
2578 KM_SLEEP);
2579
2580 list_create(&itxs->i_sync_list, sizeof (itx_t),
2581 offsetof(itx_t, itx_node));
2582 avl_create(&itxs->i_async_tree, zil_aitx_compare,
2583 sizeof (itx_async_node_t),
2584 offsetof(itx_async_node_t, ia_node));
2585 }
2586 if (itx->itx_sync) {
2587 list_insert_tail(&itxs->i_sync_list, itx);
2588 } else {
2589 avl_tree_t *t = &itxs->i_async_tree;
2590 uint64_t foid =
2591 LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid);
2592 itx_async_node_t *ian;
2593 avl_index_t where;
2594
2595 ian = avl_find(t, &foid, &where);
2596 if (ian == NULL) {
2597 ian = kmem_alloc(sizeof (itx_async_node_t),
2598 KM_SLEEP);
2599 list_create(&ian->ia_list, sizeof (itx_t),
2600 offsetof(itx_t, itx_node));
2601 ian->ia_foid = foid;
2602 avl_insert(t, ian, where);
2603 }
2604 list_insert_tail(&ian->ia_list, itx);
2605 }
2606
2607 itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
2608
2609 /*
2610 * We don't want to dirty the ZIL using ZILTEST_TXG, because
2611 * zil_clean() will never be called using ZILTEST_TXG. Thus, we
2612 * need to be careful to always dirty the ZIL using the "real"
2613 * TXG (not itxg_txg) even when the SPA is frozen.
2614 */
2615 zilog_dirty(zilog, dmu_tx_get_txg(tx));
2616 mutex_exit(&itxg->itxg_lock);
2617
2618 /* Release the old itxs now that we've dropped the lock. */
2619 if (clean != NULL)
2620 zil_itxg_clean(clean);
2621 }
2622
2623 /*
2624 * If there are any in-memory intent log transactions which have now been
2625 * synced then start up a taskq to free them. We should only do this after we
2626 * have written out the uberblocks (i.e. txg has been committed) so that we
2627 * don't inadvertently clean out in-memory log records that would be required
2628 * by zil_commit().
2629 */
2630 void
2631 zil_clean(zilog_t *zilog, uint64_t synced_txg)
2632 {
2633 itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
2634 itxs_t *clean_me;
2635
2636 ASSERT3U(synced_txg, <, ZILTEST_TXG);
2637
2638 mutex_enter(&itxg->itxg_lock);
2639 if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
2640 mutex_exit(&itxg->itxg_lock);
2641 return;
2642 }
2643 ASSERT3U(itxg->itxg_txg, <=, synced_txg);
2644 ASSERT3U(itxg->itxg_txg, !=, 0);
2645 clean_me = itxg->itxg_itxs;
2646 itxg->itxg_itxs = NULL;
2647 itxg->itxg_txg = 0;
2648 mutex_exit(&itxg->itxg_lock);
2649 /*
2650 * Preferably start a task queue to free up the old itxs, but if
2651 * taskq_dispatch can't allocate resources to do that, then free them
2652 * in-line. This should be rare. Note that using TQ_SLEEP created a
2653 * bad performance problem.
2654 */
2655 ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
2656 ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
2657 taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
2658 zil_itxg_clean, clean_me, TQ_NOSLEEP);
2659 if (id == TASKQID_INVALID)
2660 zil_itxg_clean(clean_me);
2661 }
2662
2663 /*
2664 * This function will traverse the queue of itxs that need to be
2665 * committed, and move them onto the ZIL's zl_itx_commit_list.
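 *
 * It also accounts the size of the moved itxs against zl_cur_size,
 * zl_cur_left and zl_cur_max (used for lwb size planning), and
 * returns a txg for the caller to txg_wait_synced() on if the ZIL
 * was found suspended while the itxs were being collected.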
2666 */ 2667 static uint64_t 2668 zil_get_commit_list(zilog_t *zilog) 2669 { 2670 uint64_t otxg, txg, wtxg = 0; 2671 list_t *commit_list = &zilog->zl_itx_commit_list; 2672 2673 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); 2674 2675 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 2676 otxg = ZILTEST_TXG; 2677 else 2678 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 2679 2680 /* 2681 * This is inherently racy, since there is nothing to prevent 2682 * the last synced txg from changing. That's okay since we'll 2683 * only commit things in the future. 2684 */ 2685 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 2686 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2687 2688 mutex_enter(&itxg->itxg_lock); 2689 if (itxg->itxg_txg != txg) { 2690 mutex_exit(&itxg->itxg_lock); 2691 continue; 2692 } 2693 2694 /* 2695 * If we're adding itx records to the zl_itx_commit_list, 2696 * then the zil better be dirty in this "txg". We can assert 2697 * that here since we're holding the itxg_lock which will 2698 * prevent spa_sync from cleaning it. Once we add the itxs 2699 * to the zl_itx_commit_list we must commit it to disk even 2700 * if it's unnecessary (i.e. the txg was synced). 2701 */ 2702 ASSERT(zilog_is_dirty_in_txg(zilog, txg) || 2703 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX); 2704 list_t *sync_list = &itxg->itxg_itxs->i_sync_list; 2705 itx_t *itx = NULL; 2706 if (unlikely(zilog->zl_suspend > 0)) { 2707 /* 2708 * ZIL was just suspended, but we lost the race. 2709 * Allow all earlier itxs to be committed, but ask 2710 * caller to do txg_wait_synced(txg) for any new. 2711 */ 2712 if (!list_is_empty(sync_list)) 2713 wtxg = MAX(wtxg, txg); 2714 } else { 2715 itx = list_head(sync_list); 2716 list_move_tail(commit_list, sync_list); 2717 } 2718 2719 mutex_exit(&itxg->itxg_lock); 2720 2721 while (itx != NULL) { 2722 uint64_t s = zil_itx_full_size(itx); 2723 zilog->zl_cur_size += s; 2724 zilog->zl_cur_left += s; 2725 s = zil_itx_record_size(itx); 2726 zilog->zl_cur_max = MAX(zilog->zl_cur_max, s); 2727 itx = list_next(commit_list, itx); 2728 } 2729 } 2730 return (wtxg); 2731 } 2732 2733 /* 2734 * Move the async itxs for a specified object to commit into sync lists. 2735 */ 2736 void 2737 zil_async_to_sync(zilog_t *zilog, uint64_t foid) 2738 { 2739 uint64_t otxg, txg; 2740 itx_async_node_t *ian, ian_search; 2741 avl_tree_t *t; 2742 avl_index_t where; 2743 2744 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 2745 otxg = ZILTEST_TXG; 2746 else 2747 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 2748 2749 /* 2750 * This is inherently racy, since there is nothing to prevent 2751 * the last synced txg from changing. 2752 */ 2753 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 2754 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 2755 2756 mutex_enter(&itxg->itxg_lock); 2757 if (itxg->itxg_txg != txg) { 2758 mutex_exit(&itxg->itxg_lock); 2759 continue; 2760 } 2761 2762 /* 2763 * If a foid is specified then find that node and append its 2764 * list. Otherwise walk the tree appending all the lists 2765 * to the sync list. We add to the end rather than the 2766 * beginning to ensure the create has happened. 
2767 */
2768 t = &itxg->itxg_itxs->i_async_tree;
2769 if (foid != 0) {
2770 ian_search.ia_foid = foid;
2771 ian = avl_find(t, &ian_search, &where);
2772 if (ian != NULL) {
2773 list_move_tail(&itxg->itxg_itxs->i_sync_list,
2774 &ian->ia_list);
2775 }
2776 } else {
2777 void *cookie = NULL;
2778
2779 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
2780 list_move_tail(&itxg->itxg_itxs->i_sync_list,
2781 &ian->ia_list);
2782 list_destroy(&ian->ia_list);
2783 kmem_free(ian, sizeof (itx_async_node_t));
2784 }
2785 }
2786 mutex_exit(&itxg->itxg_lock);
2787 }
2788 }
2789
2790 /*
2791 * This function will prune commit itxs that are at the head of the
2792 * commit list (it won't prune past the first non-commit itx), and
2793 * either: a) attach them to the last lwb that's still pending
2794 * completion, or b) skip them altogether.
2795 *
2796 * This is used as a performance optimization to prevent commit itxs
2797 * from generating new lwbs when it's unnecessary to do so.
2798 */
2799 static void
2800 zil_prune_commit_list(zilog_t *zilog)
2801 {
2802 itx_t *itx;
2803
2804 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
2805
2806 while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
2807 lr_t *lrc = &itx->itx_lr;
2808 if (lrc->lrc_txtype != TX_COMMIT)
2809 break;
2810
2811 mutex_enter(&zilog->zl_lock);
2812
2813 lwb_t *last_lwb = zilog->zl_last_lwb_opened;
2814 if (last_lwb == NULL ||
2815 last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) {
2816 /*
2817 * All of the itxs this waiter was waiting on
2818 * must have already completed (or there were
2819 * never any itx's for it to wait on), so it's
2820 * safe to skip this waiter and mark it done.
2821 */
2822 zil_commit_waiter_skip(itx->itx_private);
2823 } else {
2824 zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
2825 }
2826
2827 mutex_exit(&zilog->zl_lock);
2828
2829 list_remove(&zilog->zl_itx_commit_list, itx);
2830 zil_itx_destroy(itx);
2831 }
2832
2833 IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
2834 }
2835
2836 static void
2837 zil_commit_writer_stall(zilog_t *zilog)
2838 {
2839 /*
2840 * When zio_alloc_zil() fails to allocate the next lwb block on
2841 * disk, we must call txg_wait_synced() to ensure all of the
2842 * lwbs in the zilog's zl_lwb_list are synced and then freed (in
2843 * zil_sync()), such that any subsequent ZIL writer (i.e. a call
2844 * to zil_process_commit_list()) will have to call zil_create(),
2845 * and start a new ZIL chain.
2846 *
2847 * Since zio_alloc_zil() failed, the lwb that was previously
2848 * issued does not have a pointer to the "next" lwb on disk.
2849 * Thus, if another ZIL writer thread was to allocate the "next"
2850 * on-disk lwb, that block could be leaked in the event of a
2851 * crash (because the previous lwb on-disk would not point to
2852 * it).
2853 *
2854 * We must hold the zilog's zl_issuer_lock while we do this, to
2855 * ensure no new threads enter zil_process_commit_list() until
2856 * all lwb's in the zl_lwb_list have been synced and freed
2857 * (which is achieved via the txg_wait_synced() call).
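 *
 * Once txg_wait_synced() returns, zl_lwb_list must be empty (this
 * is asserted below), so the next writer necessarily starts a
 * fresh ZIL chain via zil_create().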
2858 */
2859 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
2860 ZIL_STAT_BUMP(zilog, zil_commit_stall_count);
2861 txg_wait_synced(zilog->zl_dmu_pool, 0);
2862 ASSERT(list_is_empty(&zilog->zl_lwb_list));
2863 }
2864
2865 static void
2866 zil_burst_done(zilog_t *zilog)
2867 {
2868 if (!list_is_empty(&zilog->zl_itx_commit_list) ||
2869 zilog->zl_cur_size == 0)
2870 return;
2871
2872 if (zilog->zl_parallel)
2873 zilog->zl_parallel--;
2874
2875 uint_t r = (zilog->zl_prev_rotor + 1) & (ZIL_BURSTS - 1);
2876 zilog->zl_prev_rotor = r;
2877 zilog->zl_prev_opt[r] = zil_lwb_plan(zilog, zilog->zl_cur_size,
2878 &zilog->zl_prev_min[r]);
2879
2880 zilog->zl_cur_size = 0;
2881 zilog->zl_cur_max = 0;
2882 zilog->zl_cur_left = 0;
2883 }
2884
2885 /*
2886 * This function will traverse the commit list, creating new lwbs as
2887 * needed, and committing the itxs from the commit list to these newly
2888 * created lwbs. Additionally, as a new lwb is created, the previous
2889 * lwb will be issued to the zio layer to be written to disk.
2890 */
2891 static void
2892 zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
2893 {
2894 spa_t *spa = zilog->zl_spa;
2895 list_t nolwb_itxs;
2896 list_t nolwb_waiters;
2897 lwb_t *lwb, *plwb;
2898 itx_t *itx;
2899
2900 ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
2901
2902 /*
2903 * Return if there's nothing to commit before we dirty the fs by
2904 * calling zil_create().
2905 */
2906 if (list_is_empty(&zilog->zl_itx_commit_list))
2907 return;
2908
2909 list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
2910 list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t),
2911 offsetof(zil_commit_waiter_t, zcw_node));
2912
2913 lwb = list_tail(&zilog->zl_lwb_list);
2914 if (lwb == NULL) {
2915 lwb = zil_create(zilog);
2916 } else {
2917 /*
2918 * Activate SPA_FEATURE_ZILSAXATTR for the cases where ZIL will
2919 * have already been created (zl_lwb_list not empty).
2920 */
2921 zil_commit_activate_saxattr_feature(zilog);
2922 ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
2923 lwb->lwb_state == LWB_STATE_OPENED);
2924
2925 /*
2926 * If the lwb is still opened, it means the workload is really
2927 * multi-threaded and we won the chance of write aggregation.
2928 * If it is not opened yet, but the previous lwb is still not
2929 * flushed, the workload is likely still multi-threaded, but
2930 * there was too much time between the commits to aggregate, so
2931 * we keep trying to aggregate, but with lower expectations.
2932 */
2933 if (lwb->lwb_state == LWB_STATE_OPENED) {
2934 zilog->zl_parallel = ZIL_BURSTS;
2935 } else if ((plwb = list_prev(&zilog->zl_lwb_list, lwb))
2936 != NULL && plwb->lwb_state != LWB_STATE_FLUSH_DONE) {
2937 zilog->zl_parallel = MAX(zilog->zl_parallel,
2938 ZIL_BURSTS / 2);
2939 }
2940 }
2941
2942 while ((itx = list_remove_head(&zilog->zl_itx_commit_list)) != NULL) {
2943 lr_t *lrc = &itx->itx_lr;
2944 uint64_t txg = lrc->lrc_txg;
2945
2946 ASSERT3U(txg, !=, 0);
2947
2948 if (lrc->lrc_txtype == TX_COMMIT) {
2949 DTRACE_PROBE2(zil__process__commit__itx,
2950 zilog_t *, zilog, itx_t *, itx);
2951 } else {
2952 DTRACE_PROBE2(zil__process__normal__itx,
2953 zilog_t *, zilog, itx_t *, itx);
2954 }
2955
2956 boolean_t synced = txg <= spa_last_synced_txg(spa);
2957 boolean_t frozen = txg > spa_freeze_txg(spa);
2958
2959 /*
2960 * If the txg of this itx has already been synced out, then
2961 * we don't need to commit this itx to an lwb. This is
2962 * because the data of this itx will have already been
2963 * written to the main pool. This is inherently racy, and
2964 * it's still ok to commit an itx whose txg has already
2965 * been synced; this will result in a write that's
2966 * unnecessary, but will do no harm.
2967 *
2968 * With that said, we always want to commit TX_COMMIT itxs
2969 * to an lwb, regardless of whether or not that itx's txg
2970 * has been synced out. We do this to ensure any OPENED lwb
2971 * will always have at least one zil_commit_waiter_t linked
2972 * to the lwb.
2973 *
2974 * As a counter-example, if we skipped TX_COMMIT itx's
2975 * whose txg had already been synced, the following
2976 * situation could occur if we happened to be racing with
2977 * spa_sync:
2978 *
2979 * 1. We commit a non-TX_COMMIT itx to an lwb, where the
2980 * itx's txg is 10 and the last synced txg is 9.
2981 * 2. spa_sync finishes syncing out txg 10.
2982 * 3. We move to the next itx in the list, it's a TX_COMMIT
2983 * whose txg is 10, so we skip it rather than committing
2984 * it to the lwb used in (1).
2985 *
2986 * If the itx that is skipped in (3) is the last TX_COMMIT
2987 * itx in the commit list, then it's possible for the lwb
2988 * used in (1) to remain in the OPENED state indefinitely.
2989 *
2990 * To prevent the above scenario from occurring, and to
2991 * ensure that once an lwb is OPENED it will transition to
2992 * ISSUED and eventually DONE, we always commit TX_COMMIT
2993 * itx's to an lwb here, even if that itx's txg has already
2994 * been synced.
2995 *
2996 * Finally, if the pool is frozen, we _always_ commit the
2997 * itx. The point of freezing the pool is to prevent data
2998 * from being written to the main pool via spa_sync, and
2999 * instead rely solely on the ZIL to persistently store the
3000 * data; i.e. when the pool is frozen, the last synced txg
3001 * value can't be trusted.
3002 */
3003 if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) {
3004 if (lwb != NULL) {
3005 lwb = zil_lwb_assign(zilog, lwb, itx, ilwbs);
3006 if (lwb == NULL) {
3007 list_insert_tail(&nolwb_itxs, itx);
3008 } else if ((zcw->zcw_lwb != NULL &&
3009 zcw->zcw_lwb != lwb) || zcw->zcw_done) {
3010 /*
3011 * Our lwb is done; leave the rest of
3012 * the itx list to somebody else who cares.
3013 */
3014 zilog->zl_parallel = ZIL_BURSTS;
3015 zilog->zl_cur_left -=
3016 zil_itx_full_size(itx);
3017 break;
3018 }
3019 } else {
3020 if (lrc->lrc_txtype == TX_COMMIT) {
3021 zil_commit_waiter_link_nolwb(
3022 itx->itx_private, &nolwb_waiters);
3023 }
3024 list_insert_tail(&nolwb_itxs, itx);
3025 }
3026 zilog->zl_cur_left -= zil_itx_full_size(itx);
3027 } else {
3028 ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT);
3029 zilog->zl_cur_left -= zil_itx_full_size(itx);
3030 zil_itx_destroy(itx);
3031 }
3032 }
3033
3034 if (lwb == NULL) {
3035 /*
3036 * This indicates zio_alloc_zil() failed to allocate the
3037 * "next" lwb on-disk. When this happens, we must stall
3038 * the ZIL write pipeline; see the comment within
3039 * zil_commit_writer_stall() for more details.
3040 */
3041 while ((lwb = list_remove_head(ilwbs)) != NULL)
3042 zil_lwb_write_issue(zilog, lwb);
3043 zil_commit_writer_stall(zilog);
3044
3045 /*
3046 * Additionally, we have to signal and mark the "nolwb"
3047 * waiters as "done" here, since without an lwb, we
3048 * can't do this via zil_lwb_flush_vdevs_done() like
3049 * normal.
3050 */
3051 zil_commit_waiter_t *zcw;
3052 while ((zcw = list_remove_head(&nolwb_waiters)) != NULL)
3053 zil_commit_waiter_skip(zcw);
3054
3055 /*
3056 * And finally, we have to destroy the itx's that
3057 * couldn't be committed to an lwb; this will also call
3058 * the itx's callback if one exists for the itx.
3059 */
3060 while ((itx = list_remove_head(&nolwb_itxs)) != NULL)
3061 zil_itx_destroy(itx);
3062 } else {
3063 ASSERT(list_is_empty(&nolwb_waiters));
3064 ASSERT3P(lwb, !=, NULL);
3065 ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
3066 lwb->lwb_state == LWB_STATE_OPENED);
3067
3068 /*
3069 * At this point, the ZIL block pointed at by the "lwb"
3070 * variable is in "new" or "opened" state.
3071 *
3072 * If it's "new", then no itxs have been committed to it, so
3073 * there's no point in issuing its zio (i.e. it's "empty").
3074 *
3075 * If it's "opened", then it contains one or more itxs that
3076 * eventually need to be committed to stable storage. In
3077 * this case we intentionally do not issue the lwb's zio
3078 * to disk yet, and instead rely on one of the following
3079 * two mechanisms for issuing the zio:
3080 *
3081 * 1. Ideally, there will be more ZIL activity occurring on
3082 * the system, such that this function will be immediately
3083 * called again by a different thread and this lwb will be
3084 * closed by zil_lwb_assign(). This way, the lwb will be
3085 * "full" when it is issued to disk, and we'll make use of
3086 * the lwb's size the best we can.
3087 *
3088 * 2. If there isn't sufficient ZIL activity occurring on
3089 * the system, zil_commit_waiter() will close it and issue
3090 * the zio. If this occurs, the lwb is not guaranteed
3091 * to be "full" by the time its zio is issued, which means
3092 * the size of the lwb was "too large" given the amount
3093 * of ZIL activity occurring on the system at that time.
3094 *
3095 * We do this for a couple of reasons:
3096 *
3097 * 1. To try and reduce the number of IOPs needed to
3098 * write the same number of itxs. If an lwb has space
3099 * available in its buffer for more itxs, and more itxs
3100 * will be committed relatively soon (relative to the
3101 * latency of performing a write), then it's beneficial
3102 * to wait for these "next" itxs. This way, more itxs
3103 * can be committed to stable storage with fewer writes.
3104 *
3105 * 2. To try and use the largest lwb block size that the
3106 * incoming rate of itxs can support. Again, this is to
3107 * try and pack as many itxs into as few lwbs as
3108 * possible, without significantly impacting the latency
3109 * of each individual itx.
3110 */
3111 if (lwb->lwb_state == LWB_STATE_OPENED && !zilog->zl_parallel) {
3112 zil_burst_done(zilog);
3113 list_insert_tail(ilwbs, lwb);
3114 lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
3115 if (lwb == NULL) {
3116 while ((lwb = list_remove_head(ilwbs)) != NULL)
3117 zil_lwb_write_issue(zilog, lwb);
3118 zil_commit_writer_stall(zilog);
3119 }
3120 }
3121 }
3122 }
3123
3124 /*
3125 * This function is responsible for ensuring the passed in commit waiter
3126 * (and associated commit itx) is committed to an lwb. If the waiter is
3127 * not already committed to an lwb, all itxs in the zilog's queue of
3128 * itxs will be processed. The assumption is the passed in waiter's
3129 * commit itx will be found in the queue just like the other non-commit
3130 * itxs, such that when the entire queue is processed, the waiter will
3131 * have been committed to an lwb.
3132 * 3133 * The lwb associated with the passed in waiter is not guaranteed to 3134 * have been issued by the time this function completes. If the lwb is 3135 * not issued, we rely on future calls to zil_commit_writer() to issue 3136 * the lwb, or the timeout mechanism found in zil_commit_waiter(). 3137 */ 3138 static uint64_t 3139 zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw) 3140 { 3141 list_t ilwbs; 3142 lwb_t *lwb; 3143 uint64_t wtxg = 0; 3144 3145 ASSERT(!MUTEX_HELD(&zilog->zl_lock)); 3146 ASSERT(spa_writeable(zilog->zl_spa)); 3147 3148 list_create(&ilwbs, sizeof (lwb_t), offsetof(lwb_t, lwb_issue_node)); 3149 mutex_enter(&zilog->zl_issuer_lock); 3150 3151 if (zcw->zcw_lwb != NULL || zcw->zcw_done) { 3152 /* 3153 * It's possible that, while we were waiting to acquire 3154 * the "zl_issuer_lock", another thread committed this 3155 * waiter to an lwb. If that occurs, we bail out early, 3156 * without processing any of the zilog's queue of itxs. 3157 * 3158 * On certain workloads and system configurations, the 3159 * "zl_issuer_lock" can become highly contended. In an 3160 * attempt to reduce this contention, we immediately drop 3161 * the lock if the waiter has already been processed. 3162 * 3163 * We've measured this optimization to reduce CPU spent 3164 * contending on this lock by up to 5%, using a system 3165 * with 32 CPUs, low latency storage (~50 usec writes), 3166 * and 1024 threads performing sync writes. 3167 */ 3168 goto out; 3169 } 3170 3171 ZIL_STAT_BUMP(zilog, zil_commit_writer_count); 3172 3173 wtxg = zil_get_commit_list(zilog); 3174 zil_prune_commit_list(zilog); 3175 zil_process_commit_list(zilog, zcw, &ilwbs); 3176 3177 out: 3178 mutex_exit(&zilog->zl_issuer_lock); 3179 while ((lwb = list_remove_head(&ilwbs)) != NULL) 3180 zil_lwb_write_issue(zilog, lwb); 3181 list_destroy(&ilwbs); 3182 return (wtxg); 3183 } 3184 3185 static void 3186 zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw) 3187 { 3188 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); 3189 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 3190 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 3191 3192 lwb_t *lwb = zcw->zcw_lwb; 3193 ASSERT3P(lwb, !=, NULL); 3194 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_NEW); 3195 3196 /* 3197 * If the lwb has already been issued by another thread, we can 3198 * immediately return since there's no work to be done (the 3199 * point of this function is to issue the lwb). Additionally, we 3200 * do this prior to acquiring the zl_issuer_lock, to avoid 3201 * acquiring it when it's not necessary to do so. 3202 */ 3203 if (lwb->lwb_state != LWB_STATE_OPENED) 3204 return; 3205 3206 /* 3207 * In order to call zil_lwb_write_close() we must hold the 3208 * zilog's "zl_issuer_lock". We can't simply acquire that lock, 3209 * since we're already holding the commit waiter's "zcw_lock", 3210 * and those two locks are acquired in the opposite order 3211 * elsewhere. 3212 */ 3213 mutex_exit(&zcw->zcw_lock); 3214 mutex_enter(&zilog->zl_issuer_lock); 3215 mutex_enter(&zcw->zcw_lock); 3216 3217 /* 3218 * Since we just dropped and re-acquired the commit waiter's 3219 * lock, we have to re-check to see if the waiter was marked 3220 * "done" during that process. If the waiter was marked "done", 3221 * the "lwb" pointer is no longer valid (it can be free'd after 3222 * the waiter is marked "done"), so without this check we could 3223 * wind up with a use-after-free error below. 
3224 */
3225 if (zcw->zcw_done) {
3226 mutex_exit(&zilog->zl_issuer_lock);
3227 return;
3228 }
3229
3230 ASSERT3P(lwb, ==, zcw->zcw_lwb);
3231
3232 /*
3233 * We've already checked this above, but since we hadn't acquired
3234 * the zilog's zl_issuer_lock, we have to perform this check a
3235 * second time while holding the lock.
3236 *
3237 * We don't need to hold the zl_lock since the lwb cannot transition
3238 * from OPENED to CLOSED while we hold the zl_issuer_lock. The lwb
3239 * _can_ transition from CLOSED to DONE, but it's OK to race with
3240 * that transition since we treat the lwb the same, whether it's in
3241 * the CLOSED, ISSUED or DONE states.
3242 *
3243 * The important thing is that we treat the lwb differently depending
3244 * on whether it's OPENED or CLOSED, and block any other threads that
3245 * might attempt to close/issue this lwb. For that reason we hold the
3246 * zl_issuer_lock when checking the lwb_state; we must not call
3247 * zil_lwb_write_close() if the lwb had already been closed/issued.
3248 *
3249 * See the comment above the lwb_state_t structure definition for
3250 * more details on the lwb states, and locking requirements.
3251 */
3252 if (lwb->lwb_state != LWB_STATE_OPENED) {
3253 mutex_exit(&zilog->zl_issuer_lock);
3254 return;
3255 }
3256
3257 /*
3258 * We do not need zcw_lock once we hold zl_issuer_lock and know the
3259 * lwb is still open. But we have to drop it to avoid a deadlock in
3260 * case a callback of a zio issued by zil_lwb_write_issue() tries to
3261 * take it, while zil_lwb_write_issue() itself is blocked attempting
3262 * to issue the next lwb it found in the LWB_STATE_READY state.
3263 */
3264 mutex_exit(&zcw->zcw_lock);
3265
3266 /*
3267 * As described in the comments above zil_commit_waiter() and
3268 * zil_process_commit_list(), we need to issue this lwb's zio
3269 * since we've reached the commit waiter's timeout and it still
3270 * hasn't been issued.
3271 */
3272 zil_burst_done(zilog);
3273 lwb_t *nlwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
3274
3275 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED);
3276
3277 if (nlwb == NULL) {
3278 /*
3279 * When zil_lwb_write_close() returns NULL, this
3280 * indicates zio_alloc_zil() failed to allocate the
3281 * "next" lwb on-disk. When this occurs, the ZIL write
3282 * pipeline must be stalled; see the comment within the
3283 * zil_commit_writer_stall() function for more details.
3284 */
3285 zil_lwb_write_issue(zilog, lwb);
3286 zil_commit_writer_stall(zilog);
3287 mutex_exit(&zilog->zl_issuer_lock);
3288 } else {
3289 mutex_exit(&zilog->zl_issuer_lock);
3290 zil_lwb_write_issue(zilog, lwb);
3291 }
3292 mutex_enter(&zcw->zcw_lock);
3293 }
3294
3295 /*
3296 * This function is responsible for performing the following two tasks:
3297 *
3298 * 1. its primary responsibility is to block until the given "commit
3299 * waiter" is considered "done".
3300 *
3301 * 2. its secondary responsibility is to issue the zio for the lwb that
3302 * the given "commit waiter" is waiting on, if this function has
3303 * waited "long enough" and the lwb is still in the "open" state.
3304 *
3305 * Given a sufficient amount of itxs being generated and written using
3306 * the ZIL, the lwb's zio will be issued via the zil_lwb_assign()
3307 * function. If this does not occur, this secondary responsibility will
3308 * ensure the lwb is issued even if there is no other synchronous
3309 * activity on the system.
3310 *
3311 * For more details, see zil_process_commit_list(); more specifically,
3312 * the comment at the bottom of that function.
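 *
 * As an illustration of responsibility (2): with zfs_commit_timeout_pct
 * at its default of 10 and a recent lwb latency of 1ms, the waiter
 * below sleeps for roughly 100us before taking it upon itself to
 * issue the still-open lwb.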
3313 */
3314 static void
3315 zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
3316 {
3317 ASSERT(!MUTEX_HELD(&zilog->zl_lock));
3318 ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
3319 ASSERT(spa_writeable(zilog->zl_spa));
3320
3321 mutex_enter(&zcw->zcw_lock);
3322
3323 /*
3324 * The timeout is scaled based on the lwb latency to avoid
3325 * significantly impacting the latency of each individual itx.
3326 * For more details, see the comment at the bottom of the
3327 * zil_process_commit_list() function.
3328 */
3329 int pct = MAX(zfs_commit_timeout_pct, 1);
3330 hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
3331 hrtime_t wakeup = gethrtime() + sleep;
3332 boolean_t timedout = B_FALSE;
3333
3334 while (!zcw->zcw_done) {
3335 ASSERT(MUTEX_HELD(&zcw->zcw_lock));
3336
3337 lwb_t *lwb = zcw->zcw_lwb;
3338
3339 /*
3340 * Usually, the waiter will have a non-NULL lwb field here,
3341 * but it's possible for it to be NULL as a result of
3342 * zil_commit() racing with spa_sync().
3343 *
3344 * When zil_clean() is called, it's possible for the itxg
3345 * list (which may be cleaned via a taskq) to contain
3346 * commit itxs. When this occurs, the commit waiters linked
3347 * off of these commit itxs will not be committed to an
3348 * lwb. Additionally, these commit waiters will not be
3349 * marked done until zil_commit_waiter_skip() is called via
3350 * zil_itxg_clean().
3351 *
3352 * Thus, it's possible for this commit waiter (i.e. the
3353 * "zcw" variable) to be found in this "in between" state;
3354 * where its "zcw_lwb" field is NULL, and it hasn't yet
3355 * been skipped, so its "zcw_done" field is still B_FALSE.
3356 */
3357 IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_NEW);
3358
3359 if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
3360 ASSERT3B(timedout, ==, B_FALSE);
3361
3362 /*
3363 * If the lwb hasn't been issued yet, then we
3364 * need to wait with a timeout, in case this
3365 * function needs to issue the lwb after the
3366 * timeout is reached; responsibility (2) from
3367 * the comment above this function.
3368 */
3369 int rc = cv_timedwait_hires(&zcw->zcw_cv,
3370 &zcw->zcw_lock, wakeup, USEC2NSEC(1),
3371 CALLOUT_FLAG_ABSOLUTE);
3372
3373 if (rc != -1 || zcw->zcw_done)
3374 continue;
3375
3376 timedout = B_TRUE;
3377 zil_commit_waiter_timeout(zilog, zcw);
3378
3379 if (!zcw->zcw_done) {
3380 /*
3381 * If the commit waiter has already been
3382 * marked "done", it's possible for the
3383 * waiter's lwb structure to have already
3384 * been freed. Thus, we can only reliably
3385 * make these assertions if the waiter
3386 * isn't done.
3387 */
3388 ASSERT3P(lwb, ==, zcw->zcw_lwb);
3389 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
3390 }
3391 } else {
3392 /*
3393 * If the lwb isn't open, then it must have already
3394 * been issued. In that case, there's no need to
3395 * use a timeout when waiting for the lwb to
3396 * complete.
3397 *
3398 * Additionally, if the lwb is NULL, the waiter
3399 * will soon be signaled and marked done via
3400 * zil_clean() and zil_itxg_clean(), so no timeout
3401 * is required.
3402 */ 3403 3404 IMPLY(lwb != NULL, 3405 lwb->lwb_state == LWB_STATE_CLOSED || 3406 lwb->lwb_state == LWB_STATE_READY || 3407 lwb->lwb_state == LWB_STATE_ISSUED || 3408 lwb->lwb_state == LWB_STATE_WRITE_DONE || 3409 lwb->lwb_state == LWB_STATE_FLUSH_DONE); 3410 cv_wait(&zcw->zcw_cv, &zcw->zcw_lock); 3411 } 3412 } 3413 3414 mutex_exit(&zcw->zcw_lock); 3415 } 3416 3417 static zil_commit_waiter_t * 3418 zil_alloc_commit_waiter(void) 3419 { 3420 zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP); 3421 3422 cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL); 3423 mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL); 3424 list_link_init(&zcw->zcw_node); 3425 zcw->zcw_lwb = NULL; 3426 zcw->zcw_done = B_FALSE; 3427 zcw->zcw_zio_error = 0; 3428 3429 return (zcw); 3430 } 3431 3432 static void 3433 zil_free_commit_waiter(zil_commit_waiter_t *zcw) 3434 { 3435 ASSERT(!list_link_active(&zcw->zcw_node)); 3436 ASSERT3P(zcw->zcw_lwb, ==, NULL); 3437 ASSERT3B(zcw->zcw_done, ==, B_TRUE); 3438 mutex_destroy(&zcw->zcw_lock); 3439 cv_destroy(&zcw->zcw_cv); 3440 kmem_cache_free(zil_zcw_cache, zcw); 3441 } 3442 3443 /* 3444 * This function is used to create a TX_COMMIT itx and assign it. This 3445 * way, it will be linked into the ZIL's list of synchronous itxs, and 3446 * then later committed to an lwb (or skipped) when 3447 * zil_process_commit_list() is called. 3448 */ 3449 static void 3450 zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw) 3451 { 3452 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); 3453 3454 /* 3455 * Since we are not going to create any new dirty data, and we 3456 * can even help with clearing the existing dirty data, we 3457 * should not be subject to the dirty data based delays. We 3458 * use DMU_TX_NOTHROTTLE to bypass the delay mechanism. 3459 */ 3460 VERIFY0(dmu_tx_assign(tx, 3461 DMU_TX_WAIT | DMU_TX_NOTHROTTLE | DMU_TX_SUSPEND)); 3462 3463 itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t)); 3464 itx->itx_sync = B_TRUE; 3465 itx->itx_private = zcw; 3466 3467 zil_itx_assign(zilog, itx, tx); 3468 3469 dmu_tx_commit(tx); 3470 } 3471 3472 /* 3473 * Commit ZFS Intent Log transactions (itxs) to stable storage. 3474 * 3475 * When writing ZIL transactions to the on-disk representation of the 3476 * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple 3477 * itxs can be committed to a single lwb. Once a lwb is written and 3478 * committed to stable storage (i.e. the lwb is written, and vdevs have 3479 * been flushed), each itx that was committed to that lwb is also 3480 * considered to be committed to stable storage. 3481 * 3482 * When an itx is committed to an lwb, the log record (lr_t) contained 3483 * by the itx is copied into the lwb's zio buffer, and once this buffer 3484 * is written to disk, it becomes an on-disk ZIL block. 3485 * 3486 * As itxs are generated, they're inserted into the ZIL's queue of 3487 * uncommitted itxs. The semantics of zil_commit() are such that it will 3488 * block until all itxs that were in the queue when it was called, are 3489 * committed to stable storage. 3490 * 3491 * If "foid" is zero, this means all "synchronous" and "asynchronous" 3492 * itxs, for all objects in the dataset, will be committed to stable 3493 * storage prior to zil_commit() returning. If "foid" is non-zero, all 3494 * "synchronous" itxs for all objects, but only "asynchronous" itxs 3495 * that correspond to the foid passed in, will be committed to stable 3496 * storage prior to zil_commit() returning. 
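 *
 * For example (illustrative): an fsync(2) of a file typically maps
 * to zil_commit(zilog, foid) with "foid" being that file's object
 * number, while filesystem-wide sync requests pass a "foid" of zero
 * to commit everything.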
/*
 * Commit ZFS Intent Log transactions (itxs) to stable storage.
 *
 * When writing ZIL transactions to the on-disk representation of the
 * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
 * itxs can be committed to a single lwb. Once an lwb is written and
 * committed to stable storage (i.e. the lwb is written, and vdevs have
 * been flushed), each itx that was committed to that lwb is also
 * considered to be committed to stable storage.
 *
 * When an itx is committed to an lwb, the log record (lr_t) contained
 * by the itx is copied into the lwb's zio buffer, and once this buffer
 * is written to disk, it becomes an on-disk ZIL block.
 *
 * As itxs are generated, they're inserted into the ZIL's queue of
 * uncommitted itxs. The semantics of zil_commit() are such that it will
 * block until all itxs that were in the queue when it was called are
 * committed to stable storage.
 *
 * If "foid" is zero, this means all "synchronous" and "asynchronous"
 * itxs, for all objects in the dataset, will be committed to stable
 * storage prior to zil_commit() returning. If "foid" is non-zero, all
 * "synchronous" itxs for all objects, but only "asynchronous" itxs
 * that correspond to the foid passed in, will be committed to stable
 * storage prior to zil_commit() returning.
 *
 * Generally speaking, when zil_commit() is called, the consumer doesn't
 * actually care about _all_ of the uncommitted itxs. Instead, they're
 * simply trying to wait for a specific itx to be committed to disk,
 * but the interface(s) for interacting with the ZIL don't allow such
 * fine-grained communication. A better interface would allow a consumer
 * to create and assign an itx, and then pass a reference to this itx to
 * zil_commit(); such that zil_commit() would return as soon as that
 * specific itx was committed to disk (instead of waiting for _all_
 * itxs to be committed).
 *
 * When a thread calls zil_commit(), a special "commit itx" will be
 * generated, along with a corresponding "waiter" for this commit itx.
 * zil_commit() will wait on this waiter's CV, such that when the waiter
 * is marked done, and signaled, zil_commit() will return.
 *
 * This commit itx is inserted into the queue of uncommitted itxs. This
 * provides an easy mechanism for determining which itxs were in the
 * queue prior to zil_commit() having been called, and which itxs were
 * added after zil_commit() was called.
 *
 * The commit itx is special; it doesn't have any on-disk representation.
 * When a commit itx is "committed" to an lwb, the waiter associated
 * with it is linked onto the lwb's list of waiters. Then, when that lwb
 * completes, each waiter on the lwb's list is marked done and signaled
 * -- allowing the thread waiting on the waiter to return from zil_commit().
 *
 * It's important to point out a few critical factors that allow us
 * to make use of the commit itxs, commit waiters, per-lwb lists of
 * commit waiters, and zio completion callbacks like we're doing:
 *
 *   1. The list of waiters for each lwb is traversed, and each commit
 *      waiter is marked "done" and signaled, in the zio completion
 *      callback of the lwb's zio[*].
 *
 *      * Actually, the waiters are signaled in the zio completion
 *        callback of the root zio for the flush commands that are sent
 *        to the vdevs upon completion of the lwb zio.
 *
 *   2. When the itxs are inserted into the ZIL's queue of uncommitted
 *      itxs, the order in which they are inserted is preserved[*]; as
 *      itxs are added to the queue, they are added to the tail of
 *      in-memory linked lists.
 *
 *      When committing the itxs to lwbs (to be written to disk), they
 *      are committed in the same order in which the itxs were added to
 *      the uncommitted queue's linked list(s); i.e. the linked list of
 *      itxs to commit is traversed from head to tail, and each itx is
 *      committed to an lwb in that order.
 *
 *      * To clarify:
 *
 *        - the order of "sync" itxs is preserved w.r.t. other
 *          "sync" itxs, regardless of the corresponding objects.
 *        - the order of "async" itxs is preserved w.r.t. other
 *          "async" itxs corresponding to the same object.
 *        - the order of "async" itxs is *not* preserved w.r.t. other
 *          "async" itxs corresponding to different objects.
 *        - the order of "sync" itxs w.r.t. "async" itxs (or vice
 *          versa) is *not* preserved, even for itxs that correspond
 *          to the same object.
 *
 *      For more details, see: zil_itx_assign(), zil_async_to_sync(),
 *      zil_get_commit_list(), and zil_process_commit_list().
 *
 *   3. The lwbs represent a linked list of blocks on disk. Thus, no
 *      lwb can be considered committed to stable storage until its
 *      "previous" lwb is also committed to stable storage. This fact,
 *      coupled with the fact described above, means that itxs are
 *      committed in (roughly) the order in which they were generated.
 *      This is essential because itxs are dependent on prior itxs.
 *      Thus, we *must not* deem an itx as being committed to stable
 *      storage until *all* prior itxs have also been committed to
 *      stable storage.
 *
 *      To enforce this ordering of lwb zio's, while still leveraging as
 *      much of the underlying storage performance as possible, we rely
 *      on two fundamental concepts:
 *
 *          1. The creation and issuance of lwb zio's is protected by
 *             the zilog's "zl_issuer_lock", which ensures only a single
 *             thread is creating and/or issuing lwb's at a time
 *          2. The "previous" lwb is a child of the "current" lwb
 *             (leveraging the zio parent-child dependency graph)
 *
 *      By relying on this parent-child zio relationship, we can have
 *      many lwb zio's concurrently issued to the underlying storage,
 *      but the order in which they complete will be the same order in
 *      which they were created.
 *
 * An illustrative consumer-side sketch follows the zil_commit()
 * definition below.
 */
void
zil_commit(zilog_t *zilog, uint64_t foid)
{
	/*
	 * We should never attempt to call zil_commit on a snapshot for
	 * a couple of reasons:
	 *
	 * 1. A snapshot may never be modified, thus it cannot have any
	 *    in-flight itxs that would have modified the dataset.
	 *
	 * 2. By design, when zil_commit() is called, a commit itx will
	 *    be assigned to this zilog; as a result, the zilog will be
	 *    dirtied. We must not dirty the zilog of a snapshot; there
	 *    are checks in the code that enforce this invariant, and
	 *    will cause a panic if it's not upheld.
	 */
	ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);

	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
		return;

	if (!spa_writeable(zilog->zl_spa)) {
		/*
		 * If the SPA is not writable, there should never be any
		 * pending itxs waiting to be committed to disk. If that
		 * weren't true, we'd skip writing those itxs out, and
		 * would break the semantics of zil_commit(); thus, we're
		 * verifying that truth before we return to the caller.
		 */
		ASSERT(list_is_empty(&zilog->zl_lwb_list));
		ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
		for (int i = 0; i < TXG_SIZE; i++)
			ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
		return;
	}

	/*
	 * If the ZIL is suspended, we don't want to dirty it by calling
	 * zil_commit_itx_assign() below, nor can we write out lwbs as
	 * would be done in zil_commit_write(). Thus, we simply rely on
	 * txg_wait_synced() to maintain the necessary semantics, and
	 * avoid calling those functions altogether.
	 */
	if (zilog->zl_suspend > 0) {
		ZIL_STAT_BUMP(zilog, zil_commit_suspend_count);
		txg_wait_synced(zilog->zl_dmu_pool, 0);
		return;
	}

	zil_commit_impl(zilog, foid);
}

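/*
 * Illustrative sketch only (not part of the upstream source): the
 * consumer's view of the interface documented above. The example_*
 * names are hypothetical; the foid semantics (zero commits everything,
 * non-zero also commits that object's "async" itxs) follow the comment
 * above zil_commit().
 */
static void
example_fsync_handler(zilog_t *zilog, uint64_t object)
{
	/*
	 * Commit all "sync" itxs, plus the "async" itxs for this
	 * object; blocks until they reach stable storage.
	 */
	zil_commit(zilog, object);
}

static void
example_sync_everything(zilog_t *zilog)
{
	/* A zero foid commits all itxs, for all objects in the dataset. */
	zil_commit(zilog, 0);
}
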
void
zil_commit_impl(zilog_t *zilog, uint64_t foid)
{
	ZIL_STAT_BUMP(zilog, zil_commit_count);

	/*
	 * Move the "async" itxs for the specified foid to the "sync"
	 * queues, such that they will be later committed (or skipped)
	 * to an lwb when zil_process_commit_list() is called.
	 *
	 * Since these "async" itxs must be committed prior to this
	 * call to zil_commit returning, we must perform this operation
	 * before we call zil_commit_itx_assign().
	 */
	zil_async_to_sync(zilog, foid);

	/*
	 * We allocate a new "waiter" structure which will initially be
	 * linked to the commit itx using the itx's "itx_private" field.
	 * Since the commit itx doesn't represent any on-disk state,
	 * when it's committed to an lwb, rather than copying its lr_t
	 * into the lwb's buffer, the commit itx's "waiter" will be
	 * added to the lwb's list of waiters. Then, when the lwb is
	 * committed to stable storage, each waiter in the lwb's list of
	 * waiters will be marked "done", and signaled.
	 *
	 * We must create the waiter and assign the commit itx prior to
	 * calling zil_commit_writer(), or else our specific commit itx
	 * is not guaranteed to be committed to an lwb prior to calling
	 * zil_commit_waiter().
	 */
	zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
	zil_commit_itx_assign(zilog, zcw);

	uint64_t wtxg = zil_commit_writer(zilog, zcw);
	zil_commit_waiter(zilog, zcw);

	if (zcw->zcw_zio_error != 0) {
		/*
		 * If there was an error writing out the ZIL blocks that
		 * this thread is waiting on, then we fall back to
		 * relying on spa_sync() to write out the data this
		 * thread is waiting on. Obviously this has performance
		 * implications, but the expectation is for this to be
		 * an exceptional case, and shouldn't occur often.
		 */
		ZIL_STAT_BUMP(zilog, zil_commit_error_count);
		DTRACE_PROBE2(zil__commit__io__error,
		    zilog_t *, zilog, zil_commit_waiter_t *, zcw);
		txg_wait_synced(zilog->zl_dmu_pool, 0);
	} else if (wtxg != 0) {
		ZIL_STAT_BUMP(zilog, zil_commit_suspend_count);
		txg_wait_synced(zilog->zl_dmu_pool, wtxg);
	}

	zil_free_commit_waiter(zcw);
}

/*
 * Called in syncing context to free committed log blocks and update the
 * log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
	lwb_t *lwb;

	/*
	 * We don't zero out zl_destroy_txg, so make sure we don't try
	 * to destroy it twice.
	 */
	if (spa_sync_pass(spa) != 1)
		return;

	zil_lwb_flush_wait_all(zilog, txg);

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	if (*replayed_seq != 0) {
		ASSERT(zh->zh_replay_seq < *replayed_seq);
		zh->zh_replay_seq = *replayed_seq;
		*replayed_seq = 0;
	}

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

		ASSERT(list_is_empty(&zilog->zl_lwb_list));

		memset(zh, 0, sizeof (zil_header_t));
		memset(zilog->zl_replayed_seq, 0,
		    sizeof (zilog->zl_replayed_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of a log chain that
			 * couldn't be claimed because a device was missing
			 * during zil_claim(), but that device later
			 * returns, then this block could erroneously
			 * appear valid. To guard against this, assign a
			 * new GUID to the new log chain so it doesn't
			 * matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		} else {
			/*
			 * A destroyed ZIL chain can't contain any
			 * TX_SETSAXATTR records. So, deactivate the
			 * feature for this dataset. We activate it again
			 * when we start a new ZIL chain.
			 */
			if (dsl_dataset_feature_is_active(ds,
			    SPA_FEATURE_ZILSAXATTR))
				dsl_dataset_deactivate_feature(ds,
				    SPA_FEATURE_ZILSAXATTR, tx);
		}
	}

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_state != LWB_STATE_FLUSH_DONE ||
		    lwb->lwb_alloc_txg > txg || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		if (!BP_IS_HOLE(&lwb->lwb_blk))
			zio_free(spa, txg, &lwb->lwb_blk);
		zil_free_lwb(zilog, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_is_empty(&zilog->zl_lwb_list))
			BP_ZERO(&zh->zh_log);
	}

	mutex_exit(&zilog->zl_lock);
}

static int
zil_lwb_cons(void *vbuf, void *unused, int kmflag)
{
	(void) unused, (void) kmflag;
	lwb_t *lwb = vbuf;
	list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
	list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
	    offsetof(zil_commit_waiter_t, zcw_node));
	avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
	mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
	return (0);
}

static void
zil_lwb_dest(void *vbuf, void *unused)
{
	(void) unused;
	lwb_t *lwb = vbuf;
	mutex_destroy(&lwb->lwb_vdev_lock);
	avl_destroy(&lwb->lwb_vdev_tree);
	list_destroy(&lwb->lwb_waiters);
	list_destroy(&lwb->lwb_itxs);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0);

	zil_zcw_cache = kmem_cache_create("zil_zcw_cache",
	    sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	zil_sums_init(&zil_sums_global);
	zil_kstats_global = kstat_create("zfs", 0, "zil", "misc",
	    KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (zil_kstats_global != NULL) {
		zil_kstats_global->ks_data = &zil_stats;
		zil_kstats_global->ks_update = zil_kstats_global_update;
		zil_kstats_global->ks_private = NULL;
		kstat_install(zil_kstats_global);
	}
}

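/*
 * Illustrative sketch only (not part of the upstream source): the
 * object-cache pattern established in zil_init() above. State built by
 * the constructor (the embedded lists, AVL tree, and mutex) persists
 * across kmem_cache_alloc()/kmem_cache_free() cycles, so allocation
 * only has to reset simple fields. The example_* names are hypothetical.
 */
static lwb_t *
example_lwb_get(void)
{
	/* Lists, tree, and mutex were already set up by zil_lwb_cons(). */
	return (kmem_cache_alloc(zil_lwb_cache, KM_SLEEP));
}

static void
example_lwb_put(lwb_t *lwb)
{
	/*
	 * Return the object still "constructed"; zil_lwb_dest() only
	 * runs when the cache itself is reaped or destroyed.
	 */
	kmem_cache_free(zil_lwb_cache, lwb);
}
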
void
zil_fini(void)
{
	kmem_cache_destroy(zil_zcw_cache);
	kmem_cache_destroy(zil_lwb_cache);

	if (zil_kstats_global != NULL) {
		kstat_delete(zil_kstats_global);
		zil_kstats_global = NULL;
	}

	zil_sums_fini(&zil_sums_global);
}

void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
	zilog->zl_sync = sync;
}

void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
	zilog->zl_logbias = logbias;
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;
	zilog->zl_logbias = dmu_objset_logbias(os);
	zilog->zl_sync = dmu_objset_syncprop(os);
	zilog->zl_dirty_max_txg = 0;
	zilog->zl_last_lwb_opened = NULL;
	zilog->zl_last_lwb_latency = 0;
	zilog->zl_max_block_size = MIN(MAX(P2ALIGN_TYPED(zil_maxblocksize,
	    ZIL_MIN_BLKSZ, uint64_t), ZIL_MIN_BLKSZ),
	    spa_maxblocksize(dmu_objset_spa(os)));

	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zilog->zl_lwb_io_lock, NULL, MUTEX_DEFAULT, NULL);

	for (int i = 0; i < TXG_SIZE; i++) {
		mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_lwb_io_cv, NULL, CV_DEFAULT, NULL);

	for (int i = 0; i < ZIL_BURSTS; i++) {
		zilog->zl_prev_opt[i] = zilog->zl_max_block_size -
		    sizeof (zil_chain_t);
	}

	return (zilog);
}

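/*
 * Illustrative sketch only (not part of the upstream source): the clamp
 * applied to zl_max_block_size in zil_alloc() above, written out as a
 * helper. For example, assuming ZIL_MIN_BLKSZ is 4096: a requested size
 * of 131584 is first rounded down to a 4K multiple (131072), and then
 * clamped into [ZIL_MIN_BLKSZ, spa_maxblocksize()].
 */
static uint64_t
example_clamp_zil_blocksize(uint64_t requested, uint64_t spa_max)
{
	/* Round down to the ZIL's minimum block alignment. */
	uint64_t sz = P2ALIGN_TYPED(requested, ZIL_MIN_BLKSZ, uint64_t);

	/* Never below the minimum, never above what the pool allows. */
	return (MIN(MAX(sz, ZIL_MIN_BLKSZ), spa_max));
}
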
void
zil_free(zilog_t *zilog)
{
	int i;

	zilog->zl_stop_sync = 1;

	ASSERT0(zilog->zl_suspend);
	ASSERT0(zilog->zl_suspending);

	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	list_destroy(&zilog->zl_lwb_list);

	ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
	list_destroy(&zilog->zl_itx_commit_list);

	for (i = 0; i < TXG_SIZE; i++) {
		/*
		 * It's possible for an itx to be generated that doesn't
		 * dirty a txg (e.g. ztest TX_TRUNCATE). So there's no
		 * zil_clean() callback to remove the entry. We remove
		 * those here.
		 *
		 * Also free up the ziltest itxs.
		 */
		if (zilog->zl_itxg[i].itxg_itxs)
			zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
		mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
	}

	mutex_destroy(&zilog->zl_issuer_lock);
	mutex_destroy(&zilog->zl_lock);
	mutex_destroy(&zilog->zl_lwb_io_lock);

	cv_destroy(&zilog->zl_cv_suspend);
	cv_destroy(&zilog->zl_lwb_io_cv);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data, zil_sums_t *zil_sums)
{
	zilog_t *zilog = dmu_objset_zil(os);

	ASSERT3P(zilog->zl_get_data, ==, NULL);
	ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
	ASSERT(list_is_empty(&zilog->zl_lwb_list));

	zilog->zl_get_data = get_data;
	zilog->zl_sums = zil_sums;

	return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	lwb_t *lwb;
	uint64_t txg;

	if (!dmu_objset_is_snapshot(zilog->zl_os)) {
		zil_commit(zilog, 0);
	} else {
		ASSERT(list_is_empty(&zilog->zl_lwb_list));
		ASSERT0(zilog->zl_dirty_max_txg);
		ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
	}

	mutex_enter(&zilog->zl_lock);
	txg = zilog->zl_dirty_max_txg;
	lwb = list_tail(&zilog->zl_lwb_list);
	if (lwb != NULL) {
		txg = MAX(txg, lwb->lwb_alloc_txg);
		txg = MAX(txg, lwb->lwb_max_txg);
	}
	mutex_exit(&zilog->zl_lock);

	/*
	 * zl_lwb_max_issued_txg may be larger than lwb_max_txg. It depends
	 * on the time when the dmu_tx transaction is assigned in
	 * zil_lwb_write_issue().
	 */
	mutex_enter(&zilog->zl_lwb_io_lock);
	txg = MAX(zilog->zl_lwb_max_issued_txg, txg);
	mutex_exit(&zilog->zl_lwb_io_lock);

	/*
	 * We need to use txg_wait_synced() to wait until that txg is synced.
	 * zil_sync() will guarantee all lwbs up to that txg have been
	 * written out, flushed, and cleaned.
	 */
	if (txg != 0)
		txg_wait_synced(zilog->zl_dmu_pool, txg);

	if (zilog_is_dirty(zilog))
		zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog,
		    (u_longlong_t)txg);
	if (txg < spa_freeze_txg(zilog->zl_spa))
		VERIFY(!zilog_is_dirty(zilog));

	zilog->zl_get_data = NULL;

	/*
	 * We should have only one lwb left on the list; remove it now.
	 */
	mutex_enter(&zilog->zl_lock);
	lwb = list_remove_head(&zilog->zl_lwb_list);
	if (lwb != NULL) {
		ASSERT(list_is_empty(&zilog->zl_lwb_list));
		ASSERT3S(lwb->lwb_state, ==, LWB_STATE_NEW);
		zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		zil_free_lwb(zilog, lwb);
	}
	mutex_exit(&zilog->zl_lock);
}

static const char *suspend_tag = "zil suspending";

/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * On old version pools, we suspend the log briefly when taking a
 * snapshot so that it will have an empty intent log.
 *
 * Long holds are not really intended to be used the way we do here --
 * held for such a short time. A concurrent caller of dsl_dataset_long_held()
 * could fail. Therefore we take pains to only put a long hold if it is
 * actually necessary. Fortunately, it will only be necessary if the
 * objset is currently mounted (or the ZVOL equivalent). In that case it
 * will already have a long hold, so we are not really making things any worse.
 *
 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
 * zvol_state_t), and use their mechanism to prevent their hold from being
 * dropped (e.g. VFS_HOLD()). However, that would be even more pain for
 * very little gain.
 *
 * If cookiep == NULL, this does both the suspend & resume.
 * Otherwise, it returns with the dataset "long held", and the cookie
 * should be passed into zil_resume().
 */
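/*
 * Illustrative sketch only (not part of the upstream source): the
 * cookie-based usage of zil_suspend()/zil_resume() described above.
 * The example_suspend_around_operation() name is hypothetical. Passing
 * a NULL cookiep instead performs the suspend and resume in a single
 * call.
 */
static int
example_suspend_around_operation(const char *osname)
{
	void *cookie;
	int error;

	/* Suspend; on success the dataset is "long held" via the cookie. */
	error = zil_suspend(osname, &cookie);
	if (error != 0)
		return (error);

	/* ... perform whatever required the ZIL to be empty ... */

	/* Drop the hold and allow the ZIL to be written again. */
	zil_resume(cookie);
	return (0);
}
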
int
zil_suspend(const char *osname, void **cookiep)
{
	objset_t *os;
	zilog_t *zilog;
	const zil_header_t *zh;
	int error;

	error = dmu_objset_hold(osname, suspend_tag, &os);
	if (error != 0)
		return (error);
	zilog = dmu_objset_zil(os);

	mutex_enter(&zilog->zl_lock);
	zh = zilog->zl_header;

	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		dmu_objset_rele(os, suspend_tag);
		return (SET_ERROR(EBUSY));
	}

	/*
	 * Don't put a long hold in the cases where we can avoid it. This
	 * is when there is no cookie so we are doing a suspend & resume
	 * (i.e. called from zil_vdev_offline()), and there's nothing to do
	 * for the suspend because it's already suspended, or there's no ZIL.
	 */
	if (cookiep == NULL && !zilog->zl_suspending &&
	    (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
		mutex_exit(&zilog->zl_lock);
		dmu_objset_rele(os, suspend_tag);
		return (0);
	}

	dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
	dsl_pool_rele(dmu_objset_pool(os), suspend_tag);

	zilog->zl_suspend++;

	if (zilog->zl_suspend > 1) {
		/*
		 * Someone else is already suspending it.
		 * Just wait for them to finish.
		 */

		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		mutex_exit(&zilog->zl_lock);

		if (cookiep == NULL)
			zil_resume(os);
		else
			*cookiep = os;
		return (0);
	}

	/*
	 * If there is no pointer to an on-disk block, this ZIL must not
	 * be active (e.g. filesystem not mounted), so there's nothing
	 * to clean up.
	 */
	if (BP_IS_HOLE(&zh->zh_log)) {
		ASSERT(cookiep != NULL); /* fast path already handled */

		*cookiep = os;
		mutex_exit(&zilog->zl_lock);
		return (0);
	}

	/*
	 * The ZIL has work to do. Ensure that the associated encryption
	 * key will remain mapped while we are committing the log by
	 * grabbing a reference to it. If the key isn't loaded we have no
	 * choice but to return an error until the wrapping key is loaded.
	 */
	if (os->os_encrypted &&
	    dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) {
		zilog->zl_suspend--;
		mutex_exit(&zilog->zl_lock);
		dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
		dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
		return (SET_ERROR(EACCES));
	}

	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	/*
	 * We need to use zil_commit_impl to ensure we wait for all
	 * LWB_STATE_OPENED, _CLOSED and _READY lwbs to be committed
	 * to disk before proceeding. If we used zil_commit instead, it
	 * would just call txg_wait_synced(), because zl_suspend is set.
	 * txg_wait_synced() doesn't wait for these lwb's to be
	 * LWB_STATE_FLUSH_DONE before returning.
	 */
	zil_commit_impl(zilog, 0);

	/*
	 * Now that we've ensured all lwb's are LWB_STATE_FLUSH_DONE, we
	 * use txg_wait_synced() to ensure the data from the zilog has
	 * migrated to the main pool before calling zil_destroy().
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	if (os->os_encrypted)
		dsl_dataset_remove_key_mapping(dmu_objset_ds(os));

	if (cookiep == NULL)
		zil_resume(os);
	else
		*cookiep = os;
	return (0);
}

void
zil_resume(void *cookie)
{
	objset_t *os = cookie;
	zilog_t *zilog = dmu_objset_zil(os);

	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
	dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
	dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
}

typedef struct zil_replay_arg {
	zil_replay_func_t *const *zr_replay;
	void *zr_arg;
	boolean_t zr_byteswap;
	char *zr_lr;
} zil_replay_arg_t;

static int
zil_replay_error(zilog_t *zilog, const lr_t *lr, int error)
{
	char name[ZFS_MAX_DATASET_NAME_LEN];

	zilog->zl_replaying_seq--;	/* didn't actually replay this one */

	dmu_objset_name(zilog->zl_os, name);

	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
	    (u_longlong_t)lr->lrc_seq,
	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");

	return (error);
}

static int
zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra,
    uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	int error = 0;

	zilog->zl_replaying_seq = lr->lrc_seq;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return (0);

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return (0);

	/* Strip case-insensitive bit, still present in log record */
	txtype &= ~TX_CI;

	if (txtype == 0 || txtype >= TX_MAX_TYPE)
		return (zil_replay_error(zilog, lr, EINVAL));

	/*
	 * If this record type can be logged out of order, the object
	 * (lr_foid) may no longer exist. That's legitimate, not an error.
	 */
	if (TX_OOO(txtype)) {
		error = dmu_object_info(zilog->zl_os,
		    LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL);
		if (error == ENOENT || error == EEXIST)
			return (0);
	}

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	memcpy(zr->zr_lr, lr, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		error = zil_read_log_data(zilog, (lr_write_t *)lr,
		    zr->zr_lr + reclen);
		if (error != 0)
			return (zil_replay_error(zilog, lr, error));
	}

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different record types, and only the
	 * replay vectors know how to byteswap their records. Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lr, reclen);

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header sequence number to reflect the fact that
	 * we did so. At the end of each replay function the sequence number
	 * is updated if we are in replay mode.
	 */
	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
	if (error != 0) {
		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST. Thus, if we receive any error, we try syncing out
		 * any removes, then retry the transaction. Note that we
		 * specify B_FALSE for byteswap now, so we don't do it twice.
		 */
		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
		if (error != 0)
			return (zil_replay_error(zilog, lr, error));
	}
	return (0);
}

static int
zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	(void) bp, (void) arg, (void) claim_txg;

	zilog->zl_replay_blks++;

	return (0);
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 * Return B_TRUE if there were any entries to replay.
 */
boolean_t
zil_replay(objset_t *os, void *arg,
    zil_replay_func_t *const replay_func[TX_MAX_TYPE])
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
		return (zil_destroy(zilog, B_TRUE));
	}

	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_replay = B_TRUE;
	zilog->zl_replay_time = ddi_get_lbolt();
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg, B_TRUE);
	vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
	zilog->zl_replay = B_FALSE;

	return (B_TRUE);
}

boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
		return (B_TRUE);

	if (zilog->zl_replay) {
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
		    zilog->zl_replaying_seq;
		return (B_TRUE);
	}

	return (B_FALSE);
}

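/*
 * Illustrative sketch only (not part of the upstream source): how a
 * dataset's logging routines are expected to consult zil_replaying(),
 * mirroring the pattern used by the zfs_log_*() functions. The
 * example_log_op() name and its argument list are hypothetical.
 */
static void
example_log_op(zilog_t *zilog, dmu_tx_t *tx)
{
	/*
	 * During replay (or with sync=disabled), operations must not be
	 * re-logged. As a side effect, zil_replaying() records the
	 * sequence number being replayed, which zil_sync() later
	 * publishes in the on-disk ZIL header.
	 */
	if (zil_replaying(zilog, tx))
		return;

	/* ... otherwise, create and assign an itx for this operation ... */
}
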
int
zil_reset(const char *osname, void *arg)
{
	(void) arg;

	int error = zil_suspend(osname, NULL);
	/* EACCES means crypto key not loaded */
	if ((error == EACCES) || (error == EBUSY))
		return (SET_ERROR(error));
	if (error != 0)
		return (SET_ERROR(EEXIST));
	return (0);
}

EXPORT_SYMBOL(zil_alloc);
EXPORT_SYMBOL(zil_free);
EXPORT_SYMBOL(zil_open);
EXPORT_SYMBOL(zil_close);
EXPORT_SYMBOL(zil_replay);
EXPORT_SYMBOL(zil_replaying);
EXPORT_SYMBOL(zil_destroy);
EXPORT_SYMBOL(zil_destroy_sync);
EXPORT_SYMBOL(zil_itx_create);
EXPORT_SYMBOL(zil_itx_destroy);
EXPORT_SYMBOL(zil_itx_assign);
EXPORT_SYMBOL(zil_commit);
EXPORT_SYMBOL(zil_claim);
EXPORT_SYMBOL(zil_check_log_chain);
EXPORT_SYMBOL(zil_sync);
EXPORT_SYMBOL(zil_clean);
EXPORT_SYMBOL(zil_suspend);
EXPORT_SYMBOL(zil_resume);
EXPORT_SYMBOL(zil_lwb_add_block);
EXPORT_SYMBOL(zil_bp_tree_add);
EXPORT_SYMBOL(zil_set_sync);
EXPORT_SYMBOL(zil_set_logbias);
EXPORT_SYMBOL(zil_sums_init);
EXPORT_SYMBOL(zil_sums_fini);
EXPORT_SYMBOL(zil_kstat_values_update);

ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, UINT, ZMOD_RW,
	"ZIL block open timeout percentage");

ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW,
	"Disable intent logging replay");

ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW,
	"Disable ZIL cache flushes");

ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, U64, ZMOD_RW,
	"Limit in bytes of SLOG sync writes per commit");

ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, UINT, ZMOD_RW,
	"Limit in bytes of ZIL log block size");

ZFS_MODULE_PARAM(zfs_zil, zil_, maxcopied, UINT, ZMOD_RW,
	"Limit in bytes of WR_COPIED size");