1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2017 by Delphix. All rights reserved. 24 * Copyright (c) 2014 Integros [integros.com] 25 */ 26 27 /* Portions Copyright 2010 Robert Milkowski */ 28 29 #include <sys/zfs_context.h> 30 #include <sys/spa.h> 31 #include <sys/dmu.h> 32 #include <sys/zap.h> 33 #include <sys/arc.h> 34 #include <sys/stat.h> 35 #include <sys/resource.h> 36 #include <sys/zil.h> 37 #include <sys/zil_impl.h> 38 #include <sys/dsl_dataset.h> 39 #include <sys/vdev_impl.h> 40 #include <sys/dmu_tx.h> 41 #include <sys/dsl_pool.h> 42 #include <sys/abd.h> 43 44 /* 45 * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system 46 * calls that change the file system. Each itx has enough information to 47 * be able to replay them after a system crash, power loss, or 48 * equivalent failure mode. These are stored in memory until either: 49 * 50 * 1. they are committed to the pool by the DMU transaction group 51 * (txg), at which point they can be discarded; or 52 * 2. they are committed to the on-disk ZIL for the dataset being 53 * modified (e.g. due to an fsync, O_DSYNC, or other synchronous 54 * requirement). 55 * 56 * In the event of a crash or power loss, the itxs contained by each 57 * dataset's on-disk ZIL will be replayed when that dataset is first 58 * instantianted (e.g. if the dataset is a normal fileystem, when it is 59 * first mounted). 60 * 61 * As hinted at above, there is one ZIL per dataset (both the in-memory 62 * representation, and the on-disk representation). The on-disk format 63 * consists of 3 parts: 64 * 65 * - a single, per-dataset, ZIL header; which points to a chain of 66 * - zero or more ZIL blocks; each of which contains 67 * - zero or more ZIL records 68 * 69 * A ZIL record holds the information necessary to replay a single 70 * system call transaction. A ZIL block can hold many ZIL records, and 71 * the blocks are chained together, similarly to a singly linked list. 72 * 73 * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL 74 * block in the chain, and the ZIL header points to the first block in 75 * the chain. 76 * 77 * Note, there is not a fixed place in the pool to hold these ZIL 78 * blocks; they are dynamically allocated and freed as needed from the 79 * blocks available on the pool, though they can be preferentially 80 * allocated from a dedicated "log" vdev. 81 */ 82 83 /* 84 * This controls the amount of time that a ZIL block (lwb) will remain 85 * "open" when it isn't "full", and it has a thread waiting for it to be 86 * committed to stable storage. 
Please refer to the zil_commit_waiter() 87 * function (and the comments within it) for more details. 88 */ 89 int zfs_commit_timeout_pct = 5; 90 91 /* 92 * Disable intent logging replay. This global ZIL switch affects all pools. 93 */ 94 int zil_replay_disable = 0; 95 96 /* 97 * Tunable parameter for debugging or performance analysis. Setting 98 * zfs_nocacheflush will cause corruption on power loss if a volatile 99 * out-of-order write cache is enabled. 100 */ 101 boolean_t zfs_nocacheflush = B_FALSE; 102 103 /* 104 * Limit SLOG write size per commit executed with synchronous priority. 105 * Any writes above that will be executed with lower (asynchronous) priority 106 * to limit potential SLOG device abuse by single active ZIL writer. 107 */ 108 uint64_t zil_slog_bulk = 768 * 1024; 109 110 static kmem_cache_t *zil_lwb_cache; 111 static kmem_cache_t *zil_zcw_cache; 112 113 static void zil_async_to_sync(zilog_t *zilog, uint64_t foid); 114 115 #define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \ 116 sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused)) 117 118 static int 119 zil_bp_compare(const void *x1, const void *x2) 120 { 121 const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva; 122 const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva; 123 124 if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2)) 125 return (-1); 126 if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2)) 127 return (1); 128 129 if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2)) 130 return (-1); 131 if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2)) 132 return (1); 133 134 return (0); 135 } 136 137 static void 138 zil_bp_tree_init(zilog_t *zilog) 139 { 140 avl_create(&zilog->zl_bp_tree, zil_bp_compare, 141 sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node)); 142 } 143 144 static void 145 zil_bp_tree_fini(zilog_t *zilog) 146 { 147 avl_tree_t *t = &zilog->zl_bp_tree; 148 zil_bp_node_t *zn; 149 void *cookie = NULL; 150 151 while ((zn = avl_destroy_nodes(t, &cookie)) != NULL) 152 kmem_free(zn, sizeof (zil_bp_node_t)); 153 154 avl_destroy(t); 155 } 156 157 int 158 zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp) 159 { 160 avl_tree_t *t = &zilog->zl_bp_tree; 161 const dva_t *dva; 162 zil_bp_node_t *zn; 163 avl_index_t where; 164 165 if (BP_IS_EMBEDDED(bp)) 166 return (0); 167 168 dva = BP_IDENTITY(bp); 169 170 if (avl_find(t, dva, &where) != NULL) 171 return (SET_ERROR(EEXIST)); 172 173 zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP); 174 zn->zn_dva = *dva; 175 avl_insert(t, zn, where); 176 177 return (0); 178 } 179 180 static zil_header_t * 181 zil_header_in_syncing_context(zilog_t *zilog) 182 { 183 return ((zil_header_t *)zilog->zl_header); 184 } 185 186 static void 187 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp) 188 { 189 zio_cksum_t *zc = &bp->blk_cksum; 190 191 zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL); 192 zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL); 193 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os); 194 zc->zc_word[ZIL_ZC_SEQ] = 1ULL; 195 } 196 197 /* 198 * Read a log block and make sure it's valid. 
199 */ 200 static int 201 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst, 202 char **end) 203 { 204 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 205 arc_flags_t aflags = ARC_FLAG_WAIT; 206 arc_buf_t *abuf = NULL; 207 zbookmark_phys_t zb; 208 int error; 209 210 if (zilog->zl_header->zh_claim_txg == 0) 211 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 212 213 if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 214 zio_flags |= ZIO_FLAG_SPECULATIVE; 215 216 SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET], 217 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]); 218 219 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 220 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 221 222 if (error == 0) { 223 zio_cksum_t cksum = bp->blk_cksum; 224 225 /* 226 * Validate the checksummed log block. 227 * 228 * Sequence numbers should be... sequential. The checksum 229 * verifier for the next block should be bp's checksum plus 1. 230 * 231 * Also check the log chain linkage and size used. 232 */ 233 cksum.zc_word[ZIL_ZC_SEQ]++; 234 235 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 236 zil_chain_t *zilc = abuf->b_data; 237 char *lr = (char *)(zilc + 1); 238 uint64_t len = zilc->zc_nused - sizeof (zil_chain_t); 239 240 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 241 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) { 242 error = SET_ERROR(ECKSUM); 243 } else { 244 ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE); 245 bcopy(lr, dst, len); 246 *end = (char *)dst + len; 247 *nbp = zilc->zc_next_blk; 248 } 249 } else { 250 char *lr = abuf->b_data; 251 uint64_t size = BP_GET_LSIZE(bp); 252 zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1; 253 254 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 255 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) || 256 (zilc->zc_nused > (size - sizeof (*zilc)))) { 257 error = SET_ERROR(ECKSUM); 258 } else { 259 ASSERT3U(zilc->zc_nused, <=, 260 SPA_OLD_MAXBLOCKSIZE); 261 bcopy(lr, dst, zilc->zc_nused); 262 *end = (char *)dst + zilc->zc_nused; 263 *nbp = zilc->zc_next_blk; 264 } 265 } 266 267 arc_buf_destroy(abuf, &abuf); 268 } 269 270 return (error); 271 } 272 273 /* 274 * Read a TX_WRITE log data block. 275 */ 276 static int 277 zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf) 278 { 279 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 280 const blkptr_t *bp = &lr->lr_blkptr; 281 arc_flags_t aflags = ARC_FLAG_WAIT; 282 arc_buf_t *abuf = NULL; 283 zbookmark_phys_t zb; 284 int error; 285 286 if (BP_IS_HOLE(bp)) { 287 if (wbuf != NULL) 288 bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length)); 289 return (0); 290 } 291 292 if (zilog->zl_header->zh_claim_txg == 0) 293 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 294 295 SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid, 296 ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp)); 297 298 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 299 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 300 301 if (error == 0) { 302 if (wbuf != NULL) 303 bcopy(abuf->b_data, wbuf, arc_buf_size(abuf)); 304 arc_buf_destroy(abuf, &abuf); 305 } 306 307 return (error); 308 } 309 310 /* 311 * Parse the intent log, and call parse_func for each valid record within. 
312 */ 313 int 314 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, 315 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg) 316 { 317 const zil_header_t *zh = zilog->zl_header; 318 boolean_t claimed = !!zh->zh_claim_txg; 319 uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX; 320 uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX; 321 uint64_t max_blk_seq = 0; 322 uint64_t max_lr_seq = 0; 323 uint64_t blk_count = 0; 324 uint64_t lr_count = 0; 325 blkptr_t blk, next_blk; 326 char *lrbuf, *lrp; 327 int error = 0; 328 329 /* 330 * Old logs didn't record the maximum zh_claim_lr_seq. 331 */ 332 if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 333 claim_lr_seq = UINT64_MAX; 334 335 /* 336 * Starting at the block pointed to by zh_log we read the log chain. 337 * For each block in the chain we strongly check that block to 338 * ensure its validity. We stop when an invalid block is found. 339 * For each block pointer in the chain we call parse_blk_func(). 340 * For each record in each valid block we call parse_lr_func(). 341 * If the log has been claimed, stop if we encounter a sequence 342 * number greater than the highest claimed sequence number. 343 */ 344 lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE); 345 zil_bp_tree_init(zilog); 346 347 for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) { 348 uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ]; 349 int reclen; 350 char *end; 351 352 if (blk_seq > claim_blk_seq) 353 break; 354 if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0) 355 break; 356 ASSERT3U(max_blk_seq, <, blk_seq); 357 max_blk_seq = blk_seq; 358 blk_count++; 359 360 if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq) 361 break; 362 363 error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end); 364 if (error != 0) 365 break; 366 367 for (lrp = lrbuf; lrp < end; lrp += reclen) { 368 lr_t *lr = (lr_t *)lrp; 369 reclen = lr->lrc_reclen; 370 ASSERT3U(reclen, >=, sizeof (lr_t)); 371 if (lr->lrc_seq > claim_lr_seq) 372 goto done; 373 if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0) 374 goto done; 375 ASSERT3U(max_lr_seq, <, lr->lrc_seq); 376 max_lr_seq = lr->lrc_seq; 377 lr_count++; 378 } 379 } 380 done: 381 zilog->zl_parse_error = error; 382 zilog->zl_parse_blk_seq = max_blk_seq; 383 zilog->zl_parse_lr_seq = max_lr_seq; 384 zilog->zl_parse_blk_count = blk_count; 385 zilog->zl_parse_lr_count = lr_count; 386 387 ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) || 388 (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq)); 389 390 zil_bp_tree_fini(zilog); 391 zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE); 392 393 return (error); 394 } 395 396 static int 397 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg) 398 { 399 /* 400 * Claim log block if not already committed and not already claimed. 401 * If tx == NULL, just verify that the block is claimable. 402 */ 403 if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg || 404 zil_bp_tree_add(zilog, bp) != 0) 405 return (0); 406 407 return (zio_wait(zio_claim(NULL, zilog->zl_spa, 408 tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL, 409 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB))); 410 } 411 412 static int 413 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg) 414 { 415 lr_write_t *lr = (lr_write_t *)lrc; 416 int error; 417 418 if (lrc->lrc_txtype != TX_WRITE) 419 return (0); 420 421 /* 422 * If the block is not readable, don't claim it. 
This can happen 423 * in normal operation when a log block is written to disk before 424 * some of the dmu_sync() blocks it points to. In this case, the 425 * transaction cannot have been committed to anyone (we would have 426 * waited for all writes to be stable first), so it is semantically 427 * correct to declare this the end of the log. 428 */ 429 if (lr->lr_blkptr.blk_birth >= first_txg && 430 (error = zil_read_log_data(zilog, lr, NULL)) != 0) 431 return (error); 432 return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg)); 433 } 434 435 /* ARGSUSED */ 436 static int 437 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg) 438 { 439 zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 440 441 return (0); 442 } 443 444 static int 445 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg) 446 { 447 lr_write_t *lr = (lr_write_t *)lrc; 448 blkptr_t *bp = &lr->lr_blkptr; 449 450 /* 451 * If we previously claimed it, we need to free it. 452 */ 453 if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE && 454 bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 && 455 !BP_IS_HOLE(bp)) 456 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 457 458 return (0); 459 } 460 461 static int 462 zil_lwb_vdev_compare(const void *x1, const void *x2) 463 { 464 const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev; 465 const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev; 466 467 if (v1 < v2) 468 return (-1); 469 if (v1 > v2) 470 return (1); 471 472 return (0); 473 } 474 475 static lwb_t * 476 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg) 477 { 478 lwb_t *lwb; 479 480 lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP); 481 lwb->lwb_zilog = zilog; 482 lwb->lwb_blk = *bp; 483 lwb->lwb_slog = slog; 484 lwb->lwb_state = LWB_STATE_CLOSED; 485 lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp)); 486 lwb->lwb_max_txg = txg; 487 lwb->lwb_write_zio = NULL; 488 lwb->lwb_root_zio = NULL; 489 lwb->lwb_tx = NULL; 490 lwb->lwb_issued_timestamp = 0; 491 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 492 lwb->lwb_nused = sizeof (zil_chain_t); 493 lwb->lwb_sz = BP_GET_LSIZE(bp); 494 } else { 495 lwb->lwb_nused = 0; 496 lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t); 497 } 498 499 mutex_enter(&zilog->zl_lock); 500 list_insert_tail(&zilog->zl_lwb_list, lwb); 501 mutex_exit(&zilog->zl_lock); 502 503 ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock)); 504 ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); 505 ASSERT(list_is_empty(&lwb->lwb_waiters)); 506 507 return (lwb); 508 } 509 510 static void 511 zil_free_lwb(zilog_t *zilog, lwb_t *lwb) 512 { 513 ASSERT(MUTEX_HELD(&zilog->zl_lock)); 514 ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock)); 515 ASSERT(list_is_empty(&lwb->lwb_waiters)); 516 517 if (lwb->lwb_state == LWB_STATE_OPENED) { 518 avl_tree_t *t = &lwb->lwb_vdev_tree; 519 void *cookie = NULL; 520 zil_vdev_node_t *zv; 521 522 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) 523 kmem_free(zv, sizeof (*zv)); 524 525 ASSERT3P(lwb->lwb_root_zio, !=, NULL); 526 ASSERT3P(lwb->lwb_write_zio, !=, NULL); 527 528 zio_cancel(lwb->lwb_root_zio); 529 zio_cancel(lwb->lwb_write_zio); 530 531 lwb->lwb_root_zio = NULL; 532 lwb->lwb_write_zio = NULL; 533 } else { 534 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 535 } 536 537 ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); 538 ASSERT3P(lwb->lwb_write_zio, ==, NULL); 539 ASSERT3P(lwb->lwb_root_zio, ==, NULL); 540 541 /* 542 * Clear the zilog's field to indicate this lwb is no longer 543 * valid, and prevent use-after-free 
errors. 544 */ 545 if (zilog->zl_last_lwb_opened == lwb) 546 zilog->zl_last_lwb_opened = NULL; 547 548 kmem_cache_free(zil_lwb_cache, lwb); 549 } 550 551 /* 552 * Called when we create in-memory log transactions so that we know 553 * to cleanup the itxs at the end of spa_sync(). 554 */ 555 void 556 zilog_dirty(zilog_t *zilog, uint64_t txg) 557 { 558 dsl_pool_t *dp = zilog->zl_dmu_pool; 559 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os); 560 561 ASSERT(spa_writeable(zilog->zl_spa)); 562 563 if (ds->ds_is_snapshot) 564 panic("dirtying snapshot!"); 565 566 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) { 567 /* up the hold count until we can be written out */ 568 dmu_buf_add_ref(ds->ds_dbuf, zilog); 569 570 zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg); 571 } 572 } 573 574 /* 575 * Determine if the zil is dirty in the specified txg. Callers wanting to 576 * ensure that the dirty state does not change must hold the itxg_lock for 577 * the specified txg. Holding the lock will ensure that the zil cannot be 578 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current 579 * state. 580 */ 581 boolean_t 582 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg) 583 { 584 dsl_pool_t *dp = zilog->zl_dmu_pool; 585 586 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK)) 587 return (B_TRUE); 588 return (B_FALSE); 589 } 590 591 /* 592 * Determine if the zil is dirty. The zil is considered dirty if it has 593 * any pending itx records that have not been cleaned by zil_clean(). 594 */ 595 boolean_t 596 zilog_is_dirty(zilog_t *zilog) 597 { 598 dsl_pool_t *dp = zilog->zl_dmu_pool; 599 600 for (int t = 0; t < TXG_SIZE; t++) { 601 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t)) 602 return (B_TRUE); 603 } 604 return (B_FALSE); 605 } 606 607 /* 608 * Create an on-disk intent log. 609 */ 610 static lwb_t * 611 zil_create(zilog_t *zilog) 612 { 613 const zil_header_t *zh = zilog->zl_header; 614 lwb_t *lwb = NULL; 615 uint64_t txg = 0; 616 dmu_tx_t *tx = NULL; 617 blkptr_t blk; 618 int error = 0; 619 boolean_t slog = FALSE; 620 621 /* 622 * Wait for any previous destroy to complete. 623 */ 624 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 625 626 ASSERT(zh->zh_claim_txg == 0); 627 ASSERT(zh->zh_replay_seq == 0); 628 629 blk = zh->zh_log; 630 631 /* 632 * Allocate an initial log block if: 633 * - there isn't one already 634 * - the existing block is the wrong endianess 635 */ 636 if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) { 637 tx = dmu_tx_create(zilog->zl_os); 638 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 639 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 640 txg = dmu_tx_get_txg(tx); 641 642 if (!BP_IS_HOLE(&blk)) { 643 zio_free_zil(zilog->zl_spa, txg, &blk); 644 BP_ZERO(&blk); 645 } 646 647 error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL, 648 ZIL_MIN_BLKSZ, &slog); 649 650 if (error == 0) 651 zil_init_log_chain(zilog, &blk); 652 } 653 654 /* 655 * Allocate a log write block (lwb) for the first log block. 656 */ 657 if (error == 0) 658 lwb = zil_alloc_lwb(zilog, &blk, slog, txg); 659 660 /* 661 * If we just allocated the first log block, commit our transaction 662 * and wait for zil_sync() to stuff the block poiner into zh_log. 663 * (zh is part of the MOS, so we cannot modify it in open context.) 
664 */ 665 if (tx != NULL) { 666 dmu_tx_commit(tx); 667 txg_wait_synced(zilog->zl_dmu_pool, txg); 668 } 669 670 ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0); 671 672 return (lwb); 673 } 674 675 /* 676 * In one tx, free all log blocks and clear the log header. If keep_first 677 * is set, then we're replaying a log with no content. We want to keep the 678 * first block, however, so that the first synchronous transaction doesn't 679 * require a txg_wait_synced() in zil_create(). We don't need to 680 * txg_wait_synced() here either when keep_first is set, because both 681 * zil_create() and zil_destroy() will wait for any in-progress destroys 682 * to complete. 683 */ 684 void 685 zil_destroy(zilog_t *zilog, boolean_t keep_first) 686 { 687 const zil_header_t *zh = zilog->zl_header; 688 lwb_t *lwb; 689 dmu_tx_t *tx; 690 uint64_t txg; 691 692 /* 693 * Wait for any previous destroy to complete. 694 */ 695 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 696 697 zilog->zl_old_header = *zh; /* debugging aid */ 698 699 if (BP_IS_HOLE(&zh->zh_log)) 700 return; 701 702 tx = dmu_tx_create(zilog->zl_os); 703 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 704 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 705 txg = dmu_tx_get_txg(tx); 706 707 mutex_enter(&zilog->zl_lock); 708 709 ASSERT3U(zilog->zl_destroy_txg, <, txg); 710 zilog->zl_destroy_txg = txg; 711 zilog->zl_keep_first = keep_first; 712 713 if (!list_is_empty(&zilog->zl_lwb_list)) { 714 ASSERT(zh->zh_claim_txg == 0); 715 VERIFY(!keep_first); 716 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 717 list_remove(&zilog->zl_lwb_list, lwb); 718 if (lwb->lwb_buf != NULL) 719 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 720 zio_free(zilog->zl_spa, txg, &lwb->lwb_blk); 721 zil_free_lwb(zilog, lwb); 722 } 723 } else if (!keep_first) { 724 zil_destroy_sync(zilog, tx); 725 } 726 mutex_exit(&zilog->zl_lock); 727 728 dmu_tx_commit(tx); 729 } 730 731 void 732 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx) 733 { 734 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 735 (void) zil_parse(zilog, zil_free_log_block, 736 zil_free_log_record, tx, zilog->zl_header->zh_claim_txg); 737 } 738 739 int 740 zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg) 741 { 742 dmu_tx_t *tx = txarg; 743 uint64_t first_txg = dmu_tx_get_txg(tx); 744 zilog_t *zilog; 745 zil_header_t *zh; 746 objset_t *os; 747 int error; 748 749 error = dmu_objset_own_obj(dp, ds->ds_object, 750 DMU_OST_ANY, B_FALSE, FTAG, &os); 751 if (error != 0) { 752 /* 753 * EBUSY indicates that the objset is inconsistent, in which 754 * case it can not have a ZIL. 755 */ 756 if (error != EBUSY) { 757 cmn_err(CE_WARN, "can't open objset for %llu, error %u", 758 (unsigned long long)ds->ds_object, error); 759 } 760 return (0); 761 } 762 763 zilog = dmu_objset_zil(os); 764 zh = zil_header_in_syncing_context(zilog); 765 766 if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) { 767 if (!BP_IS_HOLE(&zh->zh_log)) 768 zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log); 769 BP_ZERO(&zh->zh_log); 770 dsl_dataset_dirty(dmu_objset_ds(os), tx); 771 dmu_objset_disown(os, FTAG); 772 return (0); 773 } 774 775 /* 776 * Claim all log blocks if we haven't already done so, and remember 777 * the highest claimed sequence number. This ensures that if we can 778 * read only part of the log now (e.g. due to a missing device), 779 * but we can read the entire log later, we will not try to replay 780 * or destroy beyond the last block we successfully claimed. 
781 */ 782 ASSERT3U(zh->zh_claim_txg, <=, first_txg); 783 if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) { 784 (void) zil_parse(zilog, zil_claim_log_block, 785 zil_claim_log_record, tx, first_txg); 786 zh->zh_claim_txg = first_txg; 787 zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq; 788 zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq; 789 if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1) 790 zh->zh_flags |= ZIL_REPLAY_NEEDED; 791 zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID; 792 dsl_dataset_dirty(dmu_objset_ds(os), tx); 793 } 794 795 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1)); 796 dmu_objset_disown(os, FTAG); 797 return (0); 798 } 799 800 /* 801 * Check the log by walking the log chain. 802 * Checksum errors are ok as they indicate the end of the chain. 803 * Any other error (no device or read failure) returns an error. 804 */ 805 /* ARGSUSED */ 806 int 807 zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx) 808 { 809 zilog_t *zilog; 810 objset_t *os; 811 blkptr_t *bp; 812 int error; 813 814 ASSERT(tx == NULL); 815 816 error = dmu_objset_from_ds(ds, &os); 817 if (error != 0) { 818 cmn_err(CE_WARN, "can't open objset %llu, error %d", 819 (unsigned long long)ds->ds_object, error); 820 return (0); 821 } 822 823 zilog = dmu_objset_zil(os); 824 bp = (blkptr_t *)&zilog->zl_header->zh_log; 825 826 /* 827 * Check the first block and determine if it's on a log device 828 * which may have been removed or faulted prior to loading this 829 * pool. If so, there's no point in checking the rest of the log 830 * as its content should have already been synced to the pool. 831 */ 832 if (!BP_IS_HOLE(bp)) { 833 vdev_t *vd; 834 boolean_t valid = B_TRUE; 835 836 spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER); 837 vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0])); 838 if (vd->vdev_islog && vdev_is_dead(vd)) 839 valid = vdev_log_state_valid(vd); 840 spa_config_exit(os->os_spa, SCL_STATE, FTAG); 841 842 if (!valid) 843 return (0); 844 } 845 846 /* 847 * Because tx == NULL, zil_claim_log_block() will not actually claim 848 * any blocks, but just determine whether it is possible to do so. 849 * In addition to checking the log chain, zil_claim_log_block() 850 * will invoke zio_claim() with a done func of spa_claim_notify(), 851 * which will update spa_max_claim_txg. See spa_load() for details. 852 */ 853 error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx, 854 zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa)); 855 856 return ((error == ECKSUM || error == ENOENT) ? 0 : error); 857 } 858 859 /* 860 * When an itx is "skipped", this function is used to properly mark the 861 * waiter as "done, and signal any thread(s) waiting on it. An itx can 862 * be skipped (and not committed to an lwb) for a variety of reasons, 863 * one of them being that the itx was committed via spa_sync(), prior to 864 * it being committed to an lwb; this can happen if a thread calling 865 * zil_commit() is racing with spa_sync(). 866 */ 867 static void 868 zil_commit_waiter_skip(zil_commit_waiter_t *zcw) 869 { 870 mutex_enter(&zcw->zcw_lock); 871 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 872 zcw->zcw_done = B_TRUE; 873 cv_broadcast(&zcw->zcw_cv); 874 mutex_exit(&zcw->zcw_lock); 875 } 876 877 /* 878 * This function is used when the given waiter is to be linked into an 879 * lwb's "lwb_waiter" list; i.e. when the itx is committed to the lwb. 
880 * At this point, the waiter will no longer be referenced by the itx, 881 * and instead, will be referenced by the lwb. 882 */ 883 static void 884 zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb) 885 { 886 mutex_enter(&zcw->zcw_lock); 887 ASSERT(!list_link_active(&zcw->zcw_node)); 888 ASSERT3P(zcw->zcw_lwb, ==, NULL); 889 ASSERT3P(lwb, !=, NULL); 890 ASSERT(lwb->lwb_state == LWB_STATE_OPENED || 891 lwb->lwb_state == LWB_STATE_ISSUED); 892 893 list_insert_tail(&lwb->lwb_waiters, zcw); 894 zcw->zcw_lwb = lwb; 895 mutex_exit(&zcw->zcw_lock); 896 } 897 898 /* 899 * This function is used when zio_alloc_zil() fails to allocate a ZIL 900 * block, and the given waiter must be linked to the "nolwb waiters" 901 * list inside of zil_process_commit_list(). 902 */ 903 static void 904 zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb) 905 { 906 mutex_enter(&zcw->zcw_lock); 907 ASSERT(!list_link_active(&zcw->zcw_node)); 908 ASSERT3P(zcw->zcw_lwb, ==, NULL); 909 list_insert_tail(nolwb, zcw); 910 mutex_exit(&zcw->zcw_lock); 911 } 912 913 void 914 zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp) 915 { 916 avl_tree_t *t = &lwb->lwb_vdev_tree; 917 avl_index_t where; 918 zil_vdev_node_t *zv, zvsearch; 919 int ndvas = BP_GET_NDVAS(bp); 920 int i; 921 922 if (zfs_nocacheflush) 923 return; 924 925 mutex_enter(&lwb->lwb_vdev_lock); 926 for (i = 0; i < ndvas; i++) { 927 zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 928 if (avl_find(t, &zvsearch, &where) == NULL) { 929 zv = kmem_alloc(sizeof (*zv), KM_SLEEP); 930 zv->zv_vdev = zvsearch.zv_vdev; 931 avl_insert(t, zv, where); 932 } 933 } 934 mutex_exit(&lwb->lwb_vdev_lock); 935 } 936 937 void 938 zil_lwb_add_txg(lwb_t *lwb, uint64_t txg) 939 { 940 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg); 941 } 942 943 /* 944 * This function is a called after all VDEVs associated with a given lwb 945 * write have completed their DKIOCFLUSHWRITECACHE command; or as soon 946 * as the lwb write completes, if "zfs_nocacheflush" is set. 947 * 948 * The intention is for this function to be called as soon as the 949 * contents of an lwb are considered "stable" on disk, and will survive 950 * any sudden loss of power. At this point, any threads waiting for the 951 * lwb to reach this state are signalled, and the "waiter" structures 952 * are marked "done". 953 */ 954 static void 955 zil_lwb_flush_vdevs_done(zio_t *zio) 956 { 957 lwb_t *lwb = zio->io_private; 958 zilog_t *zilog = lwb->lwb_zilog; 959 dmu_tx_t *tx = lwb->lwb_tx; 960 zil_commit_waiter_t *zcw; 961 962 spa_config_exit(zilog->zl_spa, SCL_STATE, lwb); 963 964 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 965 966 mutex_enter(&zilog->zl_lock); 967 968 /* 969 * Ensure the lwb buffer pointer is cleared before releasing the 970 * txg. If we have had an allocation failure and the txg is 971 * waiting to sync then we want zil_sync() to remove the lwb so 972 * that it's not picked up as the next new one in 973 * zil_process_commit_list(). zil_sync() will only remove the 974 * lwb if lwb_buf is null. 975 */ 976 lwb->lwb_buf = NULL; 977 lwb->lwb_tx = NULL; 978 979 ASSERT3U(lwb->lwb_issued_timestamp, >, 0); 980 zilog->zl_last_lwb_latency = gethrtime() - lwb->lwb_issued_timestamp; 981 982 lwb->lwb_root_zio = NULL; 983 lwb->lwb_state = LWB_STATE_DONE; 984 985 if (zilog->zl_last_lwb_opened == lwb) { 986 /* 987 * Remember the highest committed log sequence number 988 * for ztest. 
We only update this value when all the log 989 * writes succeeded, because ztest wants to ASSERT that 990 * it got the whole log chain. 991 */ 992 zilog->zl_commit_lr_seq = zilog->zl_lr_seq; 993 } 994 995 while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) { 996 mutex_enter(&zcw->zcw_lock); 997 998 ASSERT(list_link_active(&zcw->zcw_node)); 999 list_remove(&lwb->lwb_waiters, zcw); 1000 1001 ASSERT3P(zcw->zcw_lwb, ==, lwb); 1002 zcw->zcw_lwb = NULL; 1003 1004 zcw->zcw_zio_error = zio->io_error; 1005 1006 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 1007 zcw->zcw_done = B_TRUE; 1008 cv_broadcast(&zcw->zcw_cv); 1009 1010 mutex_exit(&zcw->zcw_lock); 1011 } 1012 1013 mutex_exit(&zilog->zl_lock); 1014 1015 /* 1016 * Now that we've written this log block, we have a stable pointer 1017 * to the next block in the chain, so it's OK to let the txg in 1018 * which we allocated the next block sync. 1019 */ 1020 dmu_tx_commit(tx); 1021 } 1022 1023 /* 1024 * This is called when an lwb write completes. This means, this specific 1025 * lwb was written to disk, and all dependent lwb have also been 1026 * written to disk. 1027 * 1028 * At this point, a DKIOCFLUSHWRITECACHE command hasn't been issued to 1029 * the VDEVs involved in writing out this specific lwb. The lwb will be 1030 * "done" once zil_lwb_flush_vdevs_done() is called, which occurs in the 1031 * zio completion callback for the lwb's root zio. 1032 */ 1033 static void 1034 zil_lwb_write_done(zio_t *zio) 1035 { 1036 lwb_t *lwb = zio->io_private; 1037 spa_t *spa = zio->io_spa; 1038 zilog_t *zilog = lwb->lwb_zilog; 1039 avl_tree_t *t = &lwb->lwb_vdev_tree; 1040 void *cookie = NULL; 1041 zil_vdev_node_t *zv; 1042 1043 ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0); 1044 1045 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); 1046 ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG); 1047 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 1048 ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER); 1049 ASSERT(!BP_IS_GANG(zio->io_bp)); 1050 ASSERT(!BP_IS_HOLE(zio->io_bp)); 1051 ASSERT(BP_GET_FILL(zio->io_bp) == 0); 1052 1053 abd_put(zio->io_abd); 1054 1055 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED); 1056 1057 mutex_enter(&zilog->zl_lock); 1058 lwb->lwb_write_zio = NULL; 1059 mutex_exit(&zilog->zl_lock); 1060 1061 if (avl_numnodes(t) == 0) 1062 return; 1063 1064 /* 1065 * If there was an IO error, we're not going to call zio_flush() 1066 * on these vdevs, so we simply empty the tree and free the 1067 * nodes. We avoid calling zio_flush() since there isn't any 1068 * good reason for doing so, after the lwb block failed to be 1069 * written out. 1070 */ 1071 if (zio->io_error != 0) { 1072 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) 1073 kmem_free(zv, sizeof (*zv)); 1074 return; 1075 } 1076 1077 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) { 1078 vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev); 1079 if (vd != NULL) 1080 zio_flush(lwb->lwb_root_zio, vd); 1081 kmem_free(zv, sizeof (*zv)); 1082 } 1083 } 1084 1085 /* 1086 * This function's purpose is to "open" an lwb such that it is ready to 1087 * accept new itxs being committed to it. To do this, the lwb's zio 1088 * structures are created, and linked to the lwb. This function is 1089 * idempotent; if the passed in lwb has already been opened, this 1090 * function is essentially a no-op. 
1091 */ 1092 static void 1093 zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb) 1094 { 1095 zbookmark_phys_t zb; 1096 zio_priority_t prio; 1097 1098 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock)); 1099 ASSERT3P(lwb, !=, NULL); 1100 EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED); 1101 EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED); 1102 1103 SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET], 1104 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, 1105 lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]); 1106 1107 if (lwb->lwb_root_zio == NULL) { 1108 abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, 1109 BP_GET_LSIZE(&lwb->lwb_blk)); 1110 1111 if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk) 1112 prio = ZIO_PRIORITY_SYNC_WRITE; 1113 else 1114 prio = ZIO_PRIORITY_ASYNC_WRITE; 1115 1116 lwb->lwb_root_zio = zio_root(zilog->zl_spa, 1117 zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL); 1118 ASSERT3P(lwb->lwb_root_zio, !=, NULL); 1119 1120 lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, 1121 zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd, 1122 BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb, 1123 prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb); 1124 ASSERT3P(lwb->lwb_write_zio, !=, NULL); 1125 1126 lwb->lwb_state = LWB_STATE_OPENED; 1127 1128 mutex_enter(&zilog->zl_lock); 1129 1130 /* 1131 * The zilog's "zl_last_lwb_opened" field is used to 1132 * build the lwb/zio dependency chain, which is used to 1133 * preserve the ordering of lwb completions that is 1134 * required by the semantics of the ZIL. Each new lwb 1135 * zio becomes a parent of the "previous" lwb zio, such 1136 * that the new lwb's zio cannot complete until the 1137 * "previous" lwb's zio completes. 1138 * 1139 * This is required by the semantics of zil_commit(); 1140 * the commit waiters attached to the lwbs will be woken 1141 * in the lwb zio's completion callback, so this zio 1142 * dependency graph ensures the waiters are woken in the 1143 * correct order (the same order the lwbs were created). 1144 */ 1145 lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened; 1146 if (last_lwb_opened != NULL && 1147 last_lwb_opened->lwb_state != LWB_STATE_DONE) { 1148 ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED || 1149 last_lwb_opened->lwb_state == LWB_STATE_ISSUED); 1150 ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL); 1151 zio_add_child(lwb->lwb_root_zio, 1152 last_lwb_opened->lwb_root_zio); 1153 } 1154 zilog->zl_last_lwb_opened = lwb; 1155 1156 mutex_exit(&zilog->zl_lock); 1157 } 1158 1159 ASSERT3P(lwb->lwb_root_zio, !=, NULL); 1160 ASSERT3P(lwb->lwb_write_zio, !=, NULL); 1161 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 1162 } 1163 1164 /* 1165 * Define a limited set of intent log block sizes. 1166 * 1167 * These must be a multiple of 4KB. Note only the amount used (again 1168 * aligned to 4KB) actually gets written. However, we can't always just 1169 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted. 1170 */ 1171 uint64_t zil_block_buckets[] = { 1172 4096, /* non TX_WRITE */ 1173 8192+4096, /* data base */ 1174 32*1024 + 4096, /* NFS writes */ 1175 UINT64_MAX 1176 }; 1177 1178 /* 1179 * Start a log block write and advance to the next log block. 1180 * Calls are serialized. 
1181 */ 1182 static lwb_t * 1183 zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb) 1184 { 1185 lwb_t *nlwb = NULL; 1186 zil_chain_t *zilc; 1187 spa_t *spa = zilog->zl_spa; 1188 blkptr_t *bp; 1189 dmu_tx_t *tx; 1190 uint64_t txg; 1191 uint64_t zil_blksz, wsz; 1192 int i, error; 1193 boolean_t slog; 1194 1195 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock)); 1196 ASSERT3P(lwb->lwb_root_zio, !=, NULL); 1197 ASSERT3P(lwb->lwb_write_zio, !=, NULL); 1198 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 1199 1200 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 1201 zilc = (zil_chain_t *)lwb->lwb_buf; 1202 bp = &zilc->zc_next_blk; 1203 } else { 1204 zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz); 1205 bp = &zilc->zc_next_blk; 1206 } 1207 1208 ASSERT(lwb->lwb_nused <= lwb->lwb_sz); 1209 1210 /* 1211 * Allocate the next block and save its address in this block 1212 * before writing it in order to establish the log chain. 1213 * Note that if the allocation of nlwb synced before we wrote 1214 * the block that points at it (lwb), we'd leak it if we crashed. 1215 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done(). 1216 * We dirty the dataset to ensure that zil_sync() will be called 1217 * to clean up in the event of allocation failure or I/O failure. 1218 */ 1219 1220 tx = dmu_tx_create(zilog->zl_os); 1221 1222 /* 1223 * Since we are not going to create any new dirty data and we can even 1224 * help with clearing the existing dirty data, we should not be subject 1225 * to the dirty data based delays. 1226 * We (ab)use TXG_WAITED to bypass the delay mechanism. 1227 * One side effect from using TXG_WAITED is that dmu_tx_assign() can 1228 * fail if the pool is suspended. Those are dramatic circumstances, 1229 * so we return NULL to signal that the normal ZIL processing is not 1230 * possible and txg_wait_synced() should be used to ensure that the data 1231 * is on disk. 1232 */ 1233 error = dmu_tx_assign(tx, TXG_WAITED); 1234 if (error != 0) { 1235 ASSERT3S(error, ==, EIO); 1236 dmu_tx_abort(tx); 1237 return (NULL); 1238 } 1239 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 1240 txg = dmu_tx_get_txg(tx); 1241 1242 lwb->lwb_tx = tx; 1243 1244 /* 1245 * Log blocks are pre-allocated. Here we select the size of the next 1246 * block, based on size used in the last block. 1247 * - first find the smallest bucket that will fit the block from a 1248 * limited set of block sizes. This is because it's faster to write 1249 * blocks allocated from the same metaslab as they are adjacent or 1250 * close. 1251 * - next find the maximum from the new suggested size and an array of 1252 * previous sizes. This lessens a picket fence effect of wrongly 1253 * guesssing the size if we have a stream of say 2k, 64k, 2k, 64k 1254 * requests. 1255 * 1256 * Note we only write what is used, but we can't just allocate 1257 * the maximum block size because we can exhaust the available 1258 * pool log space. 
1259 */ 1260 zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t); 1261 for (i = 0; zil_blksz > zil_block_buckets[i]; i++) 1262 continue; 1263 zil_blksz = zil_block_buckets[i]; 1264 if (zil_blksz == UINT64_MAX) 1265 zil_blksz = SPA_OLD_MAXBLOCKSIZE; 1266 zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz; 1267 for (i = 0; i < ZIL_PREV_BLKS; i++) 1268 zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]); 1269 zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1); 1270 1271 BP_ZERO(bp); 1272 1273 /* pass the old blkptr in order to spread log blocks across devs */ 1274 error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz, &slog); 1275 if (error == 0) { 1276 ASSERT3U(bp->blk_birth, ==, txg); 1277 bp->blk_cksum = lwb->lwb_blk.blk_cksum; 1278 bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++; 1279 1280 /* 1281 * Allocate a new log write block (lwb). 1282 */ 1283 nlwb = zil_alloc_lwb(zilog, bp, slog, txg); 1284 } 1285 1286 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 1287 /* For Slim ZIL only write what is used. */ 1288 wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t); 1289 ASSERT3U(wsz, <=, lwb->lwb_sz); 1290 zio_shrink(lwb->lwb_write_zio, wsz); 1291 1292 } else { 1293 wsz = lwb->lwb_sz; 1294 } 1295 1296 zilc->zc_pad = 0; 1297 zilc->zc_nused = lwb->lwb_nused; 1298 zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum; 1299 1300 /* 1301 * clear unused data for security 1302 */ 1303 bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused); 1304 1305 spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER); 1306 1307 zil_lwb_add_block(lwb, &lwb->lwb_blk); 1308 lwb->lwb_issued_timestamp = gethrtime(); 1309 lwb->lwb_state = LWB_STATE_ISSUED; 1310 1311 zio_nowait(lwb->lwb_root_zio); 1312 zio_nowait(lwb->lwb_write_zio); 1313 1314 /* 1315 * If there was an allocation failure then nlwb will be null which 1316 * forces a txg_wait_synced(). 1317 */ 1318 return (nlwb); 1319 } 1320 1321 static lwb_t * 1322 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb) 1323 { 1324 lr_t *lrcb, *lrc; 1325 lr_write_t *lrwb, *lrw; 1326 char *lr_buf; 1327 uint64_t dlen, dnow, lwb_sp, reclen, txg; 1328 1329 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock)); 1330 ASSERT3P(lwb, !=, NULL); 1331 ASSERT3P(lwb->lwb_buf, !=, NULL); 1332 1333 zil_lwb_write_open(zilog, lwb); 1334 1335 lrc = &itx->itx_lr; 1336 lrw = (lr_write_t *)lrc; 1337 1338 /* 1339 * A commit itx doesn't represent any on-disk state; instead 1340 * it's simply used as a place holder on the commit list, and 1341 * provides a mechanism for attaching a "commit waiter" onto the 1342 * correct lwb (such that the waiter can be signalled upon 1343 * completion of that lwb). Thus, we don't process this itx's 1344 * log record if it's a commit itx (these itx's don't have log 1345 * records), and instead link the itx's waiter onto the lwb's 1346 * list of waiters. 1347 * 1348 * For more details, see the comment above zil_commit(). 
1349 */ 1350 if (lrc->lrc_txtype == TX_COMMIT) { 1351 zil_commit_waiter_link_lwb(itx->itx_private, lwb); 1352 itx->itx_private = NULL; 1353 return (lwb); 1354 } 1355 1356 if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) { 1357 dlen = P2ROUNDUP_TYPED( 1358 lrw->lr_length, sizeof (uint64_t), uint64_t); 1359 } else { 1360 dlen = 0; 1361 } 1362 reclen = lrc->lrc_reclen; 1363 zilog->zl_cur_used += (reclen + dlen); 1364 txg = lrc->lrc_txg; 1365 1366 ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen)); 1367 1368 cont: 1369 /* 1370 * If this record won't fit in the current log block, start a new one. 1371 * For WR_NEED_COPY optimize layout for minimal number of chunks. 1372 */ 1373 lwb_sp = lwb->lwb_sz - lwb->lwb_nused; 1374 if (reclen > lwb_sp || (reclen + dlen > lwb_sp && 1375 lwb_sp < ZIL_MAX_WASTE_SPACE && (dlen % ZIL_MAX_LOG_DATA == 0 || 1376 lwb_sp < reclen + dlen % ZIL_MAX_LOG_DATA))) { 1377 lwb = zil_lwb_write_issue(zilog, lwb); 1378 if (lwb == NULL) 1379 return (NULL); 1380 zil_lwb_write_open(zilog, lwb); 1381 ASSERT(LWB_EMPTY(lwb)); 1382 lwb_sp = lwb->lwb_sz - lwb->lwb_nused; 1383 ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp); 1384 } 1385 1386 dnow = MIN(dlen, lwb_sp - reclen); 1387 lr_buf = lwb->lwb_buf + lwb->lwb_nused; 1388 bcopy(lrc, lr_buf, reclen); 1389 lrcb = (lr_t *)lr_buf; /* Like lrc, but inside lwb. */ 1390 lrwb = (lr_write_t *)lrcb; /* Like lrw, but inside lwb. */ 1391 1392 /* 1393 * If it's a write, fetch the data or get its blkptr as appropriate. 1394 */ 1395 if (lrc->lrc_txtype == TX_WRITE) { 1396 if (txg > spa_freeze_txg(zilog->zl_spa)) 1397 txg_wait_synced(zilog->zl_dmu_pool, txg); 1398 if (itx->itx_wr_state != WR_COPIED) { 1399 char *dbuf; 1400 int error; 1401 1402 if (itx->itx_wr_state == WR_NEED_COPY) { 1403 dbuf = lr_buf + reclen; 1404 lrcb->lrc_reclen += dnow; 1405 if (lrwb->lr_length > dnow) 1406 lrwb->lr_length = dnow; 1407 lrw->lr_offset += dnow; 1408 lrw->lr_length -= dnow; 1409 } else { 1410 ASSERT(itx->itx_wr_state == WR_INDIRECT); 1411 dbuf = NULL; 1412 } 1413 1414 /* 1415 * We pass in the "lwb_write_zio" rather than 1416 * "lwb_root_zio" so that the "lwb_write_zio" 1417 * becomes the parent of any zio's created by 1418 * the "zl_get_data" callback. The vdevs are 1419 * flushed after the "lwb_write_zio" completes, 1420 * so we want to make sure that completion 1421 * callback waits for these additional zio's, 1422 * such that the vdevs used by those zio's will 1423 * be included in the lwb's vdev tree, and those 1424 * vdevs will be properly flushed. If we passed 1425 * in "lwb_root_zio" here, then these additional 1426 * vdevs may not be flushed; e.g. if these zio's 1427 * completed after "lwb_write_zio" completed. 1428 */ 1429 error = zilog->zl_get_data(itx->itx_private, 1430 lrwb, dbuf, lwb, lwb->lwb_write_zio); 1431 1432 if (error == EIO) { 1433 txg_wait_synced(zilog->zl_dmu_pool, txg); 1434 return (lwb); 1435 } 1436 if (error != 0) { 1437 ASSERT(error == ENOENT || error == EEXIST || 1438 error == EALREADY); 1439 return (lwb); 1440 } 1441 } 1442 } 1443 1444 /* 1445 * We're actually making an entry, so update lrc_seq to be the 1446 * log record sequence number. Note that this is generally not 1447 * equal to the itx sequence number because not all transactions 1448 * are synchronous, and sometimes spa_sync() gets there first. 
1449 */ 1450 lrcb->lrc_seq = ++zilog->zl_lr_seq; 1451 lwb->lwb_nused += reclen + dnow; 1452 1453 zil_lwb_add_txg(lwb, txg); 1454 1455 ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz); 1456 ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t))); 1457 1458 dlen -= dnow; 1459 if (dlen > 0) { 1460 zilog->zl_cur_used += reclen; 1461 goto cont; 1462 } 1463 1464 return (lwb); 1465 } 1466 1467 itx_t * 1468 zil_itx_create(uint64_t txtype, size_t lrsize) 1469 { 1470 itx_t *itx; 1471 1472 lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t); 1473 1474 itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP); 1475 itx->itx_lr.lrc_txtype = txtype; 1476 itx->itx_lr.lrc_reclen = lrsize; 1477 itx->itx_lr.lrc_seq = 0; /* defensive */ 1478 itx->itx_sync = B_TRUE; /* default is synchronous */ 1479 1480 return (itx); 1481 } 1482 1483 void 1484 zil_itx_destroy(itx_t *itx) 1485 { 1486 kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen); 1487 } 1488 1489 /* 1490 * Free up the sync and async itxs. The itxs_t has already been detached 1491 * so no locks are needed. 1492 */ 1493 static void 1494 zil_itxg_clean(itxs_t *itxs) 1495 { 1496 itx_t *itx; 1497 list_t *list; 1498 avl_tree_t *t; 1499 void *cookie; 1500 itx_async_node_t *ian; 1501 1502 list = &itxs->i_sync_list; 1503 while ((itx = list_head(list)) != NULL) { 1504 /* 1505 * In the general case, commit itxs will not be found 1506 * here, as they'll be committed to an lwb via 1507 * zil_lwb_commit(), and free'd in that function. Having 1508 * said that, it is still possible for commit itxs to be 1509 * found here, due to the following race: 1510 * 1511 * - a thread calls zil_commit() which assigns the 1512 * commit itx to a per-txg i_sync_list 1513 * - zil_itxg_clean() is called (e.g. via spa_sync()) 1514 * while the waiter is still on the i_sync_list 1515 * 1516 * There's nothing to prevent syncing the txg while the 1517 * waiter is on the i_sync_list. This normally doesn't 1518 * happen because spa_sync() is slower than zil_commit(), 1519 * but if zil_commit() calls txg_wait_synced() (e.g. 1520 * because zil_create() or zil_commit_writer_stall() is 1521 * called) we will hit this case. 1522 */ 1523 if (itx->itx_lr.lrc_txtype == TX_COMMIT) 1524 zil_commit_waiter_skip(itx->itx_private); 1525 1526 list_remove(list, itx); 1527 zil_itx_destroy(itx); 1528 } 1529 1530 cookie = NULL; 1531 t = &itxs->i_async_tree; 1532 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 1533 list = &ian->ia_list; 1534 while ((itx = list_head(list)) != NULL) { 1535 list_remove(list, itx); 1536 /* commit itxs should never be on the async lists. */ 1537 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 1538 zil_itx_destroy(itx); 1539 } 1540 list_destroy(list); 1541 kmem_free(ian, sizeof (itx_async_node_t)); 1542 } 1543 avl_destroy(t); 1544 1545 kmem_free(itxs, sizeof (itxs_t)); 1546 } 1547 1548 static int 1549 zil_aitx_compare(const void *x1, const void *x2) 1550 { 1551 const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid; 1552 const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid; 1553 1554 if (o1 < o2) 1555 return (-1); 1556 if (o1 > o2) 1557 return (1); 1558 1559 return (0); 1560 } 1561 1562 /* 1563 * Remove all async itx with the given oid. 
1564 */ 1565 static void 1566 zil_remove_async(zilog_t *zilog, uint64_t oid) 1567 { 1568 uint64_t otxg, txg; 1569 itx_async_node_t *ian; 1570 avl_tree_t *t; 1571 avl_index_t where; 1572 list_t clean_list; 1573 itx_t *itx; 1574 1575 ASSERT(oid != 0); 1576 list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); 1577 1578 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1579 otxg = ZILTEST_TXG; 1580 else 1581 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1582 1583 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1584 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1585 1586 mutex_enter(&itxg->itxg_lock); 1587 if (itxg->itxg_txg != txg) { 1588 mutex_exit(&itxg->itxg_lock); 1589 continue; 1590 } 1591 1592 /* 1593 * Locate the object node and append its list. 1594 */ 1595 t = &itxg->itxg_itxs->i_async_tree; 1596 ian = avl_find(t, &oid, &where); 1597 if (ian != NULL) 1598 list_move_tail(&clean_list, &ian->ia_list); 1599 mutex_exit(&itxg->itxg_lock); 1600 } 1601 while ((itx = list_head(&clean_list)) != NULL) { 1602 list_remove(&clean_list, itx); 1603 /* commit itxs should never be on the async lists. */ 1604 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); 1605 zil_itx_destroy(itx); 1606 } 1607 list_destroy(&clean_list); 1608 } 1609 1610 void 1611 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) 1612 { 1613 uint64_t txg; 1614 itxg_t *itxg; 1615 itxs_t *itxs, *clean = NULL; 1616 1617 /* 1618 * Object ids can be re-instantiated in the next txg so 1619 * remove any async transactions to avoid future leaks. 1620 * This can happen if a fsync occurs on the re-instantiated 1621 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets 1622 * the new file data and flushes a write record for the old object. 1623 */ 1624 if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE) 1625 zil_remove_async(zilog, itx->itx_oid); 1626 1627 /* 1628 * Ensure the data of a renamed file is committed before the rename. 1629 */ 1630 if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME) 1631 zil_async_to_sync(zilog, itx->itx_oid); 1632 1633 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) 1634 txg = ZILTEST_TXG; 1635 else 1636 txg = dmu_tx_get_txg(tx); 1637 1638 itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1639 mutex_enter(&itxg->itxg_lock); 1640 itxs = itxg->itxg_itxs; 1641 if (itxg->itxg_txg != txg) { 1642 if (itxs != NULL) { 1643 /* 1644 * The zil_clean callback hasn't got around to cleaning 1645 * this itxg. Save the itxs for release below. 1646 * This should be rare. 
1647 */ 1648 zfs_dbgmsg("zil_itx_assign: missed itx cleanup for " 1649 "txg %llu", itxg->itxg_txg); 1650 clean = itxg->itxg_itxs; 1651 } 1652 itxg->itxg_txg = txg; 1653 itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP); 1654 1655 list_create(&itxs->i_sync_list, sizeof (itx_t), 1656 offsetof(itx_t, itx_node)); 1657 avl_create(&itxs->i_async_tree, zil_aitx_compare, 1658 sizeof (itx_async_node_t), 1659 offsetof(itx_async_node_t, ia_node)); 1660 } 1661 if (itx->itx_sync) { 1662 list_insert_tail(&itxs->i_sync_list, itx); 1663 } else { 1664 avl_tree_t *t = &itxs->i_async_tree; 1665 uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid; 1666 itx_async_node_t *ian; 1667 avl_index_t where; 1668 1669 ian = avl_find(t, &foid, &where); 1670 if (ian == NULL) { 1671 ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP); 1672 list_create(&ian->ia_list, sizeof (itx_t), 1673 offsetof(itx_t, itx_node)); 1674 ian->ia_foid = foid; 1675 avl_insert(t, ian, where); 1676 } 1677 list_insert_tail(&ian->ia_list, itx); 1678 } 1679 1680 itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx); 1681 1682 /* 1683 * We don't want to dirty the ZIL using ZILTEST_TXG, because 1684 * zil_clean() will never be called using ZILTEST_TXG. Thus, we 1685 * need to be careful to always dirty the ZIL using the "real" 1686 * TXG (not itxg_txg) even when the SPA is frozen. 1687 */ 1688 zilog_dirty(zilog, dmu_tx_get_txg(tx)); 1689 mutex_exit(&itxg->itxg_lock); 1690 1691 /* Release the old itxs now we've dropped the lock */ 1692 if (clean != NULL) 1693 zil_itxg_clean(clean); 1694 } 1695 1696 /* 1697 * If there are any in-memory intent log transactions which have now been 1698 * synced then start up a taskq to free them. We should only do this after we 1699 * have written out the uberblocks (i.e. txg has been comitted) so that 1700 * don't inadvertently clean out in-memory log records that would be required 1701 * by zil_commit(). 1702 */ 1703 void 1704 zil_clean(zilog_t *zilog, uint64_t synced_txg) 1705 { 1706 itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK]; 1707 itxs_t *clean_me; 1708 1709 ASSERT3U(synced_txg, <, ZILTEST_TXG); 1710 1711 mutex_enter(&itxg->itxg_lock); 1712 if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) { 1713 mutex_exit(&itxg->itxg_lock); 1714 return; 1715 } 1716 ASSERT3U(itxg->itxg_txg, <=, synced_txg); 1717 ASSERT3U(itxg->itxg_txg, !=, 0); 1718 clean_me = itxg->itxg_itxs; 1719 itxg->itxg_itxs = NULL; 1720 itxg->itxg_txg = 0; 1721 mutex_exit(&itxg->itxg_lock); 1722 /* 1723 * Preferably start a task queue to free up the old itxs but 1724 * if taskq_dispatch can't allocate resources to do that then 1725 * free it in-line. This should be rare. Note, using TQ_SLEEP 1726 * created a bad performance problem. 1727 */ 1728 ASSERT3P(zilog->zl_dmu_pool, !=, NULL); 1729 ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL); 1730 if (taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq, 1731 (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == NULL) 1732 zil_itxg_clean(clean_me); 1733 } 1734 1735 /* 1736 * This function will traverse the queue of itxs that need to be 1737 * committed, and move them onto the ZIL's zl_itx_commit_list. 
1738 */ 1739 static void 1740 zil_get_commit_list(zilog_t *zilog) 1741 { 1742 uint64_t otxg, txg; 1743 list_t *commit_list = &zilog->zl_itx_commit_list; 1744 1745 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock)); 1746 1747 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1748 otxg = ZILTEST_TXG; 1749 else 1750 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1751 1752 /* 1753 * This is inherently racy, since there is nothing to prevent 1754 * the last synced txg from changing. That's okay since we'll 1755 * only commit things in the future. 1756 */ 1757 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1758 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1759 1760 mutex_enter(&itxg->itxg_lock); 1761 if (itxg->itxg_txg != txg) { 1762 mutex_exit(&itxg->itxg_lock); 1763 continue; 1764 } 1765 1766 /* 1767 * If we're adding itx records to the zl_itx_commit_list, 1768 * then the zil better be dirty in this "txg". We can assert 1769 * that here since we're holding the itxg_lock which will 1770 * prevent spa_sync from cleaning it. Once we add the itxs 1771 * to the zl_itx_commit_list we must commit it to disk even 1772 * if it's unnecessary (i.e. the txg was synced). 1773 */ 1774 ASSERT(zilog_is_dirty_in_txg(zilog, txg) || 1775 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX); 1776 list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list); 1777 1778 mutex_exit(&itxg->itxg_lock); 1779 } 1780 } 1781 1782 /* 1783 * Move the async itxs for a specified object to commit into sync lists. 1784 */ 1785 static void 1786 zil_async_to_sync(zilog_t *zilog, uint64_t foid) 1787 { 1788 uint64_t otxg, txg; 1789 itx_async_node_t *ian; 1790 avl_tree_t *t; 1791 avl_index_t where; 1792 1793 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 1794 otxg = ZILTEST_TXG; 1795 else 1796 otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 1797 1798 /* 1799 * This is inherently racy, since there is nothing to prevent 1800 * the last synced txg from changing. 1801 */ 1802 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 1803 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 1804 1805 mutex_enter(&itxg->itxg_lock); 1806 if (itxg->itxg_txg != txg) { 1807 mutex_exit(&itxg->itxg_lock); 1808 continue; 1809 } 1810 1811 /* 1812 * If a foid is specified then find that node and append its 1813 * list. Otherwise walk the tree appending all the lists 1814 * to the sync list. We add to the end rather than the 1815 * beginning to ensure the create has happened. 1816 */ 1817 t = &itxg->itxg_itxs->i_async_tree; 1818 if (foid != 0) { 1819 ian = avl_find(t, &foid, &where); 1820 if (ian != NULL) { 1821 list_move_tail(&itxg->itxg_itxs->i_sync_list, 1822 &ian->ia_list); 1823 } 1824 } else { 1825 void *cookie = NULL; 1826 1827 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 1828 list_move_tail(&itxg->itxg_itxs->i_sync_list, 1829 &ian->ia_list); 1830 list_destroy(&ian->ia_list); 1831 kmem_free(ian, sizeof (itx_async_node_t)); 1832 } 1833 } 1834 mutex_exit(&itxg->itxg_lock); 1835 } 1836 } 1837 1838 /* 1839 * This function will prune commit itxs that are at the head of the 1840 * commit list (it won't prune past the first non-commit itx), and 1841 * either: a) attach them to the last lwb that's still pending 1842 * completion, or b) skip them altogether. 1843 * 1844 * This is used as a performance optimization to prevent commit itxs 1845 * from generating new lwbs when it's unnecessary to do so. 
1846 */
1847 static void
1848 zil_prune_commit_list(zilog_t *zilog)
1849 {
1850 itx_t *itx;
1851
1852 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1853
1854 while (itx = list_head(&zilog->zl_itx_commit_list)) {
1855 lr_t *lrc = &itx->itx_lr;
1856 if (lrc->lrc_txtype != TX_COMMIT)
1857 break;
1858
1859 mutex_enter(&zilog->zl_lock);
1860
1861 lwb_t *last_lwb = zilog->zl_last_lwb_opened;
1862 if (last_lwb == NULL || last_lwb->lwb_state == LWB_STATE_DONE) {
1863 /*
1864 * All of the itxs this waiter was waiting on
1865 * must have already completed (or there were
1866 * never any itxs for it to wait on), so it's
1867 * safe to skip this waiter and mark it done.
1868 */
1869 zil_commit_waiter_skip(itx->itx_private);
1870 } else {
1871 zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
1872 itx->itx_private = NULL;
1873 }
1874
1875 mutex_exit(&zilog->zl_lock);
1876
1877 list_remove(&zilog->zl_itx_commit_list, itx);
1878 zil_itx_destroy(itx);
1879 }
1880
1881 IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
1882 }
1883
1884 static void
1885 zil_commit_writer_stall(zilog_t *zilog)
1886 {
1887 /*
1888 * When zio_alloc_zil() fails to allocate the next lwb block on
1889 * disk, we must call txg_wait_synced() to ensure all of the
1890 * lwbs in the zilog's zl_lwb_list are synced and then freed (in
1891 * zil_sync()), such that any subsequent ZIL writer (i.e. a call
1892 * to zil_process_commit_list()) will have to call zil_create(),
1893 * and start a new ZIL chain.
1894 *
1895 * Since zio_alloc_zil() failed, the lwb that was previously
1896 * issued does not have a pointer to the "next" lwb on disk.
1897 * Thus, if another ZIL writer thread were to allocate the "next"
1898 * on-disk lwb, that block could be leaked in the event of a
1899 * crash (because the previous lwb on-disk would not point to
1900 * it).
1901 *
1902 * We must hold the zilog's zl_writer_lock while we do this, to
1903 * ensure no new threads enter zil_process_commit_list() until
1904 * all lwbs in the zl_lwb_list have been synced and freed
1905 * (which is achieved via the txg_wait_synced() call).
1906 */
1907 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1908 txg_wait_synced(zilog->zl_dmu_pool, 0);
1909 ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
1910 }
1911
1912 /*
1913 * This function will traverse the commit list, creating new lwbs as
1914 * needed, and committing the itxs from the commit list to these newly
1915 * created lwbs. Additionally, as a new lwb is created, the previous
1916 * lwb will be issued to the zio layer to be written to disk.
1917 */
1918 static void
1919 zil_process_commit_list(zilog_t *zilog)
1920 {
1921 spa_t *spa = zilog->zl_spa;
1922 list_t nolwb_waiters;
1923 lwb_t *lwb;
1924 itx_t *itx;
1925
1926 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1927
1928 /*
1929 * Return if there's nothing to commit before we dirty the fs by
1930 * calling zil_create().
1931 */ 1932 if (list_head(&zilog->zl_itx_commit_list) == NULL) 1933 return; 1934 1935 list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t), 1936 offsetof(zil_commit_waiter_t, zcw_node)); 1937 1938 lwb = list_tail(&zilog->zl_lwb_list); 1939 if (lwb == NULL) { 1940 lwb = zil_create(zilog); 1941 } else { 1942 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 1943 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE); 1944 } 1945 1946 while (itx = list_head(&zilog->zl_itx_commit_list)) { 1947 lr_t *lrc = &itx->itx_lr; 1948 uint64_t txg = lrc->lrc_txg; 1949 1950 ASSERT3U(txg, !=, 0); 1951 1952 if (lrc->lrc_txtype == TX_COMMIT) { 1953 DTRACE_PROBE2(zil__process__commit__itx, 1954 zilog_t *, zilog, itx_t *, itx); 1955 } else { 1956 DTRACE_PROBE2(zil__process__normal__itx, 1957 zilog_t *, zilog, itx_t *, itx); 1958 } 1959 1960 /* 1961 * This is inherently racy and may result in us writing 1962 * out a log block for a txg that was just synced. This 1963 * is ok since we'll end cleaning up that log block the 1964 * next time we call zil_sync(). 1965 */ 1966 boolean_t synced = txg <= spa_last_synced_txg(spa); 1967 boolean_t frozen = txg > spa_freeze_txg(spa); 1968 1969 if (!synced || frozen) { 1970 if (lwb != NULL) { 1971 lwb = zil_lwb_commit(zilog, itx, lwb); 1972 } else if (lrc->lrc_txtype == TX_COMMIT) { 1973 ASSERT3P(lwb, ==, NULL); 1974 zil_commit_waiter_link_nolwb( 1975 itx->itx_private, &nolwb_waiters); 1976 } 1977 } else if (lrc->lrc_txtype == TX_COMMIT) { 1978 ASSERT3B(synced, ==, B_TRUE); 1979 ASSERT3B(frozen, ==, B_FALSE); 1980 1981 /* 1982 * If this is a commit itx, then there will be a 1983 * thread that is either: already waiting for 1984 * it, or soon will be waiting. 1985 * 1986 * This itx has already been committed to disk 1987 * via spa_sync() so we don't bother committing 1988 * it to an lwb. As a result, we cannot use the 1989 * lwb zio callback to signal the waiter and 1990 * mark it as done, so we must do that here. 1991 */ 1992 zil_commit_waiter_skip(itx->itx_private); 1993 } 1994 1995 list_remove(&zilog->zl_itx_commit_list, itx); 1996 zil_itx_destroy(itx); 1997 } 1998 1999 if (lwb == NULL) { 2000 /* 2001 * This indicates zio_alloc_zil() failed to allocate the 2002 * "next" lwb on-disk. When this happens, we must stall 2003 * the ZIL write pipeline; see the comment within 2004 * zil_commit_writer_stall() for more details. 2005 */ 2006 zil_commit_writer_stall(zilog); 2007 2008 /* 2009 * Additionally, we have to signal and mark the "nolwb" 2010 * waiters as "done" here, since without an lwb, we 2011 * can't do this via zil_lwb_flush_vdevs_done() like 2012 * normal. 2013 */ 2014 zil_commit_waiter_t *zcw; 2015 while (zcw = list_head(&nolwb_waiters)) { 2016 zil_commit_waiter_skip(zcw); 2017 list_remove(&nolwb_waiters, zcw); 2018 } 2019 } else { 2020 ASSERT(list_is_empty(&nolwb_waiters)); 2021 ASSERT3P(lwb, !=, NULL); 2022 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 2023 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE); 2024 2025 /* 2026 * At this point, the ZIL block pointed at by the "lwb" 2027 * variable is in one of the following states: "closed" 2028 * or "open". 2029 * 2030 * If its "closed", then no itxs have been committed to 2031 * it, so there's no point in issuing its zio (i.e. 2032 * it's "empty"). 2033 * 2034 * If its "open" state, then it contains one or more 2035 * itxs that eventually need to be committed to stable 2036 * storage. 
In this case we intentionally do not issue
2037 * the lwb's zio to disk yet, and instead rely on one of
2038 * the following two mechanisms for issuing the zio:
2039 *
2040 * 1. Ideally, there will be more ZIL activity occurring
2041 * on the system, such that this function will be
2042 * immediately called again (not necessarily by the same
2043 * thread) and this lwb's zio will be issued via
2044 * zil_lwb_commit(). This way, the lwb is guaranteed to
2045 * be "full" when it is issued to disk, and we'll make the
2046 * best use of the lwb's size that we can.
2047 *
2048 * 2. If there isn't sufficient ZIL activity occurring on
2049 * the system, such that this lwb's zio isn't issued via
2050 * zil_lwb_commit(), zil_commit_waiter() will issue the
2051 * lwb's zio. If this occurs, the lwb is not guaranteed
2052 * to be "full" by the time its zio is issued, which means
2053 * the size of the lwb was "too large" given the amount
2054 * of ZIL activity occurring on the system at that time.
2055 *
2056 * We do this for a couple of reasons:
2057 *
2058 * 1. To try and reduce the number of IOPs needed to
2059 * write the same number of itxs. If an lwb has space
2060 * available in its buffer for more itxs, and more itxs
2061 * will be committed relatively soon (relative to the
2062 * latency of performing a write), then it's beneficial
2063 * to wait for these "next" itxs. This way, more itxs
2064 * can be committed to stable storage with fewer writes.
2065 *
2066 * 2. To try and use the largest lwb block size that the
2067 * incoming rate of itxs can support. Again, this is to
2068 * try and pack as many itxs into as few lwbs as
2069 * possible, without significantly impacting the latency
2070 * of each individual itx.
2071 */
2072 }
2073 }
2074
2075 /*
2076 * This function is responsible for ensuring the passed in commit waiter
2077 * (and associated commit itx) is committed to an lwb. If the waiter is
2078 * not already committed to an lwb, all itxs in the zilog's queue of
2079 * itxs will be processed. The assumption is that the passed in waiter's
2080 * commit itx will be found in the queue just like the other non-commit
2081 * itxs, such that when the entire queue is processed, the waiter will
2082 * have been committed to an lwb.
2083 *
2084 * The lwb associated with the passed in waiter is not guaranteed to
2085 * have been issued by the time this function completes. If the lwb is
2086 * not issued, we rely on future calls to zil_commit_writer() to issue
2087 * the lwb, or the timeout mechanism found in zil_commit_waiter().
2088 */
2089 static void
2090 zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
2091 {
2092 ASSERT(!MUTEX_HELD(&zilog->zl_lock));
2093 ASSERT(spa_writeable(zilog->zl_spa));
2094 ASSERT0(zilog->zl_suspend);
2095
2096 mutex_enter(&zilog->zl_writer_lock);
2097
2098 if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
2099 /*
2100 * It's possible that, while we were waiting to acquire
2101 * the "zl_writer_lock", another thread committed this
2102 * waiter to an lwb. If that occurs, we bail out early,
2103 * without processing any of the zilog's queue of itxs.
2104 *
2105 * On certain workloads and system configurations, the
2106 * "zl_writer_lock" can become highly contended. In an
2107 * attempt to reduce this contention, we immediately drop
2108 * the lock if the waiter has already been processed.
2109 * 2110 * We've measured this optimization to reduce CPU spent 2111 * contending on this lock by up to 5%, using a system 2112 * with 32 CPUs, low latency storage (~50 usec writes), 2113 * and 1024 threads performing sync writes. 2114 */ 2115 goto out; 2116 } 2117 2118 zil_get_commit_list(zilog); 2119 zil_prune_commit_list(zilog); 2120 zil_process_commit_list(zilog); 2121 2122 out: 2123 mutex_exit(&zilog->zl_writer_lock); 2124 } 2125 2126 static void 2127 zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw) 2128 { 2129 ASSERT(!MUTEX_HELD(&zilog->zl_writer_lock)); 2130 ASSERT(MUTEX_HELD(&zcw->zcw_lock)); 2131 ASSERT3B(zcw->zcw_done, ==, B_FALSE); 2132 2133 lwb_t *lwb = zcw->zcw_lwb; 2134 ASSERT3P(lwb, !=, NULL); 2135 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED); 2136 2137 /* 2138 * If the lwb has already been issued by another thread, we can 2139 * immediately return since there's no work to be done (the 2140 * point of this function is to issue the lwb). Additionally, we 2141 * do this prior to acquiring the zl_writer_lock, to avoid 2142 * acquiring it when it's not necessary to do so. 2143 */ 2144 if (lwb->lwb_state == LWB_STATE_ISSUED || 2145 lwb->lwb_state == LWB_STATE_DONE) 2146 return; 2147 2148 /* 2149 * In order to call zil_lwb_write_issue() we must hold the 2150 * zilog's "zl_writer_lock". We can't simply acquire that lock, 2151 * since we're already holding the commit waiter's "zcw_lock", 2152 * and those two locks are aquired in the opposite order 2153 * elsewhere. 2154 */ 2155 mutex_exit(&zcw->zcw_lock); 2156 mutex_enter(&zilog->zl_writer_lock); 2157 mutex_enter(&zcw->zcw_lock); 2158 2159 /* 2160 * Since we just dropped and re-acquired the commit waiter's 2161 * lock, we have to re-check to see if the waiter was marked 2162 * "done" during that process. If the waiter was marked "done", 2163 * the "lwb" pointer is no longer valid (it can be free'd after 2164 * the waiter is marked "done"), so without this check we could 2165 * wind up with a use-after-free error below. 2166 */ 2167 if (zcw->zcw_done) 2168 goto out; 2169 2170 ASSERT3P(lwb, ==, zcw->zcw_lwb); 2171 2172 /* 2173 * We've already checked this above, but since we hadn't 2174 * acquired the zilog's zl_writer_lock, we have to perform this 2175 * check a second time while holding the lock. We can't call 2176 * zil_lwb_write_issue() if the lwb had already been issued. 2177 */ 2178 if (lwb->lwb_state == LWB_STATE_ISSUED || 2179 lwb->lwb_state == LWB_STATE_DONE) 2180 goto out; 2181 2182 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); 2183 2184 /* 2185 * As described in the comments above zil_commit_waiter() and 2186 * zil_process_commit_list(), we need to issue this lwb's zio 2187 * since we've reached the commit waiter's timeout and it still 2188 * hasn't been issued. 2189 */ 2190 lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb); 2191 2192 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED); 2193 2194 /* 2195 * Since the lwb's zio hadn't been issued by the time this thread 2196 * reached its timeout, we reset the zilog's "zl_cur_used" field 2197 * to influence the zil block size selection algorithm. 2198 * 2199 * By having to issue the lwb's zio here, it means the size of the 2200 * lwb was too large, given the incoming throughput of itxs. By 2201 * setting "zl_cur_used" to zero, we communicate this fact to the 2202 * block size selection algorithm, so it can take this informaiton 2203 * into account, and potentially select a smaller size for the 2204 * next lwb block that is allocated. 
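 * (The size of the next lwb block is chosen when the current lwb is
 * issued -- see zil_lwb_write_issue() -- based in part on zl_cur_used;
 * resetting it here is what nudges that choice toward a smaller block.)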
2205 */
2206 zilog->zl_cur_used = 0;
2207
2208 if (nlwb == NULL) {
2209 /*
2210 * When zil_lwb_write_issue() returns NULL, this
2211 * indicates zio_alloc_zil() failed to allocate the
2212 * "next" lwb on-disk. When this occurs, the ZIL write
2213 * pipeline must be stalled; see the comment within the
2214 * zil_commit_writer_stall() function for more details.
2215 *
2216 * We must drop the commit waiter's lock prior to
2217 * calling zil_commit_writer_stall() or else we can wind
2218 * up with the following deadlock:
2219 *
2220 * - This thread is waiting for the txg to sync while
2221 * holding the waiter's lock; txg_wait_synced() is
2222 * used within zil_commit_writer_stall().
2223 *
2224 * - The txg can't sync because it is waiting for this
2225 * lwb's zio callback to call dmu_tx_commit().
2226 *
2227 * - The lwb's zio callback can't call dmu_tx_commit()
2228 * because it's blocked trying to acquire the waiter's
2229 * lock, which occurs prior to calling dmu_tx_commit().
2230 */
2231 mutex_exit(&zcw->zcw_lock);
2232 zil_commit_writer_stall(zilog);
2233 mutex_enter(&zcw->zcw_lock);
2234 }
2235
2236 out:
2237 mutex_exit(&zilog->zl_writer_lock);
2238 ASSERT(MUTEX_HELD(&zcw->zcw_lock));
2239 }
2240
2241 /*
2242 * This function is responsible for performing the following two tasks:
2243 *
2244 * 1. its primary responsibility is to block until the given "commit
2245 * waiter" is considered "done".
2246 *
2247 * 2. its secondary responsibility is to issue the zio for the lwb that
2248 * the given "commit waiter" is waiting on, if this function has
2249 * waited "long enough" and the lwb is still in the "open" state.
2250 *
2251 * Given a sufficient number of itxs being generated and written using
2252 * the ZIL, the lwb's zio will be issued via the zil_lwb_commit()
2253 * function. If this does not occur, this secondary responsibility will
2254 * ensure the lwb is issued even if there is no other synchronous
2255 * activity on the system.
2256 *
2257 * For more details, see zil_process_commit_list(); more specifically,
2258 * the comment at the bottom of that function.
2259 */
2260 static void
2261 zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
2262 {
2263 ASSERT(!MUTEX_HELD(&zilog->zl_lock));
2264 ASSERT(!MUTEX_HELD(&zilog->zl_writer_lock));
2265 ASSERT(spa_writeable(zilog->zl_spa));
2266 ASSERT0(zilog->zl_suspend);
2267
2268 mutex_enter(&zcw->zcw_lock);
2269
2270 /*
2271 * The timeout is scaled based on the lwb latency to avoid
2272 * significantly impacting the latency of each individual itx.
2273 * For more details, see the comment at the bottom of the
2274 * zil_process_commit_list() function.
2275 */
2276 int pct = MAX(zfs_commit_timeout_pct, 1);
2277 hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
2278 hrtime_t wakeup = gethrtime() + sleep;
2279 boolean_t timedout = B_FALSE;
2280
2281 while (!zcw->zcw_done) {
2282 ASSERT(MUTEX_HELD(&zcw->zcw_lock));
2283
2284 lwb_t *lwb = zcw->zcw_lwb;
2285
2286 /*
2287 * Usually, the waiter will have a non-NULL lwb field here,
2288 * but it's possible for it to be NULL as a result of
2289 * zil_commit() racing with spa_sync().
2290 *
2291 * When zil_clean() is called, it's possible for the itxg
2292 * list (which may be cleaned via a taskq) to contain
2293 * commit itxs. When this occurs, the commit waiters linked
2294 * off of these commit itxs will not be committed to an
2295 * lwb. Additionally, these commit waiters will not be
2296 * marked done until zil_commit_waiter_skip() is called via
2297 * zil_itxg_clean().
2298 * 2299 * Thus, it's possible for this commit waiter (i.e. the 2300 * "zcw" variable) to be found in this "in between" state; 2301 * where it's "zcw_lwb" field is NULL, and it hasn't yet 2302 * been skipped, so it's "zcw_done" field is still B_FALSE. 2303 */ 2304 IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED); 2305 2306 if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) { 2307 ASSERT3B(timedout, ==, B_FALSE); 2308 2309 /* 2310 * If the lwb hasn't been issued yet, then we 2311 * need to wait with a timeout, in case this 2312 * function needs to issue the lwb after the 2313 * timeout is reached; responsibility (2) from 2314 * the comment above this function. 2315 */ 2316 clock_t timeleft = cv_timedwait_hires(&zcw->zcw_cv, 2317 &zcw->zcw_lock, wakeup, USEC2NSEC(1), 2318 CALLOUT_FLAG_ABSOLUTE); 2319 2320 if (timeleft >= 0 || zcw->zcw_done) 2321 continue; 2322 2323 timedout = B_TRUE; 2324 zil_commit_waiter_timeout(zilog, zcw); 2325 2326 if (!zcw->zcw_done) { 2327 /* 2328 * If the commit waiter has already been 2329 * marked "done", it's possible for the 2330 * waiter's lwb structure to have already 2331 * been freed. Thus, we can only reliably 2332 * make these assertions if the waiter 2333 * isn't done. 2334 */ 2335 ASSERT3P(lwb, ==, zcw->zcw_lwb); 2336 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED); 2337 } 2338 } else { 2339 /* 2340 * If the lwb isn't open, then it must have already 2341 * been issued. In that case, there's no need to 2342 * use a timeout when waiting for the lwb to 2343 * complete. 2344 * 2345 * Additionally, if the lwb is NULL, the waiter 2346 * will soon be signalled and marked done via 2347 * zil_clean() and zil_itxg_clean(), so no timeout 2348 * is required. 2349 */ 2350 2351 IMPLY(lwb != NULL, 2352 lwb->lwb_state == LWB_STATE_ISSUED || 2353 lwb->lwb_state == LWB_STATE_DONE); 2354 cv_wait(&zcw->zcw_cv, &zcw->zcw_lock); 2355 } 2356 } 2357 2358 mutex_exit(&zcw->zcw_lock); 2359 } 2360 2361 static zil_commit_waiter_t * 2362 zil_alloc_commit_waiter() 2363 { 2364 zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP); 2365 2366 cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL); 2367 mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL); 2368 list_link_init(&zcw->zcw_node); 2369 zcw->zcw_lwb = NULL; 2370 zcw->zcw_done = B_FALSE; 2371 zcw->zcw_zio_error = 0; 2372 2373 return (zcw); 2374 } 2375 2376 static void 2377 zil_free_commit_waiter(zil_commit_waiter_t *zcw) 2378 { 2379 ASSERT(!list_link_active(&zcw->zcw_node)); 2380 ASSERT3P(zcw->zcw_lwb, ==, NULL); 2381 ASSERT3B(zcw->zcw_done, ==, B_TRUE); 2382 mutex_destroy(&zcw->zcw_lock); 2383 cv_destroy(&zcw->zcw_cv); 2384 kmem_cache_free(zil_zcw_cache, zcw); 2385 } 2386 2387 /* 2388 * This function is used to create a TX_COMMIT itx and assign it. This 2389 * way, it will be linked into the ZIL's list of synchronous itxs, and 2390 * then later committed to an lwb (or skipped) when 2391 * zil_process_commit_list() is called. 2392 */ 2393 static void 2394 zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw) 2395 { 2396 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); 2397 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 2398 2399 itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t)); 2400 itx->itx_sync = B_TRUE; 2401 itx->itx_private = zcw; 2402 2403 zil_itx_assign(zilog, itx, tx); 2404 2405 dmu_tx_commit(tx); 2406 } 2407 2408 /* 2409 * Commit ZFS Intent Log transactions (itxs) to stable storage. 
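 *
 * At a high level (ignoring the early-return and error paths), a call
 * to zil_commit() boils down to the following sequence; the details
 * are discussed at length below:
 *
 *	zil_async_to_sync(zilog, foid);
 *	zcw = zil_alloc_commit_waiter();
 *	zil_commit_itx_assign(zilog, zcw);
 *	zil_commit_writer(zilog, zcw);
 *	zil_commit_waiter(zilog, zcw);
 *	zil_free_commit_waiter(zcw);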
2410 *
2411 * When writing ZIL transactions to the on-disk representation of the
2412 * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
2413 * itxs can be committed to a single lwb. Once an lwb is written and
2414 * committed to stable storage (i.e. the lwb is written, and vdevs have
2415 * been flushed), each itx that was committed to that lwb is also
2416 * considered to be committed to stable storage.
2417 *
2418 * When an itx is committed to an lwb, the log record (lr_t) contained
2419 * by the itx is copied into the lwb's zio buffer, and once this buffer
2420 * is written to disk, it becomes an on-disk ZIL block.
2421 *
2422 * As itxs are generated, they're inserted into the ZIL's queue of
2423 * uncommitted itxs. The semantics of zil_commit() are such that it will
2424 * block until all itxs that were in the queue when it was called are
2425 * committed to stable storage.
2426 *
2427 * If "foid" is zero, this means all "synchronous" and "asynchronous"
2428 * itxs, for all objects in the dataset, will be committed to stable
2429 * storage prior to zil_commit() returning. If "foid" is non-zero, all
2430 * "synchronous" itxs for all objects, but only "asynchronous" itxs
2431 * that correspond to the foid passed in, will be committed to stable
2432 * storage prior to zil_commit() returning.
2433 *
2434 * Generally speaking, when zil_commit() is called, the consumer doesn't
2435 * actually care about _all_ of the uncommitted itxs. Instead, they're
2436 * simply trying to wait for a specific itx to be committed to disk,
2437 * but the interface(s) for interacting with the ZIL don't allow such
2438 * fine-grained communication. A better interface would allow a consumer
2439 * to create and assign an itx, and then pass a reference to this itx to
2440 * zil_commit(); such that zil_commit() would return as soon as that
2441 * specific itx was committed to disk (instead of waiting for _all_
2442 * itxs to be committed).
2443 *
2444 * When a thread calls zil_commit(), a special "commit itx" will be
2445 * generated, along with a corresponding "waiter" for this commit itx.
2446 * zil_commit() will wait on this waiter's CV, such that when the waiter
2447 * is marked done, and signalled, zil_commit() will return.
2448 *
2449 * This commit itx is inserted into the queue of uncommitted itxs. This
2450 * provides an easy mechanism for determining which itxs were in the
2451 * queue prior to zil_commit() having been called, and which itxs were
2452 * added after zil_commit() was called.
2453 *
2454 * The commit itx is special; it doesn't have any on-disk representation.
2455 * When a commit itx is "committed" to an lwb, the waiter associated
2456 * with it is linked onto the lwb's list of waiters. Then, when that lwb
2457 * completes, each waiter on the lwb's list is marked done and signalled
2458 * -- allowing the thread waiting on the waiter to return from zil_commit().
2459 *
2460 * It's important to point out a few critical factors that allow us
2461 * to make use of the commit itxs, commit waiters, per-lwb lists of
2462 * commit waiters, and zio completion callbacks like we're doing:
2463 *
2464 * 1. The list of waiters for each lwb is traversed, and each commit
2465 * waiter is marked "done" and signalled, in the zio completion
2466 * callback of the lwb's zio[*].
2467 *
2468 * * Actually, the waiters are signalled in the zio completion
2469 * callback of the root zio for the DKIOCFLUSHWRITECACHE commands
2470 * that are sent to the vdevs upon completion of the lwb zio.
2471 * 2472 * 2. When the itxs are inserted into the ZIL's queue of uncommitted 2473 * itxs, the order in which they are inserted is preserved[*]; as 2474 * itxs are added to the queue, they are added to the tail of 2475 * in-memory linked lists. 2476 * 2477 * When committing the itxs to lwbs (to be written to disk), they 2478 * are committed in the same order in which the itxs were added to 2479 * the uncommitted queue's linked list(s); i.e. the linked list of 2480 * itxs to commit is traversed from head to tail, and each itx is 2481 * committed to an lwb in that order. 2482 * 2483 * * To clarify: 2484 * 2485 * - the order of "sync" itxs is preserved w.r.t. other 2486 * "sync" itxs, regardless of the corresponding objects. 2487 * - the order of "async" itxs is preserved w.r.t. other 2488 * "async" itxs corresponding to the same object. 2489 * - the order of "async" itxs is *not* preserved w.r.t. other 2490 * "async" itxs corresponding to different objects. 2491 * - the order of "sync" itxs w.r.t. "async" itxs (or vice 2492 * versa) is *not* preserved, even for itxs that correspond 2493 * to the same object. 2494 * 2495 * For more details, see: zil_itx_assign(), zil_async_to_sync(), 2496 * zil_get_commit_list(), and zil_process_commit_list(). 2497 * 2498 * 3. The lwbs represent a linked list of blocks on disk. Thus, any 2499 * lwb cannot be considered committed to stable storage, until its 2500 * "previous" lwb is also committed to stable storage. This fact, 2501 * coupled with the fact described above, means that itxs are 2502 * committed in (roughly) the order in which they were generated. 2503 * This is essential because itxs are dependent on prior itxs. 2504 * Thus, we *must not* deem an itx as being committed to stable 2505 * storage, until *all* prior itxs have also been committed to 2506 * stable storage. 2507 * 2508 * To enforce this ordering of lwb zio's, while still leveraging as 2509 * much of the underlying storage performance as possible, we rely 2510 * on two fundamental concepts: 2511 * 2512 * 1. The creation and issuance of lwb zio's is protected by 2513 * the zilog's "zl_writer_lock", which ensures only a single 2514 * thread is creating and/or issuing lwb's at a time 2515 * 2. The "previous" lwb is a child of the "current" lwb 2516 * (leveraging the zio parent-child depenency graph) 2517 * 2518 * By relying on this parent-child zio relationship, we can have 2519 * many lwb zio's concurrently issued to the underlying storage, 2520 * but the order in which they complete will be the same order in 2521 * which they were created. 2522 */ 2523 void 2524 zil_commit(zilog_t *zilog, uint64_t foid) 2525 { 2526 /* 2527 * We should never attempt to call zil_commit on a snapshot for 2528 * a couple of reasons: 2529 * 2530 * 1. A snapshot may never be modified, thus it cannot have any 2531 * in-flight itxs that would have modified the dataset. 2532 * 2533 * 2. By design, when zil_commit() is called, a commit itx will 2534 * be assigned to this zilog; as a result, the zilog will be 2535 * dirtied. We must not dirty the zilog of a snapshot; there's 2536 * checks in the code that enforce this invariant, and will 2537 * cause a panic if it's not upheld. 2538 */ 2539 ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE); 2540 2541 if (zilog->zl_sync == ZFS_SYNC_DISABLED) 2542 return; 2543 2544 if (!spa_writeable(zilog->zl_spa)) { 2545 /* 2546 * If the SPA is not writable, there should never be any 2547 * pending itxs waiting to be committed to disk. 
If that 2548 * weren't true, we'd skip writing those itxs out, and 2549 * would break the sematics of zil_commit(); thus, we're 2550 * verifying that truth before we return to the caller. 2551 */ 2552 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 2553 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); 2554 for (int i = 0; i < TXG_SIZE; i++) 2555 ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL); 2556 return; 2557 } 2558 2559 /* 2560 * If the ZIL is suspended, we don't want to dirty it by calling 2561 * zil_commit_itx_assign() below, nor can we write out 2562 * lwbs like would be done in zil_commit_write(). Thus, we 2563 * simply rely on txg_wait_synced() to maintain the necessary 2564 * semantics, and avoid calling those functions altogether. 2565 */ 2566 if (zilog->zl_suspend > 0) { 2567 txg_wait_synced(zilog->zl_dmu_pool, 0); 2568 return; 2569 } 2570 2571 /* 2572 * Move the "async" itxs for the specified foid to the "sync" 2573 * queues, such that they will be later committed (or skipped) 2574 * to an lwb when zil_process_commit_list() is called. 2575 * 2576 * Since these "async" itxs must be committed prior to this 2577 * call to zil_commit returning, we must perform this operation 2578 * before we call zil_commit_itx_assign(). 2579 */ 2580 zil_async_to_sync(zilog, foid); 2581 2582 /* 2583 * We allocate a new "waiter" structure which will initially be 2584 * linked to the commit itx using the itx's "itx_private" field. 2585 * Since the commit itx doesn't represent any on-disk state, 2586 * when it's committed to an lwb, rather than copying the its 2587 * lr_t into the lwb's buffer, the commit itx's "waiter" will be 2588 * added to the lwb's list of waiters. Then, when the lwb is 2589 * committed to stable storage, each waiter in the lwb's list of 2590 * waiters will be marked "done", and signalled. 2591 * 2592 * We must create the waiter and assign the commit itx prior to 2593 * calling zil_commit_writer(), or else our specific commit itx 2594 * is not guaranteed to be committed to an lwb prior to calling 2595 * zil_commit_waiter(). 2596 */ 2597 zil_commit_waiter_t *zcw = zil_alloc_commit_waiter(); 2598 zil_commit_itx_assign(zilog, zcw); 2599 2600 zil_commit_writer(zilog, zcw); 2601 zil_commit_waiter(zilog, zcw); 2602 2603 if (zcw->zcw_zio_error != 0) { 2604 /* 2605 * If there was an error writing out the ZIL blocks that 2606 * this thread is waiting on, then we fallback to 2607 * relying on spa_sync() to write out the data this 2608 * thread is waiting on. Obviously this has performance 2609 * implications, but the expectation is for this to be 2610 * an exceptional case, and shouldn't occur often. 2611 */ 2612 DTRACE_PROBE2(zil__commit__io__error, 2613 zilog_t *, zilog, zil_commit_waiter_t *, zcw); 2614 txg_wait_synced(zilog->zl_dmu_pool, 0); 2615 } 2616 2617 zil_free_commit_waiter(zcw); 2618 } 2619 2620 /* 2621 * Called in syncing context to free committed log blocks and update log header. 2622 */ 2623 void 2624 zil_sync(zilog_t *zilog, dmu_tx_t *tx) 2625 { 2626 zil_header_t *zh = zil_header_in_syncing_context(zilog); 2627 uint64_t txg = dmu_tx_get_txg(tx); 2628 spa_t *spa = zilog->zl_spa; 2629 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK]; 2630 lwb_t *lwb; 2631 2632 /* 2633 * We don't zero out zl_destroy_txg, so make sure we don't try 2634 * to destroy it twice. 
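 * (The spa_sync_pass() check below also ensures the work in this
 * function is only done once per txg, on the first sync pass.)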
2635 */ 2636 if (spa_sync_pass(spa) != 1) 2637 return; 2638 2639 mutex_enter(&zilog->zl_lock); 2640 2641 ASSERT(zilog->zl_stop_sync == 0); 2642 2643 if (*replayed_seq != 0) { 2644 ASSERT(zh->zh_replay_seq < *replayed_seq); 2645 zh->zh_replay_seq = *replayed_seq; 2646 *replayed_seq = 0; 2647 } 2648 2649 if (zilog->zl_destroy_txg == txg) { 2650 blkptr_t blk = zh->zh_log; 2651 2652 ASSERT(list_head(&zilog->zl_lwb_list) == NULL); 2653 2654 bzero(zh, sizeof (zil_header_t)); 2655 bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq)); 2656 2657 if (zilog->zl_keep_first) { 2658 /* 2659 * If this block was part of log chain that couldn't 2660 * be claimed because a device was missing during 2661 * zil_claim(), but that device later returns, 2662 * then this block could erroneously appear valid. 2663 * To guard against this, assign a new GUID to the new 2664 * log chain so it doesn't matter what blk points to. 2665 */ 2666 zil_init_log_chain(zilog, &blk); 2667 zh->zh_log = blk; 2668 } 2669 } 2670 2671 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 2672 zh->zh_log = lwb->lwb_blk; 2673 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg) 2674 break; 2675 list_remove(&zilog->zl_lwb_list, lwb); 2676 zio_free(spa, txg, &lwb->lwb_blk); 2677 zil_free_lwb(zilog, lwb); 2678 2679 /* 2680 * If we don't have anything left in the lwb list then 2681 * we've had an allocation failure and we need to zero 2682 * out the zil_header blkptr so that we don't end 2683 * up freeing the same block twice. 2684 */ 2685 if (list_head(&zilog->zl_lwb_list) == NULL) 2686 BP_ZERO(&zh->zh_log); 2687 } 2688 mutex_exit(&zilog->zl_lock); 2689 } 2690 2691 /* ARGSUSED */ 2692 static int 2693 zil_lwb_cons(void *vbuf, void *unused, int kmflag) 2694 { 2695 lwb_t *lwb = vbuf; 2696 list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t), 2697 offsetof(zil_commit_waiter_t, zcw_node)); 2698 avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare, 2699 sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); 2700 mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 2701 return (0); 2702 } 2703 2704 /* ARGSUSED */ 2705 static void 2706 zil_lwb_dest(void *vbuf, void *unused) 2707 { 2708 lwb_t *lwb = vbuf; 2709 mutex_destroy(&lwb->lwb_vdev_lock); 2710 avl_destroy(&lwb->lwb_vdev_tree); 2711 list_destroy(&lwb->lwb_waiters); 2712 } 2713 2714 void 2715 zil_init(void) 2716 { 2717 zil_lwb_cache = kmem_cache_create("zil_lwb_cache", 2718 sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0); 2719 2720 zil_zcw_cache = kmem_cache_create("zil_zcw_cache", 2721 sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 2722 } 2723 2724 void 2725 zil_fini(void) 2726 { 2727 kmem_cache_destroy(zil_zcw_cache); 2728 kmem_cache_destroy(zil_lwb_cache); 2729 } 2730 2731 void 2732 zil_set_sync(zilog_t *zilog, uint64_t sync) 2733 { 2734 zilog->zl_sync = sync; 2735 } 2736 2737 void 2738 zil_set_logbias(zilog_t *zilog, uint64_t logbias) 2739 { 2740 zilog->zl_logbias = logbias; 2741 } 2742 2743 zilog_t * 2744 zil_alloc(objset_t *os, zil_header_t *zh_phys) 2745 { 2746 zilog_t *zilog; 2747 2748 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); 2749 2750 zilog->zl_header = zh_phys; 2751 zilog->zl_os = os; 2752 zilog->zl_spa = dmu_objset_spa(os); 2753 zilog->zl_dmu_pool = dmu_objset_pool(os); 2754 zilog->zl_destroy_txg = TXG_INITIAL - 1; 2755 zilog->zl_logbias = dmu_objset_logbias(os); 2756 zilog->zl_sync = dmu_objset_syncprop(os); 2757 zilog->zl_dirty_max_txg = 0; 2758 zilog->zl_last_lwb_opened = NULL; 2759 
zilog->zl_last_lwb_latency = 0; 2760 2761 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); 2762 mutex_init(&zilog->zl_writer_lock, NULL, MUTEX_DEFAULT, NULL); 2763 2764 for (int i = 0; i < TXG_SIZE; i++) { 2765 mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, 2766 MUTEX_DEFAULT, NULL); 2767 } 2768 2769 list_create(&zilog->zl_lwb_list, sizeof (lwb_t), 2770 offsetof(lwb_t, lwb_node)); 2771 2772 list_create(&zilog->zl_itx_commit_list, sizeof (itx_t), 2773 offsetof(itx_t, itx_node)); 2774 2775 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); 2776 2777 return (zilog); 2778 } 2779 2780 void 2781 zil_free(zilog_t *zilog) 2782 { 2783 zilog->zl_stop_sync = 1; 2784 2785 ASSERT0(zilog->zl_suspend); 2786 ASSERT0(zilog->zl_suspending); 2787 2788 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 2789 list_destroy(&zilog->zl_lwb_list); 2790 2791 ASSERT(list_is_empty(&zilog->zl_itx_commit_list)); 2792 list_destroy(&zilog->zl_itx_commit_list); 2793 2794 for (int i = 0; i < TXG_SIZE; i++) { 2795 /* 2796 * It's possible for an itx to be generated that doesn't dirty 2797 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean() 2798 * callback to remove the entry. We remove those here. 2799 * 2800 * Also free up the ziltest itxs. 2801 */ 2802 if (zilog->zl_itxg[i].itxg_itxs) 2803 zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs); 2804 mutex_destroy(&zilog->zl_itxg[i].itxg_lock); 2805 } 2806 2807 mutex_destroy(&zilog->zl_writer_lock); 2808 mutex_destroy(&zilog->zl_lock); 2809 2810 cv_destroy(&zilog->zl_cv_suspend); 2811 2812 kmem_free(zilog, sizeof (zilog_t)); 2813 } 2814 2815 /* 2816 * Open an intent log. 2817 */ 2818 zilog_t * 2819 zil_open(objset_t *os, zil_get_data_t *get_data) 2820 { 2821 zilog_t *zilog = dmu_objset_zil(os); 2822 2823 ASSERT3P(zilog->zl_get_data, ==, NULL); 2824 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); 2825 ASSERT(list_is_empty(&zilog->zl_lwb_list)); 2826 2827 zilog->zl_get_data = get_data; 2828 2829 return (zilog); 2830 } 2831 2832 /* 2833 * Close an intent log. 2834 */ 2835 void 2836 zil_close(zilog_t *zilog) 2837 { 2838 lwb_t *lwb; 2839 uint64_t txg; 2840 2841 if (!dmu_objset_is_snapshot(zilog->zl_os)) { 2842 zil_commit(zilog, 0); 2843 } else { 2844 ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL); 2845 ASSERT0(zilog->zl_dirty_max_txg); 2846 ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE); 2847 } 2848 2849 mutex_enter(&zilog->zl_lock); 2850 lwb = list_tail(&zilog->zl_lwb_list); 2851 if (lwb == NULL) 2852 txg = zilog->zl_dirty_max_txg; 2853 else 2854 txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg); 2855 mutex_exit(&zilog->zl_lock); 2856 2857 /* 2858 * We need to use txg_wait_synced() to wait long enough for the 2859 * ZIL to be clean, and to wait for all pending lwbs to be 2860 * written out. 2861 */ 2862 if (txg != 0) 2863 txg_wait_synced(zilog->zl_dmu_pool, txg); 2864 2865 if (zilog_is_dirty(zilog)) 2866 zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg); 2867 VERIFY(!zilog_is_dirty(zilog)); 2868 2869 zilog->zl_get_data = NULL; 2870 2871 /* 2872 * We should have only one lwb left on the list; remove it now. 
2873 */ 2874 mutex_enter(&zilog->zl_lock); 2875 lwb = list_head(&zilog->zl_lwb_list); 2876 if (lwb != NULL) { 2877 ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list)); 2878 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); 2879 list_remove(&zilog->zl_lwb_list, lwb); 2880 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 2881 zil_free_lwb(zilog, lwb); 2882 } 2883 mutex_exit(&zilog->zl_lock); 2884 } 2885 2886 static char *suspend_tag = "zil suspending"; 2887 2888 /* 2889 * Suspend an intent log. While in suspended mode, we still honor 2890 * synchronous semantics, but we rely on txg_wait_synced() to do it. 2891 * On old version pools, we suspend the log briefly when taking a 2892 * snapshot so that it will have an empty intent log. 2893 * 2894 * Long holds are not really intended to be used the way we do here -- 2895 * held for such a short time. A concurrent caller of dsl_dataset_long_held() 2896 * could fail. Therefore we take pains to only put a long hold if it is 2897 * actually necessary. Fortunately, it will only be necessary if the 2898 * objset is currently mounted (or the ZVOL equivalent). In that case it 2899 * will already have a long hold, so we are not really making things any worse. 2900 * 2901 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or 2902 * zvol_state_t), and use their mechanism to prevent their hold from being 2903 * dropped (e.g. VFS_HOLD()). However, that would be even more pain for 2904 * very little gain. 2905 * 2906 * if cookiep == NULL, this does both the suspend & resume. 2907 * Otherwise, it returns with the dataset "long held", and the cookie 2908 * should be passed into zil_resume(). 2909 */ 2910 int 2911 zil_suspend(const char *osname, void **cookiep) 2912 { 2913 objset_t *os; 2914 zilog_t *zilog; 2915 const zil_header_t *zh; 2916 int error; 2917 2918 error = dmu_objset_hold(osname, suspend_tag, &os); 2919 if (error != 0) 2920 return (error); 2921 zilog = dmu_objset_zil(os); 2922 2923 mutex_enter(&zilog->zl_lock); 2924 zh = zilog->zl_header; 2925 2926 if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ 2927 mutex_exit(&zilog->zl_lock); 2928 dmu_objset_rele(os, suspend_tag); 2929 return (SET_ERROR(EBUSY)); 2930 } 2931 2932 /* 2933 * Don't put a long hold in the cases where we can avoid it. This 2934 * is when there is no cookie so we are doing a suspend & resume 2935 * (i.e. called from zil_vdev_offline()), and there's nothing to do 2936 * for the suspend because it's already suspended, or there's no ZIL. 2937 */ 2938 if (cookiep == NULL && !zilog->zl_suspending && 2939 (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) { 2940 mutex_exit(&zilog->zl_lock); 2941 dmu_objset_rele(os, suspend_tag); 2942 return (0); 2943 } 2944 2945 dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag); 2946 dsl_pool_rele(dmu_objset_pool(os), suspend_tag); 2947 2948 zilog->zl_suspend++; 2949 2950 if (zilog->zl_suspend > 1) { 2951 /* 2952 * Someone else is already suspending it. 2953 * Just wait for them to finish. 2954 */ 2955 2956 while (zilog->zl_suspending) 2957 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); 2958 mutex_exit(&zilog->zl_lock); 2959 2960 if (cookiep == NULL) 2961 zil_resume(os); 2962 else 2963 *cookiep = os; 2964 return (0); 2965 } 2966 2967 /* 2968 * If there is no pointer to an on-disk block, this ZIL must not 2969 * be active (e.g. filesystem not mounted), so there's nothing 2970 * to clean up. 
2971 */ 2972 if (BP_IS_HOLE(&zh->zh_log)) { 2973 ASSERT(cookiep != NULL); /* fast path already handled */ 2974 2975 *cookiep = os; 2976 mutex_exit(&zilog->zl_lock); 2977 return (0); 2978 } 2979 2980 zilog->zl_suspending = B_TRUE; 2981 mutex_exit(&zilog->zl_lock); 2982 2983 zil_commit(zilog, 0); 2984 2985 zil_destroy(zilog, B_FALSE); 2986 2987 mutex_enter(&zilog->zl_lock); 2988 zilog->zl_suspending = B_FALSE; 2989 cv_broadcast(&zilog->zl_cv_suspend); 2990 mutex_exit(&zilog->zl_lock); 2991 2992 if (cookiep == NULL) 2993 zil_resume(os); 2994 else 2995 *cookiep = os; 2996 return (0); 2997 } 2998 2999 void 3000 zil_resume(void *cookie) 3001 { 3002 objset_t *os = cookie; 3003 zilog_t *zilog = dmu_objset_zil(os); 3004 3005 mutex_enter(&zilog->zl_lock); 3006 ASSERT(zilog->zl_suspend != 0); 3007 zilog->zl_suspend--; 3008 mutex_exit(&zilog->zl_lock); 3009 dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); 3010 dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); 3011 } 3012 3013 typedef struct zil_replay_arg { 3014 zil_replay_func_t **zr_replay; 3015 void *zr_arg; 3016 boolean_t zr_byteswap; 3017 char *zr_lr; 3018 } zil_replay_arg_t; 3019 3020 static int 3021 zil_replay_error(zilog_t *zilog, lr_t *lr, int error) 3022 { 3023 char name[ZFS_MAX_DATASET_NAME_LEN]; 3024 3025 zilog->zl_replaying_seq--; /* didn't actually replay this one */ 3026 3027 dmu_objset_name(zilog->zl_os, name); 3028 3029 cmn_err(CE_WARN, "ZFS replay transaction error %d, " 3030 "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name, 3031 (u_longlong_t)lr->lrc_seq, 3032 (u_longlong_t)(lr->lrc_txtype & ~TX_CI), 3033 (lr->lrc_txtype & TX_CI) ? "CI" : ""); 3034 3035 return (error); 3036 } 3037 3038 static int 3039 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg) 3040 { 3041 zil_replay_arg_t *zr = zra; 3042 const zil_header_t *zh = zilog->zl_header; 3043 uint64_t reclen = lr->lrc_reclen; 3044 uint64_t txtype = lr->lrc_txtype; 3045 int error = 0; 3046 3047 zilog->zl_replaying_seq = lr->lrc_seq; 3048 3049 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ 3050 return (0); 3051 3052 if (lr->lrc_txg < claim_txg) /* already committed */ 3053 return (0); 3054 3055 /* Strip case-insensitive bit, still present in log record */ 3056 txtype &= ~TX_CI; 3057 3058 if (txtype == 0 || txtype >= TX_MAX_TYPE) 3059 return (zil_replay_error(zilog, lr, EINVAL)); 3060 3061 /* 3062 * If this record type can be logged out of order, the object 3063 * (lr_foid) may no longer exist. That's legitimate, not an error. 3064 */ 3065 if (TX_OOO(txtype)) { 3066 error = dmu_object_info(zilog->zl_os, 3067 ((lr_ooo_t *)lr)->lr_foid, NULL); 3068 if (error == ENOENT || error == EEXIST) 3069 return (0); 3070 } 3071 3072 /* 3073 * Make a copy of the data so we can revise and extend it. 3074 */ 3075 bcopy(lr, zr->zr_lr, reclen); 3076 3077 /* 3078 * If this is a TX_WRITE with a blkptr, suck in the data. 3079 */ 3080 if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { 3081 error = zil_read_log_data(zilog, (lr_write_t *)lr, 3082 zr->zr_lr + reclen); 3083 if (error != 0) 3084 return (zil_replay_error(zilog, lr, error)); 3085 } 3086 3087 /* 3088 * The log block containing this lr may have been byteswapped 3089 * so that we can easily examine common fields like lrc_txtype. 3090 * However, the log is a mix of different record types, and only the 3091 * replay vectors know how to byteswap their records. Therefore, if 3092 * the lr was byteswapped, undo it before invoking the replay vector. 
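 * (byteswap_uint64_array() below simply undoes that wholesale 8-byte
 * swap; zr_byteswap is then passed to the replay vector so it can
 * byteswap the record again using its type-specific layout.)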
3093 */ 3094 if (zr->zr_byteswap) 3095 byteswap_uint64_array(zr->zr_lr, reclen); 3096 3097 /* 3098 * We must now do two things atomically: replay this log record, 3099 * and update the log header sequence number to reflect the fact that 3100 * we did so. At the end of each replay function the sequence number 3101 * is updated if we are in replay mode. 3102 */ 3103 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); 3104 if (error != 0) { 3105 /* 3106 * The DMU's dnode layer doesn't see removes until the txg 3107 * commits, so a subsequent claim can spuriously fail with 3108 * EEXIST. So if we receive any error we try syncing out 3109 * any removes then retry the transaction. Note that we 3110 * specify B_FALSE for byteswap now, so we don't do it twice. 3111 */ 3112 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); 3113 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); 3114 if (error != 0) 3115 return (zil_replay_error(zilog, lr, error)); 3116 } 3117 return (0); 3118 } 3119 3120 /* ARGSUSED */ 3121 static int 3122 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg) 3123 { 3124 zilog->zl_replay_blks++; 3125 3126 return (0); 3127 } 3128 3129 /* 3130 * If this dataset has a non-empty intent log, replay it and destroy it. 3131 */ 3132 void 3133 zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE]) 3134 { 3135 zilog_t *zilog = dmu_objset_zil(os); 3136 const zil_header_t *zh = zilog->zl_header; 3137 zil_replay_arg_t zr; 3138 3139 if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) { 3140 zil_destroy(zilog, B_TRUE); 3141 return; 3142 } 3143 3144 zr.zr_replay = replay_func; 3145 zr.zr_arg = arg; 3146 zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log); 3147 zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP); 3148 3149 /* 3150 * Wait for in-progress removes to sync before starting replay. 3151 */ 3152 txg_wait_synced(zilog->zl_dmu_pool, 0); 3153 3154 zilog->zl_replay = B_TRUE; 3155 zilog->zl_replay_time = ddi_get_lbolt(); 3156 ASSERT(zilog->zl_replay_blks == 0); 3157 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr, 3158 zh->zh_claim_txg); 3159 kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE); 3160 3161 zil_destroy(zilog, B_FALSE); 3162 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 3163 zilog->zl_replay = B_FALSE; 3164 } 3165 3166 boolean_t 3167 zil_replaying(zilog_t *zilog, dmu_tx_t *tx) 3168 { 3169 if (zilog->zl_sync == ZFS_SYNC_DISABLED) 3170 return (B_TRUE); 3171 3172 if (zilog->zl_replay) { 3173 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 3174 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = 3175 zilog->zl_replaying_seq; 3176 return (B_TRUE); 3177 } 3178 3179 return (B_FALSE); 3180 } 3181 3182 /* ARGSUSED */ 3183 int 3184 zil_vdev_offline(const char *osname, void *arg) 3185 { 3186 int error; 3187 3188 error = zil_suspend(osname, NULL); 3189 if (error != 0) 3190 return (SET_ERROR(EEXIST)); 3191 return (0); 3192 } 3193