1fa9e4066Sahrens /* 2fa9e4066Sahrens * CDDL HEADER START 3fa9e4066Sahrens * 4fa9e4066Sahrens * The contents of this file are subject to the terms of the 5fe9cf88cSperrin * Common Development and Distribution License (the "License"). 6fe9cf88cSperrin * You may not use this file except in compliance with the License. 7fa9e4066Sahrens * 8fa9e4066Sahrens * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9fa9e4066Sahrens * or http://www.opensolaris.org/os/licensing. 10fa9e4066Sahrens * See the License for the specific language governing permissions 11fa9e4066Sahrens * and limitations under the License. 12fa9e4066Sahrens * 13fa9e4066Sahrens * When distributing Covered Code, include this CDDL HEADER in each 14fa9e4066Sahrens * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15fa9e4066Sahrens * If applicable, add the following below this CDDL HEADER, with the 16fa9e4066Sahrens * fields enclosed by brackets "[]" replaced with your own identifying 17fa9e4066Sahrens * information: Portions Copyright [yyyy] [name of copyright owner] 18fa9e4066Sahrens * 19fa9e4066Sahrens * CDDL HEADER END 20fa9e4066Sahrens */ 21fa9e4066Sahrens /* 2255da60b9SMark J Musante * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 237802d7bfSMatthew Ahrens * Copyright (c) 2011, 2014 by Delphix. All rights reserved. 24c3d26abcSMatthew Ahrens * Copyright (c) 2014 Integros [integros.com] 25fa9e4066Sahrens */ 26fa9e4066Sahrens 2755da60b9SMark J Musante /* Portions Copyright 2010 Robert Milkowski */ 2855da60b9SMark J Musante 29fa9e4066Sahrens #include <sys/zfs_context.h> 30fa9e4066Sahrens #include <sys/spa.h> 31fa9e4066Sahrens #include <sys/dmu.h> 32fa9e4066Sahrens #include <sys/zap.h> 33fa9e4066Sahrens #include <sys/arc.h> 34fa9e4066Sahrens #include <sys/stat.h> 35fa9e4066Sahrens #include <sys/resource.h> 36fa9e4066Sahrens #include <sys/zil.h> 37fa9e4066Sahrens #include <sys/zil_impl.h> 38fa9e4066Sahrens #include <sys/dsl_dataset.h> 394b964adaSGeorge Wilson #include <sys/vdev_impl.h> 40d63d470bSgw25295 #include <sys/dmu_tx.h> 413f9d6ad7SLin Ling #include <sys/dsl_pool.h> 42fa9e4066Sahrens 43fa9e4066Sahrens /* 44fa9e4066Sahrens * The zfs intent log (ZIL) saves transaction records of system calls 45fa9e4066Sahrens * that change the file system in memory with enough information 46fa9e4066Sahrens * to be able to replay them. These are stored in memory until 47fa9e4066Sahrens * either the DMU transaction group (txg) commits them to the stable pool 48fa9e4066Sahrens * and they can be discarded, or they are flushed to the stable log 49fa9e4066Sahrens * (also in the pool) due to a fsync, O_DSYNC or other synchronous 50fa9e4066Sahrens * requirement. In the event of a panic or power fail then those log 51fa9e4066Sahrens * records (transactions) are replayed. 52fa9e4066Sahrens * 53fa9e4066Sahrens * There is one ZIL per file system. Its on-disk (pool) format consists 54fa9e4066Sahrens * of 3 parts: 55fa9e4066Sahrens * 56fa9e4066Sahrens * - ZIL header 57fa9e4066Sahrens * - ZIL blocks 58fa9e4066Sahrens * - ZIL records 59fa9e4066Sahrens * 60fa9e4066Sahrens * A log record holds a system call transaction. Log blocks can 61fa9e4066Sahrens * hold many log records and the blocks are chained together. 62fa9e4066Sahrens * Each ZIL block contains a block pointer (blkptr_t) to the next 63fa9e4066Sahrens * ZIL block in the chain. The ZIL header points to the first 64fa9e4066Sahrens * block in the chain. Note there is not a fixed place in the pool 65fa9e4066Sahrens * to hold blocks. They are dynamically allocated and freed as 66fa9e4066Sahrens * needed from the blocks available. Figure X shows the ZIL structure: 67fa9e4066Sahrens */ 68fa9e4066Sahrens 69fa9e4066Sahrens /* 70f7170741SWill Andrews * Disable intent logging replay. This global ZIL switch affects all pools. 71fa9e4066Sahrens */ 72f7170741SWill Andrews int zil_replay_disable = 0; 73416e0cd8Sek110237 74416e0cd8Sek110237 /* 75416e0cd8Sek110237 * Tunable parameter for debugging or performance analysis. Setting 76416e0cd8Sek110237 * zfs_nocacheflush will cause corruption on power loss if a volatile 77416e0cd8Sek110237 * out-of-order write cache is enabled. 78416e0cd8Sek110237 */ 79416e0cd8Sek110237 boolean_t zfs_nocacheflush = B_FALSE; 80fa9e4066Sahrens 81fa9e4066Sahrens static kmem_cache_t *zil_lwb_cache; 82fa9e4066Sahrens 8391de656bSNeil Perrin static void zil_async_to_sync(zilog_t *zilog, uint64_t foid); 848f18d1faSGeorge Wilson 856e1f5caaSNeil Perrin #define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \ 866e1f5caaSNeil Perrin sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused)) 876e1f5caaSNeil Perrin 886e1f5caaSNeil Perrin 895002558fSNeil Perrin /* 905002558fSNeil Perrin * ziltest is by and large an ugly hack, but very useful in 915002558fSNeil Perrin * checking replay without tedious work. 925002558fSNeil Perrin * When running ziltest we want to keep all itx's and so maintain 935002558fSNeil Perrin * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG 945002558fSNeil Perrin * We subtract TXG_CONCURRENT_STATES to allow for common code. 955002558fSNeil Perrin */ 965002558fSNeil Perrin #define ZILTEST_TXG (UINT64_MAX - TXG_CONCURRENT_STATES) 975002558fSNeil Perrin 98fa9e4066Sahrens static int 99b24ab676SJeff Bonwick zil_bp_compare(const void *x1, const void *x2) 100fa9e4066Sahrens { 101b24ab676SJeff Bonwick const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva; 102b24ab676SJeff Bonwick const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva; 103fa9e4066Sahrens 104fa9e4066Sahrens if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2)) 105fa9e4066Sahrens return (-1); 106fa9e4066Sahrens if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2)) 107fa9e4066Sahrens return (1); 108fa9e4066Sahrens 109fa9e4066Sahrens if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2)) 110fa9e4066Sahrens return (-1); 111fa9e4066Sahrens if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2)) 112fa9e4066Sahrens return (1); 113fa9e4066Sahrens 114fa9e4066Sahrens return (0); 115fa9e4066Sahrens } 116fa9e4066Sahrens 117fa9e4066Sahrens static void 118b24ab676SJeff Bonwick zil_bp_tree_init(zilog_t *zilog) 119fa9e4066Sahrens { 120b24ab676SJeff Bonwick avl_create(&zilog->zl_bp_tree, zil_bp_compare, 121b24ab676SJeff Bonwick sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node)); 122fa9e4066Sahrens } 123fa9e4066Sahrens 124fa9e4066Sahrens static void 125b24ab676SJeff Bonwick zil_bp_tree_fini(zilog_t *zilog) 126fa9e4066Sahrens { 127b24ab676SJeff Bonwick avl_tree_t *t = &zilog->zl_bp_tree; 128b24ab676SJeff Bonwick zil_bp_node_t *zn; 129fa9e4066Sahrens void *cookie = NULL; 130fa9e4066Sahrens 131fa9e4066Sahrens while ((zn = avl_destroy_nodes(t, &cookie)) != NULL) 132b24ab676SJeff Bonwick kmem_free(zn, sizeof (zil_bp_node_t)); 133fa9e4066Sahrens 134fa9e4066Sahrens avl_destroy(t); 135fa9e4066Sahrens } 136fa9e4066Sahrens 137b24ab676SJeff Bonwick int 138b24ab676SJeff Bonwick zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp) 139fa9e4066Sahrens { 140b24ab676SJeff Bonwick avl_tree_t *t = &zilog->zl_bp_tree; 1415d7b4d43SMatthew Ahrens const dva_t *dva; 142b24ab676SJeff Bonwick zil_bp_node_t *zn; 143fa9e4066Sahrens avl_index_t where; 144fa9e4066Sahrens 1455d7b4d43SMatthew Ahrens if (BP_IS_EMBEDDED(bp)) 1465d7b4d43SMatthew Ahrens return (0); 1475d7b4d43SMatthew Ahrens 1485d7b4d43SMatthew Ahrens dva = BP_IDENTITY(bp); 1495d7b4d43SMatthew Ahrens 150fa9e4066Sahrens if (avl_find(t, dva, &where) != NULL) 151be6fd75aSMatthew Ahrens return (SET_ERROR(EEXIST)); 152fa9e4066Sahrens 153b24ab676SJeff Bonwick zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP); 154fa9e4066Sahrens zn->zn_dva = *dva; 155fa9e4066Sahrens avl_insert(t, zn, where); 156fa9e4066Sahrens 157fa9e4066Sahrens return (0); 158fa9e4066Sahrens } 159fa9e4066Sahrens 160d80c45e0Sbonwick static zil_header_t * 161d80c45e0Sbonwick zil_header_in_syncing_context(zilog_t *zilog) 162d80c45e0Sbonwick { 163d80c45e0Sbonwick return ((zil_header_t *)zilog->zl_header); 164d80c45e0Sbonwick } 165d80c45e0Sbonwick 166d80c45e0Sbonwick static void 167d80c45e0Sbonwick zil_init_log_chain(zilog_t *zilog, blkptr_t *bp) 168d80c45e0Sbonwick { 169d80c45e0Sbonwick zio_cksum_t *zc = &bp->blk_cksum; 170d80c45e0Sbonwick 171d80c45e0Sbonwick zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL); 172d80c45e0Sbonwick zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL); 173d80c45e0Sbonwick zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os); 174d80c45e0Sbonwick zc->zc_word[ZIL_ZC_SEQ] = 1ULL; 175d80c45e0Sbonwick } 176d80c45e0Sbonwick 177fa9e4066Sahrens /* 178b24ab676SJeff Bonwick * Read a log block and make sure it's valid. 179fa9e4066Sahrens */ 180fa9e4066Sahrens static int 1816e1f5caaSNeil Perrin zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst, 1826e1f5caaSNeil Perrin char **end) 183fa9e4066Sahrens { 184b24ab676SJeff Bonwick enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 1857adb730bSGeorge Wilson arc_flags_t aflags = ARC_FLAG_WAIT; 186b24ab676SJeff Bonwick arc_buf_t *abuf = NULL; 1877802d7bfSMatthew Ahrens zbookmark_phys_t zb; 188fa9e4066Sahrens int error; 189fa9e4066Sahrens 190b24ab676SJeff Bonwick if (zilog->zl_header->zh_claim_txg == 0) 191b24ab676SJeff Bonwick zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 192ea8dc4b6Seschrock 193b24ab676SJeff Bonwick if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 194b24ab676SJeff Bonwick zio_flags |= ZIO_FLAG_SPECULATIVE; 195fa9e4066Sahrens 196b24ab676SJeff Bonwick SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET], 197b24ab676SJeff Bonwick ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]); 198b24ab676SJeff Bonwick 1991b912ec7SGeorge Wilson error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 200b24ab676SJeff Bonwick ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 201d80c45e0Sbonwick 202d80c45e0Sbonwick if (error == 0) { 203d80c45e0Sbonwick zio_cksum_t cksum = bp->blk_cksum; 204fa9e4066Sahrens 205fa9e4066Sahrens /* 206f5e6e722SNeil Perrin * Validate the checksummed log block. 207f5e6e722SNeil Perrin * 208d80c45e0Sbonwick * Sequence numbers should be... sequential. The checksum 209d80c45e0Sbonwick * verifier for the next block should be bp's checksum plus 1. 210f5e6e722SNeil Perrin * 211f5e6e722SNeil Perrin * Also check the log chain linkage and size used. 212fa9e4066Sahrens */ 213d80c45e0Sbonwick cksum.zc_word[ZIL_ZC_SEQ]++; 214d80c45e0Sbonwick 2156e1f5caaSNeil Perrin if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 2166e1f5caaSNeil Perrin zil_chain_t *zilc = abuf->b_data; 2176e1f5caaSNeil Perrin char *lr = (char *)(zilc + 1); 2186e1f5caaSNeil Perrin uint64_t len = zilc->zc_nused - sizeof (zil_chain_t); 2196e1f5caaSNeil Perrin 2206e1f5caaSNeil Perrin if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 2216e1f5caaSNeil Perrin sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) { 222be6fd75aSMatthew Ahrens error = SET_ERROR(ECKSUM); 2236e1f5caaSNeil Perrin } else { 224b5152584SMatthew Ahrens ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE); 2256e1f5caaSNeil Perrin bcopy(lr, dst, len); 2266e1f5caaSNeil Perrin *end = (char *)dst + len; 2276e1f5caaSNeil Perrin *nbp = zilc->zc_next_blk; 2286e1f5caaSNeil Perrin } 2296e1f5caaSNeil Perrin } else { 2306e1f5caaSNeil Perrin char *lr = abuf->b_data; 2316e1f5caaSNeil Perrin uint64_t size = BP_GET_LSIZE(bp); 2326e1f5caaSNeil Perrin zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1; 2336e1f5caaSNeil Perrin 2346e1f5caaSNeil Perrin if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum, 2356e1f5caaSNeil Perrin sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) || 2366e1f5caaSNeil Perrin (zilc->zc_nused > (size - sizeof (*zilc)))) { 237be6fd75aSMatthew Ahrens error = SET_ERROR(ECKSUM); 2386e1f5caaSNeil Perrin } else { 239b5152584SMatthew Ahrens ASSERT3U(zilc->zc_nused, <=, 240b5152584SMatthew Ahrens SPA_OLD_MAXBLOCKSIZE); 2416e1f5caaSNeil Perrin bcopy(lr, dst, zilc->zc_nused); 2426e1f5caaSNeil Perrin *end = (char *)dst + zilc->zc_nused; 2436e1f5caaSNeil Perrin *nbp = zilc->zc_next_blk; 2446e1f5caaSNeil Perrin } 2456e1f5caaSNeil Perrin } 246b24ab676SJeff Bonwick 2473b2aab18SMatthew Ahrens VERIFY(arc_buf_remove_ref(abuf, &abuf)); 248f5e6e722SNeil Perrin } 249d80c45e0Sbonwick 250b24ab676SJeff Bonwick return (error); 251fa9e4066Sahrens } 252fa9e4066Sahrens 253b24ab676SJeff Bonwick /* 254b24ab676SJeff Bonwick * Read a TX_WRITE log data block. 255b24ab676SJeff Bonwick */ 256b24ab676SJeff Bonwick static int 257b24ab676SJeff Bonwick zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf) 258b24ab676SJeff Bonwick { 259b24ab676SJeff Bonwick enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; 260b24ab676SJeff Bonwick const blkptr_t *bp = &lr->lr_blkptr; 2617adb730bSGeorge Wilson arc_flags_t aflags = ARC_FLAG_WAIT; 262b24ab676SJeff Bonwick arc_buf_t *abuf = NULL; 2637802d7bfSMatthew Ahrens zbookmark_phys_t zb; 264b24ab676SJeff Bonwick int error; 265b24ab676SJeff Bonwick 266b24ab676SJeff Bonwick if (BP_IS_HOLE(bp)) { 267b24ab676SJeff Bonwick if (wbuf != NULL) 268b24ab676SJeff Bonwick bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length)); 269b24ab676SJeff Bonwick return (0); 270b24ab676SJeff Bonwick } 271b24ab676SJeff Bonwick 272b24ab676SJeff Bonwick if (zilog->zl_header->zh_claim_txg == 0) 273b24ab676SJeff Bonwick zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB; 274b24ab676SJeff Bonwick 275b24ab676SJeff Bonwick SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid, 276b24ab676SJeff Bonwick ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp)); 277b24ab676SJeff Bonwick 2781b912ec7SGeorge Wilson error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf, 279b24ab676SJeff Bonwick ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); 280b24ab676SJeff Bonwick 281b24ab676SJeff Bonwick if (error == 0) { 282b24ab676SJeff Bonwick if (wbuf != NULL) 283b24ab676SJeff Bonwick bcopy(abuf->b_data, wbuf, arc_buf_size(abuf)); 284b24ab676SJeff Bonwick (void) arc_buf_remove_ref(abuf, &abuf); 285b24ab676SJeff Bonwick } 286fa9e4066Sahrens 287d80c45e0Sbonwick return (error); 288fa9e4066Sahrens } 289fa9e4066Sahrens 290fa9e4066Sahrens /* 291fa9e4066Sahrens * Parse the intent log, and call parse_func for each valid record within. 292fa9e4066Sahrens */ 293b24ab676SJeff Bonwick int 294fa9e4066Sahrens zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, 295fa9e4066Sahrens zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg) 296fa9e4066Sahrens { 297d80c45e0Sbonwick const zil_header_t *zh = zilog->zl_header; 298b24ab676SJeff Bonwick boolean_t claimed = !!zh->zh_claim_txg; 299b24ab676SJeff Bonwick uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX; 300b24ab676SJeff Bonwick uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX; 301b24ab676SJeff Bonwick uint64_t max_blk_seq = 0; 302b24ab676SJeff Bonwick uint64_t max_lr_seq = 0; 303b24ab676SJeff Bonwick uint64_t blk_count = 0; 304b24ab676SJeff Bonwick uint64_t lr_count = 0; 305b24ab676SJeff Bonwick blkptr_t blk, next_blk; 306fa9e4066Sahrens char *lrbuf, *lrp; 307b24ab676SJeff Bonwick int error = 0; 308fa9e4066Sahrens 309b24ab676SJeff Bonwick /* 310b24ab676SJeff Bonwick * Old logs didn't record the maximum zh_claim_lr_seq. 311b24ab676SJeff Bonwick */ 312b24ab676SJeff Bonwick if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID)) 313b24ab676SJeff Bonwick claim_lr_seq = UINT64_MAX; 314fa9e4066Sahrens 315fa9e4066Sahrens /* 316fa9e4066Sahrens * Starting at the block pointed to by zh_log we read the log chain. 317fa9e4066Sahrens * For each block in the chain we strongly check that block to 318fa9e4066Sahrens * ensure its validity. We stop when an invalid block is found. 319fa9e4066Sahrens * For each block pointer in the chain we call parse_blk_func(). 320fa9e4066Sahrens * For each record in each valid block we call parse_lr_func(). 321d80c45e0Sbonwick * If the log has been claimed, stop if we encounter a sequence 322d80c45e0Sbonwick * number greater than the highest claimed sequence number. 323fa9e4066Sahrens */ 324b5152584SMatthew Ahrens lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE); 325b24ab676SJeff Bonwick zil_bp_tree_init(zilog); 326d80c45e0Sbonwick 327b24ab676SJeff Bonwick for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) { 328b24ab676SJeff Bonwick uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ]; 329b24ab676SJeff Bonwick int reclen; 3306e1f5caaSNeil Perrin char *end; 331b24ab676SJeff Bonwick 332b24ab676SJeff Bonwick if (blk_seq > claim_blk_seq) 333b24ab676SJeff Bonwick break; 334b24ab676SJeff Bonwick if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0) 335b24ab676SJeff Bonwick break; 3366e1f5caaSNeil Perrin ASSERT3U(max_blk_seq, <, blk_seq); 337b24ab676SJeff Bonwick max_blk_seq = blk_seq; 338b24ab676SJeff Bonwick blk_count++; 339b24ab676SJeff Bonwick 340b24ab676SJeff Bonwick if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq) 341d80c45e0Sbonwick break; 342d80c45e0Sbonwick 3436e1f5caaSNeil Perrin error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end); 3443b2aab18SMatthew Ahrens if (error != 0) 345fa9e4066Sahrens break; 346fa9e4066Sahrens 3476e1f5caaSNeil Perrin for (lrp = lrbuf; lrp < end; lrp += reclen) { 348fa9e4066Sahrens lr_t *lr = (lr_t *)lrp; 349fa9e4066Sahrens reclen = lr->lrc_reclen; 350fa9e4066Sahrens ASSERT3U(reclen, >=, sizeof (lr_t)); 351b24ab676SJeff Bonwick if (lr->lrc_seq > claim_lr_seq) 352b24ab676SJeff Bonwick goto done; 353b24ab676SJeff Bonwick if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0) 354b24ab676SJeff Bonwick goto done; 3556e1f5caaSNeil Perrin ASSERT3U(max_lr_seq, <, lr->lrc_seq); 356b24ab676SJeff Bonwick max_lr_seq = lr->lrc_seq; 357b24ab676SJeff Bonwick lr_count++; 358fa9e4066Sahrens } 359fa9e4066Sahrens } 360b24ab676SJeff Bonwick done: 361b24ab676SJeff Bonwick zilog->zl_parse_error = error; 362b24ab676SJeff Bonwick zilog->zl_parse_blk_seq = max_blk_seq; 363b24ab676SJeff Bonwick zilog->zl_parse_lr_seq = max_lr_seq; 364b24ab676SJeff Bonwick zilog->zl_parse_blk_count = blk_count; 365b24ab676SJeff Bonwick zilog->zl_parse_lr_count = lr_count; 366d80c45e0Sbonwick 367b24ab676SJeff Bonwick ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) || 368b24ab676SJeff Bonwick (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq)); 369b24ab676SJeff Bonwick 370b24ab676SJeff Bonwick zil_bp_tree_fini(zilog); 371b5152584SMatthew Ahrens zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE); 372b24ab676SJeff Bonwick 373b24ab676SJeff Bonwick return (error); 374fa9e4066Sahrens } 375fa9e4066Sahrens 376b24ab676SJeff Bonwick static int 377fa9e4066Sahrens zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg) 378fa9e4066Sahrens { 379fa9e4066Sahrens /* 380fa9e4066Sahrens * Claim log block if not already committed and not already claimed. 381b24ab676SJeff Bonwick * If tx == NULL, just verify that the block is claimable. 382fa9e4066Sahrens */ 38343466aaeSMax Grossman if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg || 38443466aaeSMax Grossman zil_bp_tree_add(zilog, bp) != 0) 385b24ab676SJeff Bonwick return (0); 386b24ab676SJeff Bonwick 387b24ab676SJeff Bonwick return (zio_wait(zio_claim(NULL, zilog->zl_spa, 388b24ab676SJeff Bonwick tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL, 389b24ab676SJeff Bonwick ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB))); 390fa9e4066Sahrens } 391fa9e4066Sahrens 392b24ab676SJeff Bonwick static int 393fa9e4066Sahrens zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg) 394fa9e4066Sahrens { 395fa9e4066Sahrens lr_write_t *lr = (lr_write_t *)lrc; 396b24ab676SJeff Bonwick int error; 397b24ab676SJeff Bonwick 398b24ab676SJeff Bonwick if (lrc->lrc_txtype != TX_WRITE) 399b24ab676SJeff Bonwick return (0); 400b24ab676SJeff Bonwick 401b24ab676SJeff Bonwick /* 402b24ab676SJeff Bonwick * If the block is not readable, don't claim it. This can happen 403b24ab676SJeff Bonwick * in normal operation when a log block is written to disk before 404b24ab676SJeff Bonwick * some of the dmu_sync() blocks it points to. In this case, the 405b24ab676SJeff Bonwick * transaction cannot have been committed to anyone (we would have 406b24ab676SJeff Bonwick * waited for all writes to be stable first), so it is semantically 407b24ab676SJeff Bonwick * correct to declare this the end of the log. 408b24ab676SJeff Bonwick */ 409b24ab676SJeff Bonwick if (lr->lr_blkptr.blk_birth >= first_txg && 410b24ab676SJeff Bonwick (error = zil_read_log_data(zilog, lr, NULL)) != 0) 411b24ab676SJeff Bonwick return (error); 412b24ab676SJeff Bonwick return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg)); 413fa9e4066Sahrens } 414fa9e4066Sahrens 415fa9e4066Sahrens /* ARGSUSED */ 416b24ab676SJeff Bonwick static int 417fa9e4066Sahrens zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg) 418fa9e4066Sahrens { 419b24ab676SJeff Bonwick zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 420b24ab676SJeff Bonwick 421b24ab676SJeff Bonwick return (0); 422fa9e4066Sahrens } 423fa9e4066Sahrens 424b24ab676SJeff Bonwick static int 425fa9e4066Sahrens zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg) 426fa9e4066Sahrens { 427b24ab676SJeff Bonwick lr_write_t *lr = (lr_write_t *)lrc; 428b24ab676SJeff Bonwick blkptr_t *bp = &lr->lr_blkptr; 429b24ab676SJeff Bonwick 430fa9e4066Sahrens /* 431fa9e4066Sahrens * If we previously claimed it, we need to free it. 432fa9e4066Sahrens */ 433b24ab676SJeff Bonwick if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE && 43443466aaeSMax Grossman bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 && 43543466aaeSMax Grossman !BP_IS_HOLE(bp)) 436b24ab676SJeff Bonwick zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); 437b24ab676SJeff Bonwick 438b24ab676SJeff Bonwick return (0); 439fa9e4066Sahrens } 440fa9e4066Sahrens 4416e1f5caaSNeil Perrin static lwb_t * 4426e1f5caaSNeil Perrin zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg) 4436e1f5caaSNeil Perrin { 4446e1f5caaSNeil Perrin lwb_t *lwb; 4456e1f5caaSNeil Perrin 4466e1f5caaSNeil Perrin lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP); 4476e1f5caaSNeil Perrin lwb->lwb_zilog = zilog; 4486e1f5caaSNeil Perrin lwb->lwb_blk = *bp; 4496e1f5caaSNeil Perrin lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp)); 4506e1f5caaSNeil Perrin lwb->lwb_max_txg = txg; 4516e1f5caaSNeil Perrin lwb->lwb_zio = NULL; 4526e1f5caaSNeil Perrin lwb->lwb_tx = NULL; 4536e1f5caaSNeil Perrin if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) { 4546e1f5caaSNeil Perrin lwb->lwb_nused = sizeof (zil_chain_t); 4556e1f5caaSNeil Perrin lwb->lwb_sz = BP_GET_LSIZE(bp); 4566e1f5caaSNeil Perrin } else { 4576e1f5caaSNeil Perrin lwb->lwb_nused = 0; 4586e1f5caaSNeil Perrin lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t); 4596e1f5caaSNeil Perrin } 4606e1f5caaSNeil Perrin 4616e1f5caaSNeil Perrin mutex_enter(&zilog->zl_lock); 4626e1f5caaSNeil Perrin list_insert_tail(&zilog->zl_lwb_list, lwb); 4636e1f5caaSNeil Perrin mutex_exit(&zilog->zl_lock); 4646e1f5caaSNeil Perrin 4656e1f5caaSNeil Perrin return (lwb); 4666e1f5caaSNeil Perrin } 4676e1f5caaSNeil Perrin 468fa9e4066Sahrens /* 469ce636f8bSMatthew Ahrens * Called when we create in-memory log transactions so that we know 470ce636f8bSMatthew Ahrens * to cleanup the itxs at the end of spa_sync(). 471ce636f8bSMatthew Ahrens */ 472ce636f8bSMatthew Ahrens void 473ce636f8bSMatthew Ahrens zilog_dirty(zilog_t *zilog, uint64_t txg) 474ce636f8bSMatthew Ahrens { 475ce636f8bSMatthew Ahrens dsl_pool_t *dp = zilog->zl_dmu_pool; 476ce636f8bSMatthew Ahrens dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os); 477ce636f8bSMatthew Ahrens 478bc9014e6SJustin Gibbs if (ds->ds_is_snapshot) 479ce636f8bSMatthew Ahrens panic("dirtying snapshot!"); 480ce636f8bSMatthew Ahrens 4813b2aab18SMatthew Ahrens if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) { 482ce636f8bSMatthew Ahrens /* up the hold count until we can be written out */ 483ce636f8bSMatthew Ahrens dmu_buf_add_ref(ds->ds_dbuf, zilog); 484ce636f8bSMatthew Ahrens } 485ce636f8bSMatthew Ahrens } 486ce636f8bSMatthew Ahrens 487ce636f8bSMatthew Ahrens boolean_t 488ce636f8bSMatthew Ahrens zilog_is_dirty(zilog_t *zilog) 489ce636f8bSMatthew Ahrens { 490ce636f8bSMatthew Ahrens dsl_pool_t *dp = zilog->zl_dmu_pool; 491ce636f8bSMatthew Ahrens 492ce636f8bSMatthew Ahrens for (int t = 0; t < TXG_SIZE; t++) { 493ce636f8bSMatthew Ahrens if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t)) 494ce636f8bSMatthew Ahrens return (B_TRUE); 495ce636f8bSMatthew Ahrens } 496ce636f8bSMatthew Ahrens return (B_FALSE); 497ce636f8bSMatthew Ahrens } 498ce636f8bSMatthew Ahrens 499ce636f8bSMatthew Ahrens /* 500fa9e4066Sahrens * Create an on-disk intent log. 501fa9e4066Sahrens */ 5026e1f5caaSNeil Perrin static lwb_t * 503fa9e4066Sahrens zil_create(zilog_t *zilog) 504fa9e4066Sahrens { 505d80c45e0Sbonwick const zil_header_t *zh = zilog->zl_header; 5066e1f5caaSNeil Perrin lwb_t *lwb = NULL; 507d80c45e0Sbonwick uint64_t txg = 0; 508d80c45e0Sbonwick dmu_tx_t *tx = NULL; 509fa9e4066Sahrens blkptr_t blk; 510d80c45e0Sbonwick int error = 0; 511fa9e4066Sahrens 512fa9e4066Sahrens /* 513d80c45e0Sbonwick * Wait for any previous destroy to complete. 514fa9e4066Sahrens */ 515d80c45e0Sbonwick txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 516d80c45e0Sbonwick 517d80c45e0Sbonwick ASSERT(zh->zh_claim_txg == 0); 518d80c45e0Sbonwick ASSERT(zh->zh_replay_seq == 0); 519d80c45e0Sbonwick 520d80c45e0Sbonwick blk = zh->zh_log; 521d80c45e0Sbonwick 522d80c45e0Sbonwick /* 5236e1f5caaSNeil Perrin * Allocate an initial log block if: 5246e1f5caaSNeil Perrin * - there isn't one already 5256e1f5caaSNeil Perrin * - the existing block is the wrong endianess 526d80c45e0Sbonwick */ 527899217ddSNeil Perrin if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) { 528fa9e4066Sahrens tx = dmu_tx_create(zilog->zl_os); 529b24ab676SJeff Bonwick VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0); 530fa9e4066Sahrens dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 531fa9e4066Sahrens txg = dmu_tx_get_txg(tx); 532fa9e4066Sahrens 533899217ddSNeil Perrin if (!BP_IS_HOLE(&blk)) { 534b24ab676SJeff Bonwick zio_free_zil(zilog->zl_spa, txg, &blk); 535899217ddSNeil Perrin BP_ZERO(&blk); 536899217ddSNeil Perrin } 537899217ddSNeil Perrin 538b24ab676SJeff Bonwick error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL, 539b24ab676SJeff Bonwick ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY); 540d80c45e0Sbonwick 541d80c45e0Sbonwick if (error == 0) 542d80c45e0Sbonwick zil_init_log_chain(zilog, &blk); 54313f5297eSperrin } 544fa9e4066Sahrens 545fa9e4066Sahrens /* 546fa9e4066Sahrens * Allocate a log write buffer (lwb) for the first log block. 547fa9e4066Sahrens */ 5486e1f5caaSNeil Perrin if (error == 0) 5496e1f5caaSNeil Perrin lwb = zil_alloc_lwb(zilog, &blk, txg); 550fa9e4066Sahrens 551d80c45e0Sbonwick /* 552d80c45e0Sbonwick * If we just allocated the first log block, commit our transaction 553d80c45e0Sbonwick * and wait for zil_sync() to stuff the block poiner into zh_log. 554d80c45e0Sbonwick * (zh is part of the MOS, so we cannot modify it in open context.) 555d80c45e0Sbonwick */ 556d80c45e0Sbonwick if (tx != NULL) { 557fa9e4066Sahrens dmu_tx_commit(tx); 558fa9e4066Sahrens txg_wait_synced(zilog->zl_dmu_pool, txg); 559fa9e4066Sahrens } 560fa9e4066Sahrens 561d80c45e0Sbonwick ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0); 5626e1f5caaSNeil Perrin 5636e1f5caaSNeil Perrin return (lwb); 564d80c45e0Sbonwick } 565d80c45e0Sbonwick 566fa9e4066Sahrens /* 567fa9e4066Sahrens * In one tx, free all log blocks and clear the log header. 568d80c45e0Sbonwick * If keep_first is set, then we're replaying a log with no content. 569d80c45e0Sbonwick * We want to keep the first block, however, so that the first 570d80c45e0Sbonwick * synchronous transaction doesn't require a txg_wait_synced() 571d80c45e0Sbonwick * in zil_create(). We don't need to txg_wait_synced() here either 572d80c45e0Sbonwick * when keep_first is set, because both zil_create() and zil_destroy() 573d80c45e0Sbonwick * will wait for any in-progress destroys to complete. 574fa9e4066Sahrens */ 575fa9e4066Sahrens void 576d80c45e0Sbonwick zil_destroy(zilog_t *zilog, boolean_t keep_first) 577fa9e4066Sahrens { 578d80c45e0Sbonwick const zil_header_t *zh = zilog->zl_header; 579d80c45e0Sbonwick lwb_t *lwb; 580fa9e4066Sahrens dmu_tx_t *tx; 581fa9e4066Sahrens uint64_t txg; 582fa9e4066Sahrens 583d80c45e0Sbonwick /* 584d80c45e0Sbonwick * Wait for any previous destroy to complete. 585d80c45e0Sbonwick */ 586d80c45e0Sbonwick txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 587fa9e4066Sahrens 588b24ab676SJeff Bonwick zilog->zl_old_header = *zh; /* debugging aid */ 589b24ab676SJeff Bonwick 590d80c45e0Sbonwick if (BP_IS_HOLE(&zh->zh_log)) 591fa9e4066Sahrens return; 592fa9e4066Sahrens 593fa9e4066Sahrens tx = dmu_tx_create(zilog->zl_os); 594b24ab676SJeff Bonwick VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0); 595fa9e4066Sahrens dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 596fa9e4066Sahrens txg = dmu_tx_get_txg(tx); 597fa9e4066Sahrens 598d80c45e0Sbonwick mutex_enter(&zilog->zl_lock); 599d80c45e0Sbonwick 600d80c45e0Sbonwick ASSERT3U(zilog->zl_destroy_txg, <, txg); 601fa9e4066Sahrens zilog->zl_destroy_txg = txg; 602b24ab676SJeff Bonwick zilog->zl_keep_first = keep_first; 603d80c45e0Sbonwick 604d80c45e0Sbonwick if (!list_is_empty(&zilog->zl_lwb_list)) { 605d80c45e0Sbonwick ASSERT(zh->zh_claim_txg == 0); 606c9ba2a43SEric Schrock VERIFY(!keep_first); 607d80c45e0Sbonwick while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 608d80c45e0Sbonwick list_remove(&zilog->zl_lwb_list, lwb); 609d80c45e0Sbonwick if (lwb->lwb_buf != NULL) 610d80c45e0Sbonwick zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 611b24ab676SJeff Bonwick zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk); 612d80c45e0Sbonwick kmem_cache_free(zil_lwb_cache, lwb); 613d80c45e0Sbonwick } 614b24ab676SJeff Bonwick } else if (!keep_first) { 615ce636f8bSMatthew Ahrens zil_destroy_sync(zilog, tx); 616d80c45e0Sbonwick } 617b19a79ecSperrin mutex_exit(&zilog->zl_lock); 618fa9e4066Sahrens 619fa9e4066Sahrens dmu_tx_commit(tx); 620fa9e4066Sahrens } 621fa9e4066Sahrens 622ce636f8bSMatthew Ahrens void 623ce636f8bSMatthew Ahrens zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx) 624ce636f8bSMatthew Ahrens { 625ce636f8bSMatthew Ahrens ASSERT(list_is_empty(&zilog->zl_lwb_list)); 626ce636f8bSMatthew Ahrens (void) zil_parse(zilog, zil_free_log_block, 627ce636f8bSMatthew Ahrens zil_free_log_record, tx, zilog->zl_header->zh_claim_txg); 628ce636f8bSMatthew Ahrens } 629ce636f8bSMatthew Ahrens 6301d452cf5Sahrens int 63112380e1eSArne Jansen zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg) 632fa9e4066Sahrens { 633fa9e4066Sahrens dmu_tx_t *tx = txarg; 634fa9e4066Sahrens uint64_t first_txg = dmu_tx_get_txg(tx); 635fa9e4066Sahrens zilog_t *zilog; 636fa9e4066Sahrens zil_header_t *zh; 637fa9e4066Sahrens objset_t *os; 638fa9e4066Sahrens int error; 639fa9e4066Sahrens 64012380e1eSArne Jansen error = dmu_objset_own_obj(dp, ds->ds_object, 64112380e1eSArne Jansen DMU_OST_ANY, B_FALSE, FTAG, &os); 6423b2aab18SMatthew Ahrens if (error != 0) { 64322438533SMatthew Ahrens /* 64422438533SMatthew Ahrens * EBUSY indicates that the objset is inconsistent, in which 64522438533SMatthew Ahrens * case it can not have a ZIL. 64622438533SMatthew Ahrens */ 64722438533SMatthew Ahrens if (error != EBUSY) { 64812380e1eSArne Jansen cmn_err(CE_WARN, "can't open objset for %llu, error %u", 64912380e1eSArne Jansen (unsigned long long)ds->ds_object, error); 65022438533SMatthew Ahrens } 6511d452cf5Sahrens return (0); 652fa9e4066Sahrens } 653fa9e4066Sahrens 654fa9e4066Sahrens zilog = dmu_objset_zil(os); 655d80c45e0Sbonwick zh = zil_header_in_syncing_context(zilog); 656fa9e4066Sahrens 657b24ab676SJeff Bonwick if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) { 658e6ca193dSGeorge Wilson if (!BP_IS_HOLE(&zh->zh_log)) 659b24ab676SJeff Bonwick zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log); 660e6ca193dSGeorge Wilson BP_ZERO(&zh->zh_log); 661e6ca193dSGeorge Wilson dsl_dataset_dirty(dmu_objset_ds(os), tx); 6623b2aab18SMatthew Ahrens dmu_objset_disown(os, FTAG); 663468c413aSTim Haley return (0); 664e6ca193dSGeorge Wilson } 665e6ca193dSGeorge Wilson 666fa9e4066Sahrens /* 667d80c45e0Sbonwick * Claim all log blocks if we haven't already done so, and remember 668d80c45e0Sbonwick * the highest claimed sequence number. This ensures that if we can 669d80c45e0Sbonwick * read only part of the log now (e.g. due to a missing device), 670d80c45e0Sbonwick * but we can read the entire log later, we will not try to replay 671d80c45e0Sbonwick * or destroy beyond the last block we successfully claimed. 672fa9e4066Sahrens */ 673fa9e4066Sahrens ASSERT3U(zh->zh_claim_txg, <=, first_txg); 674fa9e4066Sahrens if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) { 675b24ab676SJeff Bonwick (void) zil_parse(zilog, zil_claim_log_block, 676d80c45e0Sbonwick zil_claim_log_record, tx, first_txg); 677b24ab676SJeff Bonwick zh->zh_claim_txg = first_txg; 678b24ab676SJeff Bonwick zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq; 679b24ab676SJeff Bonwick zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq; 680b24ab676SJeff Bonwick if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1) 681b24ab676SJeff Bonwick zh->zh_flags |= ZIL_REPLAY_NEEDED; 682b24ab676SJeff Bonwick zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID; 683fa9e4066Sahrens dsl_dataset_dirty(dmu_objset_ds(os), tx); 684fa9e4066Sahrens } 685d80c45e0Sbonwick 686fa9e4066Sahrens ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1)); 6873b2aab18SMatthew Ahrens dmu_objset_disown(os, FTAG); 6881d452cf5Sahrens return (0); 689fa9e4066Sahrens } 690fa9e4066Sahrens 691b87f3af3Sperrin /* 692b87f3af3Sperrin * Check the log by walking the log chain. 693b87f3af3Sperrin * Checksum errors are ok as they indicate the end of the chain. 694b87f3af3Sperrin * Any other error (no device or read failure) returns an error. 695b87f3af3Sperrin */ 69612380e1eSArne Jansen /* ARGSUSED */ 697b87f3af3Sperrin int 69812380e1eSArne Jansen zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx) 699b87f3af3Sperrin { 700b87f3af3Sperrin zilog_t *zilog; 701b87f3af3Sperrin objset_t *os; 7024b964adaSGeorge Wilson blkptr_t *bp; 703b87f3af3Sperrin int error; 704b87f3af3Sperrin 705b24ab676SJeff Bonwick ASSERT(tx == NULL); 706b24ab676SJeff Bonwick 70712380e1eSArne Jansen error = dmu_objset_from_ds(ds, &os); 7083b2aab18SMatthew Ahrens if (error != 0) { 70912380e1eSArne Jansen cmn_err(CE_WARN, "can't open objset %llu, error %d", 71012380e1eSArne Jansen (unsigned long long)ds->ds_object, error); 711b87f3af3Sperrin return (0); 712b87f3af3Sperrin } 713b87f3af3Sperrin 714b87f3af3Sperrin zilog = dmu_objset_zil(os); 7154b964adaSGeorge Wilson bp = (blkptr_t *)&zilog->zl_header->zh_log; 7164b964adaSGeorge Wilson 7174b964adaSGeorge Wilson /* 7184b964adaSGeorge Wilson * Check the first block and determine if it's on a log device 7194b964adaSGeorge Wilson * which may have been removed or faulted prior to loading this 7204b964adaSGeorge Wilson * pool. If so, there's no point in checking the rest of the log 7214b964adaSGeorge Wilson * as its content should have already been synced to the pool. 7224b964adaSGeorge Wilson */ 7234b964adaSGeorge Wilson if (!BP_IS_HOLE(bp)) { 7244b964adaSGeorge Wilson vdev_t *vd; 7254b964adaSGeorge Wilson boolean_t valid = B_TRUE; 7264b964adaSGeorge Wilson 7274b964adaSGeorge Wilson spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER); 7284b964adaSGeorge Wilson vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0])); 7294b964adaSGeorge Wilson if (vd->vdev_islog && vdev_is_dead(vd)) 7304b964adaSGeorge Wilson valid = vdev_log_state_valid(vd); 7314b964adaSGeorge Wilson spa_config_exit(os->os_spa, SCL_STATE, FTAG); 7324b964adaSGeorge Wilson 73312380e1eSArne Jansen if (!valid) 7344b964adaSGeorge Wilson return (0); 7354b964adaSGeorge Wilson } 736b87f3af3Sperrin 737b24ab676SJeff Bonwick /* 738b24ab676SJeff Bonwick * Because tx == NULL, zil_claim_log_block() will not actually claim 739b24ab676SJeff Bonwick * any blocks, but just determine whether it is possible to do so. 740b24ab676SJeff Bonwick * In addition to checking the log chain, zil_claim_log_block() 741b24ab676SJeff Bonwick * will invoke zio_claim() with a done func of spa_claim_notify(), 742b24ab676SJeff Bonwick * which will update spa_max_claim_txg. See spa_load() for details. 743b24ab676SJeff Bonwick */ 744b24ab676SJeff Bonwick error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx, 745b24ab676SJeff Bonwick zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa)); 746b24ab676SJeff Bonwick 747b24ab676SJeff Bonwick return ((error == ECKSUM || error == ENOENT) ? 0 : error); 748b87f3af3Sperrin } 749b87f3af3Sperrin 75017f17c2dSbonwick static int 75117f17c2dSbonwick zil_vdev_compare(const void *x1, const void *x2) 752fa9e4066Sahrens { 7535002558fSNeil Perrin const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev; 7545002558fSNeil Perrin const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev; 75517f17c2dSbonwick 75617f17c2dSbonwick if (v1 < v2) 75717f17c2dSbonwick return (-1); 75817f17c2dSbonwick if (v1 > v2) 75917f17c2dSbonwick return (1); 76017f17c2dSbonwick 76117f17c2dSbonwick return (0); 76217f17c2dSbonwick } 76317f17c2dSbonwick 76417f17c2dSbonwick void 765b24ab676SJeff Bonwick zil_add_block(zilog_t *zilog, const blkptr_t *bp) 76617f17c2dSbonwick { 76717f17c2dSbonwick avl_tree_t *t = &zilog->zl_vdev_tree; 76817f17c2dSbonwick avl_index_t where; 76917f17c2dSbonwick zil_vdev_node_t *zv, zvsearch; 77017f17c2dSbonwick int ndvas = BP_GET_NDVAS(bp); 77117f17c2dSbonwick int i; 772fa9e4066Sahrens 773416e0cd8Sek110237 if (zfs_nocacheflush) 774fa9e4066Sahrens return; 775fa9e4066Sahrens 77617f17c2dSbonwick ASSERT(zilog->zl_writer); 77717f17c2dSbonwick 77867bd71c6Sperrin /* 77917f17c2dSbonwick * Even though we're zl_writer, we still need a lock because the 78017f17c2dSbonwick * zl_get_data() callbacks may have dmu_sync() done callbacks 78117f17c2dSbonwick * that will run concurrently. 78267bd71c6Sperrin */ 78317f17c2dSbonwick mutex_enter(&zilog->zl_vdev_lock); 78417f17c2dSbonwick for (i = 0; i < ndvas; i++) { 78517f17c2dSbonwick zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 78617f17c2dSbonwick if (avl_find(t, &zvsearch, &where) == NULL) { 78717f17c2dSbonwick zv = kmem_alloc(sizeof (*zv), KM_SLEEP); 78817f17c2dSbonwick zv->zv_vdev = zvsearch.zv_vdev; 78917f17c2dSbonwick avl_insert(t, zv, where); 79067bd71c6Sperrin } 79167bd71c6Sperrin } 79217f17c2dSbonwick mutex_exit(&zilog->zl_vdev_lock); 79367bd71c6Sperrin } 79467bd71c6Sperrin 79591de656bSNeil Perrin static void 796b19a79ecSperrin zil_flush_vdevs(zilog_t *zilog) 797fa9e4066Sahrens { 79867bd71c6Sperrin spa_t *spa = zilog->zl_spa; 79917f17c2dSbonwick avl_tree_t *t = &zilog->zl_vdev_tree; 80017f17c2dSbonwick void *cookie = NULL; 80117f17c2dSbonwick zil_vdev_node_t *zv; 80217f17c2dSbonwick zio_t *zio; 803fa9e4066Sahrens 80467bd71c6Sperrin ASSERT(zilog->zl_writer); 805fa9e4066Sahrens 80617f17c2dSbonwick /* 80717f17c2dSbonwick * We don't need zl_vdev_lock here because we're the zl_writer, 80817f17c2dSbonwick * and all zl_get_data() callbacks are done. 80917f17c2dSbonwick */ 81017f17c2dSbonwick if (avl_numnodes(t) == 0) 81117f17c2dSbonwick return; 81217f17c2dSbonwick 813e14bb325SJeff Bonwick spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 81417f17c2dSbonwick 815e14bb325SJeff Bonwick zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 81617f17c2dSbonwick 81717f17c2dSbonwick while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) { 81817f17c2dSbonwick vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev); 81917f17c2dSbonwick if (vd != NULL) 82017f17c2dSbonwick zio_flush(zio, vd); 82117f17c2dSbonwick kmem_free(zv, sizeof (*zv)); 82267bd71c6Sperrin } 823fa9e4066Sahrens 824fa9e4066Sahrens /* 825fa9e4066Sahrens * Wait for all the flushes to complete. Not all devices actually 826fa9e4066Sahrens * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails. 827fa9e4066Sahrens */ 828fa9e4066Sahrens (void) zio_wait(zio); 82917f17c2dSbonwick 830e14bb325SJeff Bonwick spa_config_exit(spa, SCL_STATE, FTAG); 831fa9e4066Sahrens } 832fa9e4066Sahrens 833fa9e4066Sahrens /* 834fa9e4066Sahrens * Function called when a log block write completes 835fa9e4066Sahrens */ 836fa9e4066Sahrens static void 837fa9e4066Sahrens zil_lwb_write_done(zio_t *zio) 838fa9e4066Sahrens { 839fa9e4066Sahrens lwb_t *lwb = zio->io_private; 840fa9e4066Sahrens zilog_t *zilog = lwb->lwb_zilog; 841b24ab676SJeff Bonwick dmu_tx_t *tx = lwb->lwb_tx; 842fa9e4066Sahrens 843e14bb325SJeff Bonwick ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); 844e14bb325SJeff Bonwick ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG); 845e14bb325SJeff Bonwick ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 846e14bb325SJeff Bonwick ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER); 847e14bb325SJeff Bonwick ASSERT(!BP_IS_GANG(zio->io_bp)); 848e14bb325SJeff Bonwick ASSERT(!BP_IS_HOLE(zio->io_bp)); 8495d7b4d43SMatthew Ahrens ASSERT(BP_GET_FILL(zio->io_bp) == 0); 850e14bb325SJeff Bonwick 851fa9e4066Sahrens /* 852ef0d8e11SNeil Perrin * Ensure the lwb buffer pointer is cleared before releasing 853ef0d8e11SNeil Perrin * the txg. If we have had an allocation failure and 854ef0d8e11SNeil Perrin * the txg is waiting to sync then we want want zil_sync() 855ef0d8e11SNeil Perrin * to remove the lwb so that it's not picked up as the next new 856ef0d8e11SNeil Perrin * one in zil_commit_writer(). zil_sync() will only remove 857ef0d8e11SNeil Perrin * the lwb if lwb_buf is null. 858fa9e4066Sahrens */ 859fa9e4066Sahrens zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 860fa9e4066Sahrens mutex_enter(&zilog->zl_lock); 861fa9e4066Sahrens lwb->lwb_buf = NULL; 862b24ab676SJeff Bonwick lwb->lwb_tx = NULL; 863b24ab676SJeff Bonwick mutex_exit(&zilog->zl_lock); 864ef0d8e11SNeil Perrin 865ef0d8e11SNeil Perrin /* 866ef0d8e11SNeil Perrin * Now that we've written this log block, we have a stable pointer 867ef0d8e11SNeil Perrin * to the next block in the chain, so it's OK to let the txg in 868b24ab676SJeff Bonwick * which we allocated the next block sync. 869ef0d8e11SNeil Perrin */ 870b24ab676SJeff Bonwick dmu_tx_commit(tx); 871fa9e4066Sahrens } 872fa9e4066Sahrens 873fa9e4066Sahrens /* 874c5c6ffa0Smaybee * Initialize the io for a log block. 875c5c6ffa0Smaybee */ 876c5c6ffa0Smaybee static void 877c5c6ffa0Smaybee zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb) 878c5c6ffa0Smaybee { 8797802d7bfSMatthew Ahrens zbookmark_phys_t zb; 880c5c6ffa0Smaybee 881b24ab676SJeff Bonwick SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET], 882b24ab676SJeff Bonwick ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, 883b24ab676SJeff Bonwick lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]); 884c5c6ffa0Smaybee 885b19a79ecSperrin if (zilog->zl_root_zio == NULL) { 886b19a79ecSperrin zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL, 887b19a79ecSperrin ZIO_FLAG_CANFAIL); 888b19a79ecSperrin } 88967bd71c6Sperrin if (lwb->lwb_zio == NULL) { 890b19a79ecSperrin lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa, 8916e1f5caaSNeil Perrin 0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk), 89269962b56SMatthew Ahrens zil_lwb_write_done, lwb, ZIO_PRIORITY_SYNC_WRITE, 8938f18d1faSGeorge Wilson ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb); 894c5c6ffa0Smaybee } 89567bd71c6Sperrin } 896c5c6ffa0Smaybee 897c5c6ffa0Smaybee /* 8986e1f5caaSNeil Perrin * Define a limited set of intent log block sizes. 899f7170741SWill Andrews * 9006e1f5caaSNeil Perrin * These must be a multiple of 4KB. Note only the amount used (again 9016e1f5caaSNeil Perrin * aligned to 4KB) actually gets written. However, we can't always just 902b5152584SMatthew Ahrens * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted. 9036e1f5caaSNeil Perrin */ 9046e1f5caaSNeil Perrin uint64_t zil_block_buckets[] = { 9056e1f5caaSNeil Perrin 4096, /* non TX_WRITE */ 9066e1f5caaSNeil Perrin 8192+4096, /* data base */ 9076e1f5caaSNeil Perrin 32*1024 + 4096, /* NFS writes */ 9086e1f5caaSNeil Perrin UINT64_MAX 9096e1f5caaSNeil Perrin }; 9106e1f5caaSNeil Perrin 9116e1f5caaSNeil Perrin /* 912d48e086fSNeil Perrin * Use the slog as long as the logbias is 'latency' and the current commit size 913d48e086fSNeil Perrin * is less than the limit or the total list size is less than 2X the limit. 914d48e086fSNeil Perrin * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX. 915d48e086fSNeil Perrin */ 916d48e086fSNeil Perrin uint64_t zil_slog_limit = 1024 * 1024; 917d48e086fSNeil Perrin #define USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \ 918d48e086fSNeil Perrin (((zilog)->zl_cur_used < zil_slog_limit) || \ 919d48e086fSNeil Perrin ((zilog)->zl_itx_list_sz < (zil_slog_limit << 1)))) 920d48e086fSNeil Perrin 921d48e086fSNeil Perrin /* 922fa9e4066Sahrens * Start a log block write and advance to the next log block. 923fa9e4066Sahrens * Calls are serialized. 924fa9e4066Sahrens */ 925fa9e4066Sahrens static lwb_t * 926fa9e4066Sahrens zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb) 927fa9e4066Sahrens { 9286e1f5caaSNeil Perrin lwb_t *nlwb = NULL; 9296e1f5caaSNeil Perrin zil_chain_t *zilc; 930d80c45e0Sbonwick spa_t *spa = zilog->zl_spa; 9316e1f5caaSNeil Perrin blkptr_t *bp; 932b24ab676SJeff Bonwick dmu_tx_t *tx; 933fa9e4066Sahrens uint64_t txg; 934ada693c4SNeil Perrin uint64_t zil_blksz, wsz; 9356e1f5caaSNeil Perrin int i, error; 936fa9e4066Sahrens 9376e1f5caaSNeil Perrin if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 9386e1f5caaSNeil Perrin zilc = (zil_chain_t *)lwb->lwb_buf; 9396e1f5caaSNeil Perrin bp = &zilc->zc_next_blk; 9406e1f5caaSNeil Perrin } else { 9416e1f5caaSNeil Perrin zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz); 9426e1f5caaSNeil Perrin bp = &zilc->zc_next_blk; 9436e1f5caaSNeil Perrin } 9446e1f5caaSNeil Perrin 9456e1f5caaSNeil Perrin ASSERT(lwb->lwb_nused <= lwb->lwb_sz); 946fa9e4066Sahrens 947fa9e4066Sahrens /* 948fa9e4066Sahrens * Allocate the next block and save its address in this block 949fa9e4066Sahrens * before writing it in order to establish the log chain. 950fa9e4066Sahrens * Note that if the allocation of nlwb synced before we wrote 951fa9e4066Sahrens * the block that points at it (lwb), we'd leak it if we crashed. 952b24ab676SJeff Bonwick * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done(). 953b24ab676SJeff Bonwick * We dirty the dataset to ensure that zil_sync() will be called 954b24ab676SJeff Bonwick * to clean up in the event of allocation failure or I/O failure. 955fa9e4066Sahrens */ 956b24ab676SJeff Bonwick tx = dmu_tx_create(zilog->zl_os); 957b24ab676SJeff Bonwick VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0); 958b24ab676SJeff Bonwick dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 959b24ab676SJeff Bonwick txg = dmu_tx_get_txg(tx); 960b24ab676SJeff Bonwick 961b24ab676SJeff Bonwick lwb->lwb_tx = tx; 962fa9e4066Sahrens 963fa9e4066Sahrens /* 9646e1f5caaSNeil Perrin * Log blocks are pre-allocated. Here we select the size of the next 9656e1f5caaSNeil Perrin * block, based on size used in the last block. 9666e1f5caaSNeil Perrin * - first find the smallest bucket that will fit the block from a 9676e1f5caaSNeil Perrin * limited set of block sizes. This is because it's faster to write 9686e1f5caaSNeil Perrin * blocks allocated from the same metaslab as they are adjacent or 9696e1f5caaSNeil Perrin * close. 9706e1f5caaSNeil Perrin * - next find the maximum from the new suggested size and an array of 9716e1f5caaSNeil Perrin * previous sizes. This lessens a picket fence effect of wrongly 9726e1f5caaSNeil Perrin * guesssing the size if we have a stream of say 2k, 64k, 2k, 64k 9736e1f5caaSNeil Perrin * requests. 9746e1f5caaSNeil Perrin * 9756e1f5caaSNeil Perrin * Note we only write what is used, but we can't just allocate 9766e1f5caaSNeil Perrin * the maximum block size because we can exhaust the available 9776e1f5caaSNeil Perrin * pool log space. 978fa9e4066Sahrens */ 9796e1f5caaSNeil Perrin zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t); 9806e1f5caaSNeil Perrin for (i = 0; zil_blksz > zil_block_buckets[i]; i++) 9816e1f5caaSNeil Perrin continue; 9826e1f5caaSNeil Perrin zil_blksz = zil_block_buckets[i]; 9836e1f5caaSNeil Perrin if (zil_blksz == UINT64_MAX) 984b5152584SMatthew Ahrens zil_blksz = SPA_OLD_MAXBLOCKSIZE; 9856e1f5caaSNeil Perrin zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz; 9866e1f5caaSNeil Perrin for (i = 0; i < ZIL_PREV_BLKS; i++) 9876e1f5caaSNeil Perrin zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]); 9886e1f5caaSNeil Perrin zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1); 989fa9e4066Sahrens 99067bd71c6Sperrin BP_ZERO(bp); 99167bd71c6Sperrin /* pass the old blkptr in order to spread log blocks across devs */ 992b24ab676SJeff Bonwick error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz, 993d48e086fSNeil Perrin USE_SLOG(zilog)); 9943b2aab18SMatthew Ahrens if (error == 0) { 995d80c45e0Sbonwick ASSERT3U(bp->blk_birth, ==, txg); 996d80c45e0Sbonwick bp->blk_cksum = lwb->lwb_blk.blk_cksum; 997d80c45e0Sbonwick bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++; 998fa9e4066Sahrens 999fa9e4066Sahrens /* 1000fa9e4066Sahrens * Allocate a new log write buffer (lwb). 1001fa9e4066Sahrens */ 10026e1f5caaSNeil Perrin nlwb = zil_alloc_lwb(zilog, bp, txg); 1003fa9e4066Sahrens 100417f17c2dSbonwick /* Record the block for later vdev flushing */ 100517f17c2dSbonwick zil_add_block(zilog, &lwb->lwb_blk); 10066e1f5caaSNeil Perrin } 10076e1f5caaSNeil Perrin 10086e1f5caaSNeil Perrin if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { 10096e1f5caaSNeil Perrin /* For Slim ZIL only write what is used. */ 1010ada693c4SNeil Perrin wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t); 1011ada693c4SNeil Perrin ASSERT3U(wsz, <=, lwb->lwb_sz); 1012ada693c4SNeil Perrin zio_shrink(lwb->lwb_zio, wsz); 10136e1f5caaSNeil Perrin 1014ada693c4SNeil Perrin } else { 1015ada693c4SNeil Perrin wsz = lwb->lwb_sz; 10166e1f5caaSNeil Perrin } 1017ada693c4SNeil Perrin 10186e1f5caaSNeil Perrin zilc->zc_pad = 0; 10196e1f5caaSNeil Perrin zilc->zc_nused = lwb->lwb_nused; 10206e1f5caaSNeil Perrin zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum; 10216e1f5caaSNeil Perrin 1022ada693c4SNeil Perrin /* 1023ada693c4SNeil Perrin * clear unused data for security 1024ada693c4SNeil Perrin */ 1025ada693c4SNeil Perrin bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused); 1026ada693c4SNeil Perrin 10276e1f5caaSNeil Perrin zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */ 102867bd71c6Sperrin 1029fa9e4066Sahrens /* 10306e1f5caaSNeil Perrin * If there was an allocation failure then nlwb will be null which 10316e1f5caaSNeil Perrin * forces a txg_wait_synced(). 1032fa9e4066Sahrens */ 1033fa9e4066Sahrens return (nlwb); 1034fa9e4066Sahrens } 1035fa9e4066Sahrens 1036fa9e4066Sahrens static lwb_t * 1037fa9e4066Sahrens zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb) 1038fa9e4066Sahrens { 1039fa9e4066Sahrens lr_t *lrc = &itx->itx_lr; /* common log record */ 1040b24ab676SJeff Bonwick lr_write_t *lrw = (lr_write_t *)lrc; 1041b24ab676SJeff Bonwick char *lr_buf; 1042fa9e4066Sahrens uint64_t txg = lrc->lrc_txg; 1043fa9e4066Sahrens uint64_t reclen = lrc->lrc_reclen; 1044b24ab676SJeff Bonwick uint64_t dlen = 0; 1045fa9e4066Sahrens 1046fa9e4066Sahrens if (lwb == NULL) 1047fa9e4066Sahrens return (NULL); 1048b24ab676SJeff Bonwick 1049fa9e4066Sahrens ASSERT(lwb->lwb_buf != NULL); 1050ce636f8bSMatthew Ahrens ASSERT(zilog_is_dirty(zilog) || 1051ce636f8bSMatthew Ahrens spa_freeze_txg(zilog->zl_spa) != UINT64_MAX); 1052fa9e4066Sahrens 1053c5c6ffa0Smaybee if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) 1054c5c6ffa0Smaybee dlen = P2ROUNDUP_TYPED( 1055b24ab676SJeff Bonwick lrw->lr_length, sizeof (uint64_t), uint64_t); 1056fa9e4066Sahrens 1057104e2ed7Sperrin zilog->zl_cur_used += (reclen + dlen); 105822ac5be4Sperrin 105967bd71c6Sperrin zil_lwb_write_init(zilog, lwb); 106067bd71c6Sperrin 1061fa9e4066Sahrens /* 1062fa9e4066Sahrens * If this record won't fit in the current log block, start a new one. 1063fa9e4066Sahrens */ 10646e1f5caaSNeil Perrin if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) { 1065fa9e4066Sahrens lwb = zil_lwb_write_start(zilog, lwb); 1066c5c6ffa0Smaybee if (lwb == NULL) 1067fa9e4066Sahrens return (NULL); 106867bd71c6Sperrin zil_lwb_write_init(zilog, lwb); 10696e1f5caaSNeil Perrin ASSERT(LWB_EMPTY(lwb)); 10706e1f5caaSNeil Perrin if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) { 1071fa9e4066Sahrens txg_wait_synced(zilog->zl_dmu_pool, txg); 1072fa9e4066Sahrens return (lwb); 1073fa9e4066Sahrens } 1074fa9e4066Sahrens } 1075fa9e4066Sahrens 1076b24ab676SJeff Bonwick lr_buf = lwb->lwb_buf + lwb->lwb_nused; 1077b24ab676SJeff Bonwick bcopy(lrc, lr_buf, reclen); 1078b24ab676SJeff Bonwick lrc = (lr_t *)lr_buf; 1079b24ab676SJeff Bonwick lrw = (lr_write_t *)lrc; 1080c5c6ffa0Smaybee 1081c5c6ffa0Smaybee /* 1082c5c6ffa0Smaybee * If it's a write, fetch the data or get its blkptr as appropriate. 1083c5c6ffa0Smaybee */ 1084c5c6ffa0Smaybee if (lrc->lrc_txtype == TX_WRITE) { 1085c5c6ffa0Smaybee if (txg > spa_freeze_txg(zilog->zl_spa)) 1086c5c6ffa0Smaybee txg_wait_synced(zilog->zl_dmu_pool, txg); 1087c5c6ffa0Smaybee if (itx->itx_wr_state != WR_COPIED) { 1088c5c6ffa0Smaybee char *dbuf; 1089c5c6ffa0Smaybee int error; 1090c5c6ffa0Smaybee 1091104e2ed7Sperrin if (dlen) { 1092c5c6ffa0Smaybee ASSERT(itx->itx_wr_state == WR_NEED_COPY); 1093b24ab676SJeff Bonwick dbuf = lr_buf + reclen; 1094b24ab676SJeff Bonwick lrw->lr_common.lrc_reclen += dlen; 1095c5c6ffa0Smaybee } else { 1096c5c6ffa0Smaybee ASSERT(itx->itx_wr_state == WR_INDIRECT); 1097c5c6ffa0Smaybee dbuf = NULL; 1098104e2ed7Sperrin } 1099c5c6ffa0Smaybee error = zilog->zl_get_data( 1100b24ab676SJeff Bonwick itx->itx_private, lrw, dbuf, lwb->lwb_zio); 1101c87b8fc5SMark J Musante if (error == EIO) { 1102c87b8fc5SMark J Musante txg_wait_synced(zilog->zl_dmu_pool, txg); 1103c87b8fc5SMark J Musante return (lwb); 1104c87b8fc5SMark J Musante } 11053b2aab18SMatthew Ahrens if (error != 0) { 1106c5c6ffa0Smaybee ASSERT(error == ENOENT || error == EEXIST || 1107c5c6ffa0Smaybee error == EALREADY); 1108c5c6ffa0Smaybee return (lwb); 1109c5c6ffa0Smaybee } 1110c5c6ffa0Smaybee } 1111c5c6ffa0Smaybee } 1112c5c6ffa0Smaybee 1113b24ab676SJeff Bonwick /* 1114b24ab676SJeff Bonwick * We're actually making an entry, so update lrc_seq to be the 1115b24ab676SJeff Bonwick * log record sequence number. Note that this is generally not 1116b24ab676SJeff Bonwick * equal to the itx sequence number because not all transactions 1117b24ab676SJeff Bonwick * are synchronous, and sometimes spa_sync() gets there first. 1118b24ab676SJeff Bonwick */ 1119b24ab676SJeff Bonwick lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */ 1120c5c6ffa0Smaybee lwb->lwb_nused += reclen + dlen; 1121fa9e4066Sahrens lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg); 11226e1f5caaSNeil Perrin ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz); 1123fb09f5aaSMadhav Suresh ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t))); 1124fa9e4066Sahrens 1125fa9e4066Sahrens return (lwb); 1126fa9e4066Sahrens } 1127fa9e4066Sahrens 1128fa9e4066Sahrens itx_t * 1129da6c28aaSamw zil_itx_create(uint64_t txtype, size_t lrsize) 1130fa9e4066Sahrens { 1131fa9e4066Sahrens itx_t *itx; 1132fa9e4066Sahrens 1133b4d654b0Sperrin lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t); 1134fa9e4066Sahrens 1135fa9e4066Sahrens itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP); 1136fa9e4066Sahrens itx->itx_lr.lrc_txtype = txtype; 1137fa9e4066Sahrens itx->itx_lr.lrc_reclen = lrsize; 1138abf76b6eSperrin itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */ 1139fa9e4066Sahrens itx->itx_lr.lrc_seq = 0; /* defensive */ 11405002558fSNeil Perrin itx->itx_sync = B_TRUE; /* default is synchronous */ 1141fa9e4066Sahrens 1142fa9e4066Sahrens return (itx); 1143fa9e4066Sahrens } 1144fa9e4066Sahrens 1145b24ab676SJeff Bonwick void 1146b24ab676SJeff Bonwick zil_itx_destroy(itx_t *itx) 1147b24ab676SJeff Bonwick { 1148b24ab676SJeff Bonwick kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen); 1149b24ab676SJeff Bonwick } 1150b24ab676SJeff Bonwick 11515002558fSNeil Perrin /* 11525002558fSNeil Perrin * Free up the sync and async itxs. The itxs_t has already been detached 11535002558fSNeil Perrin * so no locks are needed. 11545002558fSNeil Perrin */ 11555002558fSNeil Perrin static void 11565002558fSNeil Perrin zil_itxg_clean(itxs_t *itxs) 1157fa9e4066Sahrens { 11585002558fSNeil Perrin itx_t *itx; 11595002558fSNeil Perrin list_t *list; 11605002558fSNeil Perrin avl_tree_t *t; 11615002558fSNeil Perrin void *cookie; 11625002558fSNeil Perrin itx_async_node_t *ian; 1163fa9e4066Sahrens 11645002558fSNeil Perrin list = &itxs->i_sync_list; 11655002558fSNeil Perrin while ((itx = list_head(list)) != NULL) { 11665002558fSNeil Perrin list_remove(list, itx); 11675002558fSNeil Perrin kmem_free(itx, offsetof(itx_t, itx_lr) + 11685002558fSNeil Perrin itx->itx_lr.lrc_reclen); 11695002558fSNeil Perrin } 1170fa9e4066Sahrens 11715002558fSNeil Perrin cookie = NULL; 11725002558fSNeil Perrin t = &itxs->i_async_tree; 11735002558fSNeil Perrin while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 11745002558fSNeil Perrin list = &ian->ia_list; 11755002558fSNeil Perrin while ((itx = list_head(list)) != NULL) { 11765002558fSNeil Perrin list_remove(list, itx); 11775002558fSNeil Perrin kmem_free(itx, offsetof(itx_t, itx_lr) + 11785002558fSNeil Perrin itx->itx_lr.lrc_reclen); 11795002558fSNeil Perrin } 11805002558fSNeil Perrin list_destroy(list); 11815002558fSNeil Perrin kmem_free(ian, sizeof (itx_async_node_t)); 11825002558fSNeil Perrin } 11835002558fSNeil Perrin avl_destroy(t); 1184fa9e4066Sahrens 11855002558fSNeil Perrin kmem_free(itxs, sizeof (itxs_t)); 11865002558fSNeil Perrin } 11875002558fSNeil Perrin 11885002558fSNeil Perrin static int 11895002558fSNeil Perrin zil_aitx_compare(const void *x1, const void *x2) 11905002558fSNeil Perrin { 11915002558fSNeil Perrin const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid; 11925002558fSNeil Perrin const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid; 11935002558fSNeil Perrin 11945002558fSNeil Perrin if (o1 < o2) 11955002558fSNeil Perrin return (-1); 11965002558fSNeil Perrin if (o1 > o2) 11975002558fSNeil Perrin return (1); 11985002558fSNeil Perrin 11995002558fSNeil Perrin return (0); 1200fa9e4066Sahrens } 1201fa9e4066Sahrens 1202fa9e4066Sahrens /* 12035002558fSNeil Perrin * Remove all async itx with the given oid. 1204fa9e4066Sahrens */ 120591de656bSNeil Perrin static void 12065002558fSNeil Perrin zil_remove_async(zilog_t *zilog, uint64_t oid) 1207fa9e4066Sahrens { 12085002558fSNeil Perrin uint64_t otxg, txg; 12095002558fSNeil Perrin itx_async_node_t *ian; 12105002558fSNeil Perrin avl_tree_t *t; 12115002558fSNeil Perrin avl_index_t where; 1212a584ef65Sjohansen list_t clean_list; 1213fa9e4066Sahrens itx_t *itx; 1214fa9e4066Sahrens 12155002558fSNeil Perrin ASSERT(oid != 0); 1216a584ef65Sjohansen list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); 1217a584ef65Sjohansen 12185002558fSNeil Perrin if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 12195002558fSNeil Perrin otxg = ZILTEST_TXG; 12205002558fSNeil Perrin else 12215002558fSNeil Perrin otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 12225002558fSNeil Perrin 12235002558fSNeil Perrin for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 12245002558fSNeil Perrin itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 12255002558fSNeil Perrin 12265002558fSNeil Perrin mutex_enter(&itxg->itxg_lock); 12275002558fSNeil Perrin if (itxg->itxg_txg != txg) { 12285002558fSNeil Perrin mutex_exit(&itxg->itxg_lock); 12295002558fSNeil Perrin continue; 1230b19a79ecSperrin } 1231a584ef65Sjohansen 1232a584ef65Sjohansen /* 12335002558fSNeil Perrin * Locate the object node and append its list. 1234a584ef65Sjohansen */ 12355002558fSNeil Perrin t = &itxg->itxg_itxs->i_async_tree; 12365002558fSNeil Perrin ian = avl_find(t, &oid, &where); 12375002558fSNeil Perrin if (ian != NULL) 12385002558fSNeil Perrin list_move_tail(&clean_list, &ian->ia_list); 12395002558fSNeil Perrin mutex_exit(&itxg->itxg_lock); 1240fa9e4066Sahrens } 1241a584ef65Sjohansen while ((itx = list_head(&clean_list)) != NULL) { 1242a584ef65Sjohansen list_remove(&clean_list, itx); 12435002558fSNeil Perrin kmem_free(itx, offsetof(itx_t, itx_lr) + 12445002558fSNeil Perrin itx->itx_lr.lrc_reclen); 1245a584ef65Sjohansen } 1246a584ef65Sjohansen list_destroy(&clean_list); 1247fa9e4066Sahrens } 1248fa9e4066Sahrens 12495002558fSNeil Perrin void 12505002558fSNeil Perrin zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) 12515002558fSNeil Perrin { 12525002558fSNeil Perrin uint64_t txg; 12535002558fSNeil Perrin itxg_t *itxg; 12545002558fSNeil Perrin itxs_t *itxs, *clean = NULL; 12555002558fSNeil Perrin 12565002558fSNeil Perrin /* 125791de656bSNeil Perrin * Object ids can be re-instantiated in the next txg so 12585002558fSNeil Perrin * remove any async transactions to avoid future leaks. 12595002558fSNeil Perrin * This can happen if a fsync occurs on the re-instantiated 12605002558fSNeil Perrin * object for a WR_INDIRECT or WR_NEED_COPY write, which gets 12615002558fSNeil Perrin * the new file data and flushes a write record for the old object. 12625002558fSNeil Perrin */ 12635002558fSNeil Perrin if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE) 126451bd2f97SNeil Perrin zil_remove_async(zilog, itx->itx_oid); 12655002558fSNeil Perrin 126691de656bSNeil Perrin /* 126791de656bSNeil Perrin * Ensure the data of a renamed file is committed before the rename. 126891de656bSNeil Perrin */ 126991de656bSNeil Perrin if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME) 127091de656bSNeil Perrin zil_async_to_sync(zilog, itx->itx_oid); 127191de656bSNeil Perrin 12725002558fSNeil Perrin if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) 12735002558fSNeil Perrin txg = ZILTEST_TXG; 12745002558fSNeil Perrin else 12755002558fSNeil Perrin txg = dmu_tx_get_txg(tx); 12765002558fSNeil Perrin 12775002558fSNeil Perrin itxg = &zilog->zl_itxg[txg & TXG_MASK]; 12785002558fSNeil Perrin mutex_enter(&itxg->itxg_lock); 12795002558fSNeil Perrin itxs = itxg->itxg_itxs; 12805002558fSNeil Perrin if (itxg->itxg_txg != txg) { 12815002558fSNeil Perrin if (itxs != NULL) { 12825002558fSNeil Perrin /* 12835002558fSNeil Perrin * The zil_clean callback hasn't got around to cleaning 12845002558fSNeil Perrin * this itxg. Save the itxs for release below. 12855002558fSNeil Perrin * This should be rare. 12865002558fSNeil Perrin */ 12875002558fSNeil Perrin atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod); 12885002558fSNeil Perrin itxg->itxg_sod = 0; 12895002558fSNeil Perrin clean = itxg->itxg_itxs; 12905002558fSNeil Perrin } 12915002558fSNeil Perrin ASSERT(itxg->itxg_sod == 0); 12925002558fSNeil Perrin itxg->itxg_txg = txg; 12935002558fSNeil Perrin itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP); 12945002558fSNeil Perrin 12955002558fSNeil Perrin list_create(&itxs->i_sync_list, sizeof (itx_t), 12965002558fSNeil Perrin offsetof(itx_t, itx_node)); 12975002558fSNeil Perrin avl_create(&itxs->i_async_tree, zil_aitx_compare, 12985002558fSNeil Perrin sizeof (itx_async_node_t), 12995002558fSNeil Perrin offsetof(itx_async_node_t, ia_node)); 13005002558fSNeil Perrin } 13015002558fSNeil Perrin if (itx->itx_sync) { 13025002558fSNeil Perrin list_insert_tail(&itxs->i_sync_list, itx); 13035002558fSNeil Perrin atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod); 13045002558fSNeil Perrin itxg->itxg_sod += itx->itx_sod; 13055002558fSNeil Perrin } else { 13065002558fSNeil Perrin avl_tree_t *t = &itxs->i_async_tree; 13075002558fSNeil Perrin uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid; 13085002558fSNeil Perrin itx_async_node_t *ian; 13095002558fSNeil Perrin avl_index_t where; 13105002558fSNeil Perrin 13115002558fSNeil Perrin ian = avl_find(t, &foid, &where); 13125002558fSNeil Perrin if (ian == NULL) { 13135002558fSNeil Perrin ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP); 13145002558fSNeil Perrin list_create(&ian->ia_list, sizeof (itx_t), 13155002558fSNeil Perrin offsetof(itx_t, itx_node)); 13165002558fSNeil Perrin ian->ia_foid = foid; 13175002558fSNeil Perrin avl_insert(t, ian, where); 13185002558fSNeil Perrin } 13195002558fSNeil Perrin list_insert_tail(&ian->ia_list, itx); 13205002558fSNeil Perrin } 13215002558fSNeil Perrin 13225002558fSNeil Perrin itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx); 1323ce636f8bSMatthew Ahrens zilog_dirty(zilog, txg); 13245002558fSNeil Perrin mutex_exit(&itxg->itxg_lock); 13255002558fSNeil Perrin 13265002558fSNeil Perrin /* Release the old itxs now we've dropped the lock */ 13275002558fSNeil Perrin if (clean != NULL) 13285002558fSNeil Perrin zil_itxg_clean(clean); 13295002558fSNeil Perrin } 13305002558fSNeil Perrin 1331b19a79ecSperrin /* 133267bd71c6Sperrin * If there are any in-memory intent log transactions which have now been 1333ce636f8bSMatthew Ahrens * synced then start up a taskq to free them. We should only do this after we 1334ce636f8bSMatthew Ahrens * have written out the uberblocks (i.e. txg has been comitted) so that 1335ce636f8bSMatthew Ahrens * don't inadvertently clean out in-memory log records that would be required 1336ce636f8bSMatthew Ahrens * by zil_commit(). 1337b19a79ecSperrin */ 1338fa9e4066Sahrens void 13395002558fSNeil Perrin zil_clean(zilog_t *zilog, uint64_t synced_txg) 1340fa9e4066Sahrens { 13415002558fSNeil Perrin itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK]; 13425002558fSNeil Perrin itxs_t *clean_me; 134367bd71c6Sperrin 13445002558fSNeil Perrin mutex_enter(&itxg->itxg_lock); 13455002558fSNeil Perrin if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) { 13465002558fSNeil Perrin mutex_exit(&itxg->itxg_lock); 13475002558fSNeil Perrin return; 134867bd71c6Sperrin } 13495002558fSNeil Perrin ASSERT3U(itxg->itxg_txg, <=, synced_txg); 13505002558fSNeil Perrin ASSERT(itxg->itxg_txg != 0); 13515002558fSNeil Perrin ASSERT(zilog->zl_clean_taskq != NULL); 13525002558fSNeil Perrin atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod); 13535002558fSNeil Perrin itxg->itxg_sod = 0; 13545002558fSNeil Perrin clean_me = itxg->itxg_itxs; 13555002558fSNeil Perrin itxg->itxg_itxs = NULL; 13565002558fSNeil Perrin itxg->itxg_txg = 0; 13575002558fSNeil Perrin mutex_exit(&itxg->itxg_lock); 13585002558fSNeil Perrin /* 13595002558fSNeil Perrin * Preferably start a task queue to free up the old itxs but 13605002558fSNeil Perrin * if taskq_dispatch can't allocate resources to do that then 13615002558fSNeil Perrin * free it in-line. This should be rare. Note, using TQ_SLEEP 13625002558fSNeil Perrin * created a bad performance problem. 13635002558fSNeil Perrin */ 13645002558fSNeil Perrin if (taskq_dispatch(zilog->zl_clean_taskq, 13655002558fSNeil Perrin (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == NULL) 13665002558fSNeil Perrin zil_itxg_clean(clean_me); 13675002558fSNeil Perrin } 13685002558fSNeil Perrin 13695002558fSNeil Perrin /* 13705002558fSNeil Perrin * Get the list of itxs to commit into zl_itx_commit_list. 13715002558fSNeil Perrin */ 137291de656bSNeil Perrin static void 13735002558fSNeil Perrin zil_get_commit_list(zilog_t *zilog) 13745002558fSNeil Perrin { 13755002558fSNeil Perrin uint64_t otxg, txg; 13765002558fSNeil Perrin list_t *commit_list = &zilog->zl_itx_commit_list; 13775002558fSNeil Perrin uint64_t push_sod = 0; 13785002558fSNeil Perrin 13795002558fSNeil Perrin if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 13805002558fSNeil Perrin otxg = ZILTEST_TXG; 13815002558fSNeil Perrin else 13825002558fSNeil Perrin otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 13835002558fSNeil Perrin 13845002558fSNeil Perrin for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 13855002558fSNeil Perrin itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 13865002558fSNeil Perrin 13875002558fSNeil Perrin mutex_enter(&itxg->itxg_lock); 13885002558fSNeil Perrin if (itxg->itxg_txg != txg) { 13895002558fSNeil Perrin mutex_exit(&itxg->itxg_lock); 13905002558fSNeil Perrin continue; 13915002558fSNeil Perrin } 13925002558fSNeil Perrin 13935002558fSNeil Perrin list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list); 13945002558fSNeil Perrin push_sod += itxg->itxg_sod; 13955002558fSNeil Perrin itxg->itxg_sod = 0; 13965002558fSNeil Perrin 13975002558fSNeil Perrin mutex_exit(&itxg->itxg_lock); 13985002558fSNeil Perrin } 13995002558fSNeil Perrin atomic_add_64(&zilog->zl_itx_list_sz, -push_sod); 14005002558fSNeil Perrin } 14015002558fSNeil Perrin 14025002558fSNeil Perrin /* 14035002558fSNeil Perrin * Move the async itxs for a specified object to commit into sync lists. 14045002558fSNeil Perrin */ 140591de656bSNeil Perrin static void 14065002558fSNeil Perrin zil_async_to_sync(zilog_t *zilog, uint64_t foid) 14075002558fSNeil Perrin { 14085002558fSNeil Perrin uint64_t otxg, txg; 14095002558fSNeil Perrin itx_async_node_t *ian; 14105002558fSNeil Perrin avl_tree_t *t; 14115002558fSNeil Perrin avl_index_t where; 14125002558fSNeil Perrin 14135002558fSNeil Perrin if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ 14145002558fSNeil Perrin otxg = ZILTEST_TXG; 14155002558fSNeil Perrin else 14165002558fSNeil Perrin otxg = spa_last_synced_txg(zilog->zl_spa) + 1; 14175002558fSNeil Perrin 14185002558fSNeil Perrin for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { 14195002558fSNeil Perrin itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; 14205002558fSNeil Perrin 14215002558fSNeil Perrin mutex_enter(&itxg->itxg_lock); 14225002558fSNeil Perrin if (itxg->itxg_txg != txg) { 14235002558fSNeil Perrin mutex_exit(&itxg->itxg_lock); 14245002558fSNeil Perrin continue; 14255002558fSNeil Perrin } 14265002558fSNeil Perrin 14275002558fSNeil Perrin /* 14285002558fSNeil Perrin * If a foid is specified then find that node and append its 14295002558fSNeil Perrin * list. Otherwise walk the tree appending all the lists 14305002558fSNeil Perrin * to the sync list. We add to the end rather than the 14315002558fSNeil Perrin * beginning to ensure the create has happened. 14325002558fSNeil Perrin */ 14335002558fSNeil Perrin t = &itxg->itxg_itxs->i_async_tree; 14345002558fSNeil Perrin if (foid != 0) { 14355002558fSNeil Perrin ian = avl_find(t, &foid, &where); 14365002558fSNeil Perrin if (ian != NULL) { 14375002558fSNeil Perrin list_move_tail(&itxg->itxg_itxs->i_sync_list, 14385002558fSNeil Perrin &ian->ia_list); 14395002558fSNeil Perrin } 14405002558fSNeil Perrin } else { 14415002558fSNeil Perrin void *cookie = NULL; 14425002558fSNeil Perrin 14435002558fSNeil Perrin while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { 14445002558fSNeil Perrin list_move_tail(&itxg->itxg_itxs->i_sync_list, 14455002558fSNeil Perrin &ian->ia_list); 14465002558fSNeil Perrin list_destroy(&ian->ia_list); 14475002558fSNeil Perrin kmem_free(ian, sizeof (itx_async_node_t)); 14485002558fSNeil Perrin } 14495002558fSNeil Perrin } 14505002558fSNeil Perrin mutex_exit(&itxg->itxg_lock); 14515002558fSNeil Perrin } 1452fa9e4066Sahrens } 1453fa9e4066Sahrens 1454e14bb325SJeff Bonwick static void 14555002558fSNeil Perrin zil_commit_writer(zilog_t *zilog) 1456fa9e4066Sahrens { 1457fa9e4066Sahrens uint64_t txg; 14585002558fSNeil Perrin itx_t *itx; 1459fa9e4066Sahrens lwb_t *lwb; 14605002558fSNeil Perrin spa_t *spa = zilog->zl_spa; 1461b24ab676SJeff Bonwick int error = 0; 1462fa9e4066Sahrens 1463e14bb325SJeff Bonwick ASSERT(zilog->zl_root_zio == NULL); 14645002558fSNeil Perrin 14655002558fSNeil Perrin mutex_exit(&zilog->zl_lock); 14665002558fSNeil Perrin 14675002558fSNeil Perrin zil_get_commit_list(zilog); 14685002558fSNeil Perrin 14695002558fSNeil Perrin /* 14705002558fSNeil Perrin * Return if there's nothing to commit before we dirty the fs by 14715002558fSNeil Perrin * calling zil_create(). 14725002558fSNeil Perrin */ 14735002558fSNeil Perrin if (list_head(&zilog->zl_itx_commit_list) == NULL) { 14745002558fSNeil Perrin mutex_enter(&zilog->zl_lock); 14755002558fSNeil Perrin return; 14765002558fSNeil Perrin } 1477fa9e4066Sahrens 1478fa9e4066Sahrens if (zilog->zl_suspend) { 1479fa9e4066Sahrens lwb = NULL; 1480fa9e4066Sahrens } else { 1481fa9e4066Sahrens lwb = list_tail(&zilog->zl_lwb_list); 14825002558fSNeil Perrin if (lwb == NULL) 14836e1f5caaSNeil Perrin lwb = zil_create(zilog); 1484fa9e4066Sahrens } 1485fa9e4066Sahrens 1486b19a79ecSperrin DTRACE_PROBE1(zil__cw1, zilog_t *, zilog); 14875002558fSNeil Perrin while (itx = list_head(&zilog->zl_itx_commit_list)) { 1488fa9e4066Sahrens txg = itx->itx_lr.lrc_txg; 1489fa9e4066Sahrens ASSERT(txg); 1490fa9e4066Sahrens 14915002558fSNeil Perrin if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa)) 1492fa9e4066Sahrens lwb = zil_lwb_commit(zilog, itx, lwb); 14935002558fSNeil Perrin list_remove(&zilog->zl_itx_commit_list, itx); 14945002558fSNeil Perrin kmem_free(itx, offsetof(itx_t, itx_lr) 14955002558fSNeil Perrin + itx->itx_lr.lrc_reclen); 1496fa9e4066Sahrens } 1497b19a79ecSperrin DTRACE_PROBE1(zil__cw2, zilog_t *, zilog); 1498fa9e4066Sahrens 1499fa9e4066Sahrens /* write the last block out */ 150067bd71c6Sperrin if (lwb != NULL && lwb->lwb_zio != NULL) 1501fa9e4066Sahrens lwb = zil_lwb_write_start(zilog, lwb); 1502fa9e4066Sahrens 150322ac5be4Sperrin zilog->zl_cur_used = 0; 1504fa9e4066Sahrens 1505fa9e4066Sahrens /* 1506b19a79ecSperrin * Wait if necessary for the log blocks to be on stable storage. 1507fa9e4066Sahrens */ 1508b19a79ecSperrin if (zilog->zl_root_zio) { 1509b24ab676SJeff Bonwick error = zio_wait(zilog->zl_root_zio); 1510e14bb325SJeff Bonwick zilog->zl_root_zio = NULL; 1511b19a79ecSperrin zil_flush_vdevs(zilog); 1512fa9e4066Sahrens } 151322ac5be4Sperrin 1514b24ab676SJeff Bonwick if (error || lwb == NULL) 1515fa9e4066Sahrens txg_wait_synced(zilog->zl_dmu_pool, 0); 151667bd71c6Sperrin 151767bd71c6Sperrin mutex_enter(&zilog->zl_lock); 1518b24ab676SJeff Bonwick 1519b24ab676SJeff Bonwick /* 1520b24ab676SJeff Bonwick * Remember the highest committed log sequence number for ztest. 1521b24ab676SJeff Bonwick * We only update this value when all the log writes succeeded, 1522b24ab676SJeff Bonwick * because ztest wants to ASSERT that it got the whole log chain. 1523b24ab676SJeff Bonwick */ 1524b24ab676SJeff Bonwick if (error == 0 && lwb != NULL) 1525b24ab676SJeff Bonwick zilog->zl_commit_lr_seq = zilog->zl_lr_seq; 1526b19a79ecSperrin } 1527b19a79ecSperrin 1528b19a79ecSperrin /* 15295002558fSNeil Perrin * Commit zfs transactions to stable storage. 1530b19a79ecSperrin * If foid is 0 push out all transactions, otherwise push only those 15315002558fSNeil Perrin * for that object or might reference that object. 15325002558fSNeil Perrin * 15335002558fSNeil Perrin * itxs are committed in batches. In a heavily stressed zil there will be 15345002558fSNeil Perrin * a commit writer thread who is writing out a bunch of itxs to the log 15355002558fSNeil Perrin * for a set of committing threads (cthreads) in the same batch as the writer. 15365002558fSNeil Perrin * Those cthreads are all waiting on the same cv for that batch. 15375002558fSNeil Perrin * 15385002558fSNeil Perrin * There will also be a different and growing batch of threads that are 15395002558fSNeil Perrin * waiting to commit (qthreads). When the committing batch completes 15405002558fSNeil Perrin * a transition occurs such that the cthreads exit and the qthreads become 15415002558fSNeil Perrin * cthreads. One of the new cthreads becomes the writer thread for the 15425002558fSNeil Perrin * batch. Any new threads arriving become new qthreads. 15435002558fSNeil Perrin * 15445002558fSNeil Perrin * Only 2 condition variables are needed and there's no transition 15455002558fSNeil Perrin * between the two cvs needed. They just flip-flop between qthreads 15465002558fSNeil Perrin * and cthreads. 15475002558fSNeil Perrin * 15485002558fSNeil Perrin * Using this scheme we can efficiently wakeup up only those threads 15495002558fSNeil Perrin * that have been committed. 1550b19a79ecSperrin */ 1551b19a79ecSperrin void 15525002558fSNeil Perrin zil_commit(zilog_t *zilog, uint64_t foid) 1553b19a79ecSperrin { 15545002558fSNeil Perrin uint64_t mybatch; 15555002558fSNeil Perrin 15565002558fSNeil Perrin if (zilog->zl_sync == ZFS_SYNC_DISABLED) 1557b19a79ecSperrin return; 1558b19a79ecSperrin 15595002558fSNeil Perrin /* move the async itxs for the foid to the sync queues */ 15605002558fSNeil Perrin zil_async_to_sync(zilog, foid); 15615002558fSNeil Perrin 1562b19a79ecSperrin mutex_enter(&zilog->zl_lock); 15635002558fSNeil Perrin mybatch = zilog->zl_next_batch; 156467bd71c6Sperrin while (zilog->zl_writer) { 15655002558fSNeil Perrin cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock); 15665002558fSNeil Perrin if (mybatch <= zilog->zl_com_batch) { 156767bd71c6Sperrin mutex_exit(&zilog->zl_lock); 156867bd71c6Sperrin return; 156967bd71c6Sperrin } 157067bd71c6Sperrin } 15715002558fSNeil Perrin 15725002558fSNeil Perrin zilog->zl_next_batch++; 15735002558fSNeil Perrin zilog->zl_writer = B_TRUE; 15745002558fSNeil Perrin zil_commit_writer(zilog); 15755002558fSNeil Perrin zilog->zl_com_batch = mybatch; 15765002558fSNeil Perrin zilog->zl_writer = B_FALSE; 157767bd71c6Sperrin mutex_exit(&zilog->zl_lock); 1578fa9e4066Sahrens 15795002558fSNeil Perrin /* wake up one thread to become the next writer */ 15805002558fSNeil Perrin cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]); 1581b24ab676SJeff Bonwick 15825002558fSNeil Perrin /* wake up all threads waiting for this batch to be committed */ 15835002558fSNeil Perrin cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]); 1584b24ab676SJeff Bonwick } 1585b24ab676SJeff Bonwick 1586b24ab676SJeff Bonwick /* 1587fa9e4066Sahrens * Called in syncing context to free committed log blocks and update log header. 1588fa9e4066Sahrens */ 1589fa9e4066Sahrens void 1590fa9e4066Sahrens zil_sync(zilog_t *zilog, dmu_tx_t *tx) 1591fa9e4066Sahrens { 1592d80c45e0Sbonwick zil_header_t *zh = zil_header_in_syncing_context(zilog); 1593fa9e4066Sahrens uint64_t txg = dmu_tx_get_txg(tx); 1594fa9e4066Sahrens spa_t *spa = zilog->zl_spa; 1595b24ab676SJeff Bonwick uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK]; 1596fa9e4066Sahrens lwb_t *lwb; 1597fa9e4066Sahrens 159814843421SMatthew Ahrens /* 159914843421SMatthew Ahrens * We don't zero out zl_destroy_txg, so make sure we don't try 160014843421SMatthew Ahrens * to destroy it twice. 160114843421SMatthew Ahrens */ 160214843421SMatthew Ahrens if (spa_sync_pass(spa) != 1) 160314843421SMatthew Ahrens return; 160414843421SMatthew Ahrens 1605d80c45e0Sbonwick mutex_enter(&zilog->zl_lock); 1606d80c45e0Sbonwick 1607fa9e4066Sahrens ASSERT(zilog->zl_stop_sync == 0); 1608fa9e4066Sahrens 1609b24ab676SJeff Bonwick if (*replayed_seq != 0) { 1610b24ab676SJeff Bonwick ASSERT(zh->zh_replay_seq < *replayed_seq); 1611b24ab676SJeff Bonwick zh->zh_replay_seq = *replayed_seq; 1612b24ab676SJeff Bonwick *replayed_seq = 0; 1613b24ab676SJeff Bonwick } 1614fa9e4066Sahrens 1615fa9e4066Sahrens if (zilog->zl_destroy_txg == txg) { 1616d80c45e0Sbonwick blkptr_t blk = zh->zh_log; 1617d80c45e0Sbonwick 1618d80c45e0Sbonwick ASSERT(list_head(&zilog->zl_lwb_list) == NULL); 1619d80c45e0Sbonwick 1620d80c45e0Sbonwick bzero(zh, sizeof (zil_header_t)); 16211209a471SNeil Perrin bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq)); 1622d80c45e0Sbonwick 1623d80c45e0Sbonwick if (zilog->zl_keep_first) { 1624d80c45e0Sbonwick /* 1625d80c45e0Sbonwick * If this block was part of log chain that couldn't 1626d80c45e0Sbonwick * be claimed because a device was missing during 1627d80c45e0Sbonwick * zil_claim(), but that device later returns, 1628d80c45e0Sbonwick * then this block could erroneously appear valid. 1629d80c45e0Sbonwick * To guard against this, assign a new GUID to the new 1630d80c45e0Sbonwick * log chain so it doesn't matter what blk points to. 1631d80c45e0Sbonwick */ 1632d80c45e0Sbonwick zil_init_log_chain(zilog, &blk); 1633d80c45e0Sbonwick zh->zh_log = blk; 1634d80c45e0Sbonwick } 1635fa9e4066Sahrens } 1636fa9e4066Sahrens 1637e6ca193dSGeorge Wilson while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { 1638b19a79ecSperrin zh->zh_log = lwb->lwb_blk; 1639fa9e4066Sahrens if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg) 1640fa9e4066Sahrens break; 1641fa9e4066Sahrens list_remove(&zilog->zl_lwb_list, lwb); 1642b24ab676SJeff Bonwick zio_free_zil(spa, txg, &lwb->lwb_blk); 1643fa9e4066Sahrens kmem_cache_free(zil_lwb_cache, lwb); 1644d63d470bSgw25295 1645d63d470bSgw25295 /* 1646d63d470bSgw25295 * If we don't have anything left in the lwb list then 1647d63d470bSgw25295 * we've had an allocation failure and we need to zero 1648d63d470bSgw25295 * out the zil_header blkptr so that we don't end 1649d63d470bSgw25295 * up freeing the same block twice. 1650d63d470bSgw25295 */ 1651d63d470bSgw25295 if (list_head(&zilog->zl_lwb_list) == NULL) 1652d63d470bSgw25295 BP_ZERO(&zh->zh_log); 1653fa9e4066Sahrens } 1654fa9e4066Sahrens mutex_exit(&zilog->zl_lock); 1655fa9e4066Sahrens } 1656fa9e4066Sahrens 1657fa9e4066Sahrens void 1658fa9e4066Sahrens zil_init(void) 1659fa9e4066Sahrens { 1660fa9e4066Sahrens zil_lwb_cache = kmem_cache_create("zil_lwb_cache", 16615ad82045Snd150628 sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0); 1662fa9e4066Sahrens } 1663fa9e4066Sahrens 1664fa9e4066Sahrens void 1665fa9e4066Sahrens zil_fini(void) 1666fa9e4066Sahrens { 1667fa9e4066Sahrens kmem_cache_destroy(zil_lwb_cache); 1668fa9e4066Sahrens } 1669fa9e4066Sahrens 1670e09fa4daSNeil Perrin void 167155da60b9SMark J Musante zil_set_sync(zilog_t *zilog, uint64_t sync) 167255da60b9SMark J Musante { 167355da60b9SMark J Musante zilog->zl_sync = sync; 167455da60b9SMark J Musante } 167555da60b9SMark J Musante 167655da60b9SMark J Musante void 1677e09fa4daSNeil Perrin zil_set_logbias(zilog_t *zilog, uint64_t logbias) 1678e09fa4daSNeil Perrin { 1679e09fa4daSNeil Perrin zilog->zl_logbias = logbias; 1680e09fa4daSNeil Perrin } 1681e09fa4daSNeil Perrin 1682fa9e4066Sahrens zilog_t * 1683fa9e4066Sahrens zil_alloc(objset_t *os, zil_header_t *zh_phys) 1684fa9e4066Sahrens { 1685fa9e4066Sahrens zilog_t *zilog; 1686fa9e4066Sahrens 1687fa9e4066Sahrens zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); 1688fa9e4066Sahrens 1689fa9e4066Sahrens zilog->zl_header = zh_phys; 1690fa9e4066Sahrens zilog->zl_os = os; 1691fa9e4066Sahrens zilog->zl_spa = dmu_objset_spa(os); 1692fa9e4066Sahrens zilog->zl_dmu_pool = dmu_objset_pool(os); 1693d80c45e0Sbonwick zilog->zl_destroy_txg = TXG_INITIAL - 1; 1694e09fa4daSNeil Perrin zilog->zl_logbias = dmu_objset_logbias(os); 169555da60b9SMark J Musante zilog->zl_sync = dmu_objset_syncprop(os); 16965002558fSNeil Perrin zilog->zl_next_batch = 1; 1697fa9e4066Sahrens 16985ad82045Snd150628 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); 16995ad82045Snd150628 17005002558fSNeil Perrin for (int i = 0; i < TXG_SIZE; i++) { 17015002558fSNeil Perrin mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, 17025002558fSNeil Perrin MUTEX_DEFAULT, NULL); 17035002558fSNeil Perrin } 1704fa9e4066Sahrens 1705fa9e4066Sahrens list_create(&zilog->zl_lwb_list, sizeof (lwb_t), 1706fa9e4066Sahrens offsetof(lwb_t, lwb_node)); 1707fa9e4066Sahrens 17085002558fSNeil Perrin list_create(&zilog->zl_itx_commit_list, sizeof (itx_t), 17095002558fSNeil Perrin offsetof(itx_t, itx_node)); 17105002558fSNeil Perrin 171117f17c2dSbonwick mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 171217f17c2dSbonwick 171317f17c2dSbonwick avl_create(&zilog->zl_vdev_tree, zil_vdev_compare, 171417f17c2dSbonwick sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); 1715fa9e4066Sahrens 1716b7b97454Sperrin cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL); 1717b7b97454Sperrin cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); 17185002558fSNeil Perrin cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL); 17195002558fSNeil Perrin cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL); 1720b7b97454Sperrin 1721fa9e4066Sahrens return (zilog); 1722fa9e4066Sahrens } 1723fa9e4066Sahrens 1724fa9e4066Sahrens void 1725fa9e4066Sahrens zil_free(zilog_t *zilog) 1726fa9e4066Sahrens { 1727fa9e4066Sahrens zilog->zl_stop_sync = 1; 1728fa9e4066Sahrens 17293b2aab18SMatthew Ahrens ASSERT0(zilog->zl_suspend); 17303b2aab18SMatthew Ahrens ASSERT0(zilog->zl_suspending); 17313b2aab18SMatthew Ahrens 1732c9ba2a43SEric Schrock ASSERT(list_is_empty(&zilog->zl_lwb_list)); 1733fa9e4066Sahrens list_destroy(&zilog->zl_lwb_list); 1734fa9e4066Sahrens 173517f17c2dSbonwick avl_destroy(&zilog->zl_vdev_tree); 173617f17c2dSbonwick mutex_destroy(&zilog->zl_vdev_lock); 1737fa9e4066Sahrens 17385002558fSNeil Perrin ASSERT(list_is_empty(&zilog->zl_itx_commit_list)); 17395002558fSNeil Perrin list_destroy(&zilog->zl_itx_commit_list); 17405002558fSNeil Perrin 17415002558fSNeil Perrin for (int i = 0; i < TXG_SIZE; i++) { 17425002558fSNeil Perrin /* 17435002558fSNeil Perrin * It's possible for an itx to be generated that doesn't dirty 17445002558fSNeil Perrin * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean() 17455002558fSNeil Perrin * callback to remove the entry. We remove those here. 17465002558fSNeil Perrin * 17475002558fSNeil Perrin * Also free up the ziltest itxs. 17485002558fSNeil Perrin */ 17495002558fSNeil Perrin if (zilog->zl_itxg[i].itxg_itxs) 17505002558fSNeil Perrin zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs); 17515002558fSNeil Perrin mutex_destroy(&zilog->zl_itxg[i].itxg_lock); 17525002558fSNeil Perrin } 17535002558fSNeil Perrin 17545ad82045Snd150628 mutex_destroy(&zilog->zl_lock); 1755fa9e4066Sahrens 1756b7b97454Sperrin cv_destroy(&zilog->zl_cv_writer); 1757b7b97454Sperrin cv_destroy(&zilog->zl_cv_suspend); 17585002558fSNeil Perrin cv_destroy(&zilog->zl_cv_batch[0]); 17595002558fSNeil Perrin cv_destroy(&zilog->zl_cv_batch[1]); 1760b7b97454Sperrin 1761fa9e4066Sahrens kmem_free(zilog, sizeof (zilog_t)); 1762fa9e4066Sahrens } 1763fa9e4066Sahrens 1764fa9e4066Sahrens /* 1765fa9e4066Sahrens * Open an intent log. 1766fa9e4066Sahrens */ 1767fa9e4066Sahrens zilog_t * 1768fa9e4066Sahrens zil_open(objset_t *os, zil_get_data_t *get_data) 1769fa9e4066Sahrens { 1770fa9e4066Sahrens zilog_t *zilog = dmu_objset_zil(os); 1771fa9e4066Sahrens 1772c9ba2a43SEric Schrock ASSERT(zilog->zl_clean_taskq == NULL); 1773c9ba2a43SEric Schrock ASSERT(zilog->zl_get_data == NULL); 1774c9ba2a43SEric Schrock ASSERT(list_is_empty(&zilog->zl_lwb_list)); 1775c9ba2a43SEric Schrock 1776fa9e4066Sahrens zilog->zl_get_data = get_data; 1777fa9e4066Sahrens zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri, 1778fa9e4066Sahrens 2, 2, TASKQ_PREPOPULATE); 1779fa9e4066Sahrens 1780fa9e4066Sahrens return (zilog); 1781fa9e4066Sahrens } 1782fa9e4066Sahrens 1783fa9e4066Sahrens /* 1784fa9e4066Sahrens * Close an intent log. 1785fa9e4066Sahrens */ 1786fa9e4066Sahrens void 1787fa9e4066Sahrens zil_close(zilog_t *zilog) 1788fa9e4066Sahrens { 1789c9ba2a43SEric Schrock lwb_t *lwb; 17905002558fSNeil Perrin uint64_t txg = 0; 17915002558fSNeil Perrin 17925002558fSNeil Perrin zil_commit(zilog, 0); /* commit all itx */ 17935002558fSNeil Perrin 1794d80c45e0Sbonwick /* 17955002558fSNeil Perrin * The lwb_max_txg for the stubby lwb will reflect the last activity 17965002558fSNeil Perrin * for the zil. After a txg_wait_synced() on the txg we know all the 17975002558fSNeil Perrin * callbacks have occurred that may clean the zil. Only then can we 17985002558fSNeil Perrin * destroy the zl_clean_taskq. 1799d80c45e0Sbonwick */ 18005002558fSNeil Perrin mutex_enter(&zilog->zl_lock); 1801c9ba2a43SEric Schrock lwb = list_tail(&zilog->zl_lwb_list); 1802c9ba2a43SEric Schrock if (lwb != NULL) 1803c9ba2a43SEric Schrock txg = lwb->lwb_max_txg; 18045002558fSNeil Perrin mutex_exit(&zilog->zl_lock); 18055002558fSNeil Perrin if (txg) 1806d80c45e0Sbonwick txg_wait_synced(zilog->zl_dmu_pool, txg); 1807ce636f8bSMatthew Ahrens ASSERT(!zilog_is_dirty(zilog)); 1808d80c45e0Sbonwick 1809fa9e4066Sahrens taskq_destroy(zilog->zl_clean_taskq); 1810fa9e4066Sahrens zilog->zl_clean_taskq = NULL; 1811fa9e4066Sahrens zilog->zl_get_data = NULL; 1812c9ba2a43SEric Schrock 1813c9ba2a43SEric Schrock /* 1814c9ba2a43SEric Schrock * We should have only one LWB left on the list; remove it now. 1815c9ba2a43SEric Schrock */ 1816c9ba2a43SEric Schrock mutex_enter(&zilog->zl_lock); 1817c9ba2a43SEric Schrock lwb = list_head(&zilog->zl_lwb_list); 1818c9ba2a43SEric Schrock if (lwb != NULL) { 1819c9ba2a43SEric Schrock ASSERT(lwb == list_tail(&zilog->zl_lwb_list)); 1820c9ba2a43SEric Schrock list_remove(&zilog->zl_lwb_list, lwb); 1821c9ba2a43SEric Schrock zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); 1822c9ba2a43SEric Schrock kmem_cache_free(zil_lwb_cache, lwb); 1823c9ba2a43SEric Schrock } 1824c9ba2a43SEric Schrock mutex_exit(&zilog->zl_lock); 1825fa9e4066Sahrens } 1826fa9e4066Sahrens 18273b2aab18SMatthew Ahrens static char *suspend_tag = "zil suspending"; 18283b2aab18SMatthew Ahrens 1829fa9e4066Sahrens /* 1830fa9e4066Sahrens * Suspend an intent log. While in suspended mode, we still honor 1831fa9e4066Sahrens * synchronous semantics, but we rely on txg_wait_synced() to do it. 18323b2aab18SMatthew Ahrens * On old version pools, we suspend the log briefly when taking a 18333b2aab18SMatthew Ahrens * snapshot so that it will have an empty intent log. 18343b2aab18SMatthew Ahrens * 18353b2aab18SMatthew Ahrens * Long holds are not really intended to be used the way we do here -- 18363b2aab18SMatthew Ahrens * held for such a short time. A concurrent caller of dsl_dataset_long_held() 18373b2aab18SMatthew Ahrens * could fail. Therefore we take pains to only put a long hold if it is 18383b2aab18SMatthew Ahrens * actually necessary. Fortunately, it will only be necessary if the 18393b2aab18SMatthew Ahrens * objset is currently mounted (or the ZVOL equivalent). In that case it 18403b2aab18SMatthew Ahrens * will already have a long hold, so we are not really making things any worse. 18413b2aab18SMatthew Ahrens * 18423b2aab18SMatthew Ahrens * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or 18433b2aab18SMatthew Ahrens * zvol_state_t), and use their mechanism to prevent their hold from being 18443b2aab18SMatthew Ahrens * dropped (e.g. VFS_HOLD()). However, that would be even more pain for 18453b2aab18SMatthew Ahrens * very little gain. 18463b2aab18SMatthew Ahrens * 18473b2aab18SMatthew Ahrens * if cookiep == NULL, this does both the suspend & resume. 18483b2aab18SMatthew Ahrens * Otherwise, it returns with the dataset "long held", and the cookie 18493b2aab18SMatthew Ahrens * should be passed into zil_resume(). 1850fa9e4066Sahrens */ 1851fa9e4066Sahrens int 18523b2aab18SMatthew Ahrens zil_suspend(const char *osname, void **cookiep) 1853fa9e4066Sahrens { 18543b2aab18SMatthew Ahrens objset_t *os; 18553b2aab18SMatthew Ahrens zilog_t *zilog; 18563b2aab18SMatthew Ahrens const zil_header_t *zh; 18573b2aab18SMatthew Ahrens int error; 18583b2aab18SMatthew Ahrens 18593b2aab18SMatthew Ahrens error = dmu_objset_hold(osname, suspend_tag, &os); 18603b2aab18SMatthew Ahrens if (error != 0) 18613b2aab18SMatthew Ahrens return (error); 18623b2aab18SMatthew Ahrens zilog = dmu_objset_zil(os); 1863fa9e4066Sahrens 1864fa9e4066Sahrens mutex_enter(&zilog->zl_lock); 18653b2aab18SMatthew Ahrens zh = zilog->zl_header; 18663b2aab18SMatthew Ahrens 18673589c4f0SNeil Perrin if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ 1868fa9e4066Sahrens mutex_exit(&zilog->zl_lock); 18693b2aab18SMatthew Ahrens dmu_objset_rele(os, suspend_tag); 1870be6fd75aSMatthew Ahrens return (SET_ERROR(EBUSY)); 1871fa9e4066Sahrens } 18723b2aab18SMatthew Ahrens 1873d80c45e0Sbonwick /* 18743b2aab18SMatthew Ahrens * Don't put a long hold in the cases where we can avoid it. This 18753b2aab18SMatthew Ahrens * is when there is no cookie so we are doing a suspend & resume 18763b2aab18SMatthew Ahrens * (i.e. called from zil_vdev_offline()), and there's nothing to do 18773b2aab18SMatthew Ahrens * for the suspend because it's already suspended, or there's no ZIL. 18783b2aab18SMatthew Ahrens */ 18793b2aab18SMatthew Ahrens if (cookiep == NULL && !zilog->zl_suspending && 18803b2aab18SMatthew Ahrens (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) { 18813b2aab18SMatthew Ahrens mutex_exit(&zilog->zl_lock); 18823b2aab18SMatthew Ahrens dmu_objset_rele(os, suspend_tag); 18833b2aab18SMatthew Ahrens return (0); 18843b2aab18SMatthew Ahrens } 18853b2aab18SMatthew Ahrens 18863b2aab18SMatthew Ahrens dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag); 18873b2aab18SMatthew Ahrens dsl_pool_rele(dmu_objset_pool(os), suspend_tag); 18883b2aab18SMatthew Ahrens 18893b2aab18SMatthew Ahrens zilog->zl_suspend++; 18903b2aab18SMatthew Ahrens 18913b2aab18SMatthew Ahrens if (zilog->zl_suspend > 1) { 18923b2aab18SMatthew Ahrens /* 18933b2aab18SMatthew Ahrens * Someone else is already suspending it. 1894d80c45e0Sbonwick * Just wait for them to finish. 1895d80c45e0Sbonwick */ 18963b2aab18SMatthew Ahrens 1897d80c45e0Sbonwick while (zilog->zl_suspending) 1898d80c45e0Sbonwick cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); 1899d80c45e0Sbonwick mutex_exit(&zilog->zl_lock); 19003b2aab18SMatthew Ahrens 19013b2aab18SMatthew Ahrens if (cookiep == NULL) 19023b2aab18SMatthew Ahrens zil_resume(os); 19033b2aab18SMatthew Ahrens else 19043b2aab18SMatthew Ahrens *cookiep = os; 1905d80c45e0Sbonwick return (0); 1906d80c45e0Sbonwick } 19073b2aab18SMatthew Ahrens 19083b2aab18SMatthew Ahrens /* 19093b2aab18SMatthew Ahrens * If there is no pointer to an on-disk block, this ZIL must not 19103b2aab18SMatthew Ahrens * be active (e.g. filesystem not mounted), so there's nothing 19113b2aab18SMatthew Ahrens * to clean up. 19123b2aab18SMatthew Ahrens */ 19133b2aab18SMatthew Ahrens if (BP_IS_HOLE(&zh->zh_log)) { 19143b2aab18SMatthew Ahrens ASSERT(cookiep != NULL); /* fast path already handled */ 19153b2aab18SMatthew Ahrens 19163b2aab18SMatthew Ahrens *cookiep = os; 19173b2aab18SMatthew Ahrens mutex_exit(&zilog->zl_lock); 19183b2aab18SMatthew Ahrens return (0); 19193b2aab18SMatthew Ahrens } 19203b2aab18SMatthew Ahrens 1921d80c45e0Sbonwick zilog->zl_suspending = B_TRUE; 1922fa9e4066Sahrens mutex_exit(&zilog->zl_lock); 1923fa9e4066Sahrens 19245002558fSNeil Perrin zil_commit(zilog, 0); 1925fa9e4066Sahrens 1926d80c45e0Sbonwick zil_destroy(zilog, B_FALSE); 1927d80c45e0Sbonwick 1928d80c45e0Sbonwick mutex_enter(&zilog->zl_lock); 1929d80c45e0Sbonwick zilog->zl_suspending = B_FALSE; 1930d80c45e0Sbonwick cv_broadcast(&zilog->zl_cv_suspend); 1931d80c45e0Sbonwick mutex_exit(&zilog->zl_lock); 1932fa9e4066Sahrens 19333b2aab18SMatthew Ahrens if (cookiep == NULL) 19343b2aab18SMatthew Ahrens zil_resume(os); 19353b2aab18SMatthew Ahrens else 19363b2aab18SMatthew Ahrens *cookiep = os; 1937fa9e4066Sahrens return (0); 1938fa9e4066Sahrens } 1939fa9e4066Sahrens 1940fa9e4066Sahrens void 19413b2aab18SMatthew Ahrens zil_resume(void *cookie) 1942fa9e4066Sahrens { 19433b2aab18SMatthew Ahrens objset_t *os = cookie; 19443b2aab18SMatthew Ahrens zilog_t *zilog = dmu_objset_zil(os); 19453b2aab18SMatthew Ahrens 1946fa9e4066Sahrens mutex_enter(&zilog->zl_lock); 1947fa9e4066Sahrens ASSERT(zilog->zl_suspend != 0); 1948fa9e4066Sahrens zilog->zl_suspend--; 1949fa9e4066Sahrens mutex_exit(&zilog->zl_lock); 19503b2aab18SMatthew Ahrens dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); 19513b2aab18SMatthew Ahrens dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); 1952fa9e4066Sahrens } 1953fa9e4066Sahrens 1954fa9e4066Sahrens typedef struct zil_replay_arg { 1955fa9e4066Sahrens zil_replay_func_t **zr_replay; 1956fa9e4066Sahrens void *zr_arg; 1957fa9e4066Sahrens boolean_t zr_byteswap; 1958b24ab676SJeff Bonwick char *zr_lr; 1959fa9e4066Sahrens } zil_replay_arg_t; 1960fa9e4066Sahrens 1961b24ab676SJeff Bonwick static int 1962b24ab676SJeff Bonwick zil_replay_error(zilog_t *zilog, lr_t *lr, int error) 1963b24ab676SJeff Bonwick { 1964*40a5c998SMatthew Ahrens char name[ZFS_MAX_DATASET_NAME_LEN]; 1965b24ab676SJeff Bonwick 1966b24ab676SJeff Bonwick zilog->zl_replaying_seq--; /* didn't actually replay this one */ 1967b24ab676SJeff Bonwick 1968b24ab676SJeff Bonwick dmu_objset_name(zilog->zl_os, name); 1969b24ab676SJeff Bonwick 1970b24ab676SJeff Bonwick cmn_err(CE_WARN, "ZFS replay transaction error %d, " 1971b24ab676SJeff Bonwick "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name, 1972b24ab676SJeff Bonwick (u_longlong_t)lr->lrc_seq, 1973b24ab676SJeff Bonwick (u_longlong_t)(lr->lrc_txtype & ~TX_CI), 1974b24ab676SJeff Bonwick (lr->lrc_txtype & TX_CI) ? "CI" : ""); 1975b24ab676SJeff Bonwick 1976b24ab676SJeff Bonwick return (error); 1977b24ab676SJeff Bonwick } 1978b24ab676SJeff Bonwick 1979b24ab676SJeff Bonwick static int 1980fa9e4066Sahrens zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg) 1981fa9e4066Sahrens { 1982fa9e4066Sahrens zil_replay_arg_t *zr = zra; 1983d80c45e0Sbonwick const zil_header_t *zh = zilog->zl_header; 1984fa9e4066Sahrens uint64_t reclen = lr->lrc_reclen; 1985fa9e4066Sahrens uint64_t txtype = lr->lrc_txtype; 1986b24ab676SJeff Bonwick int error = 0; 1987fa9e4066Sahrens 1988b24ab676SJeff Bonwick zilog->zl_replaying_seq = lr->lrc_seq; 1989fa9e4066Sahrens 1990fa9e4066Sahrens if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ 1991b24ab676SJeff Bonwick return (0); 1992b24ab676SJeff Bonwick 1993b24ab676SJeff Bonwick if (lr->lrc_txg < claim_txg) /* already committed */ 1994b24ab676SJeff Bonwick return (0); 1995fa9e4066Sahrens 1996da6c28aaSamw /* Strip case-insensitive bit, still present in log record */ 1997da6c28aaSamw txtype &= ~TX_CI; 1998da6c28aaSamw 1999b24ab676SJeff Bonwick if (txtype == 0 || txtype >= TX_MAX_TYPE) 2000b24ab676SJeff Bonwick return (zil_replay_error(zilog, lr, EINVAL)); 2001b24ab676SJeff Bonwick 2002b24ab676SJeff Bonwick /* 2003b24ab676SJeff Bonwick * If this record type can be logged out of order, the object 2004b24ab676SJeff Bonwick * (lr_foid) may no longer exist. That's legitimate, not an error. 2005b24ab676SJeff Bonwick */ 2006b24ab676SJeff Bonwick if (TX_OOO(txtype)) { 2007b24ab676SJeff Bonwick error = dmu_object_info(zilog->zl_os, 2008b24ab676SJeff Bonwick ((lr_ooo_t *)lr)->lr_foid, NULL); 2009b24ab676SJeff Bonwick if (error == ENOENT || error == EEXIST) 2010b24ab676SJeff Bonwick return (0); 20111209a471SNeil Perrin } 20121209a471SNeil Perrin 2013fa9e4066Sahrens /* 2014fa9e4066Sahrens * Make a copy of the data so we can revise and extend it. 2015fa9e4066Sahrens */ 2016b24ab676SJeff Bonwick bcopy(lr, zr->zr_lr, reclen); 2017b24ab676SJeff Bonwick 2018b24ab676SJeff Bonwick /* 2019b24ab676SJeff Bonwick * If this is a TX_WRITE with a blkptr, suck in the data. 2020b24ab676SJeff Bonwick */ 2021b24ab676SJeff Bonwick if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { 2022b24ab676SJeff Bonwick error = zil_read_log_data(zilog, (lr_write_t *)lr, 2023b24ab676SJeff Bonwick zr->zr_lr + reclen); 20243b2aab18SMatthew Ahrens if (error != 0) 2025b24ab676SJeff Bonwick return (zil_replay_error(zilog, lr, error)); 2026b24ab676SJeff Bonwick } 2027fa9e4066Sahrens 2028fa9e4066Sahrens /* 2029fa9e4066Sahrens * The log block containing this lr may have been byteswapped 2030fa9e4066Sahrens * so that we can easily examine common fields like lrc_txtype. 2031b24ab676SJeff Bonwick * However, the log is a mix of different record types, and only the 2032fa9e4066Sahrens * replay vectors know how to byteswap their records. Therefore, if 2033fa9e4066Sahrens * the lr was byteswapped, undo it before invoking the replay vector. 2034fa9e4066Sahrens */ 2035fa9e4066Sahrens if (zr->zr_byteswap) 2036b24ab676SJeff Bonwick byteswap_uint64_array(zr->zr_lr, reclen); 2037fa9e4066Sahrens 2038fa9e4066Sahrens /* 2039fa9e4066Sahrens * We must now do two things atomically: replay this log record, 20401209a471SNeil Perrin * and update the log header sequence number to reflect the fact that 20411209a471SNeil Perrin * we did so. At the end of each replay function the sequence number 20421209a471SNeil Perrin * is updated if we are in replay mode. 2043fa9e4066Sahrens */ 2044b24ab676SJeff Bonwick error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); 20453b2aab18SMatthew Ahrens if (error != 0) { 204667bd71c6Sperrin /* 204767bd71c6Sperrin * The DMU's dnode layer doesn't see removes until the txg 204867bd71c6Sperrin * commits, so a subsequent claim can spuriously fail with 20491209a471SNeil Perrin * EEXIST. So if we receive any error we try syncing out 2050b24ab676SJeff Bonwick * any removes then retry the transaction. Note that we 2051b24ab676SJeff Bonwick * specify B_FALSE for byteswap now, so we don't do it twice. 205267bd71c6Sperrin */ 205367bd71c6Sperrin txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); 2054b24ab676SJeff Bonwick error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); 20553b2aab18SMatthew Ahrens if (error != 0) 2056b24ab676SJeff Bonwick return (zil_replay_error(zilog, lr, error)); 2057fa9e4066Sahrens } 2058b24ab676SJeff Bonwick return (0); 2059fa9e4066Sahrens } 2060fa9e4066Sahrens 206167bd71c6Sperrin /* ARGSUSED */ 2062b24ab676SJeff Bonwick static int 206367bd71c6Sperrin zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg) 206467bd71c6Sperrin { 206567bd71c6Sperrin zilog->zl_replay_blks++; 2066b24ab676SJeff Bonwick 2067b24ab676SJeff Bonwick return (0); 2068fa9e4066Sahrens } 2069fa9e4066Sahrens 2070fa9e4066Sahrens /* 207113f5297eSperrin * If this dataset has a non-empty intent log, replay it and destroy it. 2072fa9e4066Sahrens */ 2073fa9e4066Sahrens void 20741209a471SNeil Perrin zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE]) 2075fa9e4066Sahrens { 2076fa9e4066Sahrens zilog_t *zilog = dmu_objset_zil(os); 2077d80c45e0Sbonwick const zil_header_t *zh = zilog->zl_header; 2078fa9e4066Sahrens zil_replay_arg_t zr; 2079fa9e4066Sahrens 20803589c4f0SNeil Perrin if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) { 2081d80c45e0Sbonwick zil_destroy(zilog, B_TRUE); 208213f5297eSperrin return; 208313f5297eSperrin } 208413f5297eSperrin 2085fa9e4066Sahrens zr.zr_replay = replay_func; 2086fa9e4066Sahrens zr.zr_arg = arg; 2087d80c45e0Sbonwick zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log); 2088b24ab676SJeff Bonwick zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP); 2089fa9e4066Sahrens 2090fa9e4066Sahrens /* 2091fa9e4066Sahrens * Wait for in-progress removes to sync before starting replay. 2092fa9e4066Sahrens */ 2093fa9e4066Sahrens txg_wait_synced(zilog->zl_dmu_pool, 0); 2094fa9e4066Sahrens 20951209a471SNeil Perrin zilog->zl_replay = B_TRUE; 2096d3d50737SRafael Vanoni zilog->zl_replay_time = ddi_get_lbolt(); 209767bd71c6Sperrin ASSERT(zilog->zl_replay_blks == 0); 209867bd71c6Sperrin (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr, 2099d80c45e0Sbonwick zh->zh_claim_txg); 2100b24ab676SJeff Bonwick kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE); 2101fa9e4066Sahrens 2102d80c45e0Sbonwick zil_destroy(zilog, B_FALSE); 2103a4611edeSahrens txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); 21041209a471SNeil Perrin zilog->zl_replay = B_FALSE; 2105fa9e4066Sahrens } 2106436b2950Sperrin 2107b24ab676SJeff Bonwick boolean_t 2108b24ab676SJeff Bonwick zil_replaying(zilog_t *zilog, dmu_tx_t *tx) 2109436b2950Sperrin { 211055da60b9SMark J Musante if (zilog->zl_sync == ZFS_SYNC_DISABLED) 2111b24ab676SJeff Bonwick return (B_TRUE); 2112436b2950Sperrin 2113b24ab676SJeff Bonwick if (zilog->zl_replay) { 2114b24ab676SJeff Bonwick dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 2115b24ab676SJeff Bonwick zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = 2116b24ab676SJeff Bonwick zilog->zl_replaying_seq; 2117b24ab676SJeff Bonwick return (B_TRUE); 2118b19a79ecSperrin } 2119b19a79ecSperrin 2120b24ab676SJeff Bonwick return (B_FALSE); 2121436b2950Sperrin } 2122e6ca193dSGeorge Wilson 2123e6ca193dSGeorge Wilson /* ARGSUSED */ 2124e6ca193dSGeorge Wilson int 2125fd136879SMatthew Ahrens zil_vdev_offline(const char *osname, void *arg) 2126e6ca193dSGeorge Wilson { 2127e6ca193dSGeorge Wilson int error; 2128e6ca193dSGeorge Wilson 21293b2aab18SMatthew Ahrens error = zil_suspend(osname, NULL); 21303b2aab18SMatthew Ahrens if (error != 0) 2131be6fd75aSMatthew Ahrens return (SET_ERROR(EEXIST)); 21323b2aab18SMatthew Ahrens return (0); 2133e6ca193dSGeorge Wilson } 2134