Lines matching defs:zilog — definitions and uses of the zilog parameter throughout zil.c, the ZFS intent log implementation. Each entry below is a zil.c source line number followed by the matching line.
83 static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);
118 zil_bp_tree_init(zilog_t *zilog)
120 avl_create(&zilog->zl_bp_tree, zil_bp_compare,
125 zil_bp_tree_fini(zilog_t *zilog)
127 avl_tree_t *t = &zilog->zl_bp_tree;
138 zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
140 avl_tree_t *t = &zilog->zl_bp_tree;
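
The zil_bp_tree_* functions above (lines 118-140) keep an AVL tree of block pointers already visited during a log walk, so zil_parse can skip blocks and records it has seen before; zil_bp_tree_add returns 0 on first sighting and EEXIST on a duplicate. A minimal user-space model of that add-or-detect-duplicate behavior, using POSIX tsearch(3) in place of the kernel AVL code and a hypothetical (vdev, offset) key standing in for the block pointer's DVA:

#include <errno.h>
#include <search.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the DVA embedded in a blkptr_t. */
typedef struct bp_key {
    uint64_t k_vdev;
    uint64_t k_offset;
} bp_key_t;

static int
bp_key_compare(const void *x1, const void *x2)
{
    const bp_key_t *k1 = x1;
    const bp_key_t *k2 = x2;

    if (k1->k_vdev != k2->k_vdev)
        return (k1->k_vdev < k2->k_vdev ? -1 : 1);
    if (k1->k_offset != k2->k_offset)
        return (k1->k_offset < k2->k_offset ? -1 : 1);
    return (0);
}

/* Returns 0 if the block was newly added, EEXIST if already seen. */
static int
bp_tree_add(void **root, uint64_t vdev, uint64_t offset)
{
    bp_key_t *key = malloc(sizeof (bp_key_t));

    key->k_vdev = vdev;
    key->k_offset = offset;
    bp_key_t **node = (bp_key_t **)tsearch(key, root, bp_key_compare);
    if (*node != key) {     /* duplicate: node points at the old key */
        free(key);
        return (EEXIST);
    }
    return (0);
}

int
main(void)
{
    void *root = NULL;

    printf("%d\n", bp_tree_add(&root, 1, 4096));    /* 0: newly added */
    printf("%d\n", bp_tree_add(&root, 1, 4096));    /* EEXIST */
    return (0);
}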
161 zil_header_in_syncing_context(zilog_t *zilog)
163 return ((zil_header_t *)zilog->zl_header);
167 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
173 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
181 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
190 if (zilog->zl_header->zh_claim_txg == 0)
193 if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
199 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
257 zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
272 if (zilog->zl_header->zh_claim_txg == 0)
275 SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
278 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
294 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
297 const zil_header_t *zh = zilog->zl_header;
325 zil_bp_tree_init(zilog);
334 if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
343 error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
353 if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
361 zilog->zl_parse_error = error;
362 zilog->zl_parse_blk_seq = max_blk_seq;
363 zilog->zl_parse_lr_seq = max_lr_seq;
364 zilog->zl_parse_blk_count = blk_count;
365 zilog->zl_parse_lr_count = lr_count;
370 zil_bp_tree_fini(zilog);
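
zil_parse (lines 294-370) walks the on-disk log chain block by block, handing each block to parse_blk_func and each record within it to parse_lr_func, stopping on the first callback error and leaving the sequence high-water marks and counts in the zl_parse_* fields (lines 361-365). The same walker is reused with different callbacks: claim at pool import (zil_claim_log_block/zil_claim_log_record, lines 377-412) and free at destroy (zil_free_log_block/zil_free_log_record, lines 417-436). A sketch of the callback pattern under simplified, hypothetical in-memory types, with no I/O:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical in-memory stand-ins for a log block and its records. */
typedef struct log_blk {
    struct log_blk *lb_next;    /* next block in the chain */
    uint64_t lb_seq;            /* chain sequence number */
    int lb_nrecords;
} log_blk_t;

typedef int (*parse_blk_func_t)(const log_blk_t *blk, void *arg);
typedef int (*parse_lr_func_t)(const log_blk_t *blk, int rec, void *arg);

/* Walk the chain, invoking the callbacks; stop on the first error. */
static int
log_parse(const log_blk_t *chain, parse_blk_func_t blk_func,
    parse_lr_func_t lr_func, void *arg, uint64_t *blk_count,
    uint64_t *lr_count)
{
    int error = 0;

    *blk_count = *lr_count = 0;
    for (const log_blk_t *blk = chain; blk != NULL; blk = blk->lb_next) {
        if ((error = blk_func(blk, arg)) != 0)
            break;
        (*blk_count)++;
        for (int r = 0; r < blk->lb_nrecords; r++) {
            if ((error = lr_func(blk, r, arg)) != 0)
                return (error);
            (*lr_count)++;
        }
    }
    return (error);
}

static int
print_blk(const log_blk_t *blk, void *arg)
{
    printf("block seq %llu\n", (unsigned long long)blk->lb_seq);
    return (0);
}

static int
print_lr(const log_blk_t *blk, int rec, void *arg)
{
    printf("  record %d\n", rec);
    return (0);
}

int
main(void)
{
    log_blk_t b2 = { NULL, 2, 1 };
    log_blk_t b1 = { &b2, 1, 2 };
    uint64_t blks, lrs;

    (void) log_parse(&b1, print_blk, print_lr, NULL, &blks, &lrs);
    printf("%llu blocks, %llu records\n",
        (unsigned long long)blks, (unsigned long long)lrs);
    return (0);
}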
377 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
384 zil_bp_tree_add(zilog, bp) != 0)
387 return (zio_wait(zio_claim(NULL, zilog->zl_spa,
393 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
410 (error = zil_read_log_data(zilog, lr, NULL)) != 0)
412 return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
417 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
419 zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
425 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
434 bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
436 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
442 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
447 lwb->lwb_zilog = zilog;
461 mutex_enter(&zilog->zl_lock);
462 list_insert_tail(&zilog->zl_lwb_list, lwb);
463 mutex_exit(&zilog->zl_lock);
473 zilog_dirty(zilog_t *zilog, uint64_t txg)
475 dsl_pool_t *dp = zilog->zl_dmu_pool;
476 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
481 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
483 dmu_buf_add_ref(ds->ds_dbuf, zilog);
488 zilog_is_dirty(zilog_t *zilog)
490 dsl_pool_t *dp = zilog->zl_dmu_pool;
493 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
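
zilog_dirty (lines 473-483) records that the intent log has work pending in a given txg: txg_list_add returns true only on the first insertion for that txg, and that first insertion takes a hold on the dataset's dbuf so the objset stays live until the log is cleaned; zilog_is_dirty (lines 488-493) probes every open txg list. A toy model of the dirty-once-per-txg-then-hold idiom, with a hypothetical per-zilog flag array standing in for the pool-wide dp_dirty_zilogs list:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE    4
#define TXG_MASK    (TXG_SIZE - 1)

/* Hypothetical model: one dirty flag per open txg, plus a hold count. */
typedef struct model_zilog {
    bool zl_dirty[TXG_SIZE];
    int zl_ds_holds;
} model_zilog_t;

/* Returns true only the first time the zilog is dirtied in this txg. */
static bool
txg_list_add_model(model_zilog_t *zl, uint64_t txg)
{
    uint64_t t = txg & TXG_MASK;

    if (zl->zl_dirty[t])
        return (false);
    zl->zl_dirty[t] = true;
    return (true);
}

static void
zilog_dirty_model(model_zilog_t *zl, uint64_t txg)
{
    if (txg_list_add_model(zl, txg))
        zl->zl_ds_holds++;      /* hold the dataset until cleaned */
}

static bool
zilog_is_dirty_model(const model_zilog_t *zl)
{
    for (int t = 0; t < TXG_SIZE; t++)
        if (zl->zl_dirty[t])
            return (true);
    return (false);
}

int
main(void)
{
    model_zilog_t zl = { { false }, 0 };

    zilog_dirty_model(&zl, 7);
    zilog_dirty_model(&zl, 7);      /* second call: no extra hold */
    printf("dirty=%d holds=%d\n", zilog_is_dirty_model(&zl), zl.zl_ds_holds);
    return (0);
}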
503 zil_create(zilog_t *zilog)
505 const zil_header_t *zh = zilog->zl_header;
515 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
528 tx = dmu_tx_create(zilog->zl_os);
530 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
534 zio_free_zil(zilog->zl_spa, txg, &blk);
538 error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
539 ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
542 zil_init_log_chain(zilog, &blk);
549 lwb = zil_alloc_lwb(zilog, &blk, txg);
558 txg_wait_synced(zilog->zl_dmu_pool, txg);
576 zil_destroy(zilog_t *zilog, boolean_t keep_first)
578 const zil_header_t *zh = zilog->zl_header;
586 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
588 zilog->zl_old_header = *zh; /* debugging aid */
593 tx = dmu_tx_create(zilog->zl_os);
595 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
598 mutex_enter(&zilog->zl_lock);
600 ASSERT3U(zilog->zl_destroy_txg, <, txg);
601 zilog->zl_destroy_txg = txg;
602 zilog->zl_keep_first = keep_first;
604 if (!list_is_empty(&zilog->zl_lwb_list)) {
607 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
608 list_remove(&zilog->zl_lwb_list, lwb);
611 zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
615 zil_destroy_sync(zilog, tx);
617 mutex_exit(&zilog->zl_lock);
623 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
625 ASSERT(list_is_empty(&zilog->zl_lwb_list));
626 (void) zil_parse(zilog, zil_free_log_block,
627 zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
635 zilog_t *zilog;
654 zilog = dmu_objset_zil(os);
655 zh = zil_header_in_syncing_context(zilog);
657 if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
659 zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
675 (void) zil_parse(zilog, zil_claim_log_block,
678 zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
679 zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
680 if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
686 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
700 zilog_t *zilog;
714 zilog = dmu_objset_zil(os);
715 bp = (blkptr_t *)&zilog->zl_header->zh_log;
744 error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
745 zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));
765 zil_add_block(zilog_t *zilog, const blkptr_t *bp)
767 avl_tree_t *t = &zilog->zl_vdev_tree;
776 ASSERT(zilog->zl_writer);
783 mutex_enter(&zilog->zl_vdev_lock);
792 mutex_exit(&zilog->zl_vdev_lock);
796 zil_flush_vdevs(zilog_t *zilog)
798 spa_t *spa = zilog->zl_spa;
799 avl_tree_t *t = &zilog->zl_vdev_tree;
804 ASSERT(zilog->zl_writer);
840 zilog_t *zilog = lwb->lwb_zilog;
860 mutex_enter(&zilog->zl_lock);
863 mutex_exit(&zilog->zl_lock);
877 zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
885 if (zilog->zl_root_zio == NULL) {
886 zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
890 lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
917 #define USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
918 (((zilog)->zl_cur_used < zil_slog_limit) || \
919 ((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
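
USE_SLOG (lines 917-919) is the policy check for steering a new log block to the separate log device: only latency-biased datasets qualify, and only while either the current commit's usage (zl_cur_used) is below the zil_slog_limit tunable or the queued itx backlog (zl_itx_list_sz) is below twice that limit, so a flood of log traffic falls back to the main pool. The same predicate unpacked into a plain function, assuming the illumos values of ZFS_LOGBIAS_LATENCY (0) and the 1 MB default for zil_slog_limit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ZFS_LOGBIAS_LATENCY 0           /* value from sys/fs/zfs.h */

static uint64_t zil_slog_limit = 1024 * 1024;   /* 1 MB default */

static bool
use_slog(uint64_t logbias, uint64_t cur_used, uint64_t itx_list_sz)
{
    /* Same test as the USE_SLOG() macro, written out. */
    return (logbias == ZFS_LOGBIAS_LATENCY &&
        (cur_used < zil_slog_limit ||
        itx_list_sz < (zil_slog_limit << 1)));
}

int
main(void)
{
    printf("%d\n", use_slog(ZFS_LOGBIAS_LATENCY, 4096, 0));     /* 1 */
    printf("%d\n", use_slog(ZFS_LOGBIAS_LATENCY,
        2 * zil_slog_limit, 4 * zil_slog_limit));               /* 0 */
    return (0);
}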
926 zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
930 spa_t *spa = zilog->zl_spa;
956 tx = dmu_tx_create(zilog->zl_os);
958 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
979 zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
985 zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
987 zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
988 zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
993 USE_SLOG(zilog));
1002 nlwb = zil_alloc_lwb(zilog, bp, txg);
1005 zil_add_block(zilog, &lwb->lwb_blk);
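
When zil_lwb_write_start (lines 926-1005) closes out a log write block, it must guess a size for the next one: it starts from the space the current commit actually used plus the chain header (line 979), records the rounded estimate in a ZIL_PREV_BLKS-entry ring (line 985; the bucket rounding falls between the lines shown here), takes the maximum over the whole ring (line 987) so one small commit cannot shrink the block size below the recent peak, and advances the rotor with a power-of-two mask (line 988). A compilable model of the ring, assuming ZIL_PREV_BLKS is 16 as in illumos and eliding the bucket rounding:

#include <stdint.h>
#include <stdio.h>

#define ZIL_PREV_BLKS   16      /* power of two, as in illumos */
#define MAX(a, b)       ((a) > (b) ? (a) : (b))

static uint64_t zl_prev_blks[ZIL_PREV_BLKS];
static int zl_prev_rotor;

/* Estimate the next log block size from recent usage history. */
static uint64_t
next_blksz(uint64_t cur_used)
{
    uint64_t blksz = cur_used;  /* + chain header and rounding, elided */

    zl_prev_blks[zl_prev_rotor] = blksz;
    for (int i = 0; i < ZIL_PREV_BLKS; i++)
        blksz = MAX(blksz, zl_prev_blks[i]);
    zl_prev_rotor = (zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
    return (blksz);
}

int
main(void)
{
    printf("%llu\n", (unsigned long long)next_blksz(131072));
    /* A small commit still gets the recent peak: prints 131072 again. */
    printf("%llu\n", (unsigned long long)next_blksz(4096));
    return (0);
}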
1037 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
1050 ASSERT(zilog_is_dirty(zilog) ||
1051 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
1057 zilog->zl_cur_used += (reclen + dlen);
1059 zil_lwb_write_init(zilog, lwb);
1065 lwb = zil_lwb_write_start(zilog, lwb);
1068 zil_lwb_write_init(zilog, lwb);
1071 txg_wait_synced(zilog->zl_dmu_pool, txg);
1085 if (txg > spa_freeze_txg(zilog->zl_spa))
1086 txg_wait_synced(zilog->zl_dmu_pool, txg);
1099 error = zilog->zl_get_data(
1102 txg_wait_synced(zilog->zl_dmu_pool, txg);
1119 lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
1206 zil_remove_async(zilog_t *zilog, uint64_t oid)
1218 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1221 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1224 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1250 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
1264 zil_remove_async(zilog, itx->itx_oid);
1270 zil_async_to_sync(zilog, itx->itx_oid);
1272 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
1277 itxg = &zilog->zl_itxg[txg & TXG_MASK];
1287 atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
1303 atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod);
1323 zilog_dirty(zilog, txg);
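
zil_itx_assign (lines 1250-1323) files each intent-log transaction on a per-txg structure selected by txg & TXG_MASK (line 1277). Because only TXG_SIZE transaction groups can be open at once, a slot still holding itxs from an older txg must belong to one that has already synced, so its contents are discarded and their accounted size given back (the atomic_add_64 at line 1287) before the new itx is appended and the zilog marked dirty (line 1323). A toy of the masked-slot recycling, with TXG_SIZE of 4 as in illumos and a bare count standing in for the itx lists:

#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE    4
#define TXG_MASK    (TXG_SIZE - 1)

/* Hypothetical slot: which txg it belongs to and how many itxs it holds. */
typedef struct itxg {
    uint64_t itxg_txg;
    int itxg_count;
} itxg_t;

static itxg_t zl_itxg[TXG_SIZE];

static void
itx_assign(uint64_t txg)
{
    itxg_t *itxg = &zl_itxg[txg & TXG_MASK];

    if (itxg->itxg_txg != txg) {
        /* Stale contents are from txg - TXG_SIZE: already synced. */
        itxg->itxg_count = 0;
        itxg->itxg_txg = txg;
    }
    itxg->itxg_count++;
}

int
main(void)
{
    itx_assign(5);
    itx_assign(5);
    itx_assign(9);  /* 9 & 3 == 5 & 3: recycles the same slot */
    printf("slot 1: txg %llu, %d itx(s)\n",
        (unsigned long long)zl_itxg[1].itxg_txg, zl_itxg[1].itxg_count);
    return (0);
}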
1339 zil_clean(zilog_t *zilog, uint64_t synced_txg)
1341 itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
1351 ASSERT(zilog->zl_clean_taskq != NULL);
1352 atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
1364 if (taskq_dispatch(zilog->zl_clean_taskq,
1373 zil_get_commit_list(zilog_t *zilog)
1376 list_t *commit_list = &zilog->zl_itx_commit_list;
1379 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1382 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1385 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1399 atomic_add_64(&zilog->zl_itx_list_sz, -push_sod);
1406 zil_async_to_sync(zilog_t *zilog, uint64_t foid)
1413 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1416 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1419 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1455 zil_commit_writer(zilog_t *zilog)
1460 spa_t *spa = zilog->zl_spa;
1463 ASSERT(zilog->zl_root_zio == NULL);
1465 mutex_exit(&zilog->zl_lock);
1467 zil_get_commit_list(zilog);
1473 if (list_head(&zilog->zl_itx_commit_list) == NULL) {
1474 mutex_enter(&zilog->zl_lock);
1478 if (zilog->zl_suspend) {
1481 lwb = list_tail(&zilog->zl_lwb_list);
1483 lwb = zil_create(zilog);
1486 DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
1487 while (itx = list_head(&zilog->zl_itx_commit_list)) {
1492 lwb = zil_lwb_commit(zilog, itx, lwb);
1493 list_remove(&zilog->zl_itx_commit_list, itx);
1497 DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1501 lwb = zil_lwb_write_start(zilog, lwb);
1503 zilog->zl_cur_used = 0;
1508 if (zilog->zl_root_zio) {
1509 error = zio_wait(zilog->zl_root_zio);
1510 zilog->zl_root_zio = NULL;
1511 zil_flush_vdevs(zilog);
1515 txg_wait_synced(zilog->zl_dmu_pool, 0);
1517 mutex_enter(&zilog->zl_lock);
1525 zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
1552 zil_commit(zilog_t *zilog, uint64_t foid)
1556 if (zilog->zl_sync == ZFS_SYNC_DISABLED)
1560 zil_async_to_sync(zilog, foid);
1562 mutex_enter(&zilog->zl_lock);
1563 mybatch = zilog->zl_next_batch;
1564 while (zilog->zl_writer) {
1565 cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock);
1566 if (mybatch <= zilog->zl_com_batch) {
1567 mutex_exit(&zilog->zl_lock);
1572 zilog->zl_next_batch++;
1573 zilog->zl_writer = B_TRUE;
1574 zil_commit_writer(zilog);
1575 zilog->zl_com_batch = mybatch;
1576 zilog->zl_writer = B_FALSE;
1577 mutex_exit(&zilog->zl_lock);
1580 cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);
1583 cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
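
zil_commit (lines 1552-1583) serializes writers with a two-batch scheme: a caller notes the batch number it joined (zl_next_batch, line 1563), sleeps on the condvar for that batch's parity while another thread holds zl_writer (lines 1564-1565), and returns early if its batch was committed while it slept (line 1566). The thread that becomes the writer runs zil_commit_writer, then wakes one waiter in the next batch (line 1580) and everyone in the finished batch (line 1583), so callers that arrive during an in-flight commit are aggregated into a single follow-up commit. A runnable single-threaded pthread model of the batching; the actual log write is a stub, and where the kernel drops zl_lock inside zil_commit_writer this sketch unlocks around the stub instead:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t zl_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t zl_cv_batch[2] = {
    PTHREAD_COND_INITIALIZER, PTHREAD_COND_INITIALIZER
};
static uint64_t zl_next_batch = 1;
static uint64_t zl_com_batch;   /* last committed batch */
static int zl_writer;

static void
commit_writer(void)
{
    /* Stub for zil_commit_writer(): write and flush the queued itxs. */
}

static void
zil_commit_model(void)
{
    pthread_mutex_lock(&zl_lock);
    uint64_t mybatch = zl_next_batch;
    while (zl_writer) {
        pthread_cond_wait(&zl_cv_batch[mybatch & 1], &zl_lock);
        if (mybatch <= zl_com_batch) {  /* another thread committed us */
            pthread_mutex_unlock(&zl_lock);
            return;
        }
    }
    zl_next_batch++;
    zl_writer = 1;
    pthread_mutex_unlock(&zl_lock);
    commit_writer();
    pthread_mutex_lock(&zl_lock);
    zl_com_batch = mybatch;
    zl_writer = 0;
    /* Wake one waiter in the next batch so it can start writing ... */
    pthread_cond_signal(&zl_cv_batch[(mybatch + 1) & 1]);
    /* ... and everyone whose itxs just went out with this batch. */
    pthread_cond_broadcast(&zl_cv_batch[mybatch & 1]);
    pthread_mutex_unlock(&zl_lock);
}

int
main(void)
{
    zil_commit_model();
    printf("committed batch %llu\n", (unsigned long long)zl_com_batch);
    return (0);
}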
1590 zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1592 zil_header_t *zh = zil_header_in_syncing_context(zilog);
1594 spa_t *spa = zilog->zl_spa;
1595 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
1605 mutex_enter(&zilog->zl_lock);
1607 ASSERT(zilog->zl_stop_sync == 0);
1615 if (zilog->zl_destroy_txg == txg) {
1618 ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1621 bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
1623 if (zilog->zl_keep_first) {
1632 zil_init_log_chain(zilog, &blk);
1637 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1641 list_remove(&zilog->zl_lwb_list, lwb);
1651 if (list_head(&zilog->zl_lwb_list) == NULL)
1654 mutex_exit(&zilog->zl_lock);
1671 zil_set_sync(zilog_t *zilog, uint64_t sync)
1673 zilog->zl_sync = sync;
1677 zil_set_logbias(zilog_t *zilog, uint64_t logbias)
1679 zilog->zl_logbias = logbias;
1685 zilog_t *zilog;
1687 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1689 zilog->zl_header = zh_phys;
1690 zilog->zl_os = os;
1691 zilog->zl_spa = dmu_objset_spa(os);
1692 zilog->zl_dmu_pool = dmu_objset_pool(os);
1693 zilog->zl_destroy_txg = TXG_INITIAL - 1;
1694 zilog->zl_logbias = dmu_objset_logbias(os);
1695 zilog->zl_sync = dmu_objset_syncprop(os);
1696 zilog->zl_next_batch = 1;
1698 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1701 mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
1705 list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1708 list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
1711 mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
1713 avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
1716 cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1717 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
1718 cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL);
1719 cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL);
1721 return (zilog);
1725 zil_free(zilog_t *zilog)
1727 zilog->zl_stop_sync = 1;
1729 ASSERT0(zilog->zl_suspend);
1730 ASSERT0(zilog->zl_suspending);
1732 ASSERT(list_is_empty(&zilog->zl_lwb_list));
1733 list_destroy(&zilog->zl_lwb_list);
1735 avl_destroy(&zilog->zl_vdev_tree);
1736 mutex_destroy(&zilog->zl_vdev_lock);
1738 ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
1739 list_destroy(&zilog->zl_itx_commit_list);
1749 if (zilog->zl_itxg[i].itxg_itxs)
1750 zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
1751 mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
1754 mutex_destroy(&zilog->zl_lock);
1756 cv_destroy(&zilog->zl_cv_writer);
1757 cv_destroy(&zilog->zl_cv_suspend);
1758 cv_destroy(&zilog->zl_cv_batch[0]);
1759 cv_destroy(&zilog->zl_cv_batch[1]);
1761 kmem_free(zilog, sizeof (zilog_t));
1770 zilog_t *zilog = dmu_objset_zil(os);
1772 ASSERT(zilog->zl_clean_taskq == NULL);
1773 ASSERT(zilog->zl_get_data == NULL);
1774 ASSERT(list_is_empty(&zilog->zl_lwb_list));
1776 zilog->zl_get_data = get_data;
1777 zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1780 return (zilog);
1787 zil_close(zilog_t *zilog)
1792 zil_commit(zilog, 0); /* commit all itx */
1800 mutex_enter(&zilog->zl_lock);
1801 lwb = list_tail(&zilog->zl_lwb_list);
1804 mutex_exit(&zilog->zl_lock);
1806 txg_wait_synced(zilog->zl_dmu_pool, txg);
1807 ASSERT(!zilog_is_dirty(zilog));
1809 taskq_destroy(zilog->zl_clean_taskq);
1810 zilog->zl_clean_taskq = NULL;
1811 zilog->zl_get_data = NULL;
1816 mutex_enter(&zilog->zl_lock);
1817 lwb = list_head(&zilog->zl_lwb_list);
1819 ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
1820 list_remove(&zilog->zl_lwb_list, lwb);
1824 mutex_exit(&zilog->zl_lock);
1855 zilog_t *zilog;
1862 zilog = dmu_objset_zil(os);
1864 mutex_enter(&zilog->zl_lock);
1865 zh = zilog->zl_header;
1868 mutex_exit(&zilog->zl_lock);
1879 if (cookiep == NULL && !zilog->zl_suspending &&
1880 (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
1881 mutex_exit(&zilog->zl_lock);
1889 zilog->zl_suspend++;
1891 if (zilog->zl_suspend > 1) {
1897 while (zilog->zl_suspending)
1898 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1899 mutex_exit(&zilog->zl_lock);
1917 mutex_exit(&zilog->zl_lock);
1921 zilog->zl_suspending = B_TRUE;
1922 mutex_exit(&zilog->zl_lock);
1924 zil_commit(zilog, 0);
1926 zil_destroy(zilog, B_FALSE);
1928 mutex_enter(&zilog->zl_lock);
1929 zilog->zl_suspending = B_FALSE;
1930 cv_broadcast(&zilog->zl_cv_suspend);
1931 mutex_exit(&zilog->zl_lock);
1944 zilog_t *zilog = dmu_objset_zil(os);
1946 mutex_enter(&zilog->zl_lock);
1947 ASSERT(zilog->zl_suspend != 0);
1948 zilog->zl_suspend--;
1949 mutex_exit(&zilog->zl_lock);
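
zil_suspend (lines 1855-1931) and zil_resume (lines 1944-1949) implement a shared suspended state with a counter and an in-progress flag: the first suspender bumps zl_suspend, quiesces and destroys the log with zl_suspending set, and broadcasts zl_cv_suspend when done; a later suspender that finds zl_suspend > 1 merely waits for the flag to clear (lines 1891-1898), and zil_resume decrements the counter. A minimal pthread model of the wait-for-first-suspender handshake; the commit/destroy work is a stub:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t zl_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t zl_cv_suspend = PTHREAD_COND_INITIALIZER;
static int zl_suspend;      /* how many suspenders are outstanding */
static int zl_suspending;   /* first suspender still working */

static void
zil_suspend_model(void)
{
    pthread_mutex_lock(&zl_lock);
    zl_suspend++;
    if (zl_suspend > 1) {
        /* Someone else is (or already finished) suspending; wait it out. */
        while (zl_suspending)
            pthread_cond_wait(&zl_cv_suspend, &zl_lock);
        pthread_mutex_unlock(&zl_lock);
        return;
    }
    zl_suspending = 1;
    pthread_mutex_unlock(&zl_lock);

    /* Stub for zil_commit() + zil_destroy(): quiesce the log. */

    pthread_mutex_lock(&zl_lock);
    zl_suspending = 0;
    pthread_cond_broadcast(&zl_cv_suspend);
    pthread_mutex_unlock(&zl_lock);
}

static void
zil_resume_model(void)
{
    pthread_mutex_lock(&zl_lock);
    zl_suspend--;
    pthread_mutex_unlock(&zl_lock);
}

int
main(void)
{
    zil_suspend_model();
    printf("suspended: count=%d\n", zl_suspend);
    zil_resume_model();
    printf("resumed: count=%d\n", zl_suspend);
    return (0);
}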
1962 zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
1966 zilog->zl_replaying_seq--; /* didn't actually replay this one */
1968 dmu_objset_name(zilog->zl_os, name);
1980 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1983 const zil_header_t *zh = zilog->zl_header;
1988 zilog->zl_replaying_seq = lr->lrc_seq;
2000 return (zil_replay_error(zilog, lr, EINVAL));
2007 error = dmu_object_info(zilog->zl_os,
2022 error = zil_read_log_data(zilog, (lr_write_t *)lr,
2025 return (zil_replay_error(zilog, lr, error));
2053 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
2056 return (zil_replay_error(zilog, lr, error));
2063 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
2065 zilog->zl_replay_blks++;
2076 zilog_t *zilog = dmu_objset_zil(os);
2077 const zil_header_t *zh = zilog->zl_header;
2081 zil_destroy(zilog, B_TRUE);
2093 txg_wait_synced(zilog->zl_dmu_pool, 0);
2095 zilog->zl_replay = B_TRUE;
2096 zilog->zl_replay_time = ddi_get_lbolt();
2097 ASSERT(zilog->zl_replay_blks == 0);
2098 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
2102 zil_destroy(zilog, B_FALSE);
2103 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
2104 zilog->zl_replay = B_FALSE;
2108 zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
2110 if (zilog->zl_sync == ZFS_SYNC_DISABLED)
2113 if (zilog->zl_replay) {
2114 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
2115 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
2116 zilog->zl_replaying_seq;
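
zil_replaying (lines 2108-2116) is how replayed operations avoid being logged and replayed twice: while zl_replay is set, each replayed record's sequence number is stored in zl_replayed_seq[] under txg & TXG_MASK, and zil_sync (which picks up the same slot at line 1595) later folds it into the on-disk header's replay sequence, so a crash in mid-replay resumes after the last record whose txg actually synced. A small model of the per-txg high-water bookkeeping, with the persist step a sketch of what zil_sync does under those assumptions:

#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE    4
#define TXG_MASK    (TXG_SIZE - 1)

static uint64_t zl_replayed_seq[TXG_SIZE];
static uint64_t zh_replay_seq;      /* stands in for the on-disk field */

/* Called as each log record is replayed in some open txg. */
static void
note_replayed(uint64_t seq, uint64_t txg)
{
    zl_replayed_seq[txg & TXG_MASK] = seq;
}

/* Called when a txg syncs: persist the high-water mark for that txg. */
static void
sync_txg(uint64_t txg)
{
    uint64_t *replayed_seq = &zl_replayed_seq[txg & TXG_MASK];

    if (*replayed_seq != 0) {
        zh_replay_seq = *replayed_seq;
        *replayed_seq = 0;
    }
}

int
main(void)
{
    note_replayed(41, 10);
    note_replayed(42, 10);
    sync_txg(10);
    printf("zh_replay_seq = %llu\n", (unsigned long long)zh_replay_seq);
    return (0);
}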