/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/zil.h>
#include <sys/callb.h>
#include <sys/trace_zfs.h>

/*
 * ZFS Transaction Groups
 * ----------------------
 *
 * ZFS transaction groups are, as the name implies, groups of transactions
 * that act on persistent state. ZFS asserts consistency at the granularity of
 * these transaction groups. Each successive transaction group (txg) is
 * assigned a consecutive 64-bit identifier. There are three active
 * transaction group states: open, quiescing, or syncing. At any given time,
 * there may be an active txg associated with each state; each active txg may
 * either be processing or blocked waiting to enter the next state. There may
 * be up to three active txgs, and there is always a txg in the open state
 * (though it may be blocked waiting to enter the quiescing state). In broad
 * strokes, transactions -- operations that change in-memory structures -- are
 * accepted into the txg in the open state, and are completed while the txg is
 * in the open or quiescing states. The accumulated changes are written to
 * disk in the syncing state.
 *
 * Open
 *
 * When a new txg becomes active, it first enters the open state. New
 * transactions -- updates to in-memory structures -- are assigned to the
 * currently open txg. There is always a txg in the open state so that ZFS can
 * accept new changes (though the txg may refuse new changes if it has hit
 * some limit). ZFS advances the open txg to the next state for a variety of
 * reasons, such as hitting a time or size threshold, or the execution of an
 * administrative action that must be completed in the syncing state.
 *
 * Quiescing
 *
 * After a txg exits the open state, it enters the quiescing state. The
 * quiescing state is intended to provide a buffer between accepting new
 * transactions in the open state and writing them out to stable storage in
 * the syncing state. While quiescing, transactions can continue their
 * operation without delaying either of the other states. Typically, a txg is
 * in the quiescing state very briefly since the operations are bounded by
 * software latencies rather than, say, slower I/O latencies. After all
 * transactions complete, the txg is ready to enter the next state.
 *
 * Syncing
 *
 * In the syncing state, the in-memory state built up during the open and (to
 * a lesser degree) the quiescing states is written to stable storage. The
 * process of writing out modified data can, in turn, modify more data. For
 * example, when we write new blocks, we need to allocate space for them;
 * those allocations modify metadata (space maps)... which themselves must be
 * written to stable storage. During the syncing state, ZFS iterates, writing
 * out data until it converges and all in-memory changes have been written
 * out. The first such pass is the largest as it encompasses all the modified
 * user data (as opposed to filesystem metadata). Subsequent passes typically
 * have far less data to write as they consist exclusively of filesystem
 * metadata.
 *
 * To ensure convergence, after a certain number of passes ZFS begins
 * overwriting locations on stable storage that had been allocated earlier in
 * the syncing state (and subsequently freed). ZFS usually allocates new
 * blocks to optimize for large, continuous writes. For the syncing state to
 * converge, however, it must complete a pass in which no new blocks are
 * allocated, since each allocation requires a modification of persistent
 * metadata. Further, to hasten convergence, after a prescribed number of
 * passes, ZFS also defers frees and stops compressing.
 *
 * In addition to writing out user data, we must also execute synctasks during
 * the syncing context. A synctask is the mechanism by which administrative
 * activities, such as creating and destroying snapshots or datasets, are
 * performed. When a synctask is initiated, it enters the open txg, and ZFS
 * then pushes that txg as quickly as possible to completion of the syncing
 * state in order to reduce the latency of the administrative activity. To
 * complete the syncing state, ZFS writes out a new uberblock, the root of the
 * tree of blocks that comprise all state stored on the ZFS pool. Finally, if
 * there is a quiesced txg waiting, we signal that it can now transition to
 * the syncing state.
 */
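
/*
 * As an illustrative sketch (not part of this file's interfaces), a DMU
 * consumer typically moves a change through these states as follows; the
 * object, offset, size, and buf below are hypothetical:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	int error = dmu_tx_assign(tx, TXG_WAIT);	(joins the open txg)
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, offset, size, buf, tx);
 *	dmu_tx_commit(tx);		(releases the txg toward quiescing)
 *
 * The change becomes durable once the assigned txg completes the syncing
 * state; see txg_wait_synced() below.
 */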

static void txg_sync_thread(void *arg);
static void txg_quiesce_thread(void *arg);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&tx->tx_cpu[c].tc_open_lock, NULL, MUTEX_NOLOCKDEP,
		    NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT0(tx->tx_threads);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_open_lock);
		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT0(tx->tx_threads);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 0, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}

static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time) {
		(void) cv_timedwait_idle(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	} else {
		cv_wait_idle(cv, &tx->tx_sync_lock);
	}

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT3U(tx->tx_threads, ==, 2);

	/*
	 * We need to ensure that we've vacated the deferred metaslab trees.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT3U(tx->tx_threads, ==, 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}

uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc;
	uint64_t txg;

	/*
	 * The processor id is used only as a "random" index into the
	 * tx_cpu array; there is no other significance to the chosen
	 * tx_cpu.  Indexing by the current cpu is simply a cheap way
	 * to spread contention across the per-cpu structures.
	 */
	kpreempt_disable();
	tc = &tx->tx_cpu[CPU_SEQID];
	kpreempt_enable();

	mutex_enter(&tc->tc_open_lock);
	txg = tx->tx_open_txg;

	mutex_enter(&tc->tc_lock);
	tc->tc_count[txg & TXG_MASK]++;
	mutex_exit(&tc->tc_lock);

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	ASSERT(!MUTEX_HELD(&tc->tc_lock));
	mutex_exit(&tc->tc_open_lock);
}

void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}

void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}

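/*
 * A hedged sketch of how the hold/release primitives above pair up in
 * practice; the DMU does this internally (roughly, dmu_tx_assign()
 * holds and releases to quiesce, dmu_tx_commit() releases to sync),
 * so this is illustrative rather than a supported calling sequence:
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);
 *	txg_rele_to_quiesce(&th);	(the txg may now leave "open")
 *	... modify in-memory state charged to txg ...
 *	txg_rele_to_sync(&th);		(the txg may now finish quiescing)
 */
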
/*
 * Blocks until all transactions in the group are committed.
 *
 * On return, the transaction group has reached a stable state in which it can
 * then be passed off to the syncing context.
 */
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	uint64_t tx_open_time;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tc_open_locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_open_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;
	tx->tx_open_time = tx_open_time = gethrtime();

	DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
	DTRACE_PROBE2(txg__opened, dsl_pool_t *, dp, uint64_t, tx->tx_open_txg);

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_open_lock);

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time);
	spa_txg_history_add(dp->dp_spa, txg + 1, tx_open_time);

	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * call txg_rele_to_sync().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_QUIESCED, gethrtime());
}

static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 *
 * If no callbacks are registered for a given TXG, nothing happens.
 * This function creates a taskq for the associated pool, if needed.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/*
		 * No need to lock tx_cpu_t at this point, since this can
		 * only be called once a txg has been synced.
		 */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    boot_ncpus, defclsyspri, boot_ncpus, boot_ncpus * 2,
			    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}

/*
 * Wait for pending commit callbacks of already-synced transactions to finish
 * processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_wait_outstanding(tx->tx_commit_cb_taskq, 0);
}

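/*
 * Commit callbacks enter this machinery via dmu_tx_callback_register().
 * A hedged consumer-side sketch (my_commit_cb and arg are hypothetical):
 *
 *	static void
 *	my_commit_cb(void *arg, int error)
 *	{
 *		(error is 0 if the txg reached stable storage, nonzero
 *		if the callback list was torn down without syncing)
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, arg);
 *	dmu_tx_commit(tx);
 *
 * The callback later runs on the tx_commit_cb taskq, after the assigned
 * txg syncs; txg_wait_callbacks() above drains such dispatched work.
 */
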
static boolean_t
txg_is_syncing(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_syncing_txg != 0);
}

static boolean_t
txg_is_quiescing(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_quiescing_txg != 0);
}

static boolean_t
txg_has_quiesced_to_sync(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_quiesced_txg != 0);
}

static void
txg_sync_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	clock_t start, delta;

	(void) spl_fstrans_mark();
	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		clock_t timeout = zfs_txg_timeout * hz;
		clock_t timer;
		uint64_t txg;
		uint64_t dirty_min_bytes =
		    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;

		/*
		 * We sync when we're scanning, when there's someone
		 * waiting on us, when the quiesce thread has handed off
		 * a txg to us, when we have reached our timeout, or when
		 * enough dirty data has accumulated.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    !txg_has_quiesced_to_sync(dp) &&
		    dp->dp_dirty_total < dirty_min_bytes) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && !txg_has_quiesced_to_sync(dp)) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		ASSERT(tx->tx_quiesced_txg != 0);
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		txg_stat_t *ts = spa_txg_history_init_io(spa, txg, dp);
		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;
		spa_txg_history_fini_io(spa, ts);

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}

static void
txg_quiesce_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    txg_has_quiesced_to_sync(dp)))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		tx->tx_quiescing_txg = txg;

		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiescing_txg = 0;
		tx->tx_quiesced_txg = txg;
		DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}

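/*
 * Taken together, the two threads above form a simple pipeline (sketch):
 *
 *	open txg --[quiesce thread: txg_quiesce()]--> tx_quiesced_txg
 *	         --[sync thread: spa_sync()]--------> tx_synced_txg
 *
 * with tx_quiesce_more_cv, tx_quiesce_done_cv, tx_sync_more_cv, and
 * tx_sync_done_cv providing the handoff signals between the stages.
 */
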
/*
 * Delay this thread by 'delay' nanoseconds if we are still in the open
 * transaction group and there is already a waiting txg quiescing or quiesced.
 * Abort the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
{
	tx_state_t *tx = &dp->dp_tx;
	hrtime_t start = gethrtime();

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (gethrtime() - start < delay &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp)) {
		(void) cv_timedwait_hires(&tx->tx_quiesce_more_cv,
		    &tx->tx_sync_lock, delay, resolution, 0);
	}

	DMU_TX_STAT_BUMP(dmu_tx_delay);

	mutex_exit(&tx->tx_sync_lock);
}

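/*
 * A hedged usage sketch for txg_delay(); the caller below is
 * hypothetical.  A throttle that has charged work to 'txg' can back
 * off without blocking the pipeline:
 *
 *	txg_delay(dp, txg, MSEC2NSEC(10), MSEC2NSEC(1));
 *
 * sleeps up to 10ms at 1ms resolution, returning early if 'txg' is
 * already on its way to disk (there is no point throttling it then)
 * or if the pipeline stalls.
 */
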
static boolean_t
txg_wait_synced_impl(dsl_pool_t *dp, uint64_t txg, boolean_t wait_sig)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT3U(tx->tx_threads, ==, 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%px\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		if (wait_sig) {
			/*
			 * Condition wait here but stop if the thread receives a
			 * signal. The caller may call txg_wait_synced*() again
			 * to resume waiting for this txg.
			 */
			if (cv_wait_io_sig(&tx->tx_sync_done_cv,
			    &tx->tx_sync_lock) == 0) {
				mutex_exit(&tx->tx_sync_lock);
				return (B_TRUE);
			}
		} else {
			cv_wait_io(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
		}
	}
	mutex_exit(&tx->tx_sync_lock);
	return (B_FALSE);
}

void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	VERIFY0(txg_wait_synced_impl(dp, txg, B_FALSE));
}

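/*
 * txg_wait_synced() is the basic durability barrier: passing txg == 0
 * waits for everything dirty at the time of the call.  An illustrative
 * sketch of making a just-committed change durable:
 *
 *	uint64_t txg = dmu_tx_get_txg(tx);	(save before committing)
 *	dmu_tx_commit(tx);
 *	txg_wait_synced(dmu_objset_pool(os), txg);
 *
 * On return, everything assigned to 'txg' is on stable storage.
 */
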
/*
 * Similar to txg_wait_synced(), but it can be interrupted by a signal.
 * Returns B_TRUE if the thread was signaled while waiting.
 */
boolean_t
txg_wait_synced_sig(dsl_pool_t *dp, uint64_t txg)
{
	return (txg_wait_synced_impl(dp, txg, B_TRUE));
}

/*
 * Wait for the specified open transaction group.  Set should_quiesce
 * when the current open txg should be quiesced immediately.
 */
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT3U(tx->tx_threads, ==, 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg && should_quiesce)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		/*
		 * Callers setting should_quiesce will use cv_wait_io() and
		 * be accounted for as iowait time.  Otherwise, the caller is
		 * understood to be idle and cv_wait_idle() is used to prevent
		 * incorrectly inflating the system load average.
		 */
		if (should_quiesce == B_TRUE) {
			cv_wait_io(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
		} else {
			cv_wait_idle(&tx->tx_quiesce_done_cv,
			    &tx->tx_sync_lock);
		}
	}
	mutex_exit(&tx->tx_sync_lock);
}

/*
 * If there isn't a txg syncing or in the pipeline, push another txg through
 * the pipeline by quiescing the open txg.
 */
void
txg_kick(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	if (!txg_is_syncing(dp) &&
	    !txg_is_quiescing(dp) &&
	    tx->tx_quiesce_txg_waiting <= tx->tx_open_txg &&
	    tx->tx_sync_txg_waiting <= tx->tx_synced_txg &&
	    tx->tx_quiesced_txg <= tx->tx_synced_txg) {
		tx->tx_quiesce_txg_waiting = tx->tx_open_txg + 1;
		cv_broadcast(&tx->tx_quiesce_more_cv);
	}
	mutex_exit(&tx->tx_sync_lock);
}

boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

/*
 * Verify that this txg is active (open, quiescing, syncing).  Non-active
 * txgs should not be manipulated.
 */
#ifdef ZFS_DEBUG
void
txg_verify(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp __maybe_unused = spa_get_dsl(spa);
	if (txg <= TXG_INITIAL || txg == ZILTEST_TXG)
		return;
	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_synced_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_open_txg - TXG_CONCURRENT_STATES);
}
#endif

/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, spa_t *spa, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;
	tl->tl_spa = spa;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

static boolean_t
txg_list_empty_impl(txg_list_t *tl, uint64_t txg)
{
	ASSERT(MUTEX_HELD(&tl->tl_lock));
	TXG_VERIFY(tl->tl_spa, txg);
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	mutex_enter(&tl->tl_lock);
	boolean_t ret = txg_list_empty_impl(tl, txg);
	mutex_exit(&tl->tl_lock);

	return (ret);
}

void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	mutex_enter(&tl->tl_lock);
	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty_impl(tl, t));
	mutex_exit(&tl->tl_lock);

	mutex_destroy(&tl->tl_lock);
}

/*
 * Returns true if all txg lists are empty.
 *
 * Warning: this is inherently racy (an item could be added immediately
 * after this function returns).
 */
boolean_t
txg_all_lists_empty(txg_list_t *tl)
{
	mutex_enter(&tl->tl_lock);
	for (int i = 0; i < TXG_SIZE; i++) {
		if (!txg_list_empty_impl(tl, i)) {
			mutex_exit(&tl->tl_lock);
			return (B_FALSE);
		}
	}
	mutex_exit(&tl->tl_lock);
	return (B_TRUE);
}

/*
 * Add an entry to the list (unless it's already on the list).
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

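/*
 * A hedged sketch of the embedding pattern these lists expect; my_obj_t
 * and its field are hypothetical (vdevs, for example, embed a txg_node_t
 * for per-txg dirty processing):
 *
 *	typedef struct my_obj {
 *		...
 *		txg_node_t	mo_txg_node;	(one node, TXG_SIZE slots)
 *	} my_obj_t;
 *
 *	txg_list_create(&tl, spa, offsetof(my_obj_t, mo_txg_node));
 *	(void) txg_list_add(&tl, obj, txg);	(no-op if already a member)
 *
 * and later, typically in syncing context for 'txg':
 *
 *	while ((obj = txg_list_remove(&tl, txg)) != NULL)
 *		(process obj)
 */
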
/*
 * Add an entry to the end of the list, unless it's already on the list.
 * (walks list to find end)
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		ASSERT(tn->tn_member[t]);
		ASSERT(tn->tn_next[t] == NULL || tn->tn_next[t]->tn_member[t]);
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	TXG_VERIFY(tl->tl_spa, txg);
	return (tn->tn_member[t] != 0);
}

/*
 * Walk a txg list
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;

	mutex_enter(&tl->tl_lock);
	tn = tl->tl_head[t];
	mutex_exit(&tl->tl_lock);

	TXG_VERIFY(tl->tl_spa, txg);
	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	TXG_VERIFY(tl->tl_spa, txg);

	mutex_enter(&tl->tl_lock);
	tn = tn->tn_next[t];
	mutex_exit(&tl->tl_lock);

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_txg, zfs_txg_, timeout, INT, ZMOD_RW,
	"Max seconds worth of delta per txg");
/* END CSTYLED */