/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
 */

#ifndef _SYS_TXG_IMPL_H
#define	_SYS_TXG_IMPL_H

#include <sys/spa.h>
#include <sys/txg.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * The tx_cpu structure is a per-cpu structure that is used to track
 * the number of active transaction holds (tc_count). As transactions
 * are assigned into a transaction group the appropriate tc_count is
 * incremented to indicate that there are pending changes that have yet
 * to quiesce. Consumers eventually call txg_rele_to_sync() to decrement
 * the tc_count. A transaction group is not considered quiesced until all
 * tx_cpu structures have reached a tc_count of zero.
 *
 * This structure is a per-cpu structure by design. Updates to this structure
 * are frequent and concurrent. Having a single structure would result in
 * heavy lock contention, so a per-cpu design was implemented. With the fanned
 * out mutex design, consumers only need to lock the mutex associated with
 * the thread's cpu.
 *
 * The tx_cpu contains two locks, the tc_lock and tc_open_lock.
 * The tc_lock is used to protect all members of the tx_cpu structure with
 * the exception of the tc_open_lock. This lock should only be held for a
 * short period of time, typically when updating the value of tc_count.
 *
 * The tc_open_lock protects the tx_open_txg member of the tx_state structure.
 * This lock is used to ensure that transactions are only assigned into
 * the current open transaction group. In order to move the current open
 * transaction group to the quiesce phase, the txg_quiesce thread must
 * grab all tc_open_locks, increment the tx_open_txg, and drop the locks.
 * The tc_open_lock is held until the transaction is assigned into the
 * transaction group. Typically, this is a short operation but if throttling
 * is occurring it may be held for longer periods of time. An illustrative
 * sketch of this quiesce flow follows the structure definition below.
 */
struct tx_cpu {
	kmutex_t	tc_open_lock;	/* protects tx_open_txg */
	kmutex_t	tc_lock;	/* protects the rest of this struct */
	kcondvar_t	tc_cv[TXG_SIZE];
	uint64_t	tc_count[TXG_SIZE];	/* tx hold count on each txg */
	list_t		tc_callbacks[TXG_SIZE]; /* commit cb list */
	char		tc_pad[8];		/* pad to fill 3 cache lines */
};

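/*
 * Illustrative sketch only, not the authoritative implementation (see
 * txg_quiesce() in txg.c for the real code): roughly how a quiesce pass
 * could use tc_open_lock and tc_count under the design described above.
 * The tx_state_t pointer "tx", the cpu count "ncpus", and the txg being
 * quiesced are assumed to be supplied by the caller.
 *
 *	Block new assignments into this txg, then advance the open txg:
 *
 *		for (c = 0; c < ncpus; c++)
 *			mutex_enter(&tx->tx_cpu[c].tc_open_lock);
 *		tx->tx_open_txg++;
 *		for (c = 0; c < ncpus; c++)
 *			mutex_exit(&tx->tx_cpu[c].tc_open_lock);
 *
 *	Then wait for every per-cpu hold count on this txg to drain:
 *
 *		g = txg & TXG_MASK;
 *		for (c = 0; c < ncpus; c++) {
 *			tx_cpu_t *tc = &tx->tx_cpu[c];
 *			mutex_enter(&tc->tc_lock);
 *			while (tc->tc_count[g] != 0)
 *				cv_wait(&tc->tc_cv[g], &tc->tc_lock);
 *			mutex_exit(&tc->tc_lock);
 *		}
 */
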
/*
 * The tx_state structure maintains the state information about the different
 * stages of the pool's transaction groups. A per-pool tx_state structure
 * is used to track this information. The tx_state structure also points to
 * an array of tx_cpu structures (described above). Although the tx_sync_lock
 * is used to protect the members of this structure, it is not used to
 * protect the tx_open_txg. Instead a special lock in the tx_cpu structure
 * is used. Readers of tx_open_txg must grab the per-cpu tc_open_lock.
 * Any thread wishing to update tx_open_txg must grab the tc_open_lock on
 * every cpu (see txg_quiesce()). A reader-side sketch follows the structure
 * definition below.
 */
typedef struct tx_state {
	tx_cpu_t	*tx_cpu;	/* protects access to tx_open_txg */
	kmutex_t	tx_sync_lock;	/* protects the rest of this struct */

	uint64_t	tx_open_txg;	/* currently open txg id */
	uint64_t	tx_quiescing_txg; /* currently quiescing txg id */
	uint64_t	tx_quiesced_txg; /* quiesced txg waiting for sync */
	uint64_t	tx_syncing_txg;	/* currently syncing txg id */
	uint64_t	tx_synced_txg;	/* last synced txg id */

	hrtime_t	tx_open_time;	/* start time of tx_open_txg */

	uint64_t	tx_sync_txg_waiting; /* txg we're waiting to sync */
	uint64_t	tx_quiesce_txg_waiting; /* txg we're waiting to open */

	kcondvar_t	tx_sync_more_cv;
	kcondvar_t	tx_sync_done_cv;
	kcondvar_t	tx_quiesce_more_cv;
	kcondvar_t	tx_quiesce_done_cv;
	kcondvar_t	tx_timeout_cv;
	kcondvar_t	tx_exit_cv;	/* wait for all threads to exit */

	uint8_t		tx_threads;	/* number of threads */
	uint8_t		tx_exiting;	/* set when we're exiting */

	kthread_t	*tx_sync_thread;
	kthread_t	*tx_quiesce_thread;

	taskq_t		*tx_commit_cb_taskq; /* commit callback taskq */
} tx_state_t;

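/*
 * Illustrative sketch only, not the authoritative implementation (see
 * txg_hold_open() in txg.c for the real code): how a consumer might assign
 * a transaction into the currently open txg under the locking rules
 * described above. The tx_state_t pointer "tx" and the per-cpu index "cid"
 * are assumed to be supplied by the caller; tc_open_lock stays held until
 * the assignment into the txg is complete, at which point the caller
 * drops it.
 *
 *	tx_cpu_t *tc = &tx->tx_cpu[cid];
 *	uint64_t txg;
 *
 *	mutex_enter(&tc->tc_open_lock);
 *	txg = tx->tx_open_txg;
 *
 *	mutex_enter(&tc->tc_lock);
 *	tc->tc_count[txg & TXG_MASK]++;
 *	mutex_exit(&tc->tc_lock);
 */
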
#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_TXG_IMPL_H */