xref: /titanic_52/usr/src/uts/common/fs/zfs/dsl_synctask.c (revision 269e59f9a28bf47e0f463e64fc5af4a408b73b21)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/metaslab.h>

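/*
 * Average space charged per modified block: 1 << 14 = 16K.  The
 * blocks_modified count passed to dsl_sync_task_create() is scaled by
 * this amount to produce the dstg_space estimate that is checked in
 * dsl_sync_task_group_sync().
 */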
#define	DST_AVG_BLKSHIFT 14

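/*
 * Default checkfunc, used when a sync task is created with a NULL
 * checkfunc; it always reports success.
 */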
/* ARGSUSED */
static int
dsl_null_checkfunc(void *arg1, void *arg2, dmu_tx_t *tx)
{
	return (0);
}

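/*
 * Allocate an empty sync task group associated with pool 'dp'.
 */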
dsl_sync_task_group_t *
dsl_sync_task_group_create(dsl_pool_t *dp)
{
	dsl_sync_task_group_t *dstg;

	dstg = kmem_zalloc(sizeof (dsl_sync_task_group_t), KM_SLEEP);
	list_create(&dstg->dstg_tasks, sizeof (dsl_sync_task_t),
	    offsetof(dsl_sync_task_t, dst_node));
	dstg->dstg_pool = dp;

	return (dstg);
}

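/*
 * Add a task to the group.  'checkfunc' (which may be NULL, meaning no
 * check) validates the operation; 'syncfunc' performs it in syncing
 * context.  'blocks_modified' is an estimate of how many blocks the task
 * will dirty, used for the group's space check.
 */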
void
dsl_sync_task_create(dsl_sync_task_group_t *dstg,
    dsl_checkfunc_t *checkfunc, dsl_syncfunc_t *syncfunc,
    void *arg1, void *arg2, int blocks_modified)
{
	dsl_sync_task_t *dst;

	if (checkfunc == NULL)
		checkfunc = dsl_null_checkfunc;
	dst = kmem_zalloc(sizeof (dsl_sync_task_t), KM_SLEEP);
	dst->dst_checkfunc = checkfunc;
	dst->dst_syncfunc = syncfunc;
	dst->dst_arg1 = arg1;
	dst->dst_arg2 = arg2;
	list_insert_tail(&dstg->dstg_tasks, dst);

	dstg->dstg_space += blocks_modified << DST_AVG_BLKSHIFT;
}

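/*
 * Dispatch the group to syncing context and wait for it to complete.
 * The checkfuncs are first run here as a preliminary, open-context check;
 * if any of them fails, the group is not dispatched and that error is
 * returned immediately.  If the group fails with EAGAIN in syncing
 * context, wait TXG_DEFER_SIZE additional txgs and retry from the top.
 */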
int
dsl_sync_task_group_wait(dsl_sync_task_group_t *dstg)
{
	dmu_tx_t *tx;
	uint64_t txg;
	dsl_sync_task_t *dst;

top:
	tx = dmu_tx_create_dd(dstg->dstg_pool->dp_mos_dir);
	VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

	txg = dmu_tx_get_txg(tx);

	/* Do a preliminary error check. */
	dstg->dstg_err = 0;
	rw_enter(&dstg->dstg_pool->dp_config_rwlock, RW_READER);
	for (dst = list_head(&dstg->dstg_tasks); dst;
	    dst = list_next(&dstg->dstg_tasks, dst)) {
#ifdef ZFS_DEBUG
		/*
		 * Only check half the time; otherwise, the sync-context
		 * check will almost never fail.
		 */
		if (spa_get_random(2) == 0)
			continue;
#endif
		dst->dst_err =
		    dst->dst_checkfunc(dst->dst_arg1, dst->dst_arg2, tx);
		if (dst->dst_err)
			dstg->dstg_err = dst->dst_err;
	}
	rw_exit(&dstg->dstg_pool->dp_config_rwlock);

	if (dstg->dstg_err) {
		dmu_tx_commit(tx);
		return (dstg->dstg_err);
	}

	/*
	 * We don't generally have many sync tasks, so pay the price of
	 * add_tail to get the tasks executed in the right order.
	 */
	VERIFY(0 == txg_list_add_tail(&dstg->dstg_pool->dp_sync_tasks,
	    dstg, txg));

	dmu_tx_commit(tx);

	txg_wait_synced(dstg->dstg_pool, txg);

	if (dstg->dstg_err == EAGAIN) {
		txg_wait_synced(dstg->dstg_pool, txg + TXG_DEFER_SIZE);
		goto top;
	}

	return (dstg->dstg_err);
}

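/*
 * Dispatch the group to syncing context without waiting for it.  The
 * caller supplies an already-assigned transaction; the group is executed
 * (and then destroyed) when that txg syncs.
 */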
void
dsl_sync_task_group_nowait(dsl_sync_task_group_t *dstg, dmu_tx_t *tx)
{
	uint64_t txg;

	dstg->dstg_nowaiter = B_TRUE;
	txg = dmu_tx_get_txg(tx);
	/*
	 * We don't generally have many sync tasks, so pay the price of
	 * add_tail to get the tasks executed in the right order.
	 */
	VERIFY(0 == txg_list_add_tail(&dstg->dstg_pool->dp_sync_tasks,
	    dstg, txg));
}

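/*
 * Free the group and all of its tasks.
 */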
void
dsl_sync_task_group_destroy(dsl_sync_task_group_t *dstg)
{
	dsl_sync_task_t *dst;

	while ((dst = list_head(&dstg->dstg_tasks)) != NULL) {
		list_remove(&dstg->dstg_tasks, dst);
		kmem_free(dst, sizeof (dsl_sync_task_t));
	}
	kmem_free(dstg, sizeof (dsl_sync_task_group_t));
}

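/*
 * Execute the group in syncing context: verify that the pool has room for
 * the estimated (triple-dittoed) MOS writes, re-run each checkfunc with
 * the config lock held as writer, and if every check passes, run each
 * syncfunc in order.  Any error is recorded in dstg_err for the waiter;
 * nowait groups are destroyed here.
 */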
void
dsl_sync_task_group_sync(dsl_sync_task_group_t *dstg, dmu_tx_t *tx)
{
	dsl_sync_task_t *dst;
	dsl_pool_t *dp = dstg->dstg_pool;
	uint64_t quota, used;

	ASSERT3U(dstg->dstg_err, ==, 0);

	/*
	 * Check for sufficient space.  We just check against what's
	 * on-disk; we don't want any in-flight accounting to get in our
	 * way, because open context may have already used up various
	 * in-core limits (arc_tempreserve, dsl_pool_tempreserve).
	 */
	quota = dsl_pool_adjustedsize(dp, B_FALSE) -
	    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
	used = dp->dp_root_dir->dd_phys->dd_used_bytes;
	/* MOS space is triple-dittoed, so we multiply by 3. */
	if (dstg->dstg_space > 0 && used + dstg->dstg_space * 3 > quota) {
		dstg->dstg_err = ENOSPC;
		return;
	}

	/*
	 * Check for errors by calling checkfuncs.
	 */
	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	for (dst = list_head(&dstg->dstg_tasks); dst;
	    dst = list_next(&dstg->dstg_tasks, dst)) {
		dst->dst_err =
		    dst->dst_checkfunc(dst->dst_arg1, dst->dst_arg2, tx);
		if (dst->dst_err)
			dstg->dstg_err = dst->dst_err;
	}

	if (dstg->dstg_err == 0) {
		/*
		 * Execute sync tasks.
		 */
		for (dst = list_head(&dstg->dstg_tasks); dst;
		    dst = list_next(&dstg->dstg_tasks, dst)) {
			dst->dst_syncfunc(dst->dst_arg1, dst->dst_arg2, tx);
		}
	}
	rw_exit(&dp->dp_config_rwlock);

	if (dstg->dstg_nowaiter)
		dsl_sync_task_group_destroy(dstg);
}

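/*
 * Convenience wrapper: create a group containing a single task, wait for
 * it to complete, and return its error.
 */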
int
dsl_sync_task_do(dsl_pool_t *dp,
    dsl_checkfunc_t *checkfunc, dsl_syncfunc_t *syncfunc,
    void *arg1, void *arg2, int blocks_modified)
{
	dsl_sync_task_group_t *dstg;
	int err;

	dstg = dsl_sync_task_group_create(dp);
	dsl_sync_task_create(dstg, checkfunc, syncfunc,
	    arg1, arg2, blocks_modified);
	err = dsl_sync_task_group_wait(dstg);
	dsl_sync_task_group_destroy(dstg);
	return (err);
}

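/*
 * Convenience wrapper: create a single-task group and dispatch it without
 * waiting.  The group frees itself once it has run in syncing context.
 */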
void
dsl_sync_task_do_nowait(dsl_pool_t *dp,
    dsl_checkfunc_t *checkfunc, dsl_syncfunc_t *syncfunc,
    void *arg1, void *arg2, int blocks_modified, dmu_tx_t *tx)
{
	dsl_sync_task_group_t *dstg;

	dstg = dsl_sync_task_group_create(dp);
	dsl_sync_task_create(dstg, checkfunc, syncfunc,
	    arg1, arg2, blocks_modified);
	dsl_sync_task_group_nowait(dstg, tx);
}