xref: /linux/drivers/md/persistent-data/dm-transaction-manager.c (revision c4ee0af3fa0dc65f690fc908f02b8355f9576ea0)
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-transaction-manager.h"
#include "dm-space-map.h"
#include "dm-space-map-disk.h"
#include "dm-space-map-metadata.h"
#include "dm-persistent-data-internal.h"

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "transaction manager"

/*----------------------------------------------------------------*/

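/*
 * The transaction manager keeps a small hash table of the blocks that
 * have already been shadowed within the current transaction, so a given
 * block is only copied once per transaction.
 */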
struct shadow_info {
        struct hlist_node hlist;
        dm_block_t where;
};

/*
 * It would be nice if this hash table scaled with the size of the
 * transaction.
 */
#define DM_HASH_SIZE 256
#define DM_HASH_MASK (DM_HASH_SIZE - 1)

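/*
 * A transaction manager is either the real thing, wrapping a block
 * manager and a space map, or a non-blocking clone that just points
 * back at the real one through 'real'.  The buckets table, protected
 * by 'lock', records which blocks have been shadowed this transaction.
 */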
struct dm_transaction_manager {
        int is_clone;
        struct dm_transaction_manager *real;

        struct dm_block_manager *bm;
        struct dm_space_map *sm;

        spinlock_t lock;
        struct hlist_head buckets[DM_HASH_SIZE];
};

/*----------------------------------------------------------------*/

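/*
 * Returns 1 if block b has already been shadowed in this transaction.
 */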
static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
        int r = 0;
        unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
        struct shadow_info *si;

        spin_lock(&tm->lock);
        hlist_for_each_entry(si, tm->buckets + bucket, hlist)
                if (si->where == b) {
                        r = 1;
                        break;
                }
        spin_unlock(&tm->lock);

        return r;
}

/*
 * This can silently fail if there's no memory.  We're ok with this since
 * creating redundant shadows causes no harm.
 */
static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
        unsigned bucket;
        struct shadow_info *si;

        si = kmalloc(sizeof(*si), GFP_NOIO);
        if (si) {
                si->where = b;
                bucket = dm_hash_block(b, DM_HASH_MASK);
                spin_lock(&tm->lock);
                hlist_add_head(&si->hlist, tm->buckets + bucket);
                spin_unlock(&tm->lock);
        }
}

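/*
 * Frees every entry in the shadow table.  Called on commit, so the next
 * transaction starts with an empty table, and on destroy to release the
 * memory.
 */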
static void wipe_shadow_table(struct dm_transaction_manager *tm)
{
        struct shadow_info *si;
        struct hlist_node *tmp;
        struct hlist_head *bucket;
        int i;

        spin_lock(&tm->lock);
        for (i = 0; i < DM_HASH_SIZE; i++) {
                bucket = tm->buckets + i;
                hlist_for_each_entry_safe(si, tmp, bucket, hlist)
                        kfree(si);

                INIT_HLIST_HEAD(bucket);
        }

        spin_unlock(&tm->lock);
}

/*----------------------------------------------------------------*/

static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
                                                   struct dm_space_map *sm)
{
        int i;
        struct dm_transaction_manager *tm;

        tm = kmalloc(sizeof(*tm), GFP_KERNEL);
        if (!tm)
                return ERR_PTR(-ENOMEM);

        tm->is_clone = 0;
        tm->real = NULL;
        tm->bm = bm;
        tm->sm = sm;

        spin_lock_init(&tm->lock);
        for (i = 0; i < DM_HASH_SIZE; i++)
                INIT_HLIST_HEAD(tm->buckets + i);

        return tm;
}

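/*
 * A non-blocking clone is a lightweight, read-only view onto 'real'.
 * It has no shadow table of its own; reads go through
 * dm_bm_read_try_lock() (see dm_tm_read_lock() below), and paths that
 * would modify metadata are rejected.
 */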
struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
{
        struct dm_transaction_manager *tm;

        tm = kmalloc(sizeof(*tm), GFP_KERNEL);
        if (tm) {
                tm->is_clone = 1;
                tm->real = real;
        }

        return tm;
}
EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);

void dm_tm_destroy(struct dm_transaction_manager *tm)
{
        if (!tm->is_clone)
                wipe_shadow_table(tm);

        kfree(tm);
}
EXPORT_SYMBOL_GPL(dm_tm_destroy);

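/*
 * Committing happens in two stages: dm_tm_pre_commit() asks the space
 * map to commit its changes, the caller then writes its superblock and
 * passes it (write locked) as 'root' to dm_tm_commit(), which empties
 * the shadow table and flushes everything out via
 * dm_bm_flush_and_unlock().
 */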
int dm_tm_pre_commit(struct dm_transaction_manager *tm)
{
        int r;

        if (tm->is_clone)
                return -EWOULDBLOCK;

        r = dm_sm_commit(tm->sm);
        if (r < 0)
                return r;

        return 0;
}
EXPORT_SYMBOL_GPL(dm_tm_pre_commit);

int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
{
        if (tm->is_clone)
                return -EWOULDBLOCK;

        wipe_shadow_table(tm);

        return dm_bm_flush_and_unlock(tm->bm, root);
}
EXPORT_SYMBOL_GPL(dm_tm_commit);

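/*
 * Allocates a fresh block from the space map and returns it zeroed and
 * write locked.
 */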
int dm_tm_new_block(struct dm_transaction_manager *tm,
                    struct dm_block_validator *v,
                    struct dm_block **result)
{
        int r;
        dm_block_t new_block;

        if (tm->is_clone)
                return -EWOULDBLOCK;

        r = dm_sm_new_block(tm->sm, &new_block);
        if (r < 0)
                return r;

        r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
        if (r < 0) {
                dm_sm_dec_block(tm->sm, new_block);
                return r;
        }

        /*
         * New blocks count as shadows in that they don't need to be
         * shadowed again.
         */
        insert_shadow(tm, new_block);

        return 0;
}

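/*
 * The copy-on-write step: allocate a new block, drop a reference on the
 * original, then copy the original's contents into the new block, which
 * is returned write locked.
 */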
static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
                          struct dm_block_validator *v,
                          struct dm_block **result)
{
        int r;
        dm_block_t new;
        struct dm_block *orig_block;

        r = dm_sm_new_block(tm->sm, &new);
        if (r < 0)
                return r;

        r = dm_sm_dec_block(tm->sm, orig);
        if (r < 0)
                return r;

        r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
        if (r < 0)
                return r;

        /*
         * It would be tempting to use dm_bm_unlock_move here, but some
         * code, such as the space maps, keeps using the old data structures
         * secure in the knowledge they won't be changed until the next
         * transaction.  Using unlock_move would force a synchronous read
         * since the old block would no longer be in the cache.
         */
        r = dm_bm_write_lock_zero(tm->bm, new, v, result);
        if (r) {
                dm_bm_unlock(orig_block);
                return r;
        }

        memcpy(dm_block_data(*result), dm_block_data(orig_block),
               dm_bm_block_size(tm->bm));

        dm_bm_unlock(orig_block);
        return r;
}

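/*
 * If orig has already been shadowed this transaction and only has a
 * single reference, it can be write locked and modified in place.
 * Otherwise it is copied.  *inc_children is set when the block was
 * shared, telling the caller to increment the reference counts of
 * whatever the block points to.
 */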
int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
                       struct dm_block_validator *v, struct dm_block **result,
                       int *inc_children)
{
        int r;

        if (tm->is_clone)
                return -EWOULDBLOCK;

        r = dm_sm_count_is_more_than_one(tm->sm, orig, inc_children);
        if (r < 0)
                return r;

        if (is_shadow(tm, orig) && !*inc_children)
                return dm_bm_write_lock(tm->bm, orig, v, result);

        r = __shadow_block(tm, orig, v, result);
        if (r < 0)
                return r;
        insert_shadow(tm, dm_block_location(*result));

        return r;
}
EXPORT_SYMBOL_GPL(dm_tm_shadow_block);

int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
                    struct dm_block_validator *v,
                    struct dm_block **blk)
{
        if (tm->is_clone)
                return dm_bm_read_try_lock(tm->real->bm, b, v, blk);

        return dm_bm_read_lock(tm->bm, b, v, blk);
}
EXPORT_SYMBOL_GPL(dm_tm_read_lock);

int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
        return dm_bm_unlock(b);
}
EXPORT_SYMBOL_GPL(dm_tm_unlock);

void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
{
        /*
         * The non-blocking clone doesn't support this.
         */
        BUG_ON(tm->is_clone);

        dm_sm_inc_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_inc);

void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
{
        /*
         * The non-blocking clone doesn't support this.
         */
        BUG_ON(tm->is_clone);

        dm_sm_dec_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_dec);

int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
              uint32_t *result)
{
        if (tm->is_clone)
                return -EWOULDBLOCK;

        return dm_sm_get_count(tm->sm, b, result);
}

struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
{
        return tm->bm;
}

/*----------------------------------------------------------------*/

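/*
 * Common helper for dm_tm_create_with_sm() and dm_tm_open_with_sm():
 * builds the metadata space map and the transaction manager together,
 * then either formats a fresh space map (create) or reopens one from
 * the root saved by a previous transaction (sm_root, sm_len).
 */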
static int dm_tm_create_internal(struct dm_block_manager *bm,
                                 dm_block_t sb_location,
                                 struct dm_transaction_manager **tm,
                                 struct dm_space_map **sm,
                                 int create,
                                 void *sm_root, size_t sm_len)
{
        int r;

        *sm = dm_sm_metadata_init();
        if (IS_ERR(*sm))
                return PTR_ERR(*sm);

        *tm = dm_tm_create(bm, *sm);
        if (IS_ERR(*tm)) {
                dm_sm_destroy(*sm);
                return PTR_ERR(*tm);
        }

        if (create) {
                r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
                                          sb_location);
                if (r) {
                        DMERR("couldn't create metadata space map");
                        goto bad;
                }

        } else {
                r = dm_sm_metadata_open(*sm, *tm, sm_root, sm_len);
                if (r) {
                        DMERR("couldn't open metadata space map");
                        goto bad;
                }
        }

        return 0;

bad:
        dm_tm_destroy(*tm);
        dm_sm_destroy(*sm);
        return r;
}

int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
                         struct dm_transaction_manager **tm,
                         struct dm_space_map **sm)
{
        return dm_tm_create_internal(bm, sb_location, tm, sm, 1, NULL, 0);
}
EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);

int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
                       void *sm_root, size_t root_len,
                       struct dm_transaction_manager **tm,
                       struct dm_space_map **sm)
{
        return dm_tm_create_internal(bm, sb_location, tm, sm, 0, sm_root, root_len);
}
EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);

/*----------------------------------------------------------------*/