xref: /linux/drivers/md/dm-stripe.c (revision c118478665f467e57d06b2354de65974b246b82b)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/log2.h>

static struct workqueue_struct *dm_stripe_wq;

#define DM_MSG_PREFIX "striped"
#define DM_IO_ERROR_THRESHOLD 15

struct stripe {
	struct dm_dev *dev;
	sector_t physical_start;

	atomic_t error_count;
};

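/*
 * Note on the *_shift fields below: when stripes or chunk_size is a
 * power of two, the corresponding shift field caches its log2 so the
 * hot mapping path can use masks and shifts; otherwise it is -1 and
 * the code falls back to 64-bit division via sector_div().
 */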
struct stripe_c {
	uint32_t stripes;
	int stripes_shift;

	/* The size of this target / num. stripes */
	sector_t stripe_width;

	uint32_t chunk_size;
	int chunk_size_shift;

	/* Needed for handling events */
	struct dm_target *ti;

	/* Work struct used for triggering events */
	struct work_struct trigger_event;

	struct stripe stripe[] __counted_by(stripes);
};

/*
 * An event is triggered whenever a drive
 * drops out of a stripe volume.
 */
static void trigger_event(struct work_struct *work)
{
	struct stripe_c *sc = container_of(work, struct stripe_c,
					   trigger_event);
	dm_table_event(sc->ti->table);
}

/*
 * Parse a single <dev> <sector> pair
 */
static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
		      unsigned int stripe, char **argv)
{
	unsigned long long start;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1)
		return -EINVAL;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &sc->stripe[stripe].dev);
	if (ret)
		return ret;

	sc->stripe[stripe].physical_start = start;

	return 0;
}

/*
 * Construct a striped mapping.
 * <number of stripes> <chunk size> [<dev_path> <offset>]+
 */
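/*
 * For illustration only (hypothetical devices): striping 1024 sectors
 * across two devices with 64-sector (32 KiB) chunks could be loaded
 * with a table line such as
 *
 *	0 1024 striped 2 64 /dev/sda1 0 /dev/sdb1 0
 *
 * The target length must divide evenly by the stripe count, and the
 * resulting per-stripe width by the chunk size, or the constructor
 * below rejects the table.
 */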
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct stripe_c *sc;
	sector_t width, tmp_len;
	uint32_t stripes;
	uint32_t chunk_size;
	int r;
	unsigned int i;

	if (argc < 2) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	if (kstrtouint(argv[0], 10, &stripes) || !stripes) {
		ti->error = "Invalid stripe count";
		return -EINVAL;
	}

	if (kstrtouint(argv[1], 10, &chunk_size) || !chunk_size) {
		ti->error = "Invalid chunk_size";
		return -EINVAL;
	}

	width = ti->len;
	if (sector_div(width, stripes)) {
		ti->error = "Target length not divisible by number of stripes";
		return -EINVAL;
	}

	tmp_len = width;
	if (sector_div(tmp_len, chunk_size)) {
		ti->error = "Target length not divisible by chunk size";
		return -EINVAL;
	}

	/*
	 * Do we have enough arguments for that many stripes?
	 */
	if (argc != (2 + 2 * stripes)) {
		ti->error = "Not enough destinations specified";
		return -EINVAL;
	}

	sc = kmalloc(struct_size(sc, stripe, stripes), GFP_KERNEL);
	if (!sc) {
		ti->error = "Memory allocation for striped context failed";
		return -ENOMEM;
	}

	INIT_WORK(&sc->trigger_event, trigger_event);

	/* Set pointer to dm target; used in trigger_event */
	sc->ti = ti;
	sc->stripes = stripes;
	sc->stripe_width = width;

	if (stripes & (stripes - 1))
		sc->stripes_shift = -1;
	else
		sc->stripes_shift = __ffs(stripes);

	r = dm_set_target_max_io_len(ti, chunk_size);
	if (r) {
		kfree(sc);
		return r;
	}

	ti->num_flush_bios = stripes;
	ti->num_discard_bios = stripes;
	ti->num_secure_erase_bios = stripes;
	ti->num_write_zeroes_bios = stripes;
	ti->flush_bypasses_map = true;

	sc->chunk_size = chunk_size;
	if (chunk_size & (chunk_size - 1))
		sc->chunk_size_shift = -1;
	else
		sc->chunk_size_shift = __ffs(chunk_size);

	/*
	 * Get the stripe destinations.
	 */
	for (i = 0; i < stripes; i++) {
		argv += 2;

		r = get_stripe(ti, sc, i, argv);
		if (r < 0) {
			ti->error = "Couldn't parse stripe destination";
			while (i--)
				dm_put_device(ti, sc->stripe[i].dev);
			kfree(sc);
			return r;
		}
		atomic_set(&(sc->stripe[i].error_count), 0);
	}

	ti->private = sc;

	return 0;
}

static void stripe_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct stripe_c *sc = ti->private;

	for (i = 0; i < sc->stripes; i++)
		dm_put_device(ti, sc->stripe[i].dev);

	flush_work(&sc->trigger_event);
	kfree(sc);
}

static void stripe_map_sector(struct stripe_c *sc, sector_t sector,
			      uint32_t *stripe, sector_t *result)
{
	sector_t chunk = dm_target_offset(sc->ti, sector);
	sector_t chunk_offset;

	if (sc->chunk_size_shift < 0)
		chunk_offset = sector_div(chunk, sc->chunk_size);
	else {
		chunk_offset = chunk & (sc->chunk_size - 1);
		chunk >>= sc->chunk_size_shift;
	}

	if (sc->stripes_shift < 0)
		*stripe = sector_div(chunk, sc->stripes);
	else {
		*stripe = chunk & (sc->stripes - 1);
		chunk >>= sc->stripes_shift;
	}

	if (sc->chunk_size_shift < 0)
		chunk *= sc->chunk_size;
	else
		chunk <<= sc->chunk_size_shift;

	*result = chunk + chunk_offset;
}
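
/*
 * Worked example for stripe_map_sector() (illustrative numbers only):
 * with chunk_size = 8 and stripes = 2, target-relative sector 21
 * splits into chunk = 2 with chunk_offset = 5; then stripe = 2 % 2 = 0
 * and chunk becomes 2 / 2 = 1, so *result = 1 * 8 + 5 = 13, i.e.
 * sector 13 of stripe 0's data area (the caller adds physical_start).
 */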

static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
				    uint32_t target_stripe, sector_t *result)
{
	uint32_t stripe;

	stripe_map_sector(sc, sector, &stripe, result);
	if (stripe == target_stripe)
		return;

	/* round down */
	sector = *result;
	if (sc->chunk_size_shift < 0)
		*result -= sector_div(sector, sc->chunk_size);
	else
		*result = sector & ~(sector_t)(sc->chunk_size - 1);

	if (target_stripe < stripe)
		*result += sc->chunk_size;		/* next chunk */
}
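
/*
 * Continuing the example above: sector 21 maps to stripe 0, so asking
 * stripe_map_range_sector() for target_stripe 1 rounds the mapped
 * sector 13 down to the chunk boundary 8; since 1 > 0 no next-chunk
 * correction applies, and stripe 1's first chunk at or after target
 * sector 21 indeed starts at its stripe-local sector 8.
 */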

static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
			    uint32_t target_stripe)
{
	sector_t begin, end;

	stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
				target_stripe, &begin);
	stripe_map_range_sector(sc, bio_end_sector(bio),
				target_stripe, &end);
	if (begin < end) {
		bio_set_dev(bio, sc->stripe[target_stripe].dev->bdev);
		bio->bi_iter.bi_sector = begin +
			sc->stripe[target_stripe].physical_start;
		bio->bi_iter.bi_size = to_bytes(end - begin);
		return DM_MAPIO_REMAPPED;
	}

	/* The range doesn't map to the target stripe */
	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;
}
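
/*
 * E.g. (same illustrative geometry, chunk_size = 8, stripes = 2): a
 * discard of target sectors [0, 32) covers chunks 0-3, two per stripe,
 * so each per-stripe clone is trimmed to stripe-local sectors [0, 16)
 * on its device; a clone whose computed range is empty is completed
 * here without being remapped.
 */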
static int stripe_map(struct dm_target *ti, struct bio *bio)
{
	struct stripe_c *sc = ti->private;
	uint32_t stripe;
	unsigned int target_bio_nr;

	if (bio->bi_opf & REQ_PREFLUSH) {
		target_bio_nr = dm_bio_get_target_bio_nr(bio);
		BUG_ON(target_bio_nr >= sc->stripes);
		bio_set_dev(bio, sc->stripe[target_bio_nr].dev->bdev);
		return DM_MAPIO_REMAPPED;
	}
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
	    unlikely(bio_op(bio) == REQ_OP_SECURE_ERASE) ||
	    unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES)) {
		target_bio_nr = dm_bio_get_target_bio_nr(bio);
		BUG_ON(target_bio_nr >= sc->stripes);
		return stripe_map_range(sc, bio, target_bio_nr);
	}

	stripe_map_sector(sc, bio->bi_iter.bi_sector,
			  &stripe, &bio->bi_iter.bi_sector);

	bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
	bio_set_dev(bio, sc->stripe[stripe].dev->bdev);

	return DM_MAPIO_REMAPPED;
}

#if IS_ENABLED(CONFIG_FS_DAX)
static struct dax_device *stripe_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
{
	struct stripe_c *sc = ti->private;
	struct block_device *bdev;
	sector_t dev_sector;
	uint32_t stripe;

	stripe_map_sector(sc, *pgoff * PAGE_SECTORS, &stripe, &dev_sector);
	dev_sector += sc->stripe[stripe].physical_start;
	bdev = sc->stripe[stripe].dev->bdev;

	*pgoff = (get_start_sect(bdev) + dev_sector) >> PAGE_SECTORS_SHIFT;
	return sc->stripe[stripe].dev->dax_dev;
}
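
/*
 * Sketch of the pgoff translation above, assuming 4 KiB pages
 * (PAGE_SECTORS == 8) and the example geometry used earlier: pgoff 3
 * is sector 24 = chunk 3, which lands on stripe 1 at stripe-local
 * sector 8; after adding physical_start and the partition start,
 * shifting right by PAGE_SECTORS_SHIFT yields the page offset within
 * the backing dax_device.
 */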

static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);

	return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
}

static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
				      size_t nr_pages)
{
	struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);

	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}

static size_t stripe_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);

	return dax_recovery_write(dax_dev, pgoff, addr, bytes, i);
}

#else
#define stripe_dax_direct_access NULL
#define stripe_dax_zero_page_range NULL
#define stripe_dax_recovery_write NULL
#endif

/*
 * Stripe status:
 *
 * INFO
 * #stripes [stripe_name <stripe_name>] [group word count]
 * [error count 'A|D' <error count 'A|D'>]
 *
 * TABLE
 * #stripes [stripe chunk size]
 * [stripe_name physical_start <stripe_name physical_start>]
 *
 */

static void stripe_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct stripe_c *sc = ti->private;
	unsigned int sz = 0;
	unsigned int i;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", sc->stripes);
		for (i = 0; i < sc->stripes; i++)
			DMEMIT("%s ", sc->stripe[i].dev->name);

		DMEMIT("1 ");
		for (i = 0; i < sc->stripes; i++)
			DMEMIT("%c", atomic_read(&(sc->stripe[i].error_count)) ? 'D' : 'A');
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%d %llu", sc->stripes,
			(unsigned long long)sc->chunk_size);
		for (i = 0; i < sc->stripes; i++)
			DMEMIT(" %s %llu", sc->stripe[i].dev->name,
			    (unsigned long long)sc->stripe[i].physical_start);
		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",stripes=%d,chunk_size=%llu", sc->stripes,
		       (unsigned long long)sc->chunk_size);

		for (i = 0; i < sc->stripes; i++) {
			DMEMIT(",stripe_%d_device_name=%s", i, sc->stripe[i].dev->name);
			DMEMIT(",stripe_%d_physical_start=%llu", i,
			       (unsigned long long)sc->stripe[i].physical_start);
			DMEMIT(",stripe_%d_status=%c", i,
			       atomic_read(&(sc->stripe[i].error_count)) ? 'D' : 'A');
		}
		DMEMIT(";");
		break;
	}
}

static int stripe_end_io(struct dm_target *ti, struct bio *bio,
		blk_status_t *error)
{
	unsigned int i;
	char major_minor[16];
	struct stripe_c *sc = ti->private;

	if (!*error)
		return DM_ENDIO_DONE; /* I/O complete */

	if (bio->bi_opf & REQ_RAHEAD)
		return DM_ENDIO_DONE;

	if (*error == BLK_STS_NOTSUPP)
		return DM_ENDIO_DONE;

	memset(major_minor, 0, sizeof(major_minor));
	sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));

	/*
	 * Test to see which stripe drive triggered the event
	 * and increment error count for all stripes on that device.
	 * If the error count for a given device exceeds the threshold
	 * value we will no longer trigger any further events.
	 */
	for (i = 0; i < sc->stripes; i++)
		if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
			atomic_inc(&(sc->stripe[i].error_count));
			if (atomic_read(&(sc->stripe[i].error_count)) <
			    DM_IO_ERROR_THRESHOLD)
				queue_work(dm_stripe_wq, &sc->trigger_event);
		}

	return DM_ENDIO_DONE;
}

static int stripe_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct stripe_c *sc = ti->private;
	int ret = 0;
	unsigned int i = 0;

	do {
		ret = fn(ti, sc->stripe[i].dev,
			 sc->stripe[i].physical_start,
			 sc->stripe_width, data);
	} while (!ret && ++i < sc->stripes);

	return ret;
}

static void stripe_io_hints(struct dm_target *ti,
			    struct queue_limits *limits)
{
	struct stripe_c *sc = ti->private;
	unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;

	limits->io_min = chunk_size;
	limits->io_opt = chunk_size * sc->stripes;
}
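
/*
 * E.g. with 64-sector chunks and 2 stripes this advertises io_min =
 * 32 KiB and io_opt = 64 KiB, hinting to upper layers that aligned
 * full-stripe-width I/O is the optimal size for the stacked queue
 * limits.
 */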

static struct target_type stripe_target = {
	.name   = "striped",
	.version = {1, 6, 0},
	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT,
	.module = THIS_MODULE,
	.ctr    = stripe_ctr,
	.dtr    = stripe_dtr,
	.map    = stripe_map,
	.end_io = stripe_end_io,
	.status = stripe_status,
	.iterate_devices = stripe_iterate_devices,
	.io_hints = stripe_io_hints,
	.direct_access = stripe_dax_direct_access,
	.dax_zero_page_range = stripe_dax_zero_page_range,
	.dax_recovery_write = stripe_dax_recovery_write,
};

int __init dm_stripe_init(void)
{
	int r;

	dm_stripe_wq = alloc_workqueue("dm_stripe_wq", 0, 0);
	if (!dm_stripe_wq)
		return -ENOMEM;
	r = dm_register_target(&stripe_target);
	if (r < 0) {
		destroy_workqueue(dm_stripe_wq);
		DMWARN("target registration failed");
	}

	return r;
}

void dm_stripe_exit(void)
{
	dm_unregister_target(&stripe_target);
	destroy_workqueue(dm_stripe_wq);
}