// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static int default_layout = 0;
module_param(default_layout, int, 0644);
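/* 1 selects RAID0_ORIG_LAYOUT, 2 RAID0_ALT_MULTIZONE_LAYOUT; see raid0.h. */
MODULE_PARM_DESC(default_layout,
		 "layout to assume for multi-zone arrays (1: original, 2: alternate multizone)");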

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS) |	\
	 (1L << MD_FAILLAST_DEV) |	\
	 (1L << MD_SERIALIZE_POLICY))

/*
 * inform the user of the raid configuration
 */
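/*
 * Hypothetical example of the debug output for a two-zone array built
 * from 10GiB sda and sdb plus a 20GiB sdc (formatting approximate):
 *
 *   md: RAID0 configuration for md0 - 2 zones
 *   md: zone0=[sda/sdb/sdc]
 *         zone-offset=         0KB, device-offset=         0KB, size=  31457280KB
 *   md: zone1=[sdc]
 *         zone-offset=  31457280KB, device-offset=  10485760KB, size=  10485760KB
 */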
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += scnprintf(line+len, 200-len, "%s%pg", k ? "/" : "",
				conf->devlist[j * raid_disks + k]->bdev);
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

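/*
 * Zones partition the array by member size. As a hypothetical example,
 * take sda = sdb = 10GiB and sdc = 20GiB: zone 0 stripes the first
 * 10GiB of all three devices (30GiB of usable space), and zone 1 is
 * the remaining 10GiB of sdc alone. zone_end is cumulative, so
 * strip_zone[0].zone_end covers 30GiB worth of sectors and
 * strip_zone[1].zone_end 40GiB.
 */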
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	struct r0conf *conf = kzalloc_obj(*conf, GFP_KERNEL);
	unsigned int blksize = 512;

	if (!mddev_is_dm(mddev))
		blksize = queue_logical_block_size(mddev->gendisk->queue);

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %pg\n",
			 mdname(mddev),
			 rdev1->bdev);
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		if (mddev_is_dm(mddev))
			blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %pg(%llu)"
				 " with %pg(%llu)\n",
				 mdname(mddev),
				 rdev1->bdev,
				 (unsigned long long)rdev1->sectors,
				 rdev2->bdev,
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not a multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kzalloc_objs(struct strip_zone, conf->nr_strip_zones,
					GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++) {
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %pg ... nope\n",
					 mdname(mddev),
					 rdev->bdev);
				continue;
			}
			pr_debug("md/raid0:%s: checking %pg ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 rdev->bdev, c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -EOPNOTSUPP;
		goto abort;
	}

	if (conf->layout == RAID0_ORIG_LAYOUT) {
		for (i = 1; i < conf->nr_strip_zones; i++) {
			sector_t first_sector = conf->strip_zone[i-1].zone_end;

			sector_div(first_sector, mddev->chunk_sectors);
			zone = conf->strip_zone + i;
			/* disk_shift is the first disk index used in the zone */
			zone->disk_shift = sector_div(first_sector,
						      zone->nb_dev);
		}
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset.
 * Update *sectorp to be an offset in that zone.
 */
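/*
 * Continuing the hypothetical three-device example above: with zone
 * ends at [30GiB, 40GiB] (in sectors), a request at array offset 31GiB
 * falls in zone 1 and *sectorp becomes 1GiB worth of sectors, i.e. the
 * offset past strip_zone[0].zone_end.
 */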
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * Remap the bio to the target device. We separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance.
 */
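/*
 * Worked example with hypothetical numbers: chunk_sects = 128 (64KiB
 * chunks) and zone->nb_dev = 4. For a zone-relative offset of 1000
 * sectors, the power-of-2 path gives sect_in_chunk = 1000 & 127 = 104
 * and chunk = 1000 / (4 * 128) = 1, so the device-relative offset
 * becomes 1 * 128 + 104 = 232, and the target device index is
 * (1000 >> 7) % 4 = 3.
 */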
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in the real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device:
	 * real sector = chunk in device + start of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

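/*
 * The exported size is the sum of each member's capacity, which
 * create_strip_zones() already rounded down to a whole number of
 * chunks. Hypothetical example: two 1000-sector members with
 * chunk_sectors = 128 contribute 1000 & ~127 = 896 sectors each,
 * for an array of 1792 sectors.
 */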
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

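/*
 * Hypothetical example of the limits arithmetic below: with
 * chunk_sectors = 128 (64KiB) on a 4-disk array, io_min = 64KiB (one
 * chunk) and io_opt = 256KiB (one full stripe), hinting to upper
 * layers that full-stripe I/O is optimal.
 */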
static int raid0_set_limits(struct mddev *mddev)
{
	struct queue_limits lim;
	int err;

	md_init_stacking_limits(&lim);
	lim.max_hw_sectors = mddev->chunk_sectors;
	lim.max_write_zeroes_sectors = mddev->chunk_sectors;
	lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
	lim.logical_block_size = mddev->logical_block_size;
	lim.io_min = mddev->chunk_sectors << 9;
	lim.io_opt = lim.io_min * mddev->raid_disks;
	lim.chunk_sectors = mddev->chunk_sectors;
	lim.features |= BLK_FEAT_ATOMIC_WRITES;
	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
	if (err)
		return err;
	return queue_limits_set(mddev->gendisk->queue, &lim);
}

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	if (!mddev_is_dm(mddev)) {
		ret = raid0_set_limits(mddev);
		if (ret)
			return ret;
	}

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	dump_zones(mddev);

	return md_integrity_register(mddev);
}

/*
 * Convert disk_index to the disk order in which it is read/written.
 *  For example, if we have 4 disks, they are numbered 0,1,2,3. If we
 *  write the disks starting at disk 3, then the read/write order would
 *  be disk 3, then 0, then 1, and then disk 2, and we want map_disk_shift()
 *  to map the disks as follows 0,1,2,3 => 1,2,3,0. So disk 0 would map
 *  to 1, 1 to 2, 2 to 3, and 3 to 0. That way we can compare disks in
 *  that 'output' space to understand the read/write disk ordering.
 */
static int map_disk_shift(int disk_index, int num_disks, int disk_shift)
{
	return ((disk_index + num_disks - disk_shift) % num_disks);
}

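/*
 * A discard may span many stripes, so rather than splitting it into
 * per-chunk bios, compute one contiguous [dev_start, dev_end) range
 * per member disk and issue a single discard to each. Hypothetical
 * example: chunk_sectors = 128, 2 disks, discard of array sectors
 * [0, 512) covers stripes 0-1 completely, so each disk receives one
 * discard of sectors [0, 256) within its data area.
 */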
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;
	sector_t orig_start, orig_end;

	orig_start = start;
	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		bio = bio_submit_split_bioset(bio,
				zone->zone_end - bio->bi_iter.bi_sector,
				&mddev->bio_set);
		if (!bio)
			return;

		end = zone->zone_end;
	} else {
		end = bio_end_sector(bio);
	}

	orig_end = end;
	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end are offsets within the zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	/* In the first zone the original and alternate layouts are the same */
	if ((conf->layout == RAID0_ORIG_LAYOUT) && (zone != conf->strip_zone)) {
		sector_div(orig_start, mddev->chunk_sectors);
		start_disk_index = sector_div(orig_start, zone->nb_dev);
		start_disk_index = map_disk_shift(start_disk_index,
						  zone->nb_dev,
						  zone->disk_shift);
		sector_div(orig_end, mddev->chunk_sectors);
		end_disk_index = sector_div(orig_end, zone->nb_dev);
		end_disk_index = map_disk_shift(end_disk_index,
						zone->nb_dev, zone->disk_shift);
	} else {
		start_disk_index = (int)(start - first_stripe_index * stripe_size) /
			mddev->chunk_sectors;
		end_disk_index = (int)(end - last_stripe_index * stripe_size) /
			mddev->chunk_sectors;
	}
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct md_rdev *rdev;
		int compare_disk;

		compare_disk = map_disk_shift(disk, zone->nb_dev,
					      zone->disk_shift);

		if (compare_disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (compare_disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (compare_disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (compare_disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		md_submit_discard_bio(mddev, rdev, bio,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start);
	}
	bio_endio(bio);
}

static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector = bio->bi_iter.bi_sector;
	sector_t sector = bio_sector;

	md_account_bio(mddev, &bio);

	zone = find_zone(mddev->private, &sector);
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, bio_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return;
	}

	if (unlikely(is_rdev_broken(tmp_dev))) {
		bio_io_error(bio);
		md_error(mddev, tmp_dev);
		return;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;
	mddev_trace_remap(mddev, bio, bio_sector);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
}

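/*
 * Bios must not cross a chunk boundary on a single device. As a
 * hypothetical example, with chunk_sects = 128 a 96-sector write at
 * array sector 100 has only 128 - (100 & 127) = 28 sectors left in its
 * chunk, so it is split at 28 and the remainder resubmitted via the
 * bioset.
 */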
static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	sector_t sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH) &&
	    md_flush_request(mddev, bio))
		return true;

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	sector = bio->bi_iter.bi_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	if (sectors < bio_sectors(bio)) {
		bio = bio_submit_split_bioset(bio, sectors,
					      &mddev->bio_set);
		if (!bio)
			return true;
	}

	raid0_map_submit_bio(mddev, bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
}

static void raid0_error(struct mddev *mddev, struct md_rdev *rdev)
{
	if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
		char *md_name = mdname(mddev);

		pr_crit("md/raid0:%s: Disk failure on %pg detected, failing array.\n",
			md_name, rdev->bdev);
	}
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->resync_offset = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - number of disks must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks >> 1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = -mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->resync_offset = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;
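	/*
	 * Hypothetical example: with array_sectors = 1228800 (600MiB),
	 * 128 divides it evenly, so the loop keeps the default 64KiB
	 * chunk; an array of 1228801 sectors would instead shrink
	 * chunksect to 1 and fail the PAGE_SIZE check below.
	 */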

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->resync_offset = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - provided it uses the raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0:%s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid0 can only take over Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.head = {
		.type	= MD_PERSONALITY,
		.id	= ID_RAID0,
		.name	= "raid0",
		.owner	= THIS_MODULE,
	},

	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.error_handler	= raid0_error,
};

static int __init raid0_init(void)
{
	return register_md_submodule(&raid0_personality.head);
}

static void __exit raid0_exit(void)
{
	unregister_md_submodule(&raid0_personality.head);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");