// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static int default_layout = 0;
module_param(default_layout, int, 0644);
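/*
 * A modinfo description for the knob above (an addition for documentation):
 * 1 selects RAID0_ORIG_LAYOUT and 2 RAID0_ALT_MULTIZONE_LAYOUT, matching
 * the values create_strip_zones() accepts below.
 */
MODULE_PARM_DESC(default_layout, "RAID0 layout for multi-zone arrays (1: original, 2: alternate)");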

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

/*
 * Inform the user of the RAID configuration.
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += scnprintf(line+len, 200-len, "%s%pg", k ? "/" : "",
				conf->devlist[j * raid_disks + k]->bdev);
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

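/*
 * Worked example (illustrative sizes only): with two members of 4 and 8
 * chunks, zone 0 stripes the first 4 chunks of both devices (8 chunks of
 * array space) and zone 1 covers the remaining 4 chunks of the larger
 * device alone, so zone_end becomes {8, 12} in chunk units.
 */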
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %pg\n",
			 mdname(mddev),
			 rdev1->bdev);
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %pg(%llu) with %pg(%llu)\n",
				 mdname(mddev),
				 rdev1->bdev,
				 (unsigned long long)rdev1->sectors,
				 rdev2->bdev,
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++) {
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %pg ... nope\n",
					 mdname(mddev),
					 rdev->bdev);
				continue;
			}
			pr_debug("md/raid0:%s: checking %pg ... contained as device %d\n",
				 mdname(mddev),
				 rdev->bdev, c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

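	/*
	 * A single-zone array, or one whose only later zone stripes over a
	 * single device, reads and writes identically under both layouts,
	 * so the layout choice only matters for true multi-zone arrays.
	 */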
	if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -EOPNOTSUPP;
		goto abort;
	}

	if (conf->layout == RAID0_ORIG_LAYOUT) {
		for (i = 1; i < conf->nr_strip_zones; i++) {
			sector_t first_sector = conf->strip_zone[i-1].zone_end;

			sector_div(first_sector, mddev->chunk_sectors);
			zone = conf->strip_zone + i;
			/* disk_shift is first disk index used in the zone */
			zone->disk_shift = sector_div(first_sector,
						      zone->nb_dev);
		}
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
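/*
 * Example: with zone_end = {1000, 1600}, sector 1200 falls in zone 1 and
 * *sectorp is updated to 200, the offset from the start of that zone.
 */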
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * Remap the bio to the target device. Two flows are kept separate for
 * performance: a power-of-2 chunk-size flow and a general flow.
 */
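/*
 * Worked example for the power-of-2 path (illustrative values, taking the
 * alternate multi-zone case where 'sector' equals the zone-relative
 * offset): with chunk_sects = 8 and a 2-device zone, offset 26 lands in
 * zone chunk 26 >> 3 = 3, at offset 26 & 7 = 2 within the chunk.  That is
 * per-device chunk 26 / 16 = 1 on device 3 % 2 = 1, so the device-relative
 * result is 1 * 8 + 2 = 10 (before adding dev_start and data_offset).
 */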
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);

		/* find the sector offset inside the chunk */
		sect_in_chunk  = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

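	/* Sum the members, each rounded down to a whole number of chunks. */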
	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

static int raid0_set_limits(struct mddev *mddev)
{
	struct queue_limits lim;

	blk_set_stacking_limits(&lim);
	lim.max_hw_sectors = mddev->chunk_sectors;
	lim.max_write_zeroes_sectors = mddev->chunk_sectors;
	lim.io_min = mddev->chunk_sectors << 9;
	lim.io_opt = lim.io_min * mddev->raid_disks;
	mddev_stack_rdev_limits(mddev, &lim);
	return queue_limits_set(mddev->gendisk->queue, &lim);
}

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (!mddev_is_dm(mddev)) {
		ret = raid0_set_limits(mddev);
		if (ret)
			return ret;
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	dump_zones(mddev);

	return md_integrity_register(mddev);
}

/*
 * Convert disk_index to the disk order in which it is read/written.
 *  For example, if we have 4 disks, they are numbered 0,1,2,3. If we
 *  write the disks starting at disk 3, then the read/write order would
 *  be disk 3, then 0, then 1, then disk 2, and we want map_disk_shift()
 *  to map the disks as follows 0,1,2,3 => 1,2,3,0. So disk 0 would map
 *  to 1, 1 to 2, 2 to 3, and 3 to 0. That way we can compare disks in
 *  that 'output' space to understand the read/write disk ordering.
 */
static int map_disk_shift(int disk_index, int num_disks, int disk_shift)
{
	return ((disk_index + num_disks - disk_shift) % num_disks);
}

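/*
 * The discard path works out, per member disk, which sector range of that
 * disk falls inside [start, end) of the zone.  Example with illustrative
 * numbers, in the layout-independent first zone: chunk_sectors = 8 and a
 * 2-device zone give a stripe_size of 16, so a zone-relative range
 * starting at sector 20 begins in stripe 20 / 16 = 1, on the disk holding
 * chunk (20 - 16) / 8 = 0 of that stripe, at device offset
 * (20 - 16) % 8 + 1 * 8 = 12.
 */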
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;
	sector_t orig_start, orig_end;

	orig_start = start;
	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	orig_end = end;
	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;
	/* Now start and end are offsets within the zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	/* In the first zone the original and alternate layouts are the same */
	if ((conf->layout == RAID0_ORIG_LAYOUT) && (zone != conf->strip_zone)) {
		sector_div(orig_start, mddev->chunk_sectors);
		start_disk_index = sector_div(orig_start, zone->nb_dev);
		start_disk_index = map_disk_shift(start_disk_index,
						  zone->nb_dev,
						  zone->disk_shift);
		sector_div(orig_end, mddev->chunk_sectors);
		end_disk_index = sector_div(orig_end, zone->nb_dev);
		end_disk_index = map_disk_shift(end_disk_index,
						zone->nb_dev, zone->disk_shift);
	} else {
		start_disk_index = (int)(start - first_stripe_index * stripe_size) /
			mddev->chunk_sectors;
		end_disk_index = (int)(end - last_stripe_index * stripe_size) /
			mddev->chunk_sectors;
	}
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct md_rdev *rdev;
		int compare_disk;

		compare_disk = map_disk_shift(disk, zone->nb_dev,
					      zone->disk_shift);

		if (compare_disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (compare_disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (compare_disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (compare_disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		md_submit_discard_bio(mddev, rdev, bio,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start);
	}
	bio_endio(bio);
}

static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector = bio->bi_iter.bi_sector;
	sector_t sector = bio_sector;

	md_account_bio(mddev, &bio);

	zone = find_zone(conf, &sector);
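	/*
	 * The two layouts differ only in zones after the first: the
	 * original layout derives the chunk number from the array-absolute
	 * sector, the alternate multi-zone layout from the zone-relative
	 * one, so each passes a different sector to map_sector().
	 */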
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, bio_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return;
	}

	if (unlikely(is_rdev_broken(tmp_dev))) {
		bio_io_error(bio);
		md_error(mddev, tmp_dev);
		return;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;
	mddev_trace_remap(mddev, bio, bio_sector);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
}

static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	sector_t sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	sector = bio->bi_iter.bi_sector;
	chunk_sects = mddev->chunk_sectors;

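	/*
	 * Number of sectors from bi_sector up to the next chunk boundary;
	 * a bio that crosses the boundary is split there, since each chunk
	 * lives entirely on one member device.
	 */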
	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		raid0_map_submit_bio(mddev, bio);
		bio = split;
	}

	raid0_map_submit_bio(mddev, bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
}

static void raid0_error(struct mddev *mddev, struct md_rdev *rdev)
{
	if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
		char *md_name = mdname(mddev);

		pr_crit("md/raid0:%s: Disk failure on %pg detected, failing array.\n",
			md_name, rdev->bdev);
	}
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks - 1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - number of disks must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks >> 1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = -mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;
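	/*
	 * For example (illustrative size), a 1000-sector array steps
	 * 128 -> 64 -> 32 -> 16 -> 8, since 1000 = 125 * 8; the resulting
	 * 4 KiB chunk is then checked against PAGE_SIZE below.
	 */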

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - provided it has the raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: raid0 can only take over raid5 with layout %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality = {
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.error_handler	= raid0_error,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");