// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

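/*
 * Layout to assume for a multi-zone array whose superblock does not
 * record one: 1 selects RAID0_ORIG_LAYOUT, 2 selects
 * RAID0_ALT_MULTIZONE_LAYOUT.  With the default of 0, assembly of a
 * multi-zone array is refused (see create_strip_zones()).
 */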
static int default_layout = 0;
module_param(default_layout, int, 0644);

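/*
 * Feature flags that raid0 cannot honour.  They are cleared from the
 * mddev when an array is taken over from another personality (see the
 * raid0_takeover_*() helpers below).
 */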
#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line + len, 200 - len, "%s%s", k ? "/" : "",
					bdevname(conf->devlist[j * raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

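/*
 * A strip zone is a region over which every remaining member device
 * still has capacity, so data can be striped across all of them.
 * Devices of unequal size therefore yield several zones: for example,
 * a 100 GiB and a 200 GiB device give zone 0 striped over the first
 * 100 GiB of both devices, and zone 1 made of the remaining 100 GiB of
 * the larger device alone.  The zone count equals the number of
 * distinct device sizes.
 */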
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
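	/*
	 * Count the zones: each distinct (chunk-rounded) device size adds
	 * one zone, because striping must narrow once the smaller devices
	 * run out of space.
	 */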
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %s(%llu) with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev, b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev, b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

	if (conf->nr_strip_zones == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -ENOTSUPP;
		goto abort;
	}
	/*
	 * Now that we have the hard sector sizes, we can make sure the
	 * chunk size is a multiple of that sector size.
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

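	/*
	 * Zone i begins where the i-th smallest device ends: each pass
	 * admits only the devices that extend past the new dev_start and
	 * stripes the slice up to the next-smallest device across them.
	 */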
	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++) {
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ... contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * Remap the bio to the target device. We separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance.
 */
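/*
 * Worked example of the mapping below (illustrative values only): with
 * chunk_sects = 128 (64 KiB chunks) and a 3-device zone starting at
 * sector 0, zone offset 1000 falls in chunk 1000 / 128 = 7 at offset
 * 104.  Chunk 7 lands round-robin on device 7 % 3 = 1, as that device's
 * chunk 7 / 3 = 2, giving device sector 2 * 128 + 104 = 360.
 */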
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone) * raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

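/*
 * The array size is the sum of the members' sizes, each rounded down to
 * a whole number of chunks (the unaligned tail of each device is unused).
 */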
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors - 1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

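	/*
	 * Worked example for the read-ahead sizing below (assuming 4 KiB
	 * pages): four disks with 512 KiB chunks give a stripe of
	 * 4 * 512 KiB / 4 KiB = 512 pages, so ra_pages is raised to at
	 * least 2 * 512 = 1024 pages (4 MiB of read-ahead).
	 */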
	if (mddev->queue) {
		/* calculate the max read-ahead size.
		 * For read-ahead of large files to be effective, we need to
		 * readahead at least twice a whole stripe, i.e. the number
		 * of devices multiplied by the chunk size, times 2.
		 * If an individual device has an ra_pages greater than the
		 * chunk size, then we will not drive that device as hard as it
		 * wants.  We consider this a configuration error: a larger
		 * chunksize should be used in that case.
		 */
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
	}

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

/*
 * Is the IO distributed over 1 or more chunks?
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >=
			((bio->bi_iter.bi_sector & (chunk_sects - 1))
					+ bio_sectors(bio));
	} else {
		sector_t sector = bio->bi_iter.bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ bio_sectors(bio));
	}
}

static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		end = zone->zone_end;
	} else {
		end = bio_end_sector(bio);
	}

	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end are offsets within the zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

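	/*
	 * Issue one discard per member disk covering its share of
	 * [start, end).  Disks other than those holding the first and
	 * last affected chunks are trimmed on whole-chunk boundaries;
	 * the two boundary disks use the exact offsets computed above.
	 */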
	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct bio *discard_bio = NULL;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		if (__blkdev_issue_discard(rdev->bdev,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
		    !discard_bio)
			continue;
		bio_chain(discard_bio, bio);
		bio_clone_blkg_association(discard_bio, bio);
		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
				discard_bio, disk_devt(mddev->gendisk),
				bio->bi_iter.bi_sector);
		submit_bio_noacct(discard_bio);
	}
	bio_endio(bio);
}

static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	sector_t orig_sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

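	/*
	 * A bio must not straddle a chunk boundary on the underlying
	 * device: "sectors" is the room left in the current chunk, and
	 * any excess is split off and resubmitted separately.
	 */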
	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects - 1))
		 : sector_div(sector, chunk_sects));

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
	}

	orig_sector = sector;
	zone = find_zone(mddev->private, &sector);
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return true;
	}

	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
		bio_io_error(bio);
		return true;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				disk_devt(mddev->gendisk), bio_sector);
	mddev_check_writesame(mddev, bio);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks - 1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - the number of disks must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks >> 1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = -mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

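	/*
	 * E.g. an array of 1048584 sectors (2^20 + 8) halves chunksect from
	 * 128 down to 8, the largest power of two dividing the array size;
	 * 8 sectors is 4 KiB, which still passes the page-size check below
	 * on machines with 4 KiB pages.
	 */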
	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - provided it has the raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0:%s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid0 can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");