xref: /linux/drivers/md/raid0.c (revision 803f69144f0d48863c68f9d111b56849c7cef5bb)
/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static int raid0_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;

	for (i = 0; i < raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
	       mdname(mddev),
	       conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		printk(KERN_INFO "md: zone%d=[", j);
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			printk(KERN_CONT "%s%s", k ? "/" : "",
			       bdevname(conf->devlist[j * raid_disks
						      + k]->bdev, b));
		printk(KERN_CONT "]\n");

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		printk(KERN_INFO "      zone-offset=%10lluKB, "
				"device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start >> 1,
			(unsigned long long)conf->strip_zone[j].dev_start >> 1,
			(unsigned long long)zone_size >> 1);
		zone_start = conf->strip_zone[j].zone_end;
	}
	printk(KERN_INFO "\n");
}
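
/*
 * For illustration only: with a hypothetical two-zone array md0 built
 * from sda1 (smaller) and sdb1, the output above would look roughly
 * like:
 *
 *   md: RAID0 configuration for md0 - 2 zones
 *   md: zone0=[sda1/sdb1]
 *         zone-offset=         0KB, device-offset=         0KB, size=  40000000KB
 *   md: zone1=[sdb1]
 *         zone-offset=  40000000KB, device-offset=  20000000KB, size=  20000000KB
 *
 * Device names and sizes here are made up; the shape follows the
 * printk format strings in dump_zones() above.
 */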

static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);

	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev, b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev, b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone) *
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(struct md_rdev *) *
				conf->nr_strip_zones * mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		disk_stack_limits(mddev->gendisk, rdev1->bdev,
				  rdev1->data_offset << 9);

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
			conf->has_merge_bvec = 1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
		       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++) {
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}
	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
		       mdname(mddev),
		       mddev->chunk_sectors << 9);
		goto abort;
	}

	blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
	blk_queue_io_opt(mddev->queue,
			 (mddev->chunk_sectors << 9) * mddev->raid_disks);

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = NULL;
	return err;
}
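
/*
 * Worked example (hypothetical numbers): two devices of 100 and 300
 * sectors after chunk rounding.  The sizes are distinct, so
 * nr_strip_zones == 2.  Zone 0 stripes across both devices and ends at
 * smallest->sectors * cnt = 100 * 2 = 200 array sectors; zone 1 starts
 * at dev_start = 100 on the larger device alone and contributes
 * (300 - 100) * 1 = 200 more sectors, so curr_zone_end = 400.
 */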

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}
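
/*
 * Example, using the hypothetical two-zone layout sketched above (zone
 * ends at 200 and 400): *sectorp == 250 falls in zone 1 and is
 * rewritten to 250 - 200 == 50, the offset within that zone.
 */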

/*
 * remaps the bio to the target device. we separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance
 */
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone) * raid_disks
			     + sector_div(sector, zone->nb_dev)];
}
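
/*
 * Worked example (power-of-2 path, hypothetical numbers): chunk_sects
 * = 8, zone->nb_dev = 2, and sector = *sector_offset = 100 on entry.
 * sect_in_chunk = 100 & 7 = 4; sector >>= 3 gives chunk number 12;
 * chunk = 100 / (2 << 3) = 6, the chunk index on the target device.
 * *sector_offset becomes 6 * 8 + 4 = 52, and the device picked is
 * devlist[12 % 2] = devlist[0] within this zone's row.
 */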

/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r0conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	sector_t sector_offset = sector;
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;
	struct strip_zone *zone;
	struct md_rdev *rdev;
	struct request_queue *subq;

	if (is_power_of_2(chunk_sectors))
		max = (chunk_sectors - ((sector & (chunk_sectors-1))
						+ bio_sectors)) << 9;
	else
		max = (chunk_sectors - (sector_div(sector, chunk_sectors)
						+ bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	if (max < biovec->bv_len)
		/* too small already, no need to check further */
		return max;
	if (!conf->has_merge_bvec)
		return max;

	/* May need to check subordinate device */
	sector = sector_offset;
	zone = find_zone(mddev->private, &sector_offset);
	rdev = map_sector(mddev, zone, sector, &sector_offset);
	subq = bdev_get_queue(rdev->bdev);
	if (subq->merge_bvec_fn) {
		bvm->bi_bdev = rdev->bdev;
		bvm->bi_sector = sector_offset + zone->dev_start +
			rdev->data_offset;
		return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
	} else
		return max;
}
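
/*
 * Example (hypothetical numbers): with chunk_sectors = 128 (64KiB
 * chunks), a request whose offset in the chunk plus bio_sectors adds
 * up to 100 leaves max = (128 - 100) << 9 = 14336 bytes before the
 * chunk boundary -- the most raid0_mergeable_bvec() will allow, before
 * any subordinate merge_bvec_fn shrinks it further.
 */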

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += rdev->sectors;

	return array_sectors;
}

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
		       mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
	       mdname(mddev),
	       (unsigned long long)mddev->array_sectors);
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe, i.e. the number of
	 * devices multiplied by the chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}
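	/*
	 * Worked example (hypothetical numbers): 4 disks with 512KiB
	 * chunks and 4KiB pages give stripe = 4 * (1024 << 9) / 4096 =
	 * 512 pages, so ra_pages is raised to at least 1024 pages
	 * (4MiB), i.e. two full stripes.
	 */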

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	dump_zones(mddev);
	return md_integrity_register(mddev);
}

static int raid0_stop(struct mddev *mddev)
{
	struct r0conf *conf = mddev->private;

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

/*
 * Is the IO distributed over one or more chunks?
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
					+ (bio->bi_size >> 9));
	} else {
		sector_t sector = bio->bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ (bio->bi_size >> 9));
	}
}
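
/*
 * Example (hypothetical numbers): chunk_sects = 8, bi_sector = 6 and a
 * 2-sector bio: (6 & 7) + 2 = 8 <= 8, so the IO fits in one chunk; a
 * 3-sector bio at the same offset gives 9 > 8 and must be split.
 */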

static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	unsigned int chunk_sects;
	sector_t sector_offset;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	chunk_sects = mddev->chunk_sectors;
	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
		sector_t sector = bio->bi_sector;
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		if (likely(is_power_of_2(chunk_sects)))
			bp = bio_split(bio, chunk_sects - (sector &
							   (chunk_sects-1)));
		else
			bp = bio_split(bio, chunk_sects -
				       sector_div(sector, chunk_sects));
		raid0_make_request(mddev, &bp->bio1);
		raid0_make_request(mddev, &bp->bio2);
		bio_pair_release(bp);
		return;
	}

	sector_offset = bio->bi_sector;
	zone = find_zone(mddev->private, &sector_offset);
	tmp_dev = map_sector(mddev, zone, bio->bi_sector,
			     &sector_offset);
	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = sector_offset + zone->dev_start +
		tmp_dev->data_offset;

	generic_make_request(bio);
	return;

bad_map:
	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
	       " or bigger than %dk %llu %d\n",
	       mdname(mddev), chunk_sects / 2,
	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return;
}
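
/*
 * Example (hypothetical numbers): with chunk_sects = 8, a 4-sector bio
 * at sector 6 crosses a chunk boundary; bio_split() above cuts it at
 * 8 - (6 & 7) = 2 sectors, and each half is resubmitted through
 * raid0_make_request() and mapped independently.
 */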

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
		       mdname(mddev),
		       mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
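	/* In md's raid10 layout word the low byte is near_copies and the
	 * next byte is far_copies, so (1 << 8) + 2 == 0x102 encodes
	 * near=2, far=1 -- the only geometry accepted here.
	 */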
	if (mddev->layout != ((1 << 8) + 2)) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
		       mdname(mddev),
		       mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = -mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - provided it uses the raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		printk(KERN_ERR "md/raid0:%s: Raid0 can only take over Raid5 with layout: %d\n",
		       mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int state)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");