/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev->private;
	mdk_rdev_t **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i;

	for (i = 0; i < raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}

static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev->private;
	mdk_rdev_t **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;

	for (i = 0; i < raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * inform the user of the raid configuration
 */
static void dump_zones(mddev_t *mddev)
{
	int j, k, h;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	printk(KERN_INFO "******* %s configuration *********\n",
		mdname(mddev));
	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		printk(KERN_INFO "zone%d=[", j);
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			printk(KERN_CONT "%s/",
			       bdevname(conf->devlist[j*raid_disks
						      + k]->bdev, b));
		printk(KERN_CONT "]\n");

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		printk(KERN_INFO "        zone offset=%llukb "
				"device offset=%llukb size=%llukb\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
	printk(KERN_INFO "**********************************\n\n");
}

static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL);

	if (!conf)
		return -ENOMEM;
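	/*
	 * First pass: count the strip zones. Devices of equal
	 * (chunk-rounded) size end together, so each distinct device size
	 * adds one zone. For example (illustrative sizes): disks of 100,
	 * 100 and 200 chunks yield two zones.
	 */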
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		printk(KERN_INFO "md/raid0:%s: looking at %s\n",
		       mdname(mddev),
		       bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		list_for_each_entry(rdev2, &mddev->disks, same_set) {
			printk(KERN_INFO "md/raid0:%s:   comparing %s(%llu)",
			       mdname(mddev),
			       bdevname(rdev1->bdev, b),
			       (unsigned long long)rdev1->sectors);
			printk(KERN_CONT " with %s(%llu)\n",
			       bdevname(rdev2->bdev, b),
			       (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				printk(KERN_INFO "md/raid0:%s:   END\n",
				       mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk(KERN_INFO "md/raid0:%s:   EQUAL\n",
				       mdname(mddev));
				c = 1;
				break;
			}
			printk(KERN_INFO "md/raid0:%s:   NOT EQUAL\n",
			       mdname(mddev));
		}
		if (!c) {
			printk(KERN_INFO "md/raid0:%s:   ==> UNIQUE\n",
			       mdname(mddev));
			conf->nr_strip_zones++;
			printk(KERN_INFO "md/raid0:%s: %d zones\n",
			       mdname(mddev), conf->nr_strip_zones);
		}
	}
	printk(KERN_INFO "md/raid0:%s: FINAL %d zones\n",
	       mdname(mddev), conf->nr_strip_zones);
	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		disk_stack_limits(mddev->gendisk, rdev1->bdev,
				  rdev1->data_offset << 9);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_segments to 1, lying within
		 * a single page.
		 */

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
			blk_queue_max_segments(mddev->queue, 1);
			blk_queue_segment_boundary(mddev->queue,
						   PAGE_CACHE_SIZE - 1);
		}
		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
		       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
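	/*
	 * Each later zone starts at the device offset where the previous
	 * zone's smallest device ended (zone->dev_start) and contains only
	 * the devices that extend beyond that point; its size is the
	 * remaining capacity of its own smallest device times the number
	 * of devices left.
	 */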
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		printk(KERN_INFO "md/raid0:%s: zone %d\n",
		       mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			printk(KERN_INFO "md/raid0:%s: checking %s ...",
			       mdname(mddev),
			       bdevname(rdev->bdev, b));
			if (rdev->sectors <= zone->dev_start) {
				printk(KERN_CONT " nope.\n");
				continue;
			}
			printk(KERN_CONT " contained as device %d\n", c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				printk(KERN_INFO "md/raid0:%s:  (%llu) is smallest!\n",
				       mdname(mddev),
				       (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		printk(KERN_INFO "md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
		       mdname(mddev),
		       zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		printk(KERN_INFO "md/raid0:%s: current zone start: %llu\n",
		       mdname(mddev),
		       (unsigned long long)smallest->sectors);
	}
	mddev->queue->unplug_fn = raid0_unplug;
	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
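	/*
	 * Illustrative figures: a 64 KiB chunk (chunk_sectors = 128) is a
	 * multiple of both 512 B and 4 KiB logical blocks, so it passes;
	 * the check only fails for a logical block size that does not
	 * divide the chunk size.
	 */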
	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
		       mdname(mddev),
		       mddev->chunk_sectors << 9);
		goto abort;
	}

	blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
	blk_queue_io_opt(mddev->queue,
			 (mddev->chunk_sectors << 9) * mddev->raid_disks);

	printk(KERN_INFO "md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = NULL;
	return err;
}

/**
 *	raid0_mergeable_bvec -- tell the bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of the new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return the number of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if (is_power_of_2(chunk_sectors))
		max = (chunk_sectors - ((sector & (chunk_sectors-1))
						+ bio_sectors)) << 9;
	else
		max = (chunk_sectors - (sector_div(sector, chunk_sectors)
						+ bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
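/*
 * Worked example for the computation above (illustrative figures, not
 * from the original source): with chunk_sectors = 128 (a 64 KiB chunk),
 * a bio starting 120 sectors into a chunk and bio_sectors = 0, the
 * remaining room is (128 - 120) << 9 = 4096 bytes, so one more 4 KiB
 * page may still be merged without crossing the chunk boundary.
 */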

static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	mdk_rdev_t *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	list_for_each_entry(rdev, &mddev->disks, same_set)
		array_sectors += rdev->sectors;

	return array_sectors;
}

static int raid0_run(mddev_t *mddev)
{
	raid0_conf_t *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
		       mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
	       mdname(mddev),
	       (unsigned long long)mddev->array_sectors);
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe. i.e. number of devices
	 * multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
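	/*
	 * For example (illustrative figures): with 4 disks, 64 KiB chunks
	 * and 4 KiB pages, one stripe is 4 * 65536 / 4096 = 64 pages, so
	 * ra_pages is raised to at least 128 pages (512 KiB).
	 */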
	{
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	dump_zones(mddev);
	md_integrity_register(mddev);
	return 0;
}

static int raid0_stop(mddev_t *mddev)
{
	raid0_conf_t *conf = mddev->private;

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
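/*
 * Illustrative example (hypothetical sizes): with two zones ending at
 * sectors 1000 and 1800, a request at sector 1200 falls in zone 1 and
 * *sectorp becomes 1200 - 1000 = 200, the offset within that zone.
 */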
static struct strip_zone *find_zone(struct raid0_private_data *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * Remaps the bio to the target device. We separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance.
 */
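/*
 * Illustrative example (hypothetical figures, not from the original
 * source): chunk_sects = 8, a two-device zone starting at sector 0,
 * sector = 21. Then sect_in_chunk = 21 & 7 = 5, the chunk index on the
 * device is 21 / (2 * 8) = 1, the device index is (21 >> 3) % 2 = 0,
 * and *sector_offset becomes 1 * 8 + 5 = 13 on that device.
 */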
static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	raid0_conf_t *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in the real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device:
	 * real sector = chunk in device + start of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

/*
 * Is the IO distributed over one or more chunks?
 */
static inline int is_io_in_chunk_boundary(mddev_t *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
					+ (bio->bi_size >> 9));
	} else {
		sector_t sector = bio->bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ (bio->bi_size >> 9));
	}
}
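/*
 * Example for the check above (hypothetical figures): with
 * chunk_sects = 128, a bio at sector 100 covering 30 sectors gives
 * (100 & 127) + 30 = 130 > 128, so the IO crosses a chunk boundary
 * and must be split before it can be mapped to a single device.
 */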

static int raid0_make_request(mddev_t *mddev, struct bio *bio)
{
	unsigned int chunk_sects;
	sector_t sector_offset;
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return 0;
	}

	chunk_sects = mddev->chunk_sectors;
	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
		sector_t sector = bio->bi_sector;
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		if (likely(is_power_of_2(chunk_sects)))
			bp = bio_split(bio, chunk_sects - (sector &
							   (chunk_sects-1)));
		else
			bp = bio_split(bio, chunk_sects -
				       sector_div(sector, chunk_sects));
		if (raid0_make_request(mddev, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(mddev, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}

	sector_offset = bio->bi_sector;
	zone = find_zone(mddev->private, &sector_offset);
	tmp_dev = map_sector(mddev, zone, bio->bi_sector,
			     &sector_offset);
	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = sector_offset + zone->dev_start +
		tmp_dev->data_offset;
	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
	       " or bigger than %dk %llu %d\n",
	       mdname(mddev), chunk_sects / 2,
	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}

static void raid0_status(struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	sector_t zone_size;
	sector_t zone_start = 0;
	h = 0;

	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, "      z%d", j);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->devlist[j*raid_disks + k]
						->bdev, b));

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	raid0_conf_t *priv_conf;

	if (mddev->degraded != 1) {
		printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
		       mdname(mddev),
		       mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			printk(KERN_ERR "md/raid0:%s: raid5 must have a missing parity disk!\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid10(mddev_t *mddev)
{
	raid0_conf_t *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - the number of disks must be even
	 *  - all mirrors must be already degraded
	 */
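	/*
	 * The raid10 layout word stores near_copies in its low byte and
	 * far_copies in the next byte, so the only acceptable value here
	 * is (1 << 8) + 2 == 0x102: one far copy, two near copies.
	 */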
	if (mddev->layout != ((1 << 8) + 2)) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot take over layout: 0x%x\n",
		       mdname(mddev),
		       mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot take over Raid10 with an odd disk number.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(mddev_t *mddev)
{
	raid0_conf_t *priv_conf;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must already be faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(mddev_t *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		printk(KERN_ERR "md/raid0:%s: Raid0 can only take over Raid5 with layout: %d\n",
		       mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(mddev_t *mddev, int state)
{
}

static struct mdk_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");