Lines Matching +full:odd +full:- +full:numbered

1 // SPDX-License-Identifier: GPL-2.0-or-later
4 Copyright (C) 1994-96 Marc ZYNGIER
5 <zyngier@ufr-info-p7.ibp.fr> or
9 RAID-0 management functions.
40 struct r0conf *conf = mddev->private; in dump_zones()
41 int raid_disks = conf->strip_zone[0].nb_dev; in dump_zones()
42 pr_debug("md: RAID0 configuration for %s - %d zone%s\n", in dump_zones()
44 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s"); in dump_zones()
45 for (j = 0; j < conf->nr_strip_zones; j++) { in dump_zones()
49 for (k = 0; k < conf->strip_zone[j].nb_dev; k++) in dump_zones()
50 len += scnprintf(line+len, 200-len, "%s%pg", k?"/":"", in dump_zones()
51 conf->devlist[j * raid_disks + k]->bdev); in dump_zones()
54 zone_size = conf->strip_zone[j].zone_end - zone_start; in dump_zones()
55 pr_debug(" zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n", in dump_zones()
57 (unsigned long long)conf->strip_zone[j].dev_start>>1, in dump_zones()
59 zone_start = conf->strip_zone[j].zone_end; in dump_zones()
73 *private_conf = ERR_PTR(-ENOMEM); in create_strip_zones()
75 return -ENOMEM; in create_strip_zones()
79 rdev1->bdev); in create_strip_zones()
83 sectors = rdev1->sectors; in create_strip_zones()
84 sector_div(sectors, mddev->chunk_sectors); in create_strip_zones()
85 rdev1->sectors = sectors * mddev->chunk_sectors; in create_strip_zones()
88 rdev1->bdev->bd_disk->queue)); in create_strip_zones()
94 rdev1->bdev, in create_strip_zones()
95 (unsigned long long)rdev1->sectors, in create_strip_zones()
96 rdev2->bdev, in create_strip_zones()
97 (unsigned long long)rdev2->sectors); in create_strip_zones()
103 if (rdev2->sectors == rdev1->sectors) { in create_strip_zones()
119 conf->nr_strip_zones++; in create_strip_zones()
121 mdname(mddev), conf->nr_strip_zones); in create_strip_zones()
125 mdname(mddev), conf->nr_strip_zones); in create_strip_zones()
131 if ((mddev->chunk_sectors << 9) % blksize) { in create_strip_zones()
134 mddev->chunk_sectors << 9, blksize); in create_strip_zones()
135 err = -EINVAL; in create_strip_zones()
139 err = -ENOMEM; in create_strip_zones()
140 conf->strip_zone = kcalloc(conf->nr_strip_zones, in create_strip_zones()
143 if (!conf->strip_zone) in create_strip_zones()
145 conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *), in create_strip_zones()
146 conf->nr_strip_zones, in create_strip_zones()
147 mddev->raid_disks), in create_strip_zones()
149 if (!conf->devlist) in create_strip_zones()
155 zone = &conf->strip_zone[0]; in create_strip_zones()
158 dev = conf->devlist; in create_strip_zones()
159 err = -EINVAL; in create_strip_zones()
161 int j = rdev1->raid_disk; in create_strip_zones()
163 if (mddev->level == 10) { in create_strip_zones()
164 /* taking over a raid10-n2 array */ in create_strip_zones()
166 rdev1->new_raid_disk = j; in create_strip_zones()
169 if (mddev->level == 1) { in create_strip_zones()
170 		/* taking over a raid1 array- in create_strip_zones()
174 rdev1->new_raid_disk = j; in create_strip_zones()
182 if (j >= mddev->raid_disks) { in create_strip_zones()
183 pr_warn("md/raid0:%s: bad disk number %d - aborting!\n", in create_strip_zones()
188 pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n", in create_strip_zones()
194 if (!smallest || (rdev1->sectors < smallest->sectors)) in create_strip_zones()
198 if (cnt != mddev->raid_disks) { in create_strip_zones()
199 pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n", in create_strip_zones()
200 mdname(mddev), cnt, mddev->raid_disks); in create_strip_zones()
203 zone->nb_dev = cnt; in create_strip_zones()
204 zone->zone_end = smallest->sectors * cnt; in create_strip_zones()
206 curr_zone_end = zone->zone_end; in create_strip_zones()
209 for (i = 1; i < conf->nr_strip_zones; i++) in create_strip_zones()
213 zone = conf->strip_zone + i; in create_strip_zones()
214 dev = conf->devlist + i * mddev->raid_disks; in create_strip_zones()
217 zone->dev_start = smallest->sectors; in create_strip_zones()
222 rdev = conf->devlist[j]; in create_strip_zones()
223 if (rdev->sectors <= zone->dev_start) { in create_strip_zones()
226 rdev->bdev); in create_strip_zones()
232 rdev->bdev, c); in create_strip_zones()
235 if (!smallest || rdev->sectors < smallest->sectors) { in create_strip_zones()
239 (unsigned long long)rdev->sectors); in create_strip_zones()
243 zone->nb_dev = c; in create_strip_zones()
244 sectors = (smallest->sectors - zone->dev_start) * c; in create_strip_zones()
245 pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n", in create_strip_zones()
247 zone->nb_dev, (unsigned long long)sectors); in create_strip_zones()
250 zone->zone_end = curr_zone_end; in create_strip_zones()
254 (unsigned long long)smallest->sectors); in create_strip_zones()
257 if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) { in create_strip_zones()
258 conf->layout = RAID0_ORIG_LAYOUT; in create_strip_zones()
259 } else if (mddev->layout == RAID0_ORIG_LAYOUT || in create_strip_zones()
260 mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) { in create_strip_zones()
261 conf->layout = mddev->layout; in create_strip_zones()
264 conf->layout = default_layout; in create_strip_zones()
266 pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n", in create_strip_zones()
269 err = -EOPNOTSUPP; in create_strip_zones()
273 if (conf->layout == RAID0_ORIG_LAYOUT) { in create_strip_zones()
274 for (i = 1; i < conf->nr_strip_zones; i++) { in create_strip_zones()
275 sector_t first_sector = conf->strip_zone[i-1].zone_end; in create_strip_zones()
277 sector_div(first_sector, mddev->chunk_sectors); in create_strip_zones()
278 zone = conf->strip_zone + i; in create_strip_zones()
280 zone->disk_shift = sector_div(first_sector, in create_strip_zones()
281 zone->nb_dev); in create_strip_zones()
290 kfree(conf->strip_zone); in create_strip_zones()
291 kfree(conf->devlist); in create_strip_zones()
304 struct strip_zone *z = conf->strip_zone; in find_zone()
307 for (i = 0; i < conf->nr_strip_zones; i++) in find_zone()
310 *sectorp = sector - z[i-1].zone_end; in find_zone()
325 struct r0conf *conf = mddev->private; in map_sector()
326 int raid_disks = conf->strip_zone[0].nb_dev; in map_sector()
327 unsigned int chunk_sects = mddev->chunk_sectors; in map_sector()
332 sect_in_chunk = sector & (chunk_sects - 1); in map_sector()
337 sector_div(chunk, zone->nb_dev << chunksect_bits); in map_sector()
341 sector_div(chunk, chunk_sects * zone->nb_dev); in map_sector()
349 return conf->devlist[(zone - conf->strip_zone)*raid_disks in map_sector()
350 + sector_div(sector, zone->nb_dev)]; in map_sector()
362 array_sectors += (rdev->sectors & in raid0_size()
363 ~(sector_t)(mddev->chunk_sectors-1)); in raid0_size()
372 kfree(conf->strip_zone); in raid0_free()
373 kfree(conf->devlist); in raid0_free()
383 lim.max_hw_sectors = mddev->chunk_sectors; in raid0_set_limits()
384 lim.max_write_zeroes_sectors = mddev->chunk_sectors; in raid0_set_limits()
385 lim.io_min = mddev->chunk_sectors << 9; in raid0_set_limits()
386 lim.io_opt = lim.io_min * mddev->raid_disks; in raid0_set_limits()
391 return queue_limits_set(mddev->gendisk->queue, &lim); in raid0_set_limits()
399 if (mddev->chunk_sectors == 0) { in raid0_run()
401 return -EINVAL; in raid0_run()
404 return -EINVAL; in raid0_run()
407 if (mddev->private == NULL) { in raid0_run()
411 mddev->private = conf; in raid0_run()
413 conf = mddev->private; in raid0_run()
425 (unsigned long long)mddev->array_sectors); in raid0_run()
434 * For example, if we have 4 disks, they are numbered 0,1,2,3. If we
443 return ((disk_index + num_disks - disk_shift) % num_disks); in map_disk_shift()
448 struct r0conf *conf = mddev->private; in raid0_handle_discard()
450 sector_t start = bio->bi_iter.bi_sector; in raid0_handle_discard()
464 if (bio_end_sector(bio) > zone->zone_end) { in raid0_handle_discard()
466 zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO, in raid0_handle_discard()
467 &mddev->bio_set); in raid0_handle_discard()
470 bio->bi_status = errno_to_blk_status(PTR_ERR(split)); in raid0_handle_discard()
477 end = zone->zone_end; in raid0_handle_discard()
482 if (zone != conf->strip_zone) in raid0_handle_discard()
483 end = end - zone[-1].zone_end; in raid0_handle_discard()
486 stripe_size = zone->nb_dev * mddev->chunk_sectors; in raid0_handle_discard()
494 if ((conf->layout == RAID0_ORIG_LAYOUT) && (zone != conf->strip_zone)) { in raid0_handle_discard()
495 sector_div(orig_start, mddev->chunk_sectors); in raid0_handle_discard()
496 start_disk_index = sector_div(orig_start, zone->nb_dev); in raid0_handle_discard()
498 zone->nb_dev, in raid0_handle_discard()
499 zone->disk_shift); in raid0_handle_discard()
500 sector_div(orig_end, mddev->chunk_sectors); in raid0_handle_discard()
501 end_disk_index = sector_div(orig_end, zone->nb_dev); in raid0_handle_discard()
503 zone->nb_dev, zone->disk_shift); in raid0_handle_discard()
505 start_disk_index = (int)(start - first_stripe_index * stripe_size) / in raid0_handle_discard()
506 mddev->chunk_sectors; in raid0_handle_discard()
507 end_disk_index = (int)(end - last_stripe_index * stripe_size) / in raid0_handle_discard()
508 mddev->chunk_sectors; in raid0_handle_discard()
510 start_disk_offset = ((int)(start - first_stripe_index * stripe_size) % in raid0_handle_discard()
511 mddev->chunk_sectors) + in raid0_handle_discard()
512 first_stripe_index * mddev->chunk_sectors; in raid0_handle_discard()
513 end_disk_offset = ((int)(end - last_stripe_index * stripe_size) % in raid0_handle_discard()
514 mddev->chunk_sectors) + in raid0_handle_discard()
515 last_stripe_index * mddev->chunk_sectors; in raid0_handle_discard()
517 for (disk = 0; disk < zone->nb_dev; disk++) { in raid0_handle_discard()
522 compare_disk = map_disk_shift(disk, zone->nb_dev, in raid0_handle_discard()
523 zone->disk_shift); in raid0_handle_discard()
527 mddev->chunk_sectors; in raid0_handle_discard()
529 dev_start = first_stripe_index * mddev->chunk_sectors; in raid0_handle_discard()
534 dev_end = (last_stripe_index + 1) * mddev->chunk_sectors; in raid0_handle_discard()
536 dev_end = last_stripe_index * mddev->chunk_sectors; in raid0_handle_discard()
543 rdev = conf->devlist[(zone - conf->strip_zone) * in raid0_handle_discard()
544 conf->strip_zone[0].nb_dev + disk]; in raid0_handle_discard()
546 dev_start + zone->dev_start + rdev->data_offset, in raid0_handle_discard()
547 dev_end - dev_start); in raid0_handle_discard()
554 struct r0conf *conf = mddev->private; in raid0_map_submit_bio()
557 sector_t bio_sector = bio->bi_iter.bi_sector; in raid0_map_submit_bio()
562 zone = find_zone(mddev->private, &sector); in raid0_map_submit_bio()
563 switch (conf->layout) { in raid0_map_submit_bio()
582 bio_set_dev(bio, tmp_dev->bdev); in raid0_map_submit_bio()
583 bio->bi_iter.bi_sector = sector + zone->dev_start + in raid0_map_submit_bio()
584 tmp_dev->data_offset; in raid0_map_submit_bio()
596 if (unlikely(bio->bi_opf & REQ_PREFLUSH) in raid0_make_request()
605 sector = bio->bi_iter.bi_sector; in raid0_make_request()
606 chunk_sects = mddev->chunk_sectors; in raid0_make_request()
608 sectors = chunk_sects - in raid0_make_request()
610 ? (sector & (chunk_sects-1)) in raid0_make_request()
615 &mddev->bio_set); in raid0_make_request()
618 bio->bi_status = errno_to_blk_status(PTR_ERR(split)); in raid0_make_request()
633 seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2); in raid0_status()
639 if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) { in raid0_error()
643 md_name, rdev->bdev); in raid0_error()
652 if (mddev->degraded != 1) { in raid0_takeover_raid45()
655 mddev->degraded); in raid0_takeover_raid45()
656 return ERR_PTR(-EINVAL); in raid0_takeover_raid45()
661 if (rdev->raid_disk == mddev->raid_disks-1) { in raid0_takeover_raid45()
664 return ERR_PTR(-EINVAL); in raid0_takeover_raid45()
666 rdev->sectors = mddev->dev_sectors; in raid0_takeover_raid45()
670 mddev->new_level = 0; in raid0_takeover_raid45()
671 mddev->new_layout = 0; in raid0_takeover_raid45()
672 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid0_takeover_raid45()
673 mddev->raid_disks--; in raid0_takeover_raid45()
674 mddev->delta_disks = -1; in raid0_takeover_raid45()
676 mddev->recovery_cp = MaxSector; in raid0_takeover_raid45()
689 * - far_copies must be 1 in raid0_takeover_raid10()
690 * - near_copies must be 2 in raid0_takeover_raid10()
691 * - disks number must be even in raid0_takeover_raid10()
692 * - all mirrors must be already degraded in raid0_takeover_raid10()
694 if (mddev->layout != ((1 << 8) + 2)) { in raid0_takeover_raid10()
697 mddev->layout); in raid0_takeover_raid10()
698 return ERR_PTR(-EINVAL); in raid0_takeover_raid10()
700 if (mddev->raid_disks & 1) { in raid0_takeover_raid10()
701 pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n", in raid0_takeover_raid10()
703 return ERR_PTR(-EINVAL); in raid0_takeover_raid10()
705 if (mddev->degraded != (mddev->raid_disks>>1)) { in raid0_takeover_raid10()
708 return ERR_PTR(-EINVAL); in raid0_takeover_raid10()
712 mddev->new_level = 0; in raid0_takeover_raid10()
713 mddev->new_layout = 0; in raid0_takeover_raid10()
714 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid0_takeover_raid10()
715 mddev->delta_disks = - mddev->raid_disks / 2; in raid0_takeover_raid10()
716 mddev->raid_disks += mddev->delta_disks; in raid0_takeover_raid10()
717 mddev->degraded = 0; in raid0_takeover_raid10()
719 mddev->recovery_cp = MaxSector; in raid0_takeover_raid10()
732 * - (N - 1) mirror drives must be already faulty in raid0_takeover_raid1()
734 if ((mddev->raid_disks - 1) != mddev->degraded) { in raid0_takeover_raid1()
735 pr_err("md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n", in raid0_takeover_raid1()
737 return ERR_PTR(-EINVAL); in raid0_takeover_raid1()
747 while (chunksect && (mddev->array_sectors & (chunksect - 1))) in raid0_takeover_raid1()
752 return ERR_PTR(-EINVAL); in raid0_takeover_raid1()
755 mddev->new_level = 0; in raid0_takeover_raid1()
756 mddev->new_layout = 0; in raid0_takeover_raid1()
757 mddev->new_chunk_sectors = chunksect; in raid0_takeover_raid1()
758 mddev->chunk_sectors = chunksect; in raid0_takeover_raid1()
759 mddev->delta_disks = 1 - mddev->raid_disks; in raid0_takeover_raid1()
760 mddev->raid_disks = 1; in raid0_takeover_raid1()
762 mddev->recovery_cp = MaxSector; in raid0_takeover_raid1()
772 * raid4 - if all data disks are active. in raid0_takeover()
773 * raid5 - providing it is Raid4 layout and one disk is faulty in raid0_takeover()
774 * raid10 - assuming we have all necessary active disks in raid0_takeover()
775 * raid1 - with (N -1) mirror drives faulty in raid0_takeover()
778 if (mddev->bitmap) { in raid0_takeover()
781 return ERR_PTR(-EBUSY); in raid0_takeover()
783 if (mddev->level == 4) in raid0_takeover()
786 if (mddev->level == 5) { in raid0_takeover()
787 if (mddev->layout == ALGORITHM_PARITY_N) in raid0_takeover()
794 if (mddev->level == 10) in raid0_takeover()
797 if (mddev->level == 1) in raid0_takeover()
801 mddev->level); in raid0_takeover()
803 return ERR_PTR(-EINVAL); in raid0_takeover()
839 MODULE_ALIAS("md-personality-2"); /* RAID0 */
840 MODULE_ALIAS("md-raid0");
841 MODULE_ALIAS("md-level-0");