/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/raid/raid0.h>

/* Unplug each member device's queue so any queued requests get issued. */
static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i;

	for (i=0; i<mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}

/* The array is congested as soon as any one member device is congested. */
static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

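/*
 * create_strip_zones() builds the zone table from the member devices.
 * Zone 0 stripes across all devices up to the size of the smallest one;
 * each distinct device size beyond that starts a further zone striped
 * over the devices that still have room.  It also picks a hash_spacing
 * so that one page of hash table entries covers the whole array with
 * most entries spanning at most two strips.
 */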
static int create_strip_zones (mddev_t *mddev)
{
	int i, c, j;
	sector_t current_offset, curr_zone_offset;
	sector_t min_spacing;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
	struct list_head *tmp1, *tmp2;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];

	/*
	 * The number of 'same size groups'
	 */
	conf->nr_strip_zones = 0;

	rdev_for_each(rdev1, tmp1, mddev) {
		printk("raid0: looking at %s\n",
			bdevname(rdev1->bdev,b));
		c = 0;
		rdev_for_each(rdev2, tmp2, mddev) {
			printk("raid0: comparing %s(%llu)",
			       bdevname(rdev1->bdev,b),
			       (unsigned long long)rdev1->size);
			printk(" with %s(%llu)\n",
			       bdevname(rdev2->bdev,b),
			       (unsigned long long)rdev2->size);
			if (rdev2 == rdev1) {
				printk("raid0: END\n");
				break;
			}
			if (rdev2->size == rdev1->size)
			{
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk("raid0: EQUAL\n");
				c = 1;
				break;
			}
			printk("raid0: NOT EQUAL\n");
		}
		if (!c) {
			printk("raid0: ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk("raid0: %d zones\n", conf->nr_strip_zones);
		}
	}
	printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);

	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		return 1;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		return 1;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	zone->dev = conf->devlist;
	rdev_for_each(rdev1, tmp1, mddev) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk("raid0: bad disk number %d - aborting!\n", j);
			goto abort;
		}
		if (zone->dev[j]) {
			printk("raid0: multiple devices for %d - aborting!\n",
				j);
			goto abort;
		}
		zone->dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one page request is never in violation.
		 */

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->size < smallest->size))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk("raid0: too few disks (%d of %d) - aborting!\n",
			cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->size = smallest->size * cnt;
	zone->zone_offset = 0;

	current_offset = smallest->size;
	curr_zone_offset = zone->size;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		zone = conf->strip_zone + i;
		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

		printk("raid0: zone %d\n", i);
		zone->dev_offset = current_offset;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->strip_zone[0].dev[j];
			printk("raid0: checking %s ...", bdevname(rdev->bdev,b));
			if (rdev->size > current_offset)
			{
				printk(" contained as device %d\n", c);
				zone->dev[c] = rdev;
				c++;
				if (!smallest || (rdev->size < smallest->size)) {
					smallest = rdev;
					printk(" (%llu) is smallest!\n",
						(unsigned long long)rdev->size);
				}
			} else
				printk(" nope.\n");
		}

		zone->nb_dev = c;
		zone->size = (smallest->size - current_offset) * c;
		printk("raid0: zone->nb_dev: %d, size: %llu\n",
			zone->nb_dev, (unsigned long long)zone->size);

		zone->zone_offset = curr_zone_offset;
		curr_zone_offset += zone->size;

		current_offset = smallest->size;
		printk("raid0: current zone offset: %llu\n",
			(unsigned long long)current_offset);
	}

	/* Now find appropriate hash spacing.
	 * We want a number which causes most hash entries to cover
	 * at most two strips, but the hash table must be at most
	 * 1 PAGE.  We choose the smallest strip, or contiguous collection
	 * of strips, that has big enough size.  We never consider the last
	 * strip though as its size has no bearing on the efficacy of the hash
	 * table.
	 */
	conf->hash_spacing = curr_zone_offset;
	min_spacing = curr_zone_offset;
	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
	for (i=0; i < conf->nr_strip_zones-1; i++) {
		sector_t sz = 0;
		for (j=i; j<conf->nr_strip_zones-1 &&
			     sz < min_spacing ; j++)
			sz += conf->strip_zone[j].size;
		if (sz >= min_spacing && sz < conf->hash_spacing)
			conf->hash_spacing = sz;
	}

	mddev->queue->unplug_fn = raid0_unplug;

	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	printk("raid0: done.\n");
	return 0;
 abort:
	return 1;
}

/**
 *	raid0_mergeable_bvec -- tell the bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return the number of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}

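/*
 * raid0_run() brings the array online: it tunes the request queue to
 * the chunk size, builds the strip zones, computes the array size and
 * the zone hash table, and bumps the read-ahead window so that it
 * covers at least two full stripes.
 */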
static int raid0_run (mddev_t *mddev)
{
	unsigned cur=0, i=0, nb_zone;
	s64 size;
	raid0_conf_t *conf;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	if (mddev->chunk_size == 0) {
		printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
		return -EINVAL;
	}
	printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
	       mdname(mddev),
	       mddev->chunk_size >> 9,
	       (mddev->chunk_size>>1)-1);
	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
	if (!conf)
		goto out;
	mddev->private = (void *)conf;

	conf->strip_zone = NULL;
	conf->devlist = NULL;
	if (create_strip_zones (mddev))
		goto out_free_conf;

	/* calculate array device size */
	mddev->array_sectors = 0;
	rdev_for_each(rdev, tmp, mddev)
		mddev->array_sectors += rdev->size * 2;

	printk("raid0 : md_size is %llu blocks.\n",
		(unsigned long long)mddev->array_sectors / 2);
	printk("raid0 : conf->hash_spacing is %llu blocks.\n",
		(unsigned long long)conf->hash_spacing);
	{
		sector_t s = mddev->array_sectors / 2;
		sector_t space = conf->hash_spacing;
		int round;
		conf->preshift = 0;
		if (sizeof(sector_t) > sizeof(u32)) {
			/* shift down space and s so that sector_div will work */
			while (space > (sector_t) (~(u32)0)) {
				s >>= 1;
				space >>= 1;
				s += 1; /* force round-up */
				conf->preshift++;
			}
		}
		round = sector_div(s, (u32)space) ? 1 : 0;
		nb_zone = s + round;
	}
	printk("raid0 : nb_zone is %d.\n", nb_zone);

	printk("raid0 : Allocating %Zd bytes for hash.\n",
				nb_zone*sizeof(struct strip_zone*));
	conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
	if (!conf->hash_table)
		goto out_free_conf;
	size = conf->strip_zone[cur].size;

	conf->hash_table[0] = conf->strip_zone + cur;
	for (i=1; i< nb_zone; i++) {
		while (size <= conf->hash_spacing) {
			cur++;
			size += conf->strip_zone[cur].size;
		}
		size -= conf->hash_spacing;
		conf->hash_table[i] = conf->strip_zone + cur;
	}
	if (conf->preshift) {
		conf->hash_spacing >>= conf->preshift;
		/* round hash_spacing up so when we divide by it, we
		 * err on the side of too-low, which is safest
		 */
		conf->hash_spacing++;
	}

	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe. i.e. number of devices
	 * multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}


	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	return 0;

out_free_conf:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
out:
	return -ENOMEM;
}

static int raid0_stop (mddev_t *mddev)
{
	raid0_conf_t *conf = mddev_to_conf(mddev);

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->hash_table);
	conf->hash_table = NULL;
	kfree(conf->strip_zone);
	conf->strip_zone = NULL;
	kfree(conf);
	mddev->private = NULL;

	return 0;
}

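/*
 * raid0_make_request() maps an incoming bio onto a member device: bios
 * that would straddle a chunk boundary are split, the zone is found via
 * the hash table, and the target device and device sector are computed
 * from the chunk number.  Returning 1 asks the block layer to resubmit
 * the remapped bio.
 *
 * A hypothetical worked example: two equally sized disks and 64k
 * chunks, so chunksize_bits = 6 (chunks are counted in 1k blocks).
 * Sector 400 is block 200, global chunk 200 >> 6 = 3, which
 * sector_div() splits into chunk 3 / 2 = 1 on device 3 % 2 = 1; the
 * remapped sector is ((1 << 6) << 1) + (400 & 127) = 144 on the
 * second disk.
 */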
static int raid0_make_request (struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	sector_t chunk;
	sector_t block, rsect;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	chunk_size = mddev->chunk_size >> 10;
	chunk_sects = mddev->chunk_size >> 9;
	chunksize_bits = ffz(~chunk_size);
	block = bio->bi_sector >> 1;


	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}


	{
		sector_t x = block >> conf->preshift;
		sector_div(x, (u32)conf->hash_spacing);
		zone = conf->hash_table[x];
	}

	while (block >= (zone->zone_offset + zone->size))
		zone++;

	sect_in_chunk = bio->bi_sector & ((chunk_size<<1) -1);


	{
		sector_t x = (block - zone->zone_offset) >> chunksize_bits;

		sector_div(x, zone->nb_dev);
		chunk = x;

		x = block >> chunksize_bits;
		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
	}
	rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1)
		+ sect_in_chunk;

	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = rsect + tmp_dev->data_offset;

	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_size,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}

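/*
 * raid0_status() contributes the raid0-specific part of /proc/mdstat;
 * normally just the chunk size, or the full zone and hash layout when
 * MD_DEBUG is defined (the #undef below keeps that branch disabled).
 */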
static void raid0_status (struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev_to_conf(mddev);

	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, " z%d", j);
		if (conf->hash_table[h] == conf->strip_zone+j)
			seq_printf(seq, "(h%d)", h++);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->strip_zone[j].dev[k]->bdev,b));

		seq_printf(seq, "] zo=%d do=%d s=%d\n",
				conf->strip_zone[j].zone_offset,
				conf->strip_zone[j].dev_offset,
				conf->strip_zone[j].size);
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
	return;
}

static struct mdk_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");