xref: /linux/drivers/md/raid5.c (revision 4f1933620f57145212cdbb1ac6ce099eeeb21c5a)
1 /*
2  * raid5.c : Multiple Devices driver for Linux
3  *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4  *	   Copyright (C) 1999, 2000 Ingo Molnar
5  *
6  * RAID-5 management functions.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * You should have received a copy of the GNU General Public License
14  * (for example /usr/src/linux/COPYING); if not, write to the Free
15  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
16  */
17 
18 
19 #include <linux/config.h>
20 #include <linux/module.h>
21 #include <linux/slab.h>
22 #include <linux/raid/raid5.h>
23 #include <linux/highmem.h>
24 #include <linux/bitops.h>
25 #include <asm/atomic.h>
26 
27 #include <linux/raid/bitmap.h>
28 
29 /*
30  * Stripe cache
31  */
32 
33 #define NR_STRIPES		256
34 #define STRIPE_SIZE		PAGE_SIZE
35 #define STRIPE_SHIFT		(PAGE_SHIFT - 9)
36 #define STRIPE_SECTORS		(STRIPE_SIZE>>9)
37 #define	IO_THRESHOLD		1
38 #define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
39 #define HASH_MASK		(NR_HASH - 1)
40 
41 #define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
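/*
 * Worked example, assuming 4KiB pages and 8-byte list pointers:
 * STRIPE_SIZE is 4096 bytes, STRIPE_SHIFT is 12 - 9 = 3 and
 * STRIPE_SECTORS is 8, so each stripe_head caches eight 512-byte
 * sectors per device.  NR_HASH is 4096 / 8 = 512 buckets, and
 * stripe_hash() indexes them with (sector >> 3) & 511, so stripes
 * that are STRIPE_SECTORS apart fall into consecutive buckets.
 */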
42 
43 /* bios attached to a stripe+device for I/O are linked together in bi_sector
44  * order without overlap.  There may be several bios per stripe+device, and
45  * a bio could span several devices.
46  * When walking this list for a particular stripe+device, we must never proceed
47  * beyond a bio that extends past this device, as the next bio might no longer
48  * be valid.
49  * This macro is used to determine the 'next' bio in the list, given the sector
50  * of the current stripe+device
51  */
52 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
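/*
 * Example, again assuming STRIPE_SECTORS == 8: for the stripe+device
 * starting at sector 8, a bio at sector 8 of 2048 bytes ends at
 * sector 12, inside the stripe, so r5_next_bio yields bi_next; a bio
 * at sector 8 of 8192 bytes ends at sector 24, past the stripe, so
 * r5_next_bio yields NULL and the walk stops there.
 */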
53 /*
54  * The following can be used to debug the driver
55  */
56 #define RAID5_DEBUG	0
57 #define RAID5_PARANOIA	1
58 #if RAID5_PARANOIA && defined(CONFIG_SMP)
59 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
60 #else
61 # define CHECK_DEVLOCK()
62 #endif
63 
64 #define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
65 #if RAID5_DEBUG
66 #define inline
67 #define __inline__
68 #endif
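/*
 * With RAID5_DEBUG enabled, 'inline' is defined away so helper functions
 * keep their own symbols and remain visible in stack traces.
 */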
69 
70 static void print_raid5_conf (raid5_conf_t *conf);
71 
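/*
 * Drop one reference to a stripe.  Must be called with conf->device_lock
 * held.  When the last reference goes away, the stripe is either queued
 * for handling (handle_list, or a delay list if writes are being held
 * back), or returned to the inactive free list.
 */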
72 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
73 {
74 	if (atomic_dec_and_test(&sh->count)) {
75 		if (!list_empty(&sh->lru))
76 			BUG();
77 		if (atomic_read(&conf->active_stripes)==0)
78 			BUG();
79 		if (test_bit(STRIPE_HANDLE, &sh->state)) {
80 			if (test_bit(STRIPE_DELAYED, &sh->state))
81 				list_add_tail(&sh->lru, &conf->delayed_list);
82 			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
83 				 conf->seq_write == sh->bm_seq)
84 				list_add_tail(&sh->lru, &conf->bitmap_list);
85 			else {
86 				clear_bit(STRIPE_BIT_DELAY, &sh->state);
87 				list_add_tail(&sh->lru, &conf->handle_list);
88 			}
89 			md_wakeup_thread(conf->mddev->thread);
90 		} else {
91 			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
92 				atomic_dec(&conf->preread_active_stripes);
93 				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
94 					md_wakeup_thread(conf->mddev->thread);
95 			}
96 			list_add_tail(&sh->lru, &conf->inactive_list);
97 			atomic_dec(&conf->active_stripes);
98 			if (!conf->inactive_blocked ||
99 			    atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
100 				wake_up(&conf->wait_for_stripe);
101 		}
102 	}
103 }
104 static void release_stripe(struct stripe_head *sh)
105 {
106 	raid5_conf_t *conf = sh->raid_conf;
107 	unsigned long flags;
108 
109 	spin_lock_irqsave(&conf->device_lock, flags);
110 	__release_stripe(conf, sh);
111 	spin_unlock_irqrestore(&conf->device_lock, flags);
112 }
113 
114 static inline void remove_hash(struct stripe_head *sh)
115 {
116 	PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
117 
118 	hlist_del_init(&sh->hash);
119 }
120 
121 static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
122 {
123 	struct hlist_head *hp = stripe_hash(conf, sh->sector);
124 
125 	PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
126 
127 	CHECK_DEVLOCK();
128 	hlist_add_head(&sh->hash, hp);
129 }
130 
131 
132 /* find an idle stripe, make sure it is unhashed, and return it. */
133 static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
134 {
135 	struct stripe_head *sh = NULL;
136 	struct list_head *first;
137 
138 	CHECK_DEVLOCK();
139 	if (list_empty(&conf->inactive_list))
140 		goto out;
141 	first = conf->inactive_list.next;
142 	sh = list_entry(first, struct stripe_head, lru);
143 	list_del_init(first);
144 	remove_hash(sh);
145 	atomic_inc(&conf->active_stripes);
146 out:
147 	return sh;
148 }
149 
150 static void shrink_buffers(struct stripe_head *sh, int num)
151 {
152 	struct page *p;
153 	int i;
154 
155 	for (i=0; i<num ; i++) {
156 		p = sh->dev[i].page;
157 		if (!p)
158 			continue;
159 		sh->dev[i].page = NULL;
160 		put_page(p);
161 	}
162 }
163 
164 static int grow_buffers(struct stripe_head *sh, int num)
165 {
166 	int i;
167 
168 	for (i=0; i<num; i++) {
169 		struct page *page;
170 
171 		if (!(page = alloc_page(GFP_KERNEL))) {
172 			return 1;
173 		}
174 		sh->dev[i].page = page;
175 	}
176 	return 0;
177 }
178 
179 static void raid5_build_block (struct stripe_head *sh, int i);
180 
181 static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
182 {
183 	raid5_conf_t *conf = sh->raid_conf;
184 	int disks = conf->raid_disks, i;
185 
186 	if (atomic_read(&sh->count) != 0)
187 		BUG();
188 	if (test_bit(STRIPE_HANDLE, &sh->state))
189 		BUG();
190 
191 	CHECK_DEVLOCK();
192 	PRINTK("init_stripe called, stripe %llu\n",
193 		(unsigned long long)sh->sector);
194 
195 	remove_hash(sh);
196 
197 	sh->sector = sector;
198 	sh->pd_idx = pd_idx;
199 	sh->state = 0;
200 
201 	for (i=disks; i--; ) {
202 		struct r5dev *dev = &sh->dev[i];
203 
204 		if (dev->toread || dev->towrite || dev->written ||
205 		    test_bit(R5_LOCKED, &dev->flags)) {
206 			printk("sector=%llx i=%d %p %p %p %d\n",
207 			       (unsigned long long)sh->sector, i, dev->toread,
208 			       dev->towrite, dev->written,
209 			       test_bit(R5_LOCKED, &dev->flags));
210 			BUG();
211 		}
212 		dev->flags = 0;
213 		raid5_build_block(sh, i);
214 	}
215 	insert_hash(conf, sh);
216 }
217 
218 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
219 {
220 	struct stripe_head *sh;
221 	struct hlist_node *hn;
222 
223 	CHECK_DEVLOCK();
224 	PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
225 	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
226 		if (sh->sector == sector)
227 			return sh;
228 	PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
229 	return NULL;
230 }
231 
232 static void unplug_slaves(mddev_t *mddev);
233 static void raid5_unplug_device(request_queue_t *q);
234 
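/*
 * Find the stripe covering @sector in the cache, or claim a free stripe
 * and (re)initialise it for that sector.  The caller gets a counted
 * reference; with @noblock set, NULL is returned instead of sleeping
 * when the cache is exhausted.
 */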
235 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector,
236 					     int pd_idx, int noblock)
237 {
238 	struct stripe_head *sh;
239 
240 	PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
241 
242 	spin_lock_irq(&conf->device_lock);
243 
244 	do {
245 		wait_event_lock_irq(conf->wait_for_stripe,
246 				    conf->quiesce == 0,
247 				    conf->device_lock, /* nothing */);
248 		sh = __find_stripe(conf, sector);
249 		if (!sh) {
250 			if (!conf->inactive_blocked)
251 				sh = get_free_stripe(conf);
252 			if (noblock && sh == NULL)
253 				break;
254 			if (!sh) {
255 				conf->inactive_blocked = 1;
256 				wait_event_lock_irq(conf->wait_for_stripe,
257 						    !list_empty(&conf->inactive_list) &&
258 						    (atomic_read(&conf->active_stripes)
259 						     < (conf->max_nr_stripes *3/4)
260 						     || !conf->inactive_blocked),
261 						    conf->device_lock,
262 						    unplug_slaves(conf->mddev);
263 					);
264 				conf->inactive_blocked = 0;
265 			} else
266 				init_stripe(sh, sector, pd_idx);
267 		} else {
268 			if (atomic_read(&sh->count)) {
269 				if (!list_empty(&sh->lru))
270 					BUG();
271 			} else {
272 				if (!test_bit(STRIPE_HANDLE, &sh->state))
273 					atomic_inc(&conf->active_stripes);
274 				if (list_empty(&sh->lru))
275 					BUG();
276 				list_del_init(&sh->lru);
277 			}
278 		}
279 	} while (sh == NULL);
280 
281 	if (sh)
282 		atomic_inc(&sh->count);
283 
284 	spin_unlock_irq(&conf->device_lock);
285 	return sh;
286 }
287 
288 static int grow_one_stripe(raid5_conf_t *conf)
289 {
290 	struct stripe_head *sh;
291 	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
292 	if (!sh)
293 		return 0;
294 	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
295 	sh->raid_conf = conf;
296 	spin_lock_init(&sh->lock);
297 
298 	if (grow_buffers(sh, conf->raid_disks)) {
299 		shrink_buffers(sh, conf->raid_disks);
300 		kmem_cache_free(conf->slab_cache, sh);
301 		return 0;
302 	}
303 	/* we just created an active stripe so... */
304 	atomic_set(&sh->count, 1);
305 	atomic_inc(&conf->active_stripes);
306 	INIT_LIST_HEAD(&sh->lru);
307 	release_stripe(sh);
308 	return 1;
309 }
310 
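/*
 * struct stripe_head (see raid5.h) declares a one-element dev[] array,
 * so every cache object is allocated with room for the full geometry:
 * sizeof(struct stripe_head) plus (raid_disks - 1) extra struct r5dev
 * entries, as in the kmem_cache_create() call below.
 */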
311 static int grow_stripes(raid5_conf_t *conf, int num)
312 {
313 	kmem_cache_t *sc;
314 	int devs = conf->raid_disks;
315 
316 	sprintf(conf->cache_name, "raid5/%s", mdname(conf->mddev));
317 
318 	sc = kmem_cache_create(conf->cache_name,
319 			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
320 			       0, 0, NULL, NULL);
321 	if (!sc)
322 		return 1;
323 	conf->slab_cache = sc;
324 	while (num--) {
325 		if (!grow_one_stripe(conf))
326 			return 1;
327 	}
328 	return 0;
329 }
330 
331 static int drop_one_stripe(raid5_conf_t *conf)
332 {
333 	struct stripe_head *sh;
334 
335 	spin_lock_irq(&conf->device_lock);
336 	sh = get_free_stripe(conf);
337 	spin_unlock_irq(&conf->device_lock);
338 	if (!sh)
339 		return 0;
340 	if (atomic_read(&sh->count))
341 		BUG();
342 	shrink_buffers(sh, conf->raid_disks);
343 	kmem_cache_free(conf->slab_cache, sh);
344 	atomic_dec(&conf->active_stripes);
345 	return 1;
346 }
347 
348 static void shrink_stripes(raid5_conf_t *conf)
349 {
350 	while (drop_one_stripe(conf))
351 		;
352 
353 	if (conf->slab_cache)
354 		kmem_cache_destroy(conf->slab_cache);
355 	conf->slab_cache = NULL;
356 }
357 
358 static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
359 				   int error)
360 {
361  	struct stripe_head *sh = bi->bi_private;
362 	raid5_conf_t *conf = sh->raid_conf;
363 	int disks = conf->raid_disks, i;
364 	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
365 
366 	if (bi->bi_size)
367 		return 1;
368 
369 	for (i=0 ; i<disks; i++)
370 		if (bi == &sh->dev[i].req)
371 			break;
372 
373 	PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
374 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
375 		uptodate);
376 	if (i == disks) {
377 		BUG();
378 		return 0;
379 	}
380 
381 	if (uptodate) {
382 #if 0
383 		struct bio *bio;
384 		unsigned long flags;
385 		spin_lock_irqsave(&conf->device_lock, flags);
386 		/* we can return a buffer if we bypassed the cache or
387 		 * if the top buffer is not in highmem.  If there are
388 		 * multiple buffers, leave the extra work to
389 		 * handle_stripe
390 		 */
391 		buffer = sh->bh_read[i];
392 		if (buffer &&
393 		    (!PageHighMem(buffer->b_page)
394 		     || buffer->b_page == bh->b_page )
395 			) {
396 			sh->bh_read[i] = buffer->b_reqnext;
397 			buffer->b_reqnext = NULL;
398 		} else
399 			buffer = NULL;
400 		spin_unlock_irqrestore(&conf->device_lock, flags);
401 		if (sh->bh_page[i]==bh->b_page)
402 			set_buffer_uptodate(bh);
403 		if (buffer) {
404 			if (buffer->b_page != bh->b_page)
405 				memcpy(buffer->b_data, bh->b_data, bh->b_size);
406 			buffer->b_end_io(buffer, 1);
407 		}
408 #else
409 		set_bit(R5_UPTODATE, &sh->dev[i].flags);
410 #endif
411 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
412 			printk(KERN_INFO "raid5: read error corrected!!\n");
413 			clear_bit(R5_ReadError, &sh->dev[i].flags);
414 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
415 		}
416 		if (atomic_read(&conf->disks[i].rdev->read_errors))
417 			atomic_set(&conf->disks[i].rdev->read_errors, 0);
418 	} else {
419 		int retry = 0;
420 		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
421 		atomic_inc(&conf->disks[i].rdev->read_errors);
422 		if (conf->mddev->degraded)
423 			printk(KERN_WARNING "raid5: read error not correctable.\n");
424 		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
425 			/* Oh, no!!! */
426 			printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
427 		else if (atomic_read(&conf->disks[i].rdev->read_errors)
428 			 > conf->max_nr_stripes)
429 			printk(KERN_WARNING
430 			       "raid5: Too many read errors, failing device.\n");
431 		else
432 			retry = 1;
433 		if (retry)
434 			set_bit(R5_ReadError, &sh->dev[i].flags);
435 		else {
436 			clear_bit(R5_ReadError, &sh->dev[i].flags);
437 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
438 			md_error(conf->mddev, conf->disks[i].rdev);
439 		}
440 	}
441 	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
442 #if 0
443 	/* must restore b_page before unlocking buffer... */
444 	if (sh->bh_page[i] != bh->b_page) {
445 		bh->b_page = sh->bh_page[i];
446 		bh->b_data = page_address(bh->b_page);
447 		clear_buffer_uptodate(bh);
448 	}
449 #endif
450 	clear_bit(R5_LOCKED, &sh->dev[i].flags);
451 	set_bit(STRIPE_HANDLE, &sh->state);
452 	release_stripe(sh);
453 	return 0;
454 }
455 
456 static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
457 				    int error)
458 {
459  	struct stripe_head *sh = bi->bi_private;
460 	raid5_conf_t *conf = sh->raid_conf;
461 	int disks = conf->raid_disks, i;
462 	unsigned long flags;
463 	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
464 
465 	if (bi->bi_size)
466 		return 1;
467 
468 	for (i=0 ; i<disks; i++)
469 		if (bi == &sh->dev[i].req)
470 			break;
471 
472 	PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
473 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
474 		uptodate);
475 	if (i == disks) {
476 		BUG();
477 		return 0;
478 	}
479 
480 	spin_lock_irqsave(&conf->device_lock, flags);
481 	if (!uptodate)
482 		md_error(conf->mddev, conf->disks[i].rdev);
483 
484 	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
485 
486 	clear_bit(R5_LOCKED, &sh->dev[i].flags);
487 	set_bit(STRIPE_HANDLE, &sh->state);
488 	__release_stripe(conf, sh);
489 	spin_unlock_irqrestore(&conf->device_lock, flags);
490 	return 0;
491 }
492 
493 
494 static sector_t compute_blocknr(struct stripe_head *sh, int i);
495 
496 static void raid5_build_block (struct stripe_head *sh, int i)
497 {
498 	struct r5dev *dev = &sh->dev[i];
499 
500 	bio_init(&dev->req);
501 	dev->req.bi_io_vec = &dev->vec;
502 	dev->req.bi_vcnt++;
503 	dev->req.bi_max_vecs++;
504 	dev->vec.bv_page = dev->page;
505 	dev->vec.bv_len = STRIPE_SIZE;
506 	dev->vec.bv_offset = 0;
507 
508 	dev->req.bi_sector = sh->sector;
509 	dev->req.bi_private = sh;
510 
511 	dev->flags = 0;
512 	if (i != sh->pd_idx)
513 		dev->sector = compute_blocknr(sh, i);
514 }
515 
516 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
517 {
518 	char b[BDEVNAME_SIZE];
519 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
520 	PRINTK("raid5: error called\n");
521 
522 	if (!test_bit(Faulty, &rdev->flags)) {
523 		mddev->sb_dirty = 1;
524 		if (test_bit(In_sync, &rdev->flags)) {
525 			conf->working_disks--;
526 			mddev->degraded++;
527 			conf->failed_disks++;
528 			clear_bit(In_sync, &rdev->flags);
529 			/*
530 			 * if recovery was running, make sure it aborts.
531 			 */
532 			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
533 		}
534 		set_bit(Faulty, &rdev->flags);
535 		printk (KERN_ALERT
536 			"raid5: Disk failure on %s, disabling device."
537 			" Operation continuing on %d devices\n",
538 			bdevname(rdev->bdev,b), conf->working_disks);
539 	}
540 }
541 
542 /*
543  * Input: a 'big' sector number,
544  * Output: index of the data and parity disk, and the sector # in them.
545  */
546 static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
547 			unsigned int data_disks, unsigned int * dd_idx,
548 			unsigned int * pd_idx, raid5_conf_t *conf)
549 {
550 	long stripe;
551 	unsigned long chunk_number;
552 	unsigned int chunk_offset;
553 	sector_t new_sector;
554 	int sectors_per_chunk = conf->chunk_size >> 9;
555 
556 	/* First compute the information on this sector */
557 
558 	/*
559 	 * Compute the chunk number and the sector offset inside the chunk
560 	 */
561 	chunk_offset = sector_div(r_sector, sectors_per_chunk);
562 	chunk_number = r_sector;
563 	BUG_ON(r_sector != chunk_number);
564 
565 	/*
566 	 * Compute the stripe number
567 	 */
568 	stripe = chunk_number / data_disks;
569 
570 	/*
571 	 * Compute the data disk and parity disk indexes inside the stripe
572 	 */
573 	*dd_idx = chunk_number % data_disks;
574 
575 	/*
576 	 * Select the parity disk based on the user selected algorithm.
577 	 */
578 	if (conf->level == 4)
579 		*pd_idx = data_disks;
580 	else switch (conf->algorithm) {
581 		case ALGORITHM_LEFT_ASYMMETRIC:
582 			*pd_idx = data_disks - stripe % raid_disks;
583 			if (*dd_idx >= *pd_idx)
584 				(*dd_idx)++;
585 			break;
586 		case ALGORITHM_RIGHT_ASYMMETRIC:
587 			*pd_idx = stripe % raid_disks;
588 			if (*dd_idx >= *pd_idx)
589 				(*dd_idx)++;
590 			break;
591 		case ALGORITHM_LEFT_SYMMETRIC:
592 			*pd_idx = data_disks - stripe % raid_disks;
593 			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
594 			break;
595 		case ALGORITHM_RIGHT_SYMMETRIC:
596 			*pd_idx = stripe % raid_disks;
597 			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
598 			break;
599 		default:
600 			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
601 				conf->algorithm);
602 	}
603 
604 	/*
605 	 * Finally, compute the new sector number
606 	 */
607 	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
608 	return new_sector;
609 }
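/*
 * Worked example (hypothetical configuration): 4 raid disks, 3 data
 * disks, 64KiB chunks (sectors_per_chunk == 128), ALGORITHM_LEFT_SYMMETRIC.
 * For r_sector == 1000:
 *   chunk_offset = 1000 % 128 = 104,  chunk_number = 1000 / 128 = 7
 *   stripe = 7 / 3 = 2,  dd_idx = 7 % 3 = 1
 *   pd_idx = 3 - (2 % 4) = 1,  dd_idx = (1 + 1 + 1) % 4 = 3
 *   new_sector = 2 * 128 + 104 = 360
 * so logical sector 1000 lives on device 3 at sector 360, with the
 * parity for that stripe on device 1.
 */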
610 
611 
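/*
 * The inverse of raid5_compute_sector(): given a stripe's device sector
 * and a disk index, reconstruct the array-wide logical sector.  The
 * result is sanity-checked by mapping it forward again.
 */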
612 static sector_t compute_blocknr(struct stripe_head *sh, int i)
613 {
614 	raid5_conf_t *conf = sh->raid_conf;
615 	int raid_disks = conf->raid_disks, data_disks = raid_disks - 1;
616 	sector_t new_sector = sh->sector, check;
617 	int sectors_per_chunk = conf->chunk_size >> 9;
618 	sector_t stripe;
619 	int chunk_offset;
620 	int chunk_number, dummy1, dummy2, dd_idx = i;
621 	sector_t r_sector;
622 
623 	chunk_offset = sector_div(new_sector, sectors_per_chunk);
624 	stripe = new_sector;
625 	BUG_ON(new_sector != stripe);
626 
627 
628 	switch (conf->algorithm) {
629 		case ALGORITHM_LEFT_ASYMMETRIC:
630 		case ALGORITHM_RIGHT_ASYMMETRIC:
631 			if (i > sh->pd_idx)
632 				i--;
633 			break;
634 		case ALGORITHM_LEFT_SYMMETRIC:
635 		case ALGORITHM_RIGHT_SYMMETRIC:
636 			if (i < sh->pd_idx)
637 				i += raid_disks;
638 			i -= (sh->pd_idx + 1);
639 			break;
640 		default:
641 			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
642 				conf->algorithm);
643 	}
644 
645 	chunk_number = stripe * data_disks + i;
646 	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
647 
648 	check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
649 	if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
650 		printk(KERN_ERR "compute_blocknr: map not correct\n");
651 		return 0;
652 	}
653 	return r_sector;
654 }
655 
656 
657 
658 /*
659  * Copy data between a page in the stripe cache, and a bio.
660  * There are no alignment or size guarantees between the page and the
661  * bio except that there is some overlap.
662  * All iovecs in the bio must be considered.
663  */
664 static void copy_data(int frombio, struct bio *bio,
665 		     struct page *page,
666 		     sector_t sector)
667 {
668 	char *pa = page_address(page);
669 	struct bio_vec *bvl;
670 	int i;
671 	int page_offset;
672 
673 	if (bio->bi_sector >= sector)
674 		page_offset = (signed)(bio->bi_sector - sector) * 512;
675 	else
676 		page_offset = (signed)(sector - bio->bi_sector) * -512;
677 	bio_for_each_segment(bvl, bio, i) {
678 		int len = bio_iovec_idx(bio,i)->bv_len;
679 		int clen;
680 		int b_offset = 0;
681 
682 		if (page_offset < 0) {
683 			b_offset = -page_offset;
684 			page_offset += b_offset;
685 			len -= b_offset;
686 		}
687 
688 		if (len > 0 && page_offset + len > STRIPE_SIZE)
689 			clen = STRIPE_SIZE - page_offset;
690 		else clen = len;
691 
692 		if (clen > 0) {
693 			char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
694 			if (frombio)
695 				memcpy(pa+page_offset, ba+b_offset, clen);
696 			else
697 				memcpy(ba+b_offset, pa+page_offset, clen);
698 			__bio_kunmap_atomic(ba, KM_USER0);
699 		}
700 		if (clen < len) /* hit end of page */
701 			break;
702 		page_offset += len;
703 	}
704 }
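/*
 * Example (assuming 4KiB pages): if the stripe page covers sectors 8-15
 * and a bio with a single 4KiB segment starts at sector 4, page_offset
 * starts out at -2048; the first 2048 bytes of the segment fall before
 * this page and are skipped via b_offset, and the remaining 2048 bytes
 * are copied to/from offset 0 of the page.
 */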
705 
706 #define check_xor() 	do { 						\
707 			   if (count == MAX_XOR_BLOCKS) {		\
708 				xor_block(count, STRIPE_SIZE, ptr);	\
709 				count = 1;				\
710 			   }						\
711 			} while(0)
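/*
 * xor_block() xors ptr[1..count-1] into ptr[0], so when the pointer
 * array fills up (count == MAX_XOR_BLOCKS) the partial result is folded
 * into ptr[0] and count restarts at 1 with that partial result in place.
 */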
712 
713 
714 static void compute_block(struct stripe_head *sh, int dd_idx)
715 {
716 	raid5_conf_t *conf = sh->raid_conf;
717 	int i, count, disks = conf->raid_disks;
718 	void *ptr[MAX_XOR_BLOCKS], *p;
719 
720 	PRINTK("compute_block, stripe %llu, idx %d\n",
721 		(unsigned long long)sh->sector, dd_idx);
722 
723 	ptr[0] = page_address(sh->dev[dd_idx].page);
724 	memset(ptr[0], 0, STRIPE_SIZE);
725 	count = 1;
726 	for (i = disks ; i--; ) {
727 		if (i == dd_idx)
728 			continue;
729 		p = page_address(sh->dev[i].page);
730 		if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
731 			ptr[count++] = p;
732 		else
733 			printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
734 				" not present\n", dd_idx,
735 				(unsigned long long)sh->sector, i);
736 
737 		check_xor();
738 	}
739 	if (count != 1)
740 		xor_block(count, STRIPE_SIZE, ptr);
741 	set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
742 }
743 
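/*
 * (Re)compute the parity block of a stripe.  READ_MODIFY_WRITE xors the
 * old parity with the old data of the blocks about to be written, copies
 * the new data in, then xors that in as well; RECONSTRUCT_WRITE rebuilds
 * parity from scratch out of all data blocks; CHECK_PARITY xors every
 * data block into the existing parity, which should leave all zeroes.
 */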
744 static void compute_parity(struct stripe_head *sh, int method)
745 {
746 	raid5_conf_t *conf = sh->raid_conf;
747 	int i, pd_idx = sh->pd_idx, disks = conf->raid_disks, count;
748 	void *ptr[MAX_XOR_BLOCKS];
749 	struct bio *chosen;
750 
751 	PRINTK("compute_parity, stripe %llu, method %d\n",
752 		(unsigned long long)sh->sector, method);
753 
754 	count = 1;
755 	ptr[0] = page_address(sh->dev[pd_idx].page);
756 	switch(method) {
757 	case READ_MODIFY_WRITE:
758 		if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
759 			BUG();
760 		for (i=disks ; i-- ;) {
761 			if (i==pd_idx)
762 				continue;
763 			if (sh->dev[i].towrite &&
764 			    test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
765 				ptr[count++] = page_address(sh->dev[i].page);
766 				chosen = sh->dev[i].towrite;
767 				sh->dev[i].towrite = NULL;
768 
769 				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
770 					wake_up(&conf->wait_for_overlap);
771 
772 				if (sh->dev[i].written) BUG();
773 				sh->dev[i].written = chosen;
774 				check_xor();
775 			}
776 		}
777 		break;
778 	case RECONSTRUCT_WRITE:
779 		memset(ptr[0], 0, STRIPE_SIZE);
780 		for (i= disks; i-- ;)
781 			if (i!=pd_idx && sh->dev[i].towrite) {
782 				chosen = sh->dev[i].towrite;
783 				sh->dev[i].towrite = NULL;
784 
785 				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
786 					wake_up(&conf->wait_for_overlap);
787 
788 				if (sh->dev[i].written) BUG();
789 				sh->dev[i].written = chosen;
790 			}
791 		break;
792 	case CHECK_PARITY:
793 		break;
794 	}
795 	if (count>1) {
796 		xor_block(count, STRIPE_SIZE, ptr);
797 		count = 1;
798 	}
799 
800 	for (i = disks; i--;)
801 		if (sh->dev[i].written) {
802 			sector_t sector = sh->dev[i].sector;
803 			struct bio *wbi = sh->dev[i].written;
804 			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
805 				copy_data(1, wbi, sh->dev[i].page, sector);
806 				wbi = r5_next_bio(wbi, sector);
807 			}
808 
809 			set_bit(R5_LOCKED, &sh->dev[i].flags);
810 			set_bit(R5_UPTODATE, &sh->dev[i].flags);
811 		}
812 
813 	switch(method) {
814 	case RECONSTRUCT_WRITE:
815 	case CHECK_PARITY:
816 		for (i=disks; i--;)
817 			if (i != pd_idx) {
818 				ptr[count++] = page_address(sh->dev[i].page);
819 				check_xor();
820 			}
821 		break;
822 	case READ_MODIFY_WRITE:
823 		for (i = disks; i--;)
824 			if (sh->dev[i].written) {
825 				ptr[count++] = page_address(sh->dev[i].page);
826 				check_xor();
827 			}
828 	}
829 	if (count != 1)
830 		xor_block(count, STRIPE_SIZE, ptr);
831 
832 	if (method != CHECK_PARITY) {
833 		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
834 		set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
835 	} else
836 		clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
837 }
838 
839 /*
840  * Each stripe/dev can have one or more bios attached.
841  * toread/towrite point to the first in a chain.
842  * The bi_next chain must be in order.
843  */
844 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
845 {
846 	struct bio **bip;
847 	raid5_conf_t *conf = sh->raid_conf;
848 	int firstwrite=0;
849 
850 	PRINTK("adding bh b#%llu to stripe s#%llu\n",
851 		(unsigned long long)bi->bi_sector,
852 		(unsigned long long)sh->sector);
853 
854 
855 	spin_lock(&sh->lock);
856 	spin_lock_irq(&conf->device_lock);
857 	if (forwrite) {
858 		bip = &sh->dev[dd_idx].towrite;
859 		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
860 			firstwrite = 1;
861 	} else
862 		bip = &sh->dev[dd_idx].toread;
863 	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
864 		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
865 			goto overlap;
866 		bip = & (*bip)->bi_next;
867 	}
868 	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
869 		goto overlap;
870 
871 	if (*bip && bi->bi_next && (*bip) != bi->bi_next)
872 		BUG();
873 	if (*bip)
874 		bi->bi_next = *bip;
875 	*bip = bi;
876 	bi->bi_phys_segments ++;
877 	spin_unlock_irq(&conf->device_lock);
878 	spin_unlock(&sh->lock);
879 
880 	PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
881 		(unsigned long long)bi->bi_sector,
882 		(unsigned long long)sh->sector, dd_idx);
883 
884 	if (conf->mddev->bitmap && firstwrite) {
885 		sh->bm_seq = conf->seq_write;
886 		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
887 				  STRIPE_SECTORS, 0);
888 		set_bit(STRIPE_BIT_DELAY, &sh->state);
889 	}
890 
891 	if (forwrite) {
892 		/* check if page is covered */
893 		sector_t sector = sh->dev[dd_idx].sector;
894 		for (bi=sh->dev[dd_idx].towrite;
895 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
896 			     bi && bi->bi_sector <= sector;
897 		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
898 			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
899 				sector = bi->bi_sector + (bi->bi_size>>9);
900 		}
901 		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
902 			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
903 	}
904 	return 1;
905 
906  overlap:
907 	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
908 	spin_unlock_irq(&conf->device_lock);
909 	spin_unlock(&sh->lock);
910 	return 0;
911 }
912 
913 
914 /*
915  * handle_stripe - do things to a stripe.
916  *
917  * We lock the stripe and then examine the state of various bits
918  * to see what needs to be done.
919  * Possible results:
920  *    return some read requests which now have data
921  *    return some write requests which are safely on disc
922  *    schedule a read on some buffers
923  *    schedule a write of some buffers
924  *    return confirmation of parity correctness
925  *
926  * Parity calculations are done inside the stripe lock
927  * buffers are taken off read_list or write_list, and bh_cache buffers
928  * get BH_Lock set before the stripe lock is released.
929  *
930  */
931 
932 static void handle_stripe(struct stripe_head *sh)
933 {
934 	raid5_conf_t *conf = sh->raid_conf;
935 	int disks = conf->raid_disks;
936 	struct bio *return_bi= NULL;
937 	struct bio *bi;
938 	int i;
939 	int syncing;
940 	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
941 	int non_overwrite = 0;
942 	int failed_num=0;
943 	struct r5dev *dev;
944 
945 	PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
946 		(unsigned long long)sh->sector, atomic_read(&sh->count),
947 		sh->pd_idx);
948 
949 	spin_lock(&sh->lock);
950 	clear_bit(STRIPE_HANDLE, &sh->state);
951 	clear_bit(STRIPE_DELAYED, &sh->state);
952 
953 	syncing = test_bit(STRIPE_SYNCING, &sh->state);
954 	/* Now to look around and see what can be done */
955 
956 	rcu_read_lock();
957 	for (i=disks; i--; ) {
958 		mdk_rdev_t *rdev;
959 		dev = &sh->dev[i];
960 		clear_bit(R5_Insync, &dev->flags);
961 
962 		PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
963 			i, dev->flags, dev->toread, dev->towrite, dev->written);
964 		/* maybe we can reply to a read */
965 		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
966 			struct bio *rbi, *rbi2;
967 			PRINTK("Return read for disc %d\n", i);
968 			spin_lock_irq(&conf->device_lock);
969 			rbi = dev->toread;
970 			dev->toread = NULL;
971 			if (test_and_clear_bit(R5_Overlap, &dev->flags))
972 				wake_up(&conf->wait_for_overlap);
973 			spin_unlock_irq(&conf->device_lock);
974 			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
975 				copy_data(0, rbi, dev->page, dev->sector);
976 				rbi2 = r5_next_bio(rbi, dev->sector);
977 				spin_lock_irq(&conf->device_lock);
978 				if (--rbi->bi_phys_segments == 0) {
979 					rbi->bi_next = return_bi;
980 					return_bi = rbi;
981 				}
982 				spin_unlock_irq(&conf->device_lock);
983 				rbi = rbi2;
984 			}
985 		}
986 
987 		/* now count some things */
988 		if (test_bit(R5_LOCKED, &dev->flags)) locked++;
989 		if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
990 
991 
992 		if (dev->toread) to_read++;
993 		if (dev->towrite) {
994 			to_write++;
995 			if (!test_bit(R5_OVERWRITE, &dev->flags))
996 				non_overwrite++;
997 		}
998 		if (dev->written) written++;
999 		rdev = rcu_dereference(conf->disks[i].rdev);
1000 		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
1001 			/* The ReadError flag will just be confusing now */
1002 			clear_bit(R5_ReadError, &dev->flags);
1003 			clear_bit(R5_ReWrite, &dev->flags);
1004 		}
1005 		if (!rdev || !test_bit(In_sync, &rdev->flags)
1006 		    || test_bit(R5_ReadError, &dev->flags)) {
1007 			failed++;
1008 			failed_num = i;
1009 		} else
1010 			set_bit(R5_Insync, &dev->flags);
1011 	}
1012 	rcu_read_unlock();
1013 	PRINTK("locked=%d uptodate=%d to_read=%d"
1014 		" to_write=%d failed=%d failed_num=%d\n",
1015 		locked, uptodate, to_read, to_write, failed, failed_num);
1016 	/* check if the array has lost two devices and, if so, some requests might
1017 	 * need to be failed
1018 	 */
1019 	if (failed > 1 && to_read+to_write+written) {
1020 		for (i=disks; i--; ) {
1021 			int bitmap_end = 0;
1022 
1023 			if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1024 				mdk_rdev_t *rdev;
1025 				rcu_read_lock();
1026 				rdev = rcu_dereference(conf->disks[i].rdev);
1027 				if (rdev && test_bit(In_sync, &rdev->flags))
1028 					/* multiple read failures in one stripe */
1029 					md_error(conf->mddev, rdev);
1030 				rcu_read_unlock();
1031 			}
1032 
1033 			spin_lock_irq(&conf->device_lock);
1034 			/* fail all writes first */
1035 			bi = sh->dev[i].towrite;
1036 			sh->dev[i].towrite = NULL;
1037 			if (bi) { to_write--; bitmap_end = 1; }
1038 
1039 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1040 				wake_up(&conf->wait_for_overlap);
1041 
1042 			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1043 				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1044 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
1045 				if (--bi->bi_phys_segments == 0) {
1046 					md_write_end(conf->mddev);
1047 					bi->bi_next = return_bi;
1048 					return_bi = bi;
1049 				}
1050 				bi = nextbi;
1051 			}
1052 			/* and fail all 'written' */
1053 			bi = sh->dev[i].written;
1054 			sh->dev[i].written = NULL;
1055 			if (bi) bitmap_end = 1;
1056 			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
1057 				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
1058 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
1059 				if (--bi->bi_phys_segments == 0) {
1060 					md_write_end(conf->mddev);
1061 					bi->bi_next = return_bi;
1062 					return_bi = bi;
1063 				}
1064 				bi = bi2;
1065 			}
1066 
1067 			/* fail any reads if this device is non-operational */
1068 			if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
1069 			    test_bit(R5_ReadError, &sh->dev[i].flags)) {
1070 				bi = sh->dev[i].toread;
1071 				sh->dev[i].toread = NULL;
1072 				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1073 					wake_up(&conf->wait_for_overlap);
1074 				if (bi) to_read--;
1075 				while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1076 					struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1077 					clear_bit(BIO_UPTODATE, &bi->bi_flags);
1078 					if (--bi->bi_phys_segments == 0) {
1079 						bi->bi_next = return_bi;
1080 						return_bi = bi;
1081 					}
1082 					bi = nextbi;
1083 				}
1084 			}
1085 			spin_unlock_irq(&conf->device_lock);
1086 			if (bitmap_end)
1087 				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1088 						STRIPE_SECTORS, 0, 0);
1089 		}
1090 	}
1091 	if (failed > 1 && syncing) {
1092 		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
1093 		clear_bit(STRIPE_SYNCING, &sh->state);
1094 		syncing = 0;
1095 	}
1096 
1097 	/* might be able to return some write requests if the parity block
1098 	 * is safe, or on a failed drive
1099 	 */
1100 	dev = &sh->dev[sh->pd_idx];
1101 	if ( written &&
1102 	     ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
1103 		test_bit(R5_UPTODATE, &dev->flags))
1104 	       || (failed == 1 && failed_num == sh->pd_idx))
1105 	    ) {
1106 	    /* any written block on an uptodate or failed drive can be returned.
1107 	     * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
1108 	     * never LOCKED, so we don't need to test 'failed' directly.
1109 	     */
1110 	    for (i=disks; i--; )
1111 		if (sh->dev[i].written) {
1112 		    dev = &sh->dev[i];
1113 		    if (!test_bit(R5_LOCKED, &dev->flags) &&
1114 			 test_bit(R5_UPTODATE, &dev->flags) ) {
1115 			/* We can return any write requests */
1116 			    struct bio *wbi, *wbi2;
1117 			    int bitmap_end = 0;
1118 			    PRINTK("Return write for disc %d\n", i);
1119 			    spin_lock_irq(&conf->device_lock);
1120 			    wbi = dev->written;
1121 			    dev->written = NULL;
1122 			    while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1123 				    wbi2 = r5_next_bio(wbi, dev->sector);
1124 				    if (--wbi->bi_phys_segments == 0) {
1125 					    md_write_end(conf->mddev);
1126 					    wbi->bi_next = return_bi;
1127 					    return_bi = wbi;
1128 				    }
1129 				    wbi = wbi2;
1130 			    }
1131 			    if (dev->towrite == NULL)
1132 				    bitmap_end = 1;
1133 			    spin_unlock_irq(&conf->device_lock);
1134 			    if (bitmap_end)
1135 				    bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1136 						    STRIPE_SECTORS,
1137 						    !test_bit(STRIPE_DEGRADED, &sh->state), 0);
1138 		    }
1139 		}
1140 	}
1141 
1142 	/* Now we might consider reading some blocks, either to check/generate
1143 	 * parity, or to satisfy requests
1144 	 * or to load a block that is being partially written.
1145 	 */
1146 	if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
1147 		for (i=disks; i--;) {
1148 			dev = &sh->dev[i];
1149 			if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1150 			    (dev->toread ||
1151 			     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
1152 			     syncing ||
1153 			     (failed && (sh->dev[failed_num].toread ||
1154 					 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
1155 				    )
1156 				) {
1157 				/* we would like to get this block, possibly
1158 				 * by computing it, but we might not be able to
1159 				 */
1160 				if (uptodate == disks-1) {
1161 					PRINTK("Computing block %d\n", i);
1162 					compute_block(sh, i);
1163 					uptodate++;
1164 				} else if (test_bit(R5_Insync, &dev->flags)) {
1165 					set_bit(R5_LOCKED, &dev->flags);
1166 					set_bit(R5_Wantread, &dev->flags);
1167 #if 0
1168 					/* if I am just reading this block and we don't have
1169 					   a failed drive, or any pending writes then sidestep the cache */
1170 					if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
1171 					    ! syncing && !failed && !to_write) {
1172 						sh->bh_cache[i]->b_page =  sh->bh_read[i]->b_page;
1173 						sh->bh_cache[i]->b_data =  sh->bh_read[i]->b_data;
1174 					}
1175 #endif
1176 					locked++;
1177 					PRINTK("Reading block %d (sync=%d)\n",
1178 						i, syncing);
1179 				}
1180 			}
1181 		}
1182 		set_bit(STRIPE_HANDLE, &sh->state);
1183 	}
1184 
1185 	/* now to consider writing and what else, if anything, should be read */
1186 	if (to_write) {
1187 		int rmw=0, rcw=0;
1188 		for (i=disks ; i--;) {
1189 			/* would I have to read this buffer for read_modify_write */
1190 			dev = &sh->dev[i];
1191 			if ((dev->towrite || i == sh->pd_idx) &&
1192 			    (!test_bit(R5_LOCKED, &dev->flags)
1193 #if 0
1194 || sh->bh_page[i]!=bh->b_page
1195 #endif
1196 				    ) &&
1197 			    !test_bit(R5_UPTODATE, &dev->flags)) {
1198 				if (test_bit(R5_Insync, &dev->flags)
1199 /*				    && !(!mddev->insync && i == sh->pd_idx) */
1200 					)
1201 					rmw++;
1202 				else rmw += 2*disks;  /* cannot read it */
1203 			}
1204 			/* Would I have to read this buffer for reconstruct_write */
1205 			if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1206 			    (!test_bit(R5_LOCKED, &dev->flags)
1207 #if 0
1208 || sh->bh_page[i] != bh->b_page
1209 #endif
1210 				    ) &&
1211 			    !test_bit(R5_UPTODATE, &dev->flags)) {
1212 				if (test_bit(R5_Insync, &dev->flags)) rcw++;
1213 				else rcw += 2*disks;
1214 			}
1215 		}
1216 		PRINTK("for sector %llu, rmw=%d rcw=%d\n",
1217 			(unsigned long long)sh->sector, rmw, rcw);
1218 		set_bit(STRIPE_HANDLE, &sh->state);
1219 		if (rmw < rcw && rmw > 0)
1220 			/* prefer read-modify-write, but need to get some data */
1221 			for (i=disks; i--;) {
1222 				dev = &sh->dev[i];
1223 				if ((dev->towrite || i == sh->pd_idx) &&
1224 				    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1225 				    test_bit(R5_Insync, &dev->flags)) {
1226 					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1227 					{
1228 						PRINTK("Read_old block %d for r-m-w\n", i);
1229 						set_bit(R5_LOCKED, &dev->flags);
1230 						set_bit(R5_Wantread, &dev->flags);
1231 						locked++;
1232 					} else {
1233 						set_bit(STRIPE_DELAYED, &sh->state);
1234 						set_bit(STRIPE_HANDLE, &sh->state);
1235 					}
1236 				}
1237 			}
1238 		if (rcw <= rmw && rcw > 0)
1239 			/* want reconstruct write, but need to get some data */
1240 			for (i=disks; i--;) {
1241 				dev = &sh->dev[i];
1242 				if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1243 				    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1244 				    test_bit(R5_Insync, &dev->flags)) {
1245 					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1246 					{
1247 						PRINTK("Read_old block %d for Reconstruct\n", i);
1248 						set_bit(R5_LOCKED, &dev->flags);
1249 						set_bit(R5_Wantread, &dev->flags);
1250 						locked++;
1251 					} else {
1252 						set_bit(STRIPE_DELAYED, &sh->state);
1253 						set_bit(STRIPE_HANDLE, &sh->state);
1254 					}
1255 				}
1256 			}
1257 		/* now if nothing is locked, and if we have enough data, we can start a write request */
1258 		if (locked == 0 && (rcw == 0 || rmw == 0) &&
1259 		    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
1260 			PRINTK("Computing parity...\n");
1261 			compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
1262 			/* now every locked buffer is ready to be written */
1263 			for (i=disks; i--;)
1264 				if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
1265 					PRINTK("Writing block %d\n", i);
1266 					locked++;
1267 					set_bit(R5_Wantwrite, &sh->dev[i].flags);
1268 					if (!test_bit(R5_Insync, &sh->dev[i].flags)
1269 					    || (i==sh->pd_idx && failed == 0))
1270 						set_bit(STRIPE_INSYNC, &sh->state);
1271 				}
1272 			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
1273 				atomic_dec(&conf->preread_active_stripes);
1274 				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
1275 					md_wakeup_thread(conf->mddev->thread);
1276 			}
1277 		}
1278 	}
1279 
1280 	/* maybe we need to check and possibly fix the parity for this stripe
1281 	 * Any reads will already have been scheduled, so we just see if enough data
1282 	 * is available
1283 	 */
1284 	if (syncing && locked == 0 &&
1285 	    !test_bit(STRIPE_INSYNC, &sh->state)) {
1286 		set_bit(STRIPE_HANDLE, &sh->state);
1287 		if (failed == 0) {
1288 			char *pagea;
1289 			if (uptodate != disks)
1290 				BUG();
1291 			compute_parity(sh, CHECK_PARITY);
1292 			uptodate--;
1293 			pagea = page_address(sh->dev[sh->pd_idx].page);
1294 			if ((*(u32*)pagea) == 0 &&
1295 			    !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
1296 				/* parity is correct (on disc, not in buffer any more) */
1297 				set_bit(STRIPE_INSYNC, &sh->state);
1298 			} else {
1299 				conf->mddev->resync_mismatches += STRIPE_SECTORS;
1300 				if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
1301 					/* don't try to repair!! */
1302 					set_bit(STRIPE_INSYNC, &sh->state);
1303 				else {
1304 					compute_block(sh, sh->pd_idx);
1305 					uptodate++;
1306 				}
1307 			}
1308 		}
1309 		if (!test_bit(STRIPE_INSYNC, &sh->state)) {
1310 			/* either failed parity check, or recovery is happening */
1311 			if (failed==0)
1312 				failed_num = sh->pd_idx;
1313 			dev = &sh->dev[failed_num];
1314 			BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
1315 			BUG_ON(uptodate != disks);
1316 
1317 			set_bit(R5_LOCKED, &dev->flags);
1318 			set_bit(R5_Wantwrite, &dev->flags);
1319 			clear_bit(STRIPE_DEGRADED, &sh->state);
1320 			locked++;
1321 			set_bit(STRIPE_INSYNC, &sh->state);
1322 		}
1323 	}
1324 	if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1325 		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
1326 		clear_bit(STRIPE_SYNCING, &sh->state);
1327 	}
1328 
1329 	/* If the failed drive is just a ReadError, then we might need to progress
1330 	 * the repair/check process
1331 	 */
1332 	if (failed == 1 && ! conf->mddev->ro &&
1333 	    test_bit(R5_ReadError, &sh->dev[failed_num].flags)
1334 	    && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
1335 	    && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
1336 		) {
1337 		dev = &sh->dev[failed_num];
1338 		if (!test_bit(R5_ReWrite, &dev->flags)) {
1339 			set_bit(R5_Wantwrite, &dev->flags);
1340 			set_bit(R5_ReWrite, &dev->flags);
1341 			set_bit(R5_LOCKED, &dev->flags);
1342 		} else {
1343 			/* let's read it back */
1344 			set_bit(R5_Wantread, &dev->flags);
1345 			set_bit(R5_LOCKED, &dev->flags);
1346 		}
1347 	}
1348 
1349 	spin_unlock(&sh->lock);
1350 
1351 	while ((bi=return_bi)) {
1352 		int bytes = bi->bi_size;
1353 
1354 		return_bi = bi->bi_next;
1355 		bi->bi_next = NULL;
1356 		bi->bi_size = 0;
1357 		bi->bi_end_io(bi, bytes, 0);
1358 	}
1359 	for (i=disks; i-- ;) {
1360 		int rw;
1361 		struct bio *bi;
1362 		mdk_rdev_t *rdev;
1363 		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
1364 			rw = 1;
1365 		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1366 			rw = 0;
1367 		else
1368 			continue;
1369 
1370 		bi = &sh->dev[i].req;
1371 
1372 		bi->bi_rw = rw;
1373 		if (rw)
1374 			bi->bi_end_io = raid5_end_write_request;
1375 		else
1376 			bi->bi_end_io = raid5_end_read_request;
1377 
1378 		rcu_read_lock();
1379 		rdev = rcu_dereference(conf->disks[i].rdev);
1380 		if (rdev && test_bit(Faulty, &rdev->flags))
1381 			rdev = NULL;
1382 		if (rdev)
1383 			atomic_inc(&rdev->nr_pending);
1384 		rcu_read_unlock();
1385 
1386 		if (rdev) {
1387 			if (syncing)
1388 				md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1389 
1390 			bi->bi_bdev = rdev->bdev;
1391 			PRINTK("for %llu schedule op %ld on disc %d\n",
1392 				(unsigned long long)sh->sector, bi->bi_rw, i);
1393 			atomic_inc(&sh->count);
1394 			bi->bi_sector = sh->sector + rdev->data_offset;
1395 			bi->bi_flags = 1 << BIO_UPTODATE;
1396 			bi->bi_vcnt = 1;
1397 			bi->bi_max_vecs = 1;
1398 			bi->bi_idx = 0;
1399 			bi->bi_io_vec = &sh->dev[i].vec;
1400 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1401 			bi->bi_io_vec[0].bv_offset = 0;
1402 			bi->bi_size = STRIPE_SIZE;
1403 			bi->bi_next = NULL;
1404 			if (rw == WRITE &&
1405 			    test_bit(R5_ReWrite, &sh->dev[i].flags))
1406 				atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1407 			generic_make_request(bi);
1408 		} else {
1409 			if (rw == 1)
1410 				set_bit(STRIPE_DEGRADED, &sh->state);
1411 			PRINTK("skip op %ld on disc %d for sector %llu\n",
1412 				bi->bi_rw, i, (unsigned long long)sh->sector);
1413 			clear_bit(R5_LOCKED, &sh->dev[i].flags);
1414 			set_bit(STRIPE_HANDLE, &sh->state);
1415 		}
1416 	}
1417 }
1418 
1419 static void raid5_activate_delayed(raid5_conf_t *conf)
1420 {
1421 	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
1422 		while (!list_empty(&conf->delayed_list)) {
1423 			struct list_head *l = conf->delayed_list.next;
1424 			struct stripe_head *sh;
1425 			sh = list_entry(l, struct stripe_head, lru);
1426 			list_del_init(l);
1427 			clear_bit(STRIPE_DELAYED, &sh->state);
1428 			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1429 				atomic_inc(&conf->preread_active_stripes);
1430 			list_add_tail(&sh->lru, &conf->handle_list);
1431 		}
1432 	}
1433 }
1434 
1435 static void activate_bit_delay(raid5_conf_t *conf)
1436 {
1437 	/* device_lock is held */
1438 	struct list_head head;
1439 	list_add(&head, &conf->bitmap_list);
1440 	list_del_init(&conf->bitmap_list);
1441 	while (!list_empty(&head)) {
1442 		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
1443 		list_del_init(&sh->lru);
1444 		atomic_inc(&sh->count);
1445 		__release_stripe(conf, sh);
1446 	}
1447 }
1448 
1449 static void unplug_slaves(mddev_t *mddev)
1450 {
1451 	raid5_conf_t *conf = mddev_to_conf(mddev);
1452 	int i;
1453 
1454 	rcu_read_lock();
1455 	for (i=0; i<mddev->raid_disks; i++) {
1456 		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
1457 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
1458 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
1459 
1460 			atomic_inc(&rdev->nr_pending);
1461 			rcu_read_unlock();
1462 
1463 			if (r_queue->unplug_fn)
1464 				r_queue->unplug_fn(r_queue);
1465 
1466 			rdev_dec_pending(rdev, mddev);
1467 			rcu_read_lock();
1468 		}
1469 	}
1470 	rcu_read_unlock();
1471 }
1472 
1473 static void raid5_unplug_device(request_queue_t *q)
1474 {
1475 	mddev_t *mddev = q->queuedata;
1476 	raid5_conf_t *conf = mddev_to_conf(mddev);
1477 	unsigned long flags;
1478 
1479 	spin_lock_irqsave(&conf->device_lock, flags);
1480 
1481 	if (blk_remove_plug(q)) {
1482 		conf->seq_flush++;
1483 		raid5_activate_delayed(conf);
1484 	}
1485 	md_wakeup_thread(mddev->thread);
1486 
1487 	spin_unlock_irqrestore(&conf->device_lock, flags);
1488 
1489 	unplug_slaves(mddev);
1490 }
1491 
1492 static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
1493 			     sector_t *error_sector)
1494 {
1495 	mddev_t *mddev = q->queuedata;
1496 	raid5_conf_t *conf = mddev_to_conf(mddev);
1497 	int i, ret = 0;
1498 
1499 	rcu_read_lock();
1500 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
1501 		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
1502 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
1503 			struct block_device *bdev = rdev->bdev;
1504 			request_queue_t *r_queue = bdev_get_queue(bdev);
1505 
1506 			if (!r_queue->issue_flush_fn)
1507 				ret = -EOPNOTSUPP;
1508 			else {
1509 				atomic_inc(&rdev->nr_pending);
1510 				rcu_read_unlock();
1511 				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
1512 							      error_sector);
1513 				rdev_dec_pending(rdev, mddev);
1514 				rcu_read_lock();
1515 			}
1516 		}
1517 	}
1518 	rcu_read_unlock();
1519 	return ret;
1520 }
1521 
1522 static inline void raid5_plug_device(raid5_conf_t *conf)
1523 {
1524 	spin_lock_irq(&conf->device_lock);
1525 	blk_plug_device(conf->mddev->queue);
1526 	spin_unlock_irq(&conf->device_lock);
1527 }
1528 
1529 static int make_request (request_queue_t *q, struct bio * bi)
1530 {
1531 	mddev_t *mddev = q->queuedata;
1532 	raid5_conf_t *conf = mddev_to_conf(mddev);
1533 	const unsigned int raid_disks = conf->raid_disks;
1534 	const unsigned int data_disks = raid_disks - 1;
1535 	unsigned int dd_idx, pd_idx;
1536 	sector_t new_sector;
1537 	sector_t logical_sector, last_sector;
1538 	struct stripe_head *sh;
1539 	const int rw = bio_data_dir(bi);
1540 
1541 	if (unlikely(bio_barrier(bi))) {
1542 		bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
1543 		return 0;
1544 	}
1545 
1546 	md_write_start(mddev, bi);
1547 
1548 	disk_stat_inc(mddev->gendisk, ios[rw]);
1549 	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
1550 
1551 	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
1552 	last_sector = bi->bi_sector + (bi->bi_size>>9);
1553 	bi->bi_next = NULL;
1554 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
1555 
1556 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
1557 		DEFINE_WAIT(w);
1558 
1559 		new_sector = raid5_compute_sector(logical_sector,
1560 						  raid_disks, data_disks, &dd_idx, &pd_idx, conf);
1561 
1562 		PRINTK("raid5: make_request, sector %llu logical %llu\n",
1563 			(unsigned long long)new_sector,
1564 			(unsigned long long)logical_sector);
1565 
1566 	retry:
1567 		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
1568 		sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
1569 		if (sh) {
1570 			if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
1571 				/* Add failed due to overlap.  Flush everything
1572 				 * and wait a while
1573 				 */
1574 				raid5_unplug_device(mddev->queue);
1575 				release_stripe(sh);
1576 				schedule();
1577 				goto retry;
1578 			}
1579 			finish_wait(&conf->wait_for_overlap, &w);
1580 			raid5_plug_device(conf);
1581 			handle_stripe(sh);
1582 			release_stripe(sh);
1583 
1584 		} else {
1585 			/* cannot get stripe for read-ahead, just give-up */
1586 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
1587 			finish_wait(&conf->wait_for_overlap, &w);
1588 			break;
1589 		}
1590 
1591 	}
1592 	spin_lock_irq(&conf->device_lock);
1593 	if (--bi->bi_phys_segments == 0) {
1594 		int bytes = bi->bi_size;
1595 
1596 		if ( bio_data_dir(bi) == WRITE )
1597 			md_write_end(mddev);
1598 		bi->bi_size = 0;
1599 		bi->bi_end_io(bi, bytes, 0);
1600 	}
1601 	spin_unlock_irq(&conf->device_lock);
1602 	return 0;
1603 }
1604 
1605 /* FIXME go_faster isn't used */
1606 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1607 {
1608 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
1609 	struct stripe_head *sh;
1610 	int sectors_per_chunk = conf->chunk_size >> 9;
1611 	sector_t x;
1612 	unsigned long stripe;
1613 	int chunk_offset;
1614 	int dd_idx, pd_idx;
1615 	sector_t first_sector;
1616 	int raid_disks = conf->raid_disks;
1617 	int data_disks = raid_disks-1;
1618 	sector_t max_sector = mddev->size << 1;
1619 	int sync_blocks;
1620 
1621 	if (sector_nr >= max_sector) {
1622 		/* just being told to finish up .. nothing much to do */
1623 		unplug_slaves(mddev);
1624 
1625 		if (mddev->curr_resync < max_sector) /* aborted */
1626 			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1627 					&sync_blocks, 1);
628 		else /* completed sync */
1629 			conf->fullsync = 0;
1630 		bitmap_close_sync(mddev->bitmap);
1631 
1632 		return 0;
1633 	}
1634 	/* if there are one or more failed drives and we are trying
1635 	 * to resync, then assert that we are finished, because there is
1636 	 * nothing we can do.
1637 	 */
1638 	if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1639 		sector_t rv = (mddev->size << 1) - sector_nr;
1640 		*skipped = 1;
1641 		return rv;
1642 	}
1643 	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1644 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
1645 	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
1646 		/* we can skip this block, and probably more */
1647 		sync_blocks /= STRIPE_SECTORS;
1648 		*skipped = 1;
1649 		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
1650 	}
1651 
1652 	x = sector_nr;
1653 	chunk_offset = sector_div(x, sectors_per_chunk);
1654 	stripe = x;
1655 	BUG_ON(x != stripe);
1656 
1657 	first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
1658 		+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
1659 	sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
1660 	if (sh == NULL) {
1661 		sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
1662 		/* make sure we don't swamp the stripe cache if someone else
1663 		 * is trying to get access
1664 		 */
1665 		schedule_timeout_uninterruptible(1);
1666 	}
1667 	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
1668 	spin_lock(&sh->lock);
1669 	set_bit(STRIPE_SYNCING, &sh->state);
1670 	clear_bit(STRIPE_INSYNC, &sh->state);
1671 	spin_unlock(&sh->lock);
1672 
1673 	handle_stripe(sh);
1674 	release_stripe(sh);
1675 
1676 	return STRIPE_SECTORS;
1677 }
1678 
1679 /*
1680  * This is our raid5 kernel thread.
1681  *
1682  * We scan the hash table for stripes which can be handled now.
1683  * During the scan, completed stripes are saved for us by the interrupt
1684  * handler, so that they will not have to wait for our next wakeup.
1685  */
1686 static void raid5d (mddev_t *mddev)
1687 {
1688 	struct stripe_head *sh;
1689 	raid5_conf_t *conf = mddev_to_conf(mddev);
1690 	int handled;
1691 
1692 	PRINTK("+++ raid5d active\n");
1693 
1694 	md_check_recovery(mddev);
1695 
1696 	handled = 0;
1697 	spin_lock_irq(&conf->device_lock);
1698 	while (1) {
1699 		struct list_head *first;
1700 
1701 		if (conf->seq_flush - conf->seq_write > 0) {
1702 			int seq = conf->seq_flush;
1703 			spin_unlock_irq(&conf->device_lock);
1704 			bitmap_unplug(mddev->bitmap);
1705 			spin_lock_irq(&conf->device_lock);
1706 			conf->seq_write = seq;
1707 			activate_bit_delay(conf);
1708 		}
1709 
1710 		if (list_empty(&conf->handle_list) &&
1711 		    atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
1712 		    !blk_queue_plugged(mddev->queue) &&
1713 		    !list_empty(&conf->delayed_list))
1714 			raid5_activate_delayed(conf);
1715 
1716 		if (list_empty(&conf->handle_list))
1717 			break;
1718 
1719 		first = conf->handle_list.next;
1720 		sh = list_entry(first, struct stripe_head, lru);
1721 
1722 		list_del_init(first);
1723 		atomic_inc(&sh->count);
1724 		if (atomic_read(&sh->count)!= 1)
1725 			BUG();
1726 		spin_unlock_irq(&conf->device_lock);
1727 
1728 		handled++;
1729 		handle_stripe(sh);
1730 		release_stripe(sh);
1731 
1732 		spin_lock_irq(&conf->device_lock);
1733 	}
1734 	PRINTK("%d stripes handled\n", handled);
1735 
1736 	spin_unlock_irq(&conf->device_lock);
1737 
1738 	unplug_slaves(mddev);
1739 
1740 	PRINTK("--- raid5d inactive\n");
1741 }
1742 
1743 static ssize_t
1744 raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
1745 {
1746 	raid5_conf_t *conf = mddev_to_conf(mddev);
1747 	if (conf)
1748 		return sprintf(page, "%d\n", conf->max_nr_stripes);
1749 	else
1750 		return 0;
1751 }
1752 
1753 static ssize_t
1754 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
1755 {
1756 	raid5_conf_t *conf = mddev_to_conf(mddev);
1757 	char *end;
1758 	int new;
1759 	if (len >= PAGE_SIZE)
1760 		return -EINVAL;
1761 	if (!conf)
1762 		return -ENODEV;
1763 
1764 	new = simple_strtoul(page, &end, 10);
1765 	if (!*page || (*end && *end != '\n') )
1766 		return -EINVAL;
1767 	if (new <= 16 || new > 32768)
1768 		return -EINVAL;
1769 	while (new < conf->max_nr_stripes) {
1770 		if (drop_one_stripe(conf))
1771 			conf->max_nr_stripes--;
1772 		else
1773 			break;
1774 	}
1775 	while (new > conf->max_nr_stripes) {
1776 		if (grow_one_stripe(conf))
1777 			conf->max_nr_stripes++;
1778 		else break;
1779 	}
1780 	return len;
1781 }
1782 
1783 static struct md_sysfs_entry
1784 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
1785 				raid5_show_stripe_cache_size,
1786 				raid5_store_stripe_cache_size);
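/*
 * Editor's note: the attribute above surfaces as
 * /sys/block/<mdX>/md/stripe_cache_size.  A minimal user-space sketch of
 * tuning it (the array name md0 is an assumption for illustration):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/block/md0/md/stripe_cache_size", "w");
 *
 *		if (!f) {
 *			perror("stripe_cache_size");
 *			return 1;
 *		}
 *		fprintf(f, "512\n");	// grow the cache to 512 stripes
 *		return fclose(f) ? 1 : 0;
 *	}
 *
 * The store routine above accepts values in (16, 32768]; shrinking stops
 * early if drop_one_stripe() finds no inactive stripes left to free.
 */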
1787 
1788 static ssize_t
1789 stripe_cache_active_show(mddev_t *mddev, char *page)
1790 {
1791 	raid5_conf_t *conf = mddev_to_conf(mddev);
1792 	if (conf)
1793 		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
1794 	else
1795 		return 0;
1796 }
1797 
1798 static struct md_sysfs_entry
1799 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
1800 
1801 static struct attribute *raid5_attrs[] =  {
1802 	&raid5_stripecache_size.attr,
1803 	&raid5_stripecache_active.attr,
1804 	NULL,
1805 };
1806 static struct attribute_group raid5_attrs_group = {
1807 	.name = NULL,
1808 	.attrs = raid5_attrs,
1809 };
1810 
1811 static int run(mddev_t *mddev)
1812 {
1813 	raid5_conf_t *conf;
1814 	int raid_disk, memory;
1815 	mdk_rdev_t *rdev;
1816 	struct disk_info *disk;
1817 	struct list_head *tmp;
1818 
1819 	if (mddev->level != 5 && mddev->level != 4) {
1820 		printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
1821 		       mdname(mddev), mddev->level);
1822 		return -EIO;
1823 	}
1824 
1825 	mddev->private = kzalloc(sizeof(raid5_conf_t)
1826 				 + mddev->raid_disks * sizeof(struct disk_info),
1827 				 GFP_KERNEL);
1828 	if ((conf = mddev->private) == NULL)
1829 		goto abort;
1830 
1831 	conf->mddev = mddev;
1832 
1833 	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
1834 		goto abort;
1835 
1836 	spin_lock_init(&conf->device_lock);
1837 	init_waitqueue_head(&conf->wait_for_stripe);
1838 	init_waitqueue_head(&conf->wait_for_overlap);
1839 	INIT_LIST_HEAD(&conf->handle_list);
1840 	INIT_LIST_HEAD(&conf->delayed_list);
1841 	INIT_LIST_HEAD(&conf->bitmap_list);
1842 	INIT_LIST_HEAD(&conf->inactive_list);
1843 	atomic_set(&conf->active_stripes, 0);
1844 	atomic_set(&conf->preread_active_stripes, 0);
1845 
1846 	PRINTK("raid5: run(%s) called.\n", mdname(mddev));
1847 
1848 	ITERATE_RDEV(mddev,rdev,tmp) {
1849 		raid_disk = rdev->raid_disk;
1850 		if (raid_disk >= mddev->raid_disks
1851 		    || raid_disk < 0)
1852 			continue;
1853 		disk = conf->disks + raid_disk;
1854 
1855 		disk->rdev = rdev;
1856 
1857 		if (test_bit(In_sync, &rdev->flags)) {
1858 			char b[BDEVNAME_SIZE];
1859 			printk(KERN_INFO "raid5: device %s operational as raid"
1860 				" disk %d\n", bdevname(rdev->bdev,b),
1861 				raid_disk);
1862 			conf->working_disks++;
1863 		}
1864 	}
1865 
1866 	conf->raid_disks = mddev->raid_disks;
1867 	/*
1868 	 * 0 for a fully functional array, 1 for a degraded array.
1869 	 */
1870 	mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
1872 	conf->chunk_size = mddev->chunk_size;
1873 	conf->level = mddev->level;
1874 	conf->algorithm = mddev->layout;
1875 	conf->max_nr_stripes = NR_STRIPES;
1876 
1877 	/* device size must be a multiple of chunk size */
1878 	mddev->size &= ~(mddev->chunk_size/1024 -1);
1879 	mddev->resync_max_sectors = mddev->size << 1;
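	/* Worked example (illustrative numbers): with a 64KiB chunk,
	 * chunk_size/1024 is 64, so the mask rounds mddev->size -- which
	 * md keeps in KiB -- down to a multiple of 64; e.g. 1000005KiB
	 * becomes 1000000KiB, and resync_max_sectors is twice that,
	 * converting KiB to 512-byte sectors.
	 */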
1880 
1881 	if (!conf->chunk_size || conf->chunk_size % 4) {
1882 		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
1883 			conf->chunk_size, mdname(mddev));
1884 		goto abort;
1885 	}
1886 	if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
1887 		printk(KERN_ERR
1888 			"raid5: unsupported parity algorithm %d for %s\n",
1889 			conf->algorithm, mdname(mddev));
1890 		goto abort;
1891 	}
1892 	if (mddev->degraded > 1) {
1893 		printk(KERN_ERR "raid5: not enough operational devices for %s"
1894 			" (%d/%d failed)\n",
1895 			mdname(mddev), conf->failed_disks, conf->raid_disks);
1896 		goto abort;
1897 	}
1898 
1899 	if (mddev->degraded == 1 &&
1900 	    mddev->recovery_cp != MaxSector) {
1901 		if (mddev->ok_start_degraded)
1902 			printk(KERN_WARNING
1903 			       "raid5: starting dirty degraded array: %s"
1904 			       " - data corruption possible.\n",
1905 			       mdname(mddev));
1906 		else {
1907 			printk(KERN_ERR
1908 			       "raid5: cannot start dirty degraded array for %s\n",
1909 			       mdname(mddev));
1910 			goto abort;
1911 		}
1912 	}
1913 
1914 	{
1915 		mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
1916 		if (!mddev->thread) {
1917 			printk(KERN_ERR
1918 				"raid5: couldn't allocate thread for %s\n",
1919 				mdname(mddev));
1920 			goto abort;
1921 		}
1922 	}
1923 	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
1924 		 conf->raid_disks * (sizeof(struct bio) + PAGE_SIZE)) / 1024;
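	/* Rough scale (illustrative): each stripe carries one page plus one
	 * struct bio per member disk, so the default 256 stripes on a
	 * 4-disk array with 4KiB pages come to 256 * 4 * 4KiB = 4MiB of
	 * page data alone, putting 'memory' a little above 4096kB.
	 */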
1925 	if (grow_stripes(conf, conf->max_nr_stripes)) {
1926 		printk(KERN_ERR
1927 			"raid5: couldn't allocate %dkB for buffers\n", memory);
1928 		shrink_stripes(conf);
1929 		md_unregister_thread(mddev->thread);
1930 		goto abort;
1931 	} else
1932 		printk(KERN_INFO "raid5: allocated %dkB for %s\n",
1933 			memory, mdname(mddev));
1934 
1935 	if (mddev->degraded == 0)
1936 		printk(KERN_INFO "raid5: raid level %d set %s active with %d out of %d"
1937 			" devices, algorithm %d\n", conf->level, mdname(mddev),
1938 			mddev->raid_disks - mddev->degraded, mddev->raid_disks,
1939 			conf->algorithm);
1940 	else
1941 		printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
1942 			" out of %d devices, algorithm %d\n", conf->level,
1943 			mdname(mddev), mddev->raid_disks - mddev->degraded,
1944 			mddev->raid_disks, conf->algorithm);
1945 
1946 	print_raid5_conf(conf);
1947 
1948 	/* read-ahead size must cover two whole stripes, which is
1949 	 * 2 * (n-1) * chunksize where 'n' is the number of raid devices
1950 	 */
1951 	{
1952 		int stripe = (mddev->raid_disks-1) * mddev->chunk_size
1953 			/ PAGE_SIZE;
1954 		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
1955 			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
1956 	}
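	/* For example (illustrative numbers): 8 raid disks with a 64KiB
	 * chunk and 4KiB pages give stripe = 7 * 64KiB / 4KiB = 112 pages,
	 * so ra_pages is raised to at least 224 pages, i.e. 896KiB of
	 * read-ahead.
	 */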
1957 
1958 	/* Ok, everything is just fine now */
1959 	sysfs_create_group(&mddev->kobj, &raid5_attrs_group);
1960 
1961 	mddev->queue->unplug_fn = raid5_unplug_device;
1962 	mddev->queue->issue_flush_fn = raid5_issue_flush;
1963 
1964 	mddev->array_size = mddev->size * (mddev->raid_disks - 1);
1965 	return 0;
1966 abort:
1967 	if (conf) {
1968 		print_raid5_conf(conf);
1969 		kfree(conf->stripe_hashtbl);
1970 		kfree(conf);
1971 	}
1972 	mddev->private = NULL;
1973 	printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
1974 	return -EIO;
1975 }
1976 
1977 
1978 
1979 static int stop(mddev_t *mddev)
1980 {
1981 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
1982 
1983 	md_unregister_thread(mddev->thread);
1984 	mddev->thread = NULL;
1985 	shrink_stripes(conf);
1986 	kfree(conf->stripe_hashtbl);
1987 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
1988 	sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
1989 	kfree(conf);
1990 	mddev->private = NULL;
1991 	return 0;
1992 }
1993 
1994 #if RAID5_DEBUG
1995 static void print_sh (struct stripe_head *sh)
1996 {
1997 	int i;
1998 
1999 	printk("sh %llu, pd_idx %d, state %ld.\n",
2000 		(unsigned long long)sh->sector, sh->pd_idx, sh->state);
2001 	printk("sh %llu,  count %d.\n",
2002 		(unsigned long long)sh->sector, atomic_read(&sh->count));
2003 	printk("sh %llu, ", (unsigned long long)sh->sector);
2004 	for (i = 0; i < sh->raid_conf->raid_disks; i++) {
2005 		printk("(cache%d: %p %ld) ",
2006 			i, sh->dev[i].page, sh->dev[i].flags);
2007 	}
2008 	printk("\n");
2009 }
2010 
2011 static void printall (raid5_conf_t *conf)
2012 {
2013 	struct stripe_head *sh;
2014 	struct hlist_node *hn;
2015 	int i;
2016 
2017 	spin_lock_irq(&conf->device_lock);
2018 	for (i = 0; i < NR_HASH; i++) {
2019 		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
2020 			if (sh->raid_conf != conf)
2021 				continue;
2022 			print_sh(sh);
2023 		}
2024 	}
2025 	spin_unlock_irq(&conf->device_lock);
2026 }
2027 #endif
2028 
2029 static void status (struct seq_file *seq, mddev_t *mddev)
2030 {
2031 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
2032 	int i;
2033 
2034 	seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
2035 	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
2036 	for (i = 0; i < conf->raid_disks; i++)
2037 		seq_printf (seq, "%s",
2038 			       conf->disks[i].rdev &&
2039 			       test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
2040 	seq_printf (seq, "]");
2041 #if RAID5_DEBUG
2042 #define D(x) \
2043 	seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x))
2044 	printall(conf);
2045 #endif
2046 }
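/*
 * Editor's note: status() supplies the array detail line of /proc/mdstat.
 * A healthy 4-disk set prints something like (values illustrative):
 *
 *	level 5, 64k chunk, algorithm 2 [4/4] [UUUU]
 *
 * whereas [4/3] [UU_U] would indicate that the third member has failed or
 * is out of sync.
 */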
2047 
2048 static void print_raid5_conf (raid5_conf_t *conf)
2049 {
2050 	int i;
2051 	struct disk_info *tmp;
2052 
2053 	printk("RAID5 conf printout:\n");
2054 	if (!conf) {
2055 		printk("(conf==NULL)\n");
2056 		return;
2057 	}
2058 	printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
2059 		 conf->working_disks, conf->failed_disks);
2060 
2061 	for (i = 0; i < conf->raid_disks; i++) {
2062 		char b[BDEVNAME_SIZE];
2063 		tmp = conf->disks + i;
2064 		if (tmp->rdev)
2065 			printk(" disk %d, o:%d, dev:%s\n",
2066 				i, !test_bit(Faulty, &tmp->rdev->flags),
2067 				bdevname(tmp->rdev->bdev, b));
2068 	}
2069 }
2070 
2071 static int raid5_spare_active(mddev_t *mddev)
2072 {
2073 	int i;
2074 	raid5_conf_t *conf = mddev->private;
2075 	struct disk_info *tmp;
2076 
2077 	for (i = 0; i < conf->raid_disks; i++) {
2078 		tmp = conf->disks + i;
2079 		if (tmp->rdev
2080 		    && !test_bit(Faulty, &tmp->rdev->flags)
2081 		    && !test_bit(In_sync, &tmp->rdev->flags)) {
2082 			mddev->degraded--;
2083 			conf->failed_disks--;
2084 			conf->working_disks++;
2085 			set_bit(In_sync, &tmp->rdev->flags);
2086 		}
2087 	}
2088 	print_raid5_conf(conf);
2089 	return 0;
2090 }
2091 
2092 static int raid5_remove_disk(mddev_t *mddev, int number)
2093 {
2094 	raid5_conf_t *conf = mddev->private;
2095 	int err = 0;
2096 	mdk_rdev_t *rdev;
2097 	struct disk_info *p = conf->disks + number;
2098 
2099 	print_raid5_conf(conf);
2100 	rdev = p->rdev;
2101 	if (rdev) {
2102 		if (test_bit(In_sync, &rdev->flags) ||
2103 		    atomic_read(&rdev->nr_pending)) {
2104 			err = -EBUSY;
2105 			goto abort;
2106 		}
2107 		p->rdev = NULL;
2108 		synchronize_rcu();
2109 		if (atomic_read(&rdev->nr_pending)) {
2110 			/* lost the race, try later */
2111 			err = -EBUSY;
2112 			p->rdev = rdev;
2113 		}
2114 	}
2115 abort:
2116 
2117 	print_raid5_conf(conf);
2118 	return err;
2119 }
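/*
 * Editor's note: the removal above is the standard RCU unpublish pattern --
 * clear the published pointer, wait out a grace period, then re-check the
 * reference count.  Schematically (a sketch, not driver code):
 *
 *	p->rdev = NULL;			// 1. new readers now see NULL
 *	synchronize_rcu();		// 2. wait for existing RCU readers
 *	if (atomic_read(&rdev->nr_pending)) {
 *		p->rdev = rdev;		// 3. a reader won the race: back out
 *		err = -EBUSY;		//    and let the caller retry later
 *	}
 *
 * Readers pair with this by dereferencing p->rdev inside rcu_read_lock()
 * and taking an nr_pending reference before dropping the read lock.
 */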
2120 
2121 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
2122 {
2123 	raid5_conf_t *conf = mddev->private;
2124 	int found = 0;
2125 	int disk;
2126 	struct disk_info *p;
2127 
2128 	if (mddev->degraded > 1)
2129 		/* no point adding a device */
2130 		return 0;
2131 
2132 	/*
2133 	 * find the disk ...
2134 	 */
2135 	for (disk=0; disk < mddev->raid_disks; disk++)
2136 		if ((p = conf->disks + disk)->rdev == NULL) {
2137 			clear_bit(In_sync, &rdev->flags);
2138 			rdev->raid_disk = disk;
2139 			found = 1;
2140 			if (rdev->saved_raid_disk != disk)
2141 				conf->fullsync = 1;
2142 			rcu_assign_pointer(p->rdev, rdev);
2143 			break;
2144 		}
2145 	print_raid5_conf(conf);
2146 	return found;
2147 }
2148 
2149 static int raid5_resize(mddev_t *mddev, sector_t sectors)
2150 {
2151 	/* no resync is happening, and there is enough space
2152 	 * on all devices, so we can resize.
2153 	 * We need to make sure resync covers any new space.
2154 	 * If the array is shrinking we should possibly wait until
2155 	 * any I/O to the removed space completes, but it hardly seems
2156 	 * worth it.
2157 	 */
2158 	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
2159 	mddev->array_size = (sectors * (mddev->raid_disks-1))>>1;
2160 	set_capacity(mddev->gendisk, mddev->array_size << 1);
2161 	mddev->changed = 1;
2162 	if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
2163 		mddev->recovery_cp = mddev->size << 1;
2164 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2165 	}
2166 	mddev->size = sectors/2;
2167 	mddev->resync_max_sectors = sectors;
2168 	return 0;
2169 }
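/*
 * Editor's note, with illustrative numbers: on a 4-disk set with a 64KiB
 * chunk (128 sectors), a per-device size of 1000005 sectors is rounded
 * down to 999936, and the exported array size becomes
 * 999936 * 3 / 2 = 1499904KiB -- the >>1 converts 512-byte sector counts
 * into the KiB units md uses for array_size.
 */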
2170 
2171 static void raid5_quiesce(mddev_t *mddev, int state)
2172 {
2173 	raid5_conf_t *conf = mddev_to_conf(mddev);
2174 
2175 	switch(state) {
2176 	case 1: /* stop all writes */
2177 		spin_lock_irq(&conf->device_lock);
2178 		conf->quiesce = 1;
2179 		wait_event_lock_irq(conf->wait_for_stripe,
2180 				    atomic_read(&conf->active_stripes) == 0,
2181 				    conf->device_lock, /* nothing */);
2182 		spin_unlock_irq(&conf->device_lock);
2183 		break;
2184 
2185 	case 0: /* re-enable writes */
2186 		spin_lock_irq(&conf->device_lock);
2187 		conf->quiesce = 0;
2188 		wake_up(&conf->wait_for_stripe);
2189 		spin_unlock_irq(&conf->device_lock);
2190 		break;
2191 	}
2192 }
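/*
 * Editor's note: quiesce is md's hook for draining the array around
 * operations such as adding or removing a bitmap.  State 1 gates new
 * stripe activations -- get_active_stripe() waits while conf->quiesce is
 * set -- and then waits for active_stripes to drop to zero; state 0
 * reopens the gate and wakes the waiters.
 */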
2193 
2194 static struct mdk_personality raid5_personality =
2195 {
2196 	.name		= "raid5",
2197 	.level		= 5,
2198 	.owner		= THIS_MODULE,
2199 	.make_request	= make_request,
2200 	.run		= run,
2201 	.stop		= stop,
2202 	.status		= status,
2203 	.error_handler	= error,
2204 	.hot_add_disk	= raid5_add_disk,
2205 	.hot_remove_disk= raid5_remove_disk,
2206 	.spare_active	= raid5_spare_active,
2207 	.sync_request	= sync_request,
2208 	.resize		= raid5_resize,
2209 	.quiesce	= raid5_quiesce,
2210 };
2211 
2212 static struct mdk_personality raid4_personality =
2213 {
2214 	.name		= "raid4",
2215 	.level		= 4,
2216 	.owner		= THIS_MODULE,
2217 	.make_request	= make_request,
2218 	.run		= run,
2219 	.stop		= stop,
2220 	.status		= status,
2221 	.error_handler	= error,
2222 	.hot_add_disk	= raid5_add_disk,
2223 	.hot_remove_disk= raid5_remove_disk,
2224 	.spare_active	= raid5_spare_active,
2225 	.sync_request	= sync_request,
2226 	.resize		= raid5_resize,
2227 	.quiesce	= raid5_quiesce,
2228 };
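/*
 * Editor's note: the two personalities share every method above; the only
 * behavioural difference is parity placement, chosen on conf->level in
 * raid5_compute_sector().  RAID4 pins parity to the last device instead
 * of rotating it, roughly:
 *
 *	if (conf->level == 4)
 *		pd_idx = data_disks;	// fixed, dedicated parity disk
 *	else
 *		// rotate pd_idx according to conf->algorithm
 */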
2229 
2230 static int __init raid5_init(void)
2231 {
2232 	register_md_personality(&raid5_personality);
2233 	register_md_personality(&raid4_personality);
2234 	return 0;
2235 }
2236 
2237 static void raid5_exit(void)
2238 {
2239 	unregister_md_personality(&raid5_personality);
2240 	unregister_md_personality(&raid4_personality);
2241 }
2242 
2243 module_init(raid5_init);
2244 module_exit(raid5_exit);
2245 MODULE_LICENSE("GPL");
2246 MODULE_ALIAS("md-personality-4"); /* RAID5 */
2247 MODULE_ALIAS("md-raid5");
2248 MODULE_ALIAS("md-raid4");
2249 MODULE_ALIAS("md-level-5");
2250 MODULE_ALIAS("md-level-4");
2251