xref: /linux/drivers/md/raid5.c (revision 87c2ce3b9305b9b723faeedf6e32ef703ec9b33a)
1 /*
2  * raid5.c : Multiple Devices driver for Linux
3  *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4  *	   Copyright (C) 1999, 2000 Ingo Molnar
5  *
6  * RAID-5 management functions.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * You should have received a copy of the GNU General Public License
14  * (for example /usr/src/linux/COPYING); if not, write to the Free
15  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
16  */
17 
18 
19 #include <linux/config.h>
20 #include <linux/module.h>
21 #include <linux/slab.h>
22 #include <linux/raid/raid5.h>
23 #include <linux/highmem.h>
24 #include <linux/bitops.h>
25 #include <asm/atomic.h>
26 
27 #include <linux/raid/bitmap.h>
28 
29 /*
30  * Stripe cache
31  */
32 
33 #define NR_STRIPES		256
34 #define STRIPE_SIZE		PAGE_SIZE
35 #define STRIPE_SHIFT		(PAGE_SHIFT - 9)
36 #define STRIPE_SECTORS		(STRIPE_SIZE>>9)
37 #define	IO_THRESHOLD		1
38 #define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
39 #define HASH_MASK		(NR_HASH - 1)
40 
41 #define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
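/*
 * Worked example (illustrative, not part of the original source): with
 * 4KiB pages STRIPE_SHIFT is 12 - 9 = 3, so stripes one page (8 sectors)
 * apart hash to adjacent buckets; a stripe at sector 800 lands in bucket
 * (800 >> 3) & HASH_MASK = 100.
 */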
42 
43 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
44  * order without overlap.  There may be several bio's per stripe+device, and
45  * a bio could span several devices.
46  * When walking this list for a particular stripe+device, we must never proceed
47  * beyond a bio that extends past this device, as the next bio might no longer
48  * be valid.
49  * This macro is used to determine the 'next' bio in the list, given the sector
50  * of the current stripe+device
51  */
52 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
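/*
 * Example (illustrative, not in the original source): a bio starting at
 * the first sector of this stripe+device with bi_size = 8KiB covers 16
 * sectors, which reaches past the 8-sector STRIPE_SECTORS window, so
 * r5_next_bio() returns NULL and the walk stops rather than following
 * bi_next into a bio that may belong to another device.
 */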
53 /*
54  * The following can be used to debug the driver
55  */
56 #define RAID5_DEBUG	0
57 #define RAID5_PARANOIA	1
58 #if RAID5_PARANOIA && defined(CONFIG_SMP)
59 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
60 #else
61 # define CHECK_DEVLOCK()
62 #endif
63 
64 #define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
65 #if RAID5_DEBUG
66 #define inline
67 #define __inline__
68 #endif
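/*
 * Note (descriptive, not in the original source): when RAID5_DEBUG is
 * set, 'inline' and '__inline__' are defined away above, presumably so
 * every helper gets a real symbol and shows up in stack traces while
 * debugging.
 */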
69 
70 static void print_raid5_conf (raid5_conf_t *conf);
71 
72 static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
73 {
74 	if (atomic_dec_and_test(&sh->count)) {
75 		if (!list_empty(&sh->lru))
76 			BUG();
77 		if (atomic_read(&conf->active_stripes)==0)
78 			BUG();
79 		if (test_bit(STRIPE_HANDLE, &sh->state)) {
80 			if (test_bit(STRIPE_DELAYED, &sh->state))
81 				list_add_tail(&sh->lru, &conf->delayed_list);
82 			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
83 				 conf->seq_write == sh->bm_seq)
84 				list_add_tail(&sh->lru, &conf->bitmap_list);
85 			else {
86 				clear_bit(STRIPE_BIT_DELAY, &sh->state);
87 				list_add_tail(&sh->lru, &conf->handle_list);
88 			}
89 			md_wakeup_thread(conf->mddev->thread);
90 		} else {
91 			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
92 				atomic_dec(&conf->preread_active_stripes);
93 				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
94 					md_wakeup_thread(conf->mddev->thread);
95 			}
96 			list_add_tail(&sh->lru, &conf->inactive_list);
97 			atomic_dec(&conf->active_stripes);
98 			if (!conf->inactive_blocked ||
99 			    atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
100 				wake_up(&conf->wait_for_stripe);
101 		}
102 	}
103 }
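/*
 * Summary of the routing above (descriptive comment, not in the original
 * source): a stripe whose last reference is dropped goes to delayed_list
 * if it is waiting for preread, to bitmap_list if it is waiting for a
 * bitmap flush, to handle_list if it still needs work, and otherwise back
 * to inactive_list where get_free_stripe() can reuse it.
 */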
104 static void release_stripe(struct stripe_head *sh)
105 {
106 	raid5_conf_t *conf = sh->raid_conf;
107 	unsigned long flags;
108 
109 	spin_lock_irqsave(&conf->device_lock, flags);
110 	__release_stripe(conf, sh);
111 	spin_unlock_irqrestore(&conf->device_lock, flags);
112 }
113 
114 static inline void remove_hash(struct stripe_head *sh)
115 {
116 	PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
117 
118 	hlist_del_init(&sh->hash);
119 }
120 
121 static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
122 {
123 	struct hlist_head *hp = stripe_hash(conf, sh->sector);
124 
125 	PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
126 
127 	CHECK_DEVLOCK();
128 	hlist_add_head(&sh->hash, hp);
129 }
130 
131 
132 /* find an idle stripe, make sure it is unhashed, and return it. */
133 static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
134 {
135 	struct stripe_head *sh = NULL;
136 	struct list_head *first;
137 
138 	CHECK_DEVLOCK();
139 	if (list_empty(&conf->inactive_list))
140 		goto out;
141 	first = conf->inactive_list.next;
142 	sh = list_entry(first, struct stripe_head, lru);
143 	list_del_init(first);
144 	remove_hash(sh);
145 	atomic_inc(&conf->active_stripes);
146 out:
147 	return sh;
148 }
149 
150 static void shrink_buffers(struct stripe_head *sh, int num)
151 {
152 	struct page *p;
153 	int i;
154 
155 	for (i=0; i<num ; i++) {
156 		p = sh->dev[i].page;
157 		if (!p)
158 			continue;
159 		sh->dev[i].page = NULL;
160 		put_page(p);
161 	}
162 }
163 
164 static int grow_buffers(struct stripe_head *sh, int num)
165 {
166 	int i;
167 
168 	for (i=0; i<num; i++) {
169 		struct page *page;
170 
171 		if (!(page = alloc_page(GFP_KERNEL))) {
172 			return 1;
173 		}
174 		sh->dev[i].page = page;
175 	}
176 	return 0;
177 }
178 
179 static void raid5_build_block (struct stripe_head *sh, int i);
180 
181 static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
182 {
183 	raid5_conf_t *conf = sh->raid_conf;
184 	int disks = conf->raid_disks, i;
185 
186 	if (atomic_read(&sh->count) != 0)
187 		BUG();
188 	if (test_bit(STRIPE_HANDLE, &sh->state))
189 		BUG();
190 
191 	CHECK_DEVLOCK();
192 	PRINTK("init_stripe called, stripe %llu\n",
193 		(unsigned long long)sh->sector);
194 
195 	remove_hash(sh);
196 
197 	sh->sector = sector;
198 	sh->pd_idx = pd_idx;
199 	sh->state = 0;
200 
201 	for (i=disks; i--; ) {
202 		struct r5dev *dev = &sh->dev[i];
203 
204 		if (dev->toread || dev->towrite || dev->written ||
205 		    test_bit(R5_LOCKED, &dev->flags)) {
206 			printk("sector=%llx i=%d %p %p %p %d\n",
207 			       (unsigned long long)sh->sector, i, dev->toread,
208 			       dev->towrite, dev->written,
209 			       test_bit(R5_LOCKED, &dev->flags));
210 			BUG();
211 		}
212 		dev->flags = 0;
213 		raid5_build_block(sh, i);
214 	}
215 	insert_hash(conf, sh);
216 }
217 
218 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
219 {
220 	struct stripe_head *sh;
221 	struct hlist_node *hn;
222 
223 	CHECK_DEVLOCK();
224 	PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
225 	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
226 		if (sh->sector == sector)
227 			return sh;
228 	PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
229 	return NULL;
230 }
231 
232 static void unplug_slaves(mddev_t *mddev);
233 static void raid5_unplug_device(request_queue_t *q);
234 
235 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector,
236 					     int pd_idx, int noblock)
237 {
238 	struct stripe_head *sh;
239 
240 	PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
241 
242 	spin_lock_irq(&conf->device_lock);
243 
244 	do {
245 		wait_event_lock_irq(conf->wait_for_stripe,
246 				    conf->quiesce == 0,
247 				    conf->device_lock, /* nothing */);
248 		sh = __find_stripe(conf, sector);
249 		if (!sh) {
250 			if (!conf->inactive_blocked)
251 				sh = get_free_stripe(conf);
252 			if (noblock && sh == NULL)
253 				break;
254 			if (!sh) {
255 				conf->inactive_blocked = 1;
256 				wait_event_lock_irq(conf->wait_for_stripe,
257 						    !list_empty(&conf->inactive_list) &&
258 						    (atomic_read(&conf->active_stripes)
259 						     < (conf->max_nr_stripes *3/4)
260 						     || !conf->inactive_blocked),
261 						    conf->device_lock,
262 						    unplug_slaves(conf->mddev);
263 					);
264 				conf->inactive_blocked = 0;
265 			} else
266 				init_stripe(sh, sector, pd_idx);
267 		} else {
268 			if (atomic_read(&sh->count)) {
269 				if (!list_empty(&sh->lru))
270 					BUG();
271 			} else {
272 				if (!test_bit(STRIPE_HANDLE, &sh->state))
273 					atomic_inc(&conf->active_stripes);
274 				if (list_empty(&sh->lru))
275 					BUG();
276 				list_del_init(&sh->lru);
277 			}
278 		}
279 	} while (sh == NULL);
280 
281 	if (sh)
282 		atomic_inc(&sh->count);
283 
284 	spin_unlock_irq(&conf->device_lock);
285 	return sh;
286 }
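/*
 * Note on the hysteresis above (descriptive, not in the original source):
 * once a caller blocks for lack of inactive stripes, inactive_blocked is
 * set and waiters are only woken when the active count falls below 3/4 of
 * max_nr_stripes, which keeps a stream of allocations from thrashing the
 * cache at exactly the full mark.
 */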
287 
288 static int grow_one_stripe(raid5_conf_t *conf)
289 {
290 	struct stripe_head *sh;
291 	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
292 	if (!sh)
293 		return 0;
294 	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
295 	sh->raid_conf = conf;
296 	spin_lock_init(&sh->lock);
297 
298 	if (grow_buffers(sh, conf->raid_disks)) {
299 		shrink_buffers(sh, conf->raid_disks);
300 		kmem_cache_free(conf->slab_cache, sh);
301 		return 0;
302 	}
303 	/* we just created an active stripe so... */
304 	atomic_set(&sh->count, 1);
305 	atomic_inc(&conf->active_stripes);
306 	INIT_LIST_HEAD(&sh->lru);
307 	release_stripe(sh);
308 	return 1;
309 }
310 
311 static int grow_stripes(raid5_conf_t *conf, int num)
312 {
313 	kmem_cache_t *sc;
314 	int devs = conf->raid_disks;
315 
316 	sprintf(conf->cache_name, "raid5/%s", mdname(conf->mddev));
317 
318 	sc = kmem_cache_create(conf->cache_name,
319 			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
320 			       0, 0, NULL, NULL);
321 	if (!sc)
322 		return 1;
323 	conf->slab_cache = sc;
324 	while (num--) {
325 		if (!grow_one_stripe(conf))
326 			return 1;
327 	}
328 	return 0;
329 }
330 
331 static int drop_one_stripe(raid5_conf_t *conf)
332 {
333 	struct stripe_head *sh;
334 
335 	spin_lock_irq(&conf->device_lock);
336 	sh = get_free_stripe(conf);
337 	spin_unlock_irq(&conf->device_lock);
338 	if (!sh)
339 		return 0;
340 	if (atomic_read(&sh->count))
341 		BUG();
342 	shrink_buffers(sh, conf->raid_disks);
343 	kmem_cache_free(conf->slab_cache, sh);
344 	atomic_dec(&conf->active_stripes);
345 	return 1;
346 }
347 
348 static void shrink_stripes(raid5_conf_t *conf)
349 {
350 	while (drop_one_stripe(conf))
351 		;
352 
353 	kmem_cache_destroy(conf->slab_cache);
354 	conf->slab_cache = NULL;
355 }
356 
357 static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
358 				   int error)
359 {
360  	struct stripe_head *sh = bi->bi_private;
361 	raid5_conf_t *conf = sh->raid_conf;
362 	int disks = conf->raid_disks, i;
363 	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
364 
365 	if (bi->bi_size)
366 		return 1;
367 
368 	for (i=0 ; i<disks; i++)
369 		if (bi == &sh->dev[i].req)
370 			break;
371 
372 	PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
373 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
374 		uptodate);
375 	if (i == disks) {
376 		BUG();
377 		return 0;
378 	}
379 
380 	if (uptodate) {
381 #if 0
382 		struct bio *bio;
383 		unsigned long flags;
384 		spin_lock_irqsave(&conf->device_lock, flags);
385 		/* we can return a buffer if we bypassed the cache or
386 		 * if the top buffer is not in highmem.  If there are
387 		 * multiple buffers, leave the extra work to
388 		 * handle_stripe
389 		 */
390 		buffer = sh->bh_read[i];
391 		if (buffer &&
392 		    (!PageHighMem(buffer->b_page)
393 		     || buffer->b_page == bh->b_page )
394 			) {
395 			sh->bh_read[i] = buffer->b_reqnext;
396 			buffer->b_reqnext = NULL;
397 		} else
398 			buffer = NULL;
399 		spin_unlock_irqrestore(&conf->device_lock, flags);
400 		if (sh->bh_page[i]==bh->b_page)
401 			set_buffer_uptodate(bh);
402 		if (buffer) {
403 			if (buffer->b_page != bh->b_page)
404 				memcpy(buffer->b_data, bh->b_data, bh->b_size);
405 			buffer->b_end_io(buffer, 1);
406 		}
407 #else
408 		set_bit(R5_UPTODATE, &sh->dev[i].flags);
409 #endif
410 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
411 			printk(KERN_INFO "raid5: read error corrected!!\n");
412 			clear_bit(R5_ReadError, &sh->dev[i].flags);
413 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
414 		}
415 		if (atomic_read(&conf->disks[i].rdev->read_errors))
416 			atomic_set(&conf->disks[i].rdev->read_errors, 0);
417 	} else {
418 		int retry = 0;
419 		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
420 		atomic_inc(&conf->disks[i].rdev->read_errors);
421 		if (conf->mddev->degraded)
422 			printk(KERN_WARNING "raid5: read error not correctable.\n");
423 		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
424 			/* Oh, no!!! */
425 			printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
426 		else if (atomic_read(&conf->disks[i].rdev->read_errors)
427 			 > conf->max_nr_stripes)
428 			printk(KERN_WARNING
429 			       "raid5: Too many read errors, failing device.\n");
430 		else
431 			retry = 1;
432 		if (retry)
433 			set_bit(R5_ReadError, &sh->dev[i].flags);
434 		else {
435 			clear_bit(R5_ReadError, &sh->dev[i].flags);
436 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
437 			md_error(conf->mddev, conf->disks[i].rdev);
438 		}
439 	}
440 	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
441 #if 0
442 	/* must restore b_page before unlocking buffer... */
443 	if (sh->bh_page[i] != bh->b_page) {
444 		bh->b_page = sh->bh_page[i];
445 		bh->b_data = page_address(bh->b_page);
446 		clear_buffer_uptodate(bh);
447 	}
448 #endif
449 	clear_bit(R5_LOCKED, &sh->dev[i].flags);
450 	set_bit(STRIPE_HANDLE, &sh->state);
451 	release_stripe(sh);
452 	return 0;
453 }
454 
455 static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
456 				    int error)
457 {
458  	struct stripe_head *sh = bi->bi_private;
459 	raid5_conf_t *conf = sh->raid_conf;
460 	int disks = conf->raid_disks, i;
461 	unsigned long flags;
462 	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
463 
464 	if (bi->bi_size)
465 		return 1;
466 
467 	for (i=0 ; i<disks; i++)
468 		if (bi == &sh->dev[i].req)
469 			break;
470 
471 	PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
472 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
473 		uptodate);
474 	if (i == disks) {
475 		BUG();
476 		return 0;
477 	}
478 
479 	spin_lock_irqsave(&conf->device_lock, flags);
480 	if (!uptodate)
481 		md_error(conf->mddev, conf->disks[i].rdev);
482 
483 	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
484 
485 	clear_bit(R5_LOCKED, &sh->dev[i].flags);
486 	set_bit(STRIPE_HANDLE, &sh->state);
487 	__release_stripe(conf, sh);
488 	spin_unlock_irqrestore(&conf->device_lock, flags);
489 	return 0;
490 }
491 
492 
493 static sector_t compute_blocknr(struct stripe_head *sh, int i);
494 
495 static void raid5_build_block (struct stripe_head *sh, int i)
496 {
497 	struct r5dev *dev = &sh->dev[i];
498 
499 	bio_init(&dev->req);
500 	dev->req.bi_io_vec = &dev->vec;
501 	dev->req.bi_vcnt++;
502 	dev->req.bi_max_vecs++;
503 	dev->vec.bv_page = dev->page;
504 	dev->vec.bv_len = STRIPE_SIZE;
505 	dev->vec.bv_offset = 0;
506 
507 	dev->req.bi_sector = sh->sector;
508 	dev->req.bi_private = sh;
509 
510 	dev->flags = 0;
511 	if (i != sh->pd_idx)
512 		dev->sector = compute_blocknr(sh, i);
513 }
514 
515 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
516 {
517 	char b[BDEVNAME_SIZE];
518 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
519 	PRINTK("raid5: error called\n");
520 
521 	if (!test_bit(Faulty, &rdev->flags)) {
522 		mddev->sb_dirty = 1;
523 		if (test_bit(In_sync, &rdev->flags)) {
524 			conf->working_disks--;
525 			mddev->degraded++;
526 			conf->failed_disks++;
527 			clear_bit(In_sync, &rdev->flags);
528 			/*
529 			 * if recovery was running, make sure it aborts.
530 			 */
531 			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
532 		}
533 		set_bit(Faulty, &rdev->flags);
534 		printk (KERN_ALERT
535 			"raid5: Disk failure on %s, disabling device."
536 			" Operation continuing on %d devices\n",
537 			bdevname(rdev->bdev,b), conf->working_disks);
538 	}
539 }
540 
541 /*
542  * Input: a 'big' sector number,
543  * Output: index of the data and parity disk, and the sector # in them.
544  */
545 static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
546 			unsigned int data_disks, unsigned int * dd_idx,
547 			unsigned int * pd_idx, raid5_conf_t *conf)
548 {
549 	long stripe;
550 	unsigned long chunk_number;
551 	unsigned int chunk_offset;
552 	sector_t new_sector;
553 	int sectors_per_chunk = conf->chunk_size >> 9;
554 
555 	/* First compute the information on this sector */
556 
557 	/*
558 	 * Compute the chunk number and the sector offset inside the chunk
559 	 */
560 	chunk_offset = sector_div(r_sector, sectors_per_chunk);
561 	chunk_number = r_sector;
562 	BUG_ON(r_sector != chunk_number);
563 
564 	/*
565 	 * Compute the stripe number
566 	 */
567 	stripe = chunk_number / data_disks;
568 
569 	/*
570 	 * Compute the data disk and parity disk indexes inside the stripe
571 	 */
572 	*dd_idx = chunk_number % data_disks;
573 
574 	/*
575 	 * Select the parity disk based on the user selected algorithm.
576 	 */
577 	if (conf->level == 4)
578 		*pd_idx = data_disks;
579 	else switch (conf->algorithm) {
580 		case ALGORITHM_LEFT_ASYMMETRIC:
581 			*pd_idx = data_disks - stripe % raid_disks;
582 			if (*dd_idx >= *pd_idx)
583 				(*dd_idx)++;
584 			break;
585 		case ALGORITHM_RIGHT_ASYMMETRIC:
586 			*pd_idx = stripe % raid_disks;
587 			if (*dd_idx >= *pd_idx)
588 				(*dd_idx)++;
589 			break;
590 		case ALGORITHM_LEFT_SYMMETRIC:
591 			*pd_idx = data_disks - stripe % raid_disks;
592 			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
593 			break;
594 		case ALGORITHM_RIGHT_SYMMETRIC:
595 			*pd_idx = stripe % raid_disks;
596 			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
597 			break;
598 		default:
599 			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
600 				conf->algorithm);
601 	}
602 
603 	/*
604 	 * Finally, compute the new sector number
605 	 */
606 	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
607 	return new_sector;
608 }
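/*
 * Worked example (illustrative, not part of the original source): for a
 * 4-disk RAID5 (data_disks = 3), a 64KiB chunk (sectors_per_chunk = 128)
 * and ALGORITHM_LEFT_ASYMMETRIC, r_sector = 1000 gives chunk_offset =
 * 1000 % 128 = 104 and chunk_number = 7, so stripe = 7 / 3 = 2 and the
 * initial dd_idx = 7 % 3 = 1.  Then pd_idx = 3 - (2 % 4) = 1, and since
 * dd_idx >= pd_idx the data disk shifts to dd_idx = 2.  The sector within
 * that disk is new_sector = 2 * 128 + 104 = 360.
 */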
609 
610 
611 static sector_t compute_blocknr(struct stripe_head *sh, int i)
612 {
613 	raid5_conf_t *conf = sh->raid_conf;
614 	int raid_disks = conf->raid_disks, data_disks = raid_disks - 1;
615 	sector_t new_sector = sh->sector, check;
616 	int sectors_per_chunk = conf->chunk_size >> 9;
617 	sector_t stripe;
618 	int chunk_offset;
619 	int chunk_number, dummy1, dummy2, dd_idx = i;
620 	sector_t r_sector;
621 
622 	chunk_offset = sector_div(new_sector, sectors_per_chunk);
623 	stripe = new_sector;
624 	BUG_ON(new_sector != stripe);
625 
626 
627 	switch (conf->algorithm) {
628 		case ALGORITHM_LEFT_ASYMMETRIC:
629 		case ALGORITHM_RIGHT_ASYMMETRIC:
630 			if (i > sh->pd_idx)
631 				i--;
632 			break;
633 		case ALGORITHM_LEFT_SYMMETRIC:
634 		case ALGORITHM_RIGHT_SYMMETRIC:
635 			if (i < sh->pd_idx)
636 				i += raid_disks;
637 			i -= (sh->pd_idx + 1);
638 			break;
639 		default:
640 			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
641 				conf->algorithm);
642 	}
643 
644 	chunk_number = stripe * data_disks + i;
645 	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
646 
647 	check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
648 	if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
649 		printk(KERN_ERR "compute_blocknr: map not correct\n");
650 		return 0;
651 	}
652 	return r_sector;
653 }
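/*
 * Sanity note (descriptive, not in the original source): compute_blocknr()
 * is the inverse of raid5_compute_sector(); continuing the example above,
 * disk index 2 of the stripe at sector 360 maps back to r_sector 1000, and
 * the check at the end re-runs the forward mapping to catch any mismatch.
 */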
654 
655 
656 
657 /*
658  * Copy data between a page in the stripe cache, and a bio.
659  * There are no alignment or size guarantees between the page or the
660  * bio except that there is some overlap.
661  * All iovecs in the bio must be considered.
662  */
663 static void copy_data(int frombio, struct bio *bio,
664 		     struct page *page,
665 		     sector_t sector)
666 {
667 	char *pa = page_address(page);
668 	struct bio_vec *bvl;
669 	int i;
670 	int page_offset;
671 
672 	if (bio->bi_sector >= sector)
673 		page_offset = (signed)(bio->bi_sector - sector) * 512;
674 	else
675 		page_offset = (signed)(sector - bio->bi_sector) * -512;
676 	bio_for_each_segment(bvl, bio, i) {
677 		int len = bio_iovec_idx(bio,i)->bv_len;
678 		int clen;
679 		int b_offset = 0;
680 
681 		if (page_offset < 0) {
682 			b_offset = -page_offset;
683 			page_offset += b_offset;
684 			len -= b_offset;
685 		}
686 
687 		if (len > 0 && page_offset + len > STRIPE_SIZE)
688 			clen = STRIPE_SIZE - page_offset;
689 		else clen = len;
690 
691 		if (clen > 0) {
692 			char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
693 			if (frombio)
694 				memcpy(pa+page_offset, ba+b_offset, clen);
695 			else
696 				memcpy(ba+b_offset, pa+page_offset, clen);
697 			__bio_kunmap_atomic(ba, KM_USER0);
698 		}
699 		if (clen < len) /* hit end of page */
700 			break;
701 		page_offset +=  len;
702 	}
703 }
704 
705 #define check_xor() 	do { 						\
706 			   if (count == MAX_XOR_BLOCKS) {		\
707 				xor_block(count, STRIPE_SIZE, ptr);	\
708 				count = 1;				\
709 			   }						\
710 			} while(0)
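/*
 * Note (descriptive, not in the original source): ptr[0] is always the
 * destination/accumulator for xor_block(), so once a full batch of
 * MAX_XOR_BLOCKS pointers has been folded in, count restarts at 1
 * (keeping ptr[0]) rather than at 0.
 */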
711 
712 
713 static void compute_block(struct stripe_head *sh, int dd_idx)
714 {
715 	raid5_conf_t *conf = sh->raid_conf;
716 	int i, count, disks = conf->raid_disks;
717 	void *ptr[MAX_XOR_BLOCKS], *p;
718 
719 	PRINTK("compute_block, stripe %llu, idx %d\n",
720 		(unsigned long long)sh->sector, dd_idx);
721 
722 	ptr[0] = page_address(sh->dev[dd_idx].page);
723 	memset(ptr[0], 0, STRIPE_SIZE);
724 	count = 1;
725 	for (i = disks ; i--; ) {
726 		if (i == dd_idx)
727 			continue;
728 		p = page_address(sh->dev[i].page);
729 		if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
730 			ptr[count++] = p;
731 		else
732 			printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
733 				" not present\n", dd_idx,
734 				(unsigned long long)sh->sector, i);
735 
736 		check_xor();
737 	}
738 	if (count != 1)
739 		xor_block(count, STRIPE_SIZE, ptr);
740 	set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
741 }
742 
743 static void compute_parity(struct stripe_head *sh, int method)
744 {
745 	raid5_conf_t *conf = sh->raid_conf;
746 	int i, pd_idx = sh->pd_idx, disks = conf->raid_disks, count;
747 	void *ptr[MAX_XOR_BLOCKS];
748 	struct bio *chosen;
749 
750 	PRINTK("compute_parity, stripe %llu, method %d\n",
751 		(unsigned long long)sh->sector, method);
752 
753 	count = 1;
754 	ptr[0] = page_address(sh->dev[pd_idx].page);
755 	switch(method) {
756 	case READ_MODIFY_WRITE:
757 		if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
758 			BUG();
759 		for (i=disks ; i-- ;) {
760 			if (i==pd_idx)
761 				continue;
762 			if (sh->dev[i].towrite &&
763 			    test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
764 				ptr[count++] = page_address(sh->dev[i].page);
765 				chosen = sh->dev[i].towrite;
766 				sh->dev[i].towrite = NULL;
767 
768 				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
769 					wake_up(&conf->wait_for_overlap);
770 
771 				if (sh->dev[i].written) BUG();
772 				sh->dev[i].written = chosen;
773 				check_xor();
774 			}
775 		}
776 		break;
777 	case RECONSTRUCT_WRITE:
778 		memset(ptr[0], 0, STRIPE_SIZE);
779 		for (i= disks; i-- ;)
780 			if (i!=pd_idx && sh->dev[i].towrite) {
781 				chosen = sh->dev[i].towrite;
782 				sh->dev[i].towrite = NULL;
783 
784 				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
785 					wake_up(&conf->wait_for_overlap);
786 
787 				if (sh->dev[i].written) BUG();
788 				sh->dev[i].written = chosen;
789 			}
790 		break;
791 	case CHECK_PARITY:
792 		break;
793 	}
794 	if (count>1) {
795 		xor_block(count, STRIPE_SIZE, ptr);
796 		count = 1;
797 	}
798 
799 	for (i = disks; i--;)
800 		if (sh->dev[i].written) {
801 			sector_t sector = sh->dev[i].sector;
802 			struct bio *wbi = sh->dev[i].written;
803 			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
804 				copy_data(1, wbi, sh->dev[i].page, sector);
805 				wbi = r5_next_bio(wbi, sector);
806 			}
807 
808 			set_bit(R5_LOCKED, &sh->dev[i].flags);
809 			set_bit(R5_UPTODATE, &sh->dev[i].flags);
810 		}
811 
812 	switch(method) {
813 	case RECONSTRUCT_WRITE:
814 	case CHECK_PARITY:
815 		for (i=disks; i--;)
816 			if (i != pd_idx) {
817 				ptr[count++] = page_address(sh->dev[i].page);
818 				check_xor();
819 			}
820 		break;
821 	case READ_MODIFY_WRITE:
822 		for (i = disks; i--;)
823 			if (sh->dev[i].written) {
824 				ptr[count++] = page_address(sh->dev[i].page);
825 				check_xor();
826 			}
827 	}
828 	if (count != 1)
829 		xor_block(count, STRIPE_SIZE, ptr);
830 
831 	if (method != CHECK_PARITY) {
832 		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
833 		set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
834 	} else
835 		clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
836 }
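/*
 * The write methods above in equation form (descriptive comment, not part
 * of the original source):
 *
 *   READ_MODIFY_WRITE:  new parity = old parity ^ old data ^ new data,
 *     folding in the old contents of just the blocks being rewritten;
 *   RECONSTRUCT_WRITE:  new parity = XOR of all data blocks, so the
 *     parity page is zeroed first and every non-parity block is XORed in.
 *
 * CHECK_PARITY XORs everything, including the stored parity, into the
 * parity page; the result should be all zeroes, which handle_stripe()
 * verifies.
 */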
837 
838 /*
839  * Each stripe/dev can have one or more bion attached.
840  * toread/towrite point to the first in a chain.
841  * The bi_next chain must be in order.
842  */
843 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
844 {
845 	struct bio **bip;
846 	raid5_conf_t *conf = sh->raid_conf;
847 	int firstwrite=0;
848 
849 	PRINTK("adding bh b#%llu to stripe s#%llu\n",
850 		(unsigned long long)bi->bi_sector,
851 		(unsigned long long)sh->sector);
852 
853 
854 	spin_lock(&sh->lock);
855 	spin_lock_irq(&conf->device_lock);
856 	if (forwrite) {
857 		bip = &sh->dev[dd_idx].towrite;
858 		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
859 			firstwrite = 1;
860 	} else
861 		bip = &sh->dev[dd_idx].toread;
862 	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
863 		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
864 			goto overlap;
865 		bip = & (*bip)->bi_next;
866 	}
867 	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
868 		goto overlap;
869 
870 	if (*bip && bi->bi_next && (*bip) != bi->bi_next)
871 		BUG();
872 	if (*bip)
873 		bi->bi_next = *bip;
874 	*bip = bi;
875 	bi->bi_phys_segments ++;
876 	spin_unlock_irq(&conf->device_lock);
877 	spin_unlock(&sh->lock);
878 
879 	PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
880 		(unsigned long long)bi->bi_sector,
881 		(unsigned long long)sh->sector, dd_idx);
882 
883 	if (conf->mddev->bitmap && firstwrite) {
884 		sh->bm_seq = conf->seq_write;
885 		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
886 				  STRIPE_SECTORS, 0);
887 		set_bit(STRIPE_BIT_DELAY, &sh->state);
888 	}
889 
890 	if (forwrite) {
891 		/* check if page is covered */
892 		sector_t sector = sh->dev[dd_idx].sector;
893 		for (bi=sh->dev[dd_idx].towrite;
894 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
895 			     bi && bi->bi_sector <= sector;
896 		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
897 			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
898 				sector = bi->bi_sector + (bi->bi_size>>9);
899 		}
900 		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
901 			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
902 	}
903 	return 1;
904 
905  overlap:
906 	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
907 	spin_unlock_irq(&conf->device_lock);
908 	spin_unlock(&sh->lock);
909 	return 0;
910 }
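/*
 * Example of the coverage scan above (illustrative, not in the original
 * source): if the stripe's 8-sector window is written by two queued bios
 * covering its sectors 0-3 and 4-7, the walk advances 'sector' across
 * both without finding a gap and sets R5_OVERWRITE, so handle_stripe()
 * need not pre-read the old contents of this block before computing
 * parity.
 */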
911 
912 
913 /*
914  * handle_stripe - do things to a stripe.
915  *
916  * We lock the stripe and then examine the state of various bits
917  * to see what needs to be done.
918  * Possible results:
919  *    return some read requests which now have data
920  *    return some write requests which are safely on disc
921  *    schedule a read on some buffers
922  *    schedule a write of some buffers
923  *    return confirmation of parity correctness
924  *
925  * Parity calculations are done inside the stripe lock.
926  * Buffers are taken off read_list or write_list, and bh_cache buffers
927  * get BH_Lock set before the stripe lock is released.
928  *
929  */
930 
931 static void handle_stripe(struct stripe_head *sh)
932 {
933 	raid5_conf_t *conf = sh->raid_conf;
934 	int disks = conf->raid_disks;
935 	struct bio *return_bi= NULL;
936 	struct bio *bi;
937 	int i;
938 	int syncing;
939 	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
940 	int non_overwrite = 0;
941 	int failed_num=0;
942 	struct r5dev *dev;
943 
944 	PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
945 		(unsigned long long)sh->sector, atomic_read(&sh->count),
946 		sh->pd_idx);
947 
948 	spin_lock(&sh->lock);
949 	clear_bit(STRIPE_HANDLE, &sh->state);
950 	clear_bit(STRIPE_DELAYED, &sh->state);
951 
952 	syncing = test_bit(STRIPE_SYNCING, &sh->state);
953 	/* Now to look around and see what can be done */
954 
955 	rcu_read_lock();
956 	for (i=disks; i--; ) {
957 		mdk_rdev_t *rdev;
958 		dev = &sh->dev[i];
959 		clear_bit(R5_Insync, &dev->flags);
960 
961 		PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
962 			i, dev->flags, dev->toread, dev->towrite, dev->written);
963 		/* maybe we can reply to a read */
964 		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
965 			struct bio *rbi, *rbi2;
966 			PRINTK("Return read for disc %d\n", i);
967 			spin_lock_irq(&conf->device_lock);
968 			rbi = dev->toread;
969 			dev->toread = NULL;
970 			if (test_and_clear_bit(R5_Overlap, &dev->flags))
971 				wake_up(&conf->wait_for_overlap);
972 			spin_unlock_irq(&conf->device_lock);
973 			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
974 				copy_data(0, rbi, dev->page, dev->sector);
975 				rbi2 = r5_next_bio(rbi, dev->sector);
976 				spin_lock_irq(&conf->device_lock);
977 				if (--rbi->bi_phys_segments == 0) {
978 					rbi->bi_next = return_bi;
979 					return_bi = rbi;
980 				}
981 				spin_unlock_irq(&conf->device_lock);
982 				rbi = rbi2;
983 			}
984 		}
985 
986 		/* now count some things */
987 		if (test_bit(R5_LOCKED, &dev->flags)) locked++;
988 		if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
989 
990 
991 		if (dev->toread) to_read++;
992 		if (dev->towrite) {
993 			to_write++;
994 			if (!test_bit(R5_OVERWRITE, &dev->flags))
995 				non_overwrite++;
996 		}
997 		if (dev->written) written++;
998 		rdev = rcu_dereference(conf->disks[i].rdev);
999 		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
1000 			/* The ReadError flag will just be confusing now */
1001 			clear_bit(R5_ReadError, &dev->flags);
1002 			clear_bit(R5_ReWrite, &dev->flags);
1003 		}
1004 		if (!rdev || !test_bit(In_sync, &rdev->flags)
1005 		    || test_bit(R5_ReadError, &dev->flags)) {
1006 			failed++;
1007 			failed_num = i;
1008 		} else
1009 			set_bit(R5_Insync, &dev->flags);
1010 	}
1011 	rcu_read_unlock();
1012 	PRINTK("locked=%d uptodate=%d to_read=%d"
1013 		" to_write=%d failed=%d failed_num=%d\n",
1014 		locked, uptodate, to_read, to_write, failed, failed_num);
1015 	/* check if the array has lost two devices and, if so, some requests might
1016 	 * need to be failed
1017 	 */
1018 	if (failed > 1 && to_read+to_write+written) {
1019 		for (i=disks; i--; ) {
1020 			int bitmap_end = 0;
1021 
1022 			if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1023 				mdk_rdev_t *rdev;
1024 				rcu_read_lock();
1025 				rdev = rcu_dereference(conf->disks[i].rdev);
1026 				if (rdev && test_bit(In_sync, &rdev->flags))
1027 					/* multiple read failures in one stripe */
1028 					md_error(conf->mddev, rdev);
1029 				rcu_read_unlock();
1030 			}
1031 
1032 			spin_lock_irq(&conf->device_lock);
1033 			/* fail all writes first */
1034 			bi = sh->dev[i].towrite;
1035 			sh->dev[i].towrite = NULL;
1036 			if (bi) { to_write--; bitmap_end = 1; }
1037 
1038 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1039 				wake_up(&conf->wait_for_overlap);
1040 
1041 			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1042 				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1043 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
1044 				if (--bi->bi_phys_segments == 0) {
1045 					md_write_end(conf->mddev);
1046 					bi->bi_next = return_bi;
1047 					return_bi = bi;
1048 				}
1049 				bi = nextbi;
1050 			}
1051 			/* and fail all 'written' */
1052 			bi = sh->dev[i].written;
1053 			sh->dev[i].written = NULL;
1054 			if (bi) bitmap_end = 1;
1055 			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
1056 				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
1057 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
1058 				if (--bi->bi_phys_segments == 0) {
1059 					md_write_end(conf->mddev);
1060 					bi->bi_next = return_bi;
1061 					return_bi = bi;
1062 				}
1063 				bi = bi2;
1064 			}
1065 
1066 			/* fail any reads if this device is non-operational */
1067 			if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
1068 			    test_bit(R5_ReadError, &sh->dev[i].flags)) {
1069 				bi = sh->dev[i].toread;
1070 				sh->dev[i].toread = NULL;
1071 				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1072 					wake_up(&conf->wait_for_overlap);
1073 				if (bi) to_read--;
1074 				while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1075 					struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1076 					clear_bit(BIO_UPTODATE, &bi->bi_flags);
1077 					if (--bi->bi_phys_segments == 0) {
1078 						bi->bi_next = return_bi;
1079 						return_bi = bi;
1080 					}
1081 					bi = nextbi;
1082 				}
1083 			}
1084 			spin_unlock_irq(&conf->device_lock);
1085 			if (bitmap_end)
1086 				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1087 						STRIPE_SECTORS, 0, 0);
1088 		}
1089 	}
1090 	if (failed > 1 && syncing) {
1091 		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
1092 		clear_bit(STRIPE_SYNCING, &sh->state);
1093 		syncing = 0;
1094 	}
1095 
1096 	/* might be able to return some write requests if the parity block
1097 	 * is safe, or on a failed drive
1098 	 */
1099 	dev = &sh->dev[sh->pd_idx];
1100 	if ( written &&
1101 	     ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
1102 		test_bit(R5_UPTODATE, &dev->flags))
1103 	       || (failed == 1 && failed_num == sh->pd_idx))
1104 	    ) {
1105 	    /* any written block on an uptodate or failed drive can be returned.
1106 	     * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
1107 	     * never LOCKED, so we don't need to test 'failed' directly.
1108 	     */
1109 	    for (i=disks; i--; )
1110 		if (sh->dev[i].written) {
1111 		    dev = &sh->dev[i];
1112 		    if (!test_bit(R5_LOCKED, &dev->flags) &&
1113 			 test_bit(R5_UPTODATE, &dev->flags) ) {
1114 			/* We can return any write requests */
1115 			    struct bio *wbi, *wbi2;
1116 			    int bitmap_end = 0;
1117 			    PRINTK("Return write for disc %d\n", i);
1118 			    spin_lock_irq(&conf->device_lock);
1119 			    wbi = dev->written;
1120 			    dev->written = NULL;
1121 			    while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1122 				    wbi2 = r5_next_bio(wbi, dev->sector);
1123 				    if (--wbi->bi_phys_segments == 0) {
1124 					    md_write_end(conf->mddev);
1125 					    wbi->bi_next = return_bi;
1126 					    return_bi = wbi;
1127 				    }
1128 				    wbi = wbi2;
1129 			    }
1130 			    if (dev->towrite == NULL)
1131 				    bitmap_end = 1;
1132 			    spin_unlock_irq(&conf->device_lock);
1133 			    if (bitmap_end)
1134 				    bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1135 						    STRIPE_SECTORS,
1136 						    !test_bit(STRIPE_DEGRADED, &sh->state), 0);
1137 		    }
1138 		}
1139 	}
1140 
1141 	/* Now we might consider reading some blocks, either to check/generate
1142 	 * parity, or to satisfy requests
1143 	 * or to load a block that is being partially written.
1144 	 */
1145 	if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
1146 		for (i=disks; i--;) {
1147 			dev = &sh->dev[i];
1148 			if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1149 			    (dev->toread ||
1150 			     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
1151 			     syncing ||
1152 			     (failed && (sh->dev[failed_num].toread ||
1153 					 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
1154 				    )
1155 				) {
1156 				/* we would like to get this block, possibly
1157 				 * by computing it, but we might not be able to
1158 				 */
1159 				if (uptodate == disks-1) {
1160 					PRINTK("Computing block %d\n", i);
1161 					compute_block(sh, i);
1162 					uptodate++;
1163 				} else if (test_bit(R5_Insync, &dev->flags)) {
1164 					set_bit(R5_LOCKED, &dev->flags);
1165 					set_bit(R5_Wantread, &dev->flags);
1166 #if 0
1167 					/* if I am just reading this block and we don't have
1168 					   a failed drive, or any pending writes then sidestep the cache */
1169 					if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
1170 					    ! syncing && !failed && !to_write) {
1171 						sh->bh_cache[i]->b_page =  sh->bh_read[i]->b_page;
1172 						sh->bh_cache[i]->b_data =  sh->bh_read[i]->b_data;
1173 					}
1174 #endif
1175 					locked++;
1176 					PRINTK("Reading block %d (sync=%d)\n",
1177 						i, syncing);
1178 				}
1179 			}
1180 		}
1181 		set_bit(STRIPE_HANDLE, &sh->state);
1182 	}
1183 
1184 	/* now to consider writing and what else, if anything, should be read */
1185 	if (to_write) {
1186 		int rmw=0, rcw=0;
1187 		for (i=disks ; i--;) {
1188 			/* would I have to read this buffer for read_modify_write */
1189 			dev = &sh->dev[i];
1190 			if ((dev->towrite || i == sh->pd_idx) &&
1191 			    (!test_bit(R5_LOCKED, &dev->flags)
1192 #if 0
1193 || sh->bh_page[i]!=bh->b_page
1194 #endif
1195 				    ) &&
1196 			    !test_bit(R5_UPTODATE, &dev->flags)) {
1197 				if (test_bit(R5_Insync, &dev->flags)
1198 /*				    && !(!mddev->insync && i == sh->pd_idx) */
1199 					)
1200 					rmw++;
1201 				else rmw += 2*disks;  /* cannot read it */
1202 			}
1203 			/* Would I have to read this buffer for reconstruct_write */
1204 			if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1205 			    (!test_bit(R5_LOCKED, &dev->flags)
1206 #if 0
1207 || sh->bh_page[i] != bh->b_page
1208 #endif
1209 				    ) &&
1210 			    !test_bit(R5_UPTODATE, &dev->flags)) {
1211 				if (test_bit(R5_Insync, &dev->flags)) rcw++;
1212 				else rcw += 2*disks;
1213 			}
1214 		}
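		/*
		 * Illustrative costing (descriptive comment, not in the
		 * original source): rmw counts the reads needed to compute
		 * parity as old parity ^ old data ^ new data, rcw the reads
		 * needed to rebuild parity from all data blocks; a block
		 * that cannot be read is priced at 2*disks so that scheme
		 * is effectively never chosen.
		 */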
1215 		PRINTK("for sector %llu, rmw=%d rcw=%d\n",
1216 			(unsigned long long)sh->sector, rmw, rcw);
1217 		set_bit(STRIPE_HANDLE, &sh->state);
1218 		if (rmw < rcw && rmw > 0)
1219 			/* prefer read-modify-write, but need to get some data */
1220 			for (i=disks; i--;) {
1221 				dev = &sh->dev[i];
1222 				if ((dev->towrite || i == sh->pd_idx) &&
1223 				    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1224 				    test_bit(R5_Insync, &dev->flags)) {
1225 					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1226 					{
1227 						PRINTK("Read_old block %d for r-m-w\n", i);
1228 						set_bit(R5_LOCKED, &dev->flags);
1229 						set_bit(R5_Wantread, &dev->flags);
1230 						locked++;
1231 					} else {
1232 						set_bit(STRIPE_DELAYED, &sh->state);
1233 						set_bit(STRIPE_HANDLE, &sh->state);
1234 					}
1235 				}
1236 			}
1237 		if (rcw <= rmw && rcw > 0)
1238 			/* want reconstruct write, but need to get some data */
1239 			for (i=disks; i--;) {
1240 				dev = &sh->dev[i];
1241 				if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1242 				    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1243 				    test_bit(R5_Insync, &dev->flags)) {
1244 					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1245 					{
1246 						PRINTK("Read_old block %d for Reconstruct\n", i);
1247 						set_bit(R5_LOCKED, &dev->flags);
1248 						set_bit(R5_Wantread, &dev->flags);
1249 						locked++;
1250 					} else {
1251 						set_bit(STRIPE_DELAYED, &sh->state);
1252 						set_bit(STRIPE_HANDLE, &sh->state);
1253 					}
1254 				}
1255 			}
1256 		/* now if nothing is locked, and if we have enough data, we can start a write request */
1257 		if (locked == 0 && (rcw == 0 ||rmw == 0) &&
1258 		    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
1259 			PRINTK("Computing parity...\n");
1260 			compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
1261 			/* now every locked buffer is ready to be written */
1262 			for (i=disks; i--;)
1263 				if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
1264 					PRINTK("Writing block %d\n", i);
1265 					locked++;
1266 					set_bit(R5_Wantwrite, &sh->dev[i].flags);
1267 					if (!test_bit(R5_Insync, &sh->dev[i].flags)
1268 					    || (i==sh->pd_idx && failed == 0))
1269 						set_bit(STRIPE_INSYNC, &sh->state);
1270 				}
1271 			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
1272 				atomic_dec(&conf->preread_active_stripes);
1273 				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
1274 					md_wakeup_thread(conf->mddev->thread);
1275 			}
1276 		}
1277 	}
1278 
1279 	/* maybe we need to check and possibly fix the parity for this stripe
1280 	 * Any reads will already have been scheduled, so we just see if enough data
1281 	 * is available
1282 	 */
1283 	if (syncing && locked == 0 &&
1284 	    !test_bit(STRIPE_INSYNC, &sh->state)) {
1285 		set_bit(STRIPE_HANDLE, &sh->state);
1286 		if (failed == 0) {
1287 			char *pagea;
1288 			if (uptodate != disks)
1289 				BUG();
1290 			compute_parity(sh, CHECK_PARITY);
1291 			uptodate--;
1292 			pagea = page_address(sh->dev[sh->pd_idx].page);
1293 			if ((*(u32*)pagea) == 0 &&
1294 			    !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
1295 				/* parity is correct (on disc, not in buffer any more) */
1296 				set_bit(STRIPE_INSYNC, &sh->state);
1297 			} else {
1298 				conf->mddev->resync_mismatches += STRIPE_SECTORS;
1299 				if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
1300 					/* don't try to repair!! */
1301 					set_bit(STRIPE_INSYNC, &sh->state);
1302 				else {
1303 					compute_block(sh, sh->pd_idx);
1304 					uptodate++;
1305 				}
1306 			}
1307 		}
1308 		if (!test_bit(STRIPE_INSYNC, &sh->state)) {
1309 			/* either failed parity check, or recovery is happening */
1310 			if (failed==0)
1311 				failed_num = sh->pd_idx;
1312 			dev = &sh->dev[failed_num];
1313 			BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
1314 			BUG_ON(uptodate != disks);
1315 
1316 			set_bit(R5_LOCKED, &dev->flags);
1317 			set_bit(R5_Wantwrite, &dev->flags);
1318 			clear_bit(STRIPE_DEGRADED, &sh->state);
1319 			locked++;
1320 			set_bit(STRIPE_INSYNC, &sh->state);
1321 		}
1322 	}
1323 	if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1324 		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
1325 		clear_bit(STRIPE_SYNCING, &sh->state);
1326 	}
1327 
1328 	/* If the failed drive is just a ReadError, then we might need to progress
1329 	 * the repair/check process
1330 	 */
1331 	if (failed == 1 && ! conf->mddev->ro &&
1332 	    test_bit(R5_ReadError, &sh->dev[failed_num].flags)
1333 	    && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
1334 	    && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
1335 		) {
1336 		dev = &sh->dev[failed_num];
1337 		if (!test_bit(R5_ReWrite, &dev->flags)) {
1338 			set_bit(R5_Wantwrite, &dev->flags);
1339 			set_bit(R5_ReWrite, &dev->flags);
1340 			set_bit(R5_LOCKED, &dev->flags);
1341 		} else {
1342 			/* let's read it back */
1343 			set_bit(R5_Wantread, &dev->flags);
1344 			set_bit(R5_LOCKED, &dev->flags);
1345 		}
1346 	}
1347 
1348 	spin_unlock(&sh->lock);
1349 
1350 	while ((bi=return_bi)) {
1351 		int bytes = bi->bi_size;
1352 
1353 		return_bi = bi->bi_next;
1354 		bi->bi_next = NULL;
1355 		bi->bi_size = 0;
1356 		bi->bi_end_io(bi, bytes, 0);
1357 	}
1358 	for (i=disks; i-- ;) {
1359 		int rw;
1360 		struct bio *bi;
1361 		mdk_rdev_t *rdev;
1362 		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
1363 			rw = 1;
1364 		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1365 			rw = 0;
1366 		else
1367 			continue;
1368 
1369 		bi = &sh->dev[i].req;
1370 
1371 		bi->bi_rw = rw;
1372 		if (rw)
1373 			bi->bi_end_io = raid5_end_write_request;
1374 		else
1375 			bi->bi_end_io = raid5_end_read_request;
1376 
1377 		rcu_read_lock();
1378 		rdev = rcu_dereference(conf->disks[i].rdev);
1379 		if (rdev && test_bit(Faulty, &rdev->flags))
1380 			rdev = NULL;
1381 		if (rdev)
1382 			atomic_inc(&rdev->nr_pending);
1383 		rcu_read_unlock();
1384 
1385 		if (rdev) {
1386 			if (syncing)
1387 				md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1388 
1389 			bi->bi_bdev = rdev->bdev;
1390 			PRINTK("for %llu schedule op %ld on disc %d\n",
1391 				(unsigned long long)sh->sector, bi->bi_rw, i);
1392 			atomic_inc(&sh->count);
1393 			bi->bi_sector = sh->sector + rdev->data_offset;
1394 			bi->bi_flags = 1 << BIO_UPTODATE;
1395 			bi->bi_vcnt = 1;
1396 			bi->bi_max_vecs = 1;
1397 			bi->bi_idx = 0;
1398 			bi->bi_io_vec = &sh->dev[i].vec;
1399 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1400 			bi->bi_io_vec[0].bv_offset = 0;
1401 			bi->bi_size = STRIPE_SIZE;
1402 			bi->bi_next = NULL;
1403 			if (rw == WRITE &&
1404 			    test_bit(R5_ReWrite, &sh->dev[i].flags))
1405 				atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1406 			generic_make_request(bi);
1407 		} else {
1408 			if (rw == 1)
1409 				set_bit(STRIPE_DEGRADED, &sh->state);
1410 			PRINTK("skip op %ld on disc %d for sector %llu\n",
1411 				bi->bi_rw, i, (unsigned long long)sh->sector);
1412 			clear_bit(R5_LOCKED, &sh->dev[i].flags);
1413 			set_bit(STRIPE_HANDLE, &sh->state);
1414 		}
1415 	}
1416 }
1417 
1418 static inline void raid5_activate_delayed(raid5_conf_t *conf)
1419 {
1420 	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
1421 		while (!list_empty(&conf->delayed_list)) {
1422 			struct list_head *l = conf->delayed_list.next;
1423 			struct stripe_head *sh;
1424 			sh = list_entry(l, struct stripe_head, lru);
1425 			list_del_init(l);
1426 			clear_bit(STRIPE_DELAYED, &sh->state);
1427 			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1428 				atomic_inc(&conf->preread_active_stripes);
1429 			list_add_tail(&sh->lru, &conf->handle_list);
1430 		}
1431 	}
1432 }
1433 
1434 static inline void activate_bit_delay(raid5_conf_t *conf)
1435 {
1436 	/* device_lock is held */
1437 	struct list_head head;
1438 	list_add(&head, &conf->bitmap_list);
1439 	list_del_init(&conf->bitmap_list);
1440 	while (!list_empty(&head)) {
1441 		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
1442 		list_del_init(&sh->lru);
1443 		atomic_inc(&sh->count);
1444 		__release_stripe(conf, sh);
1445 	}
1446 }
1447 
1448 static void unplug_slaves(mddev_t *mddev)
1449 {
1450 	raid5_conf_t *conf = mddev_to_conf(mddev);
1451 	int i;
1452 
1453 	rcu_read_lock();
1454 	for (i=0; i<mddev->raid_disks; i++) {
1455 		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
1456 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
1457 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
1458 
1459 			atomic_inc(&rdev->nr_pending);
1460 			rcu_read_unlock();
1461 
1462 			if (r_queue->unplug_fn)
1463 				r_queue->unplug_fn(r_queue);
1464 
1465 			rdev_dec_pending(rdev, mddev);
1466 			rcu_read_lock();
1467 		}
1468 	}
1469 	rcu_read_unlock();
1470 }
1471 
1472 static void raid5_unplug_device(request_queue_t *q)
1473 {
1474 	mddev_t *mddev = q->queuedata;
1475 	raid5_conf_t *conf = mddev_to_conf(mddev);
1476 	unsigned long flags;
1477 
1478 	spin_lock_irqsave(&conf->device_lock, flags);
1479 
1480 	if (blk_remove_plug(q)) {
1481 		conf->seq_flush++;
1482 		raid5_activate_delayed(conf);
1483 	}
1484 	md_wakeup_thread(mddev->thread);
1485 
1486 	spin_unlock_irqrestore(&conf->device_lock, flags);
1487 
1488 	unplug_slaves(mddev);
1489 }
1490 
1491 static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
1492 			     sector_t *error_sector)
1493 {
1494 	mddev_t *mddev = q->queuedata;
1495 	raid5_conf_t *conf = mddev_to_conf(mddev);
1496 	int i, ret = 0;
1497 
1498 	rcu_read_lock();
1499 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
1500 		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
1501 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
1502 			struct block_device *bdev = rdev->bdev;
1503 			request_queue_t *r_queue = bdev_get_queue(bdev);
1504 
1505 			if (!r_queue->issue_flush_fn)
1506 				ret = -EOPNOTSUPP;
1507 			else {
1508 				atomic_inc(&rdev->nr_pending);
1509 				rcu_read_unlock();
1510 				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
1511 							      error_sector);
1512 				rdev_dec_pending(rdev, mddev);
1513 				rcu_read_lock();
1514 			}
1515 		}
1516 	}
1517 	rcu_read_unlock();
1518 	return ret;
1519 }
1520 
1521 static inline void raid5_plug_device(raid5_conf_t *conf)
1522 {
1523 	spin_lock_irq(&conf->device_lock);
1524 	blk_plug_device(conf->mddev->queue);
1525 	spin_unlock_irq(&conf->device_lock);
1526 }
1527 
1528 static int make_request (request_queue_t *q, struct bio * bi)
1529 {
1530 	mddev_t *mddev = q->queuedata;
1531 	raid5_conf_t *conf = mddev_to_conf(mddev);
1532 	const unsigned int raid_disks = conf->raid_disks;
1533 	const unsigned int data_disks = raid_disks - 1;
1534 	unsigned int dd_idx, pd_idx;
1535 	sector_t new_sector;
1536 	sector_t logical_sector, last_sector;
1537 	struct stripe_head *sh;
1538 	const int rw = bio_data_dir(bi);
1539 
1540 	if (unlikely(bio_barrier(bi))) {
1541 		bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
1542 		return 0;
1543 	}
1544 
1545 	md_write_start(mddev, bi);
1546 
1547 	disk_stat_inc(mddev->gendisk, ios[rw]);
1548 	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
1549 
1550 	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
1551 	last_sector = bi->bi_sector + (bi->bi_size>>9);
1552 	bi->bi_next = NULL;
1553 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
1554 
1555 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
1556 		DEFINE_WAIT(w);
1557 
1558 		new_sector = raid5_compute_sector(logical_sector,
1559 						  raid_disks, data_disks, &dd_idx, &pd_idx, conf);
1560 
1561 		PRINTK("raid5: make_request, sector %llu logical %llu\n",
1562 			(unsigned long long)new_sector,
1563 			(unsigned long long)logical_sector);
1564 
1565 	retry:
1566 		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
1567 		sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
1568 		if (sh) {
1569 			if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
1570 				/* Add failed due to overlap.  Flush everything
1571 				 * and wait a while
1572 				 */
1573 				raid5_unplug_device(mddev->queue);
1574 				release_stripe(sh);
1575 				schedule();
1576 				goto retry;
1577 			}
1578 			finish_wait(&conf->wait_for_overlap, &w);
1579 			raid5_plug_device(conf);
1580 			handle_stripe(sh);
1581 			release_stripe(sh);
1582 
1583 		} else {
1584 			/* cannot get stripe for read-ahead, just give-up */
1585 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
1586 			finish_wait(&conf->wait_for_overlap, &w);
1587 			break;
1588 		}
1589 
1590 	}
1591 	spin_lock_irq(&conf->device_lock);
1592 	if (--bi->bi_phys_segments == 0) {
1593 		int bytes = bi->bi_size;
1594 
1595 		if ( bio_data_dir(bi) == WRITE )
1596 			md_write_end(mddev);
1597 		bi->bi_size = 0;
1598 		bi->bi_end_io(bi, bytes, 0);
1599 	}
1600 	spin_unlock_irq(&conf->device_lock);
1601 	return 0;
1602 }
1603 
1604 /* FIXME go_faster isn't used */
1605 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1606 {
1607 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
1608 	struct stripe_head *sh;
1609 	int sectors_per_chunk = conf->chunk_size >> 9;
1610 	sector_t x;
1611 	unsigned long stripe;
1612 	int chunk_offset;
1613 	int dd_idx, pd_idx;
1614 	sector_t first_sector;
1615 	int raid_disks = conf->raid_disks;
1616 	int data_disks = raid_disks-1;
1617 	sector_t max_sector = mddev->size << 1;
1618 	int sync_blocks;
1619 
1620 	if (sector_nr >= max_sector) {
1621 		/* just being told to finish up .. nothing much to do */
1622 		unplug_slaves(mddev);
1623 
1624 		if (mddev->curr_resync < max_sector) /* aborted */
1625 			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1626 					&sync_blocks, 1);
1627 		else /* completed sync */
1628 			conf->fullsync = 0;
1629 		bitmap_close_sync(mddev->bitmap);
1630 
1631 		return 0;
1632 	}
1633 	/* if there is 1 or more failed drives and we are trying
1634 	 * to resync, then assert that we are finished, because there is
1635 	 * nothing we can do.
1636 	 */
1637 	if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1638 		sector_t rv = (mddev->size << 1) - sector_nr;
1639 		*skipped = 1;
1640 		return rv;
1641 	}
1642 	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1643 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
1644 	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
1645 		/* we can skip this block, and probably more */
1646 		sync_blocks /= STRIPE_SECTORS;
1647 		*skipped = 1;
1648 		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
1649 	}
1650 
1651 	x = sector_nr;
1652 	chunk_offset = sector_div(x, sectors_per_chunk);
1653 	stripe = x;
1654 	BUG_ON(x != stripe);
1655 
1656 	first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
1657 		+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
1658 	sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
1659 	if (sh == NULL) {
1660 		sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
1661 		/* make sure we don't swamp the stripe cache if someone else
1662 		 * is trying to get access
1663 		 */
1664 		schedule_timeout_uninterruptible(1);
1665 	}
1666 	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
1667 	spin_lock(&sh->lock);
1668 	set_bit(STRIPE_SYNCING, &sh->state);
1669 	clear_bit(STRIPE_INSYNC, &sh->state);
1670 	spin_unlock(&sh->lock);
1671 
1672 	handle_stripe(sh);
1673 	release_stripe(sh);
1674 
1675 	return STRIPE_SECTORS;
1676 }
1677 
1678 /*
1679  * This is our raid5 kernel thread.
1680  *
1681  * We scan the hash table for stripes which can be handled now.
1682  * During the scan, completed stripes are saved for us by the interrupt
1683  * handler, so that they will not have to wait for our next wakeup.
1684  */
1685 static void raid5d (mddev_t *mddev)
1686 {
1687 	struct stripe_head *sh;
1688 	raid5_conf_t *conf = mddev_to_conf(mddev);
1689 	int handled;
1690 
1691 	PRINTK("+++ raid5d active\n");
1692 
1693 	md_check_recovery(mddev);
1694 
1695 	handled = 0;
1696 	spin_lock_irq(&conf->device_lock);
1697 	while (1) {
1698 		struct list_head *first;
1699 
1700 		if (conf->seq_flush - conf->seq_write > 0) {
1701 			int seq = conf->seq_flush;
1702 			spin_unlock_irq(&conf->device_lock);
1703 			bitmap_unplug(mddev->bitmap);
1704 			spin_lock_irq(&conf->device_lock);
1705 			conf->seq_write = seq;
1706 			activate_bit_delay(conf);
1707 		}
1708 
1709 		if (list_empty(&conf->handle_list) &&
1710 		    atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
1711 		    !blk_queue_plugged(mddev->queue) &&
1712 		    !list_empty(&conf->delayed_list))
1713 			raid5_activate_delayed(conf);
1714 
1715 		if (list_empty(&conf->handle_list))
1716 			break;
1717 
1718 		first = conf->handle_list.next;
1719 		sh = list_entry(first, struct stripe_head, lru);
1720 
1721 		list_del_init(first);
1722 		atomic_inc(&sh->count);
1723 		if (atomic_read(&sh->count)!= 1)
1724 			BUG();
1725 		spin_unlock_irq(&conf->device_lock);
1726 
1727 		handled++;
1728 		handle_stripe(sh);
1729 		release_stripe(sh);
1730 
1731 		spin_lock_irq(&conf->device_lock);
1732 	}
1733 	PRINTK("%d stripes handled\n", handled);
1734 
1735 	spin_unlock_irq(&conf->device_lock);
1736 
1737 	unplug_slaves(mddev);
1738 
1739 	PRINTK("--- raid5d inactive\n");
1740 }
1741 
1742 static ssize_t
1743 raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
1744 {
1745 	raid5_conf_t *conf = mddev_to_conf(mddev);
1746 	if (conf)
1747 		return sprintf(page, "%d\n", conf->max_nr_stripes);
1748 	else
1749 		return 0;
1750 }
1751 
1752 static ssize_t
1753 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
1754 {
1755 	raid5_conf_t *conf = mddev_to_conf(mddev);
1756 	char *end;
1757 	int new;
1758 	if (len >= PAGE_SIZE)
1759 		return -EINVAL;
1760 	if (!conf)
1761 		return -ENODEV;
1762 
1763 	new = simple_strtoul(page, &end, 10);
1764 	if (!*page || (*end && *end != '\n') )
1765 		return -EINVAL;
1766 	if (new <= 16 || new > 32768)
1767 		return -EINVAL;
1768 	while (new < conf->max_nr_stripes) {
1769 		if (drop_one_stripe(conf))
1770 			conf->max_nr_stripes--;
1771 		else
1772 			break;
1773 	}
1774 	while (new > conf->max_nr_stripes) {
1775 		if (grow_one_stripe(conf))
1776 			conf->max_nr_stripes++;
1777 		else break;
1778 	}
1779 	return len;
1780 }
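/* The sysfs attribute defined below makes the cache size tunable at run
 * time, e.g. (hypothetical device name):
 *
 *	echo 512 > /sys/block/md0/md/stripe_cache_size
 *
 * Shrinking stops early if drop_one_stripe() finds no inactive stripe to
 * free, and growing stops early on allocation failure; in either case the
 * write still reports success, with max_nr_stripes left at whatever size
 * was actually reached.
 */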
1781 
1782 static struct md_sysfs_entry
1783 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
1784 				raid5_show_stripe_cache_size,
1785 				raid5_store_stripe_cache_size);
1786 
1787 static ssize_t
1788 stripe_cache_active_show(mddev_t *mddev, char *page)
1789 {
1790 	raid5_conf_t *conf = mddev_to_conf(mddev);
1791 	if (conf)
1792 		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
1793 	else
1794 		return 0;
1795 }
1796 
1797 static struct md_sysfs_entry
1798 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
1799 
1800 static struct attribute *raid5_attrs[] =  {
1801 	&raid5_stripecache_size.attr,
1802 	&raid5_stripecache_active.attr,
1803 	NULL,
1804 };
1805 static struct attribute_group raid5_attrs_group = {
1806 	.name = NULL,
1807 	.attrs = raid5_attrs,
1808 };
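/* With .name == NULL above, the attributes are created directly in the md
 * kobject's directory (/sys/block/mdX/md/) rather than in a named
 * subdirectory.
 */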
1809 
1810 static int run(mddev_t *mddev)
1811 {
1812 	raid5_conf_t *conf;
1813 	int raid_disk, memory;
1814 	mdk_rdev_t *rdev;
1815 	struct disk_info *disk;
1816 	struct list_head *tmp;
1817 
1818 	if (mddev->level != 5 && mddev->level != 4) {
1819 		printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
1820 		       mdname(mddev), mddev->level);
1821 		return -EIO;
1822 	}
1823 
1824 	mddev->private = kzalloc(sizeof (raid5_conf_t)
1825 				 + mddev->raid_disks * sizeof(struct disk_info),
1826 				 GFP_KERNEL);
1827 	if ((conf = mddev->private) == NULL)
1828 		goto abort;
1829 
1830 	conf->mddev = mddev;
1831 
1832 	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
1833 		goto abort;
1834 
1835 	spin_lock_init(&conf->device_lock);
1836 	init_waitqueue_head(&conf->wait_for_stripe);
1837 	init_waitqueue_head(&conf->wait_for_overlap);
1838 	INIT_LIST_HEAD(&conf->handle_list);
1839 	INIT_LIST_HEAD(&conf->delayed_list);
1840 	INIT_LIST_HEAD(&conf->bitmap_list);
1841 	INIT_LIST_HEAD(&conf->inactive_list);
1842 	atomic_set(&conf->active_stripes, 0);
1843 	atomic_set(&conf->preread_active_stripes, 0);
1844 
1845 	PRINTK("raid5: run(%s) called.\n", mdname(mddev));
1846 
1847 	ITERATE_RDEV(mddev,rdev,tmp) {
1848 		raid_disk = rdev->raid_disk;
1849 		if (raid_disk >= mddev->raid_disks
1850 		    || raid_disk < 0)
1851 			continue;
1852 		disk = conf->disks + raid_disk;
1853 
1854 		disk->rdev = rdev;
1855 
1856 		if (test_bit(In_sync, &rdev->flags)) {
1857 			char b[BDEVNAME_SIZE];
1858 			printk(KERN_INFO "raid5: device %s operational as raid"
1859 				" disk %d\n", bdevname(rdev->bdev,b),
1860 				raid_disk);
1861 			conf->working_disks++;
1862 		}
1863 	}
1864 
1865 	conf->raid_disks = mddev->raid_disks;
1866 	/*
1867 	 * mddev->degraded counts failed devices: 0 for a fully functional array.
1868 	 */
1869 	mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
1871 	conf->chunk_size = mddev->chunk_size;
1872 	conf->level = mddev->level;
1873 	conf->algorithm = mddev->layout;
1874 	conf->max_nr_stripes = NR_STRIPES;
1875 
1876 	/* device size must be a multiple of chunk size */
1877 	mddev->size &= ~(mddev->chunk_size/1024 - 1);
1878 	mddev->resync_max_sectors = mddev->size << 1;
1879 
1880 	if (!conf->chunk_size || conf->chunk_size % 4) {
1881 		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
1882 			conf->chunk_size, mdname(mddev));
1883 		goto abort;
1884 	}
1885 	if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
1886 		printk(KERN_ERR
1887 			"raid5: unsupported parity algorithm %d for %s\n",
1888 			conf->algorithm, mdname(mddev));
1889 		goto abort;
1890 	}
1891 	if (mddev->degraded > 1) {
1892 		printk(KERN_ERR "raid5: not enough operational devices for %s"
1893 			" (%d/%d failed)\n",
1894 			mdname(mddev), conf->failed_disks, conf->raid_disks);
1895 		goto abort;
1896 	}
1897 
1898 	if (mddev->degraded == 1 &&
1899 	    mddev->recovery_cp != MaxSector) {
1900 		if (mddev->ok_start_degraded)
1901 			printk(KERN_WARNING
1902 			       "raid5: starting dirty degraded array: %s"
1903 			       "- data corruption possible.\n",
1904 			       mdname(mddev));
1905 		else {
1906 			printk(KERN_ERR
1907 			       "raid5: cannot start dirty degraded array for %s\n",
1908 			       mdname(mddev));
1909 			goto abort;
1910 		}
1911 	}
1912 
1913 	{
1914 		mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
1915 		if (!mddev->thread) {
1916 			printk(KERN_ERR
1917 				"raid5: couldn't allocate thread for %s\n",
1918 				mdname(mddev));
1919 			goto abort;
1920 		}
1921 	}
1922 	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
1923 		 conf->raid_disks * (sizeof(struct bio) + PAGE_SIZE)) / 1024;
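	/* Rough sketch of the arithmetic, assuming the default NR_STRIPES
	 * (256), a hypothetical 6-device array and 4K pages:
	 *
	 *	256 * (sizeof(struct stripe_head) + 6 * (sizeof(struct bio) + 4096)) / 1024
	 *
	 * comes to roughly 6MB, dominated by the per-device stripe pages.
	 */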
1924 	if (grow_stripes(conf, conf->max_nr_stripes)) {
1925 		printk(KERN_ERR
1926 			"raid5: couldn't allocate %dkB for buffers\n", memory);
1927 		shrink_stripes(conf);
1928 		md_unregister_thread(mddev->thread);
1929 		goto abort;
1930 	} else
1931 		printk(KERN_INFO "raid5: allocated %dkB for %s\n",
1932 			memory, mdname(mddev));
1933 
1934 	if (mddev->degraded == 0)
1935 		printk("raid5: raid level %d set %s active with %d out of %d"
1936 			" devices, algorithm %d\n", conf->level, mdname(mddev),
1937 			mddev->raid_disks-mddev->degraded, mddev->raid_disks,
1938 			conf->algorithm);
1939 	else
1940 		printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
1941 			" out of %d devices, algorithm %d\n", conf->level,
1942 			mdname(mddev), mddev->raid_disks - mddev->degraded,
1943 			mddev->raid_disks, conf->algorithm);
1944 
1945 	print_raid5_conf(conf);
1946 
1947 	/* read-ahead size must cover two whole stripes, which is
1948 	 * 2 * (n-1) * chunksize where 'n' is the number of raid devices
1949 	 */
1950 	{
1951 		int stripe = (mddev->raid_disks-1) * mddev->chunk_size
1952 			/ PAGE_SIZE;
1953 		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
1954 			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
1955 	}
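	/* Worked example under assumed parameters: a 4-disk array with 64K
	 * chunks on 4K pages gives stripe = 3 * 65536 / 4096 = 48 pages, so
	 * ra_pages is raised to at least 96 pages (384K), two full stripes.
	 */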
1956 
1957 	/* Ok, everything is just fine now */
1958 	sysfs_create_group(&mddev->kobj, &raid5_attrs_group);
1959 
1960 	mddev->queue->unplug_fn = raid5_unplug_device;
1961 	mddev->queue->issue_flush_fn = raid5_issue_flush;
1962 
1963 	mddev->array_size = mddev->size * (mddev->raid_disks - 1);
1964 	return 0;
1965 abort:
1966 	if (conf) {
1967 		print_raid5_conf(conf);
1968 		kfree(conf->stripe_hashtbl);
1969 		kfree(conf);
1970 	}
1971 	mddev->private = NULL;
1972 	printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
1973 	return -EIO;
1974 }
1975 
1976 
1977 
1978 static int stop(mddev_t *mddev)
1979 {
1980 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
1981 
1982 	md_unregister_thread(mddev->thread);
1983 	mddev->thread = NULL;
1984 	shrink_stripes(conf);
1985 	kfree(conf->stripe_hashtbl);
1986 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
1987 	sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
1988 	kfree(conf);
1989 	mddev->private = NULL;
1990 	return 0;
1991 }
1992 
1993 #if RAID5_DEBUG
1994 static void print_sh (struct stripe_head *sh)
1995 {
1996 	int i;
1997 
1998 	printk("sh %llu, pd_idx %d, state %ld.\n",
1999 		(unsigned long long)sh->sector, sh->pd_idx, sh->state);
2000 	printk("sh %llu,  count %d.\n",
2001 		(unsigned long long)sh->sector, atomic_read(&sh->count));
2002 	printk("sh %llu, ", (unsigned long long)sh->sector);
2003 	for (i = 0; i < sh->raid_conf->raid_disks; i++) {
2004 		printk("(cache%d: %p %ld) ",
2005 			i, sh->dev[i].page, sh->dev[i].flags);
2006 	}
2007 	printk("\n");
2008 }
2009 
2010 static void printall (raid5_conf_t *conf)
2011 {
2012 	struct stripe_head *sh;
2013 	struct hlist_node *hn;
2014 	int i;
2015 
2016 	spin_lock_irq(&conf->device_lock);
2017 	for (i = 0; i < NR_HASH; i++) {
2018 		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
2019 			if (sh->raid_conf != conf)
2020 				continue;
2021 			print_sh(sh);
2022 		}
2023 	}
2024 	spin_unlock_irq(&conf->device_lock);
2025 }
2026 #endif
2027 
2028 static void status (struct seq_file *seq, mddev_t *mddev)
2029 {
2030 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
2031 	int i;
2032 
2033 	seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
2034 	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
2035 	for (i = 0; i < conf->raid_disks; i++)
2036 		seq_printf (seq, "%s",
2037 			       conf->disks[i].rdev &&
2038 			       test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
2039 	seq_printf (seq, "]");
2040 #if RAID5_DEBUG
2041 #define D(x) \
2042 	seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x))
2043 	printall(conf);
2044 #endif
2045 }
2046 
2047 static void print_raid5_conf (raid5_conf_t *conf)
2048 {
2049 	int i;
2050 	struct disk_info *tmp;
2051 
2052 	printk("RAID5 conf printout:\n");
2053 	if (!conf) {
2054 		printk("(conf==NULL)\n");
2055 		return;
2056 	}
2057 	printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
2058 		 conf->working_disks, conf->failed_disks);
2059 
2060 	for (i = 0; i < conf->raid_disks; i++) {
2061 		char b[BDEVNAME_SIZE];
2062 		tmp = conf->disks + i;
2063 		if (tmp->rdev)
2064 		printk(" disk %d, o:%d, dev:%s\n",
2065 			i, !test_bit(Faulty, &tmp->rdev->flags),
2066 			bdevname(tmp->rdev->bdev,b));
2067 	}
2068 }
2069 
2070 static int raid5_spare_active(mddev_t *mddev)
2071 {
2072 	int i;
2073 	raid5_conf_t *conf = mddev->private;
2074 	struct disk_info *tmp;
2075 
2076 	for (i = 0; i < conf->raid_disks; i++) {
2077 		tmp = conf->disks + i;
2078 		if (tmp->rdev
2079 		    && !test_bit(Faulty, &tmp->rdev->flags)
2080 		    && !test_bit(In_sync, &tmp->rdev->flags)) {
2081 			mddev->degraded--;
2082 			conf->failed_disks--;
2083 			conf->working_disks++;
2084 			set_bit(In_sync, &tmp->rdev->flags);
2085 		}
2086 	}
2087 	print_raid5_conf(conf);
2088 	return 0;
2089 }
2090 
2091 static int raid5_remove_disk(mddev_t *mddev, int number)
2092 {
2093 	raid5_conf_t *conf = mddev->private;
2094 	int err = 0;
2095 	mdk_rdev_t *rdev;
2096 	struct disk_info *p = conf->disks + number;
2097 
2098 	print_raid5_conf(conf);
2099 	rdev = p->rdev;
2100 	if (rdev) {
2101 		if (test_bit(In_sync, &rdev->flags) ||
2102 		    atomic_read(&rdev->nr_pending)) {
2103 			err = -EBUSY;
2104 			goto abort;
2105 		}
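		/* Unpublish the rdev, then wait out any RCU readers that may
		 * still hold the old pointer.  If nr_pending shows a request
		 * slipped in before the grace period ended, put the pointer
		 * back and let the caller retry later.
		 */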
2106 		p->rdev = NULL;
2107 		synchronize_rcu();
2108 		if (atomic_read(&rdev->nr_pending)) {
2109 			/* lost the race, try later */
2110 			err = -EBUSY;
2111 			p->rdev = rdev;
2112 		}
2113 	}
2114 abort:
2115 
2116 	print_raid5_conf(conf);
2117 	return err;
2118 }
2119 
2120 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
2121 {
2122 	raid5_conf_t *conf = mddev->private;
2123 	int found = 0;
2124 	int disk;
2125 	struct disk_info *p;
2126 
2127 	if (mddev->degraded > 1)
2128 		/* no point adding a device */
2129 		return 0;
2130 
2131 	/*
2132 	 * find the disk ...
2133 	 */
2134 	for (disk=0; disk < mddev->raid_disks; disk++)
2135 		if ((p=conf->disks + disk)->rdev == NULL) {
2136 			clear_bit(In_sync, &rdev->flags);
2137 			rdev->raid_disk = disk;
2138 			found = 1;
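			/* If the device is not returning to the slot it
			 * previously occupied, its bitmap-based resync
			 * information cannot be trusted, so force a full
			 * resync.
			 */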
2139 			if (rdev->saved_raid_disk != disk)
2140 				conf->fullsync = 1;
2141 			rcu_assign_pointer(p->rdev, rdev);
2142 			break;
2143 		}
2144 	print_raid5_conf(conf);
2145 	return found;
2146 }
2147 
2148 static int raid5_resize(mddev_t *mddev, sector_t sectors)
2149 {
2150 	/* no resync is happening, and there is enough space
2151 	 * on all devices, so we can resize.
2152 	 * We need to make sure resync covers any new space.
2153 	 * If the array is shrinking we should possibly wait until
2154 	 * any io in the removed space completes, but it hardly seems
2155 	 * worth it.
2156 	 */
2157 	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
2158 	mddev->array_size = (sectors * (mddev->raid_disks-1))>>1;
2159 	set_capacity(mddev->gendisk, mddev->array_size << 1);
2160 	mddev->changed = 1;
2161 	if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
2162 		mddev->recovery_cp = mddev->size << 1;
2163 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2164 	}
2165 	mddev->size = sectors / 2;
2166 	mddev->resync_max_sectors = sectors;
2167 	return 0;
2168 }
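/* Example of the rounding above, assuming for illustration a 4-disk array
 * with 64K chunks: sectors is trimmed to a multiple of 128 sectors (one
 * chunk), and array_size becomes sectors * 3 / 2 KB, one disk's worth of
 * space being parity.
 */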
2169 
2170 static void raid5_quiesce(mddev_t *mddev, int state)
2171 {
2172 	raid5_conf_t *conf = mddev_to_conf(mddev);
2173 
2174 	switch(state) {
2175 	case 1: /* stop all writes */
2176 		spin_lock_irq(&conf->device_lock);
2177 		conf->quiesce = 1;
2178 		wait_event_lock_irq(conf->wait_for_stripe,
2179 				    atomic_read(&conf->active_stripes) == 0,
2180 				    conf->device_lock, /* nothing */);
2181 		spin_unlock_irq(&conf->device_lock);
2182 		break;
2183 
2184 	case 0: /* re-enable writes */
2185 		spin_lock_irq(&conf->device_lock);
2186 		conf->quiesce = 0;
2187 		wake_up(&conf->wait_for_stripe);
2188 		spin_unlock_irq(&conf->device_lock);
2189 		break;
2190 	}
2191 }
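/* While conf->quiesce is set, get_active_stripe() blocks new callers, so
 * once active_stripes drains to zero no further writes can start until
 * state 0 clears the flag and wakes the waiters.
 */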
2192 
2193 static struct mdk_personality raid5_personality =
2194 {
2195 	.name		= "raid5",
2196 	.level		= 5,
2197 	.owner		= THIS_MODULE,
2198 	.make_request	= make_request,
2199 	.run		= run,
2200 	.stop		= stop,
2201 	.status		= status,
2202 	.error_handler	= error,
2203 	.hot_add_disk	= raid5_add_disk,
2204 	.hot_remove_disk= raid5_remove_disk,
2205 	.spare_active	= raid5_spare_active,
2206 	.sync_request	= sync_request,
2207 	.resize		= raid5_resize,
2208 	.quiesce	= raid5_quiesce,
2209 };
2210 
2211 static struct mdk_personality raid4_personality =
2212 {
2213 	.name		= "raid4",
2214 	.level		= 4,
2215 	.owner		= THIS_MODULE,
2216 	.make_request	= make_request,
2217 	.run		= run,
2218 	.stop		= stop,
2219 	.status		= status,
2220 	.error_handler	= error,
2221 	.hot_add_disk	= raid5_add_disk,
2222 	.hot_remove_disk= raid5_remove_disk,
2223 	.spare_active	= raid5_spare_active,
2224 	.sync_request	= sync_request,
2225 	.resize		= raid5_resize,
2226 	.quiesce	= raid5_quiesce,
2227 };
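/* raid4 reuses all of the raid5 machinery above; the behavioural difference
 * is in raid5_compute_sector(), which keeps parity on a fixed device for
 * level 4 instead of rotating it across the array.
 */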
2228 
2229 static int __init raid5_init(void)
2230 {
2231 	register_md_personality(&raid5_personality);
2232 	register_md_personality(&raid4_personality);
2233 	return 0;
2234 }
2235 
2236 static void raid5_exit(void)
2237 {
2238 	unregister_md_personality(&raid5_personality);
2239 	unregister_md_personality(&raid4_personality);
2240 }
2241 
2242 module_init(raid5_init);
2243 module_exit(raid5_exit);
2244 MODULE_LICENSE("GPL");
2245 MODULE_ALIAS("md-personality-4"); /* RAID5 */
2246 MODULE_ALIAS("md-raid5");
2247 MODULE_ALIAS("md-raid4");
2248 MODULE_ALIAS("md-level-5");
2249 MODULE_ALIAS("md-level-4");
2250