xref: /linux/drivers/md/raid5.c (revision f3a8b6645dc2e60d11f20c1c23afd964ff4e55ae)
1 /*
2  * raid5.c : Multiple Devices driver for Linux
3  *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4  *	   Copyright (C) 1999, 2000 Ingo Molnar
5  *	   Copyright (C) 2002, 2003 H. Peter Anvin
6  *
7  * RAID-4/5/6 management functions.
8  * Thanks to Penguin Computing for making the RAID-6 development possible
9  * by donating a test server!
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * You should have received a copy of the GNU General Public License
17  * (for example /usr/src/linux/COPYING); if not, write to the Free
18  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20 
21 /*
22  * BITMAP UNPLUGGING:
23  *
24  * The sequencing for updating the bitmap reliably is a little
25  * subtle (and I got it wrong the first time) so it deserves some
26  * explanation.
27  *
28  * We group bitmap updates into batches.  Each batch has a number.
29  * We may write out several batches at once, but that isn't very important.
30  * conf->seq_write is the number of the last batch successfully written.
31  * conf->seq_flush is the number of the last batch that was closed to
32  *    new additions.
33  * When we discover that we will need to write to any block in a stripe
34  * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
35  * the number of the batch it will be in. This is seq_flush+1.
36  * When we are ready to do a write, if that batch hasn't been written yet,
37  *   we plug the array and queue the stripe for later.
38  * When an unplug happens, we increment seq_flush, thus closing the current
39  *   batch.
40  * When we notice that seq_flush > seq_write, we write out all pending updates
41  * to the bitmap, and advance seq_write to where seq_flush was.
42  * This may occasionally write a bit out twice, but is sure never to
43  * miss any bits.
44  */
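/*
 * Illustrative walk-through (not part of the driver logic): suppose
 * seq_write == seq_flush == 5.  A new write arrives, the in-memory bitmap
 * bit is set and the stripe records sh->bm_seq = 6 (seq_flush + 1).
 * Batch 6 has not been written yet, so the stripe waits on bitmap_list.
 * An unplug closes the batch by bumping seq_flush to 6; the pending bitmap
 * updates are then written and seq_write advances to 6, after which the
 * stripe's write may proceed.
 */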
45 
46 #include <linux/blkdev.h>
47 #include <linux/kthread.h>
48 #include <linux/raid/pq.h>
49 #include <linux/async_tx.h>
50 #include <linux/module.h>
51 #include <linux/async.h>
52 #include <linux/seq_file.h>
53 #include <linux/cpu.h>
54 #include <linux/slab.h>
55 #include <linux/ratelimit.h>
56 #include <linux/nodemask.h>
57 #include <linux/flex_array.h>
58 #include <trace/events/block.h>
59 
60 #include "md.h"
61 #include "raid5.h"
62 #include "raid0.h"
63 #include "bitmap.h"
64 
65 #define cpu_to_group(cpu) cpu_to_node(cpu)
66 #define ANY_GROUP NUMA_NO_NODE
67 
68 static bool devices_handle_discard_safely = false;
69 module_param(devices_handle_discard_safely, bool, 0644);
70 MODULE_PARM_DESC(devices_handle_discard_safely,
71 		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
72 static struct workqueue_struct *raid5_wq;
73 /*
74  * Stripe cache
75  */
76 
77 #define NR_STRIPES		256
78 #define STRIPE_SIZE		PAGE_SIZE
79 #define STRIPE_SHIFT		(PAGE_SHIFT - 9)
80 #define STRIPE_SECTORS		(STRIPE_SIZE>>9)
81 #define	IO_THRESHOLD		1
82 #define BYPASS_THRESHOLD	1
83 #define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
84 #define HASH_MASK		(NR_HASH - 1)
85 #define MAX_STRIPE_BATCH	8
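/*
 * For illustration only: with 4K pages on a 64-bit build the values above
 * work out to STRIPE_SIZE = 4096, STRIPE_SHIFT = 3, STRIPE_SECTORS = 8
 * (512-byte sectors per stripe page), NR_HASH = 4096 / 8 = 512 hash
 * buckets and HASH_MASK = 0x1ff.
 */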
86 
87 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
88 {
89 	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
90 	return &conf->stripe_hashtbl[hash];
91 }
92 
93 static inline int stripe_hash_locks_hash(sector_t sect)
94 {
95 	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
96 }
97 
98 static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
99 {
100 	spin_lock_irq(conf->hash_locks + hash);
101 	spin_lock(&conf->device_lock);
102 }
103 
104 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
105 {
106 	spin_unlock(&conf->device_lock);
107 	spin_unlock_irq(conf->hash_locks + hash);
108 }
109 
110 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
111 {
112 	int i;
113 	local_irq_disable();
114 	spin_lock(conf->hash_locks);
115 	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
116 		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
117 	spin_lock(&conf->device_lock);
118 }
119 
120 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
121 {
122 	int i;
123 	spin_unlock(&conf->device_lock);
124 	for (i = NR_STRIPE_HASH_LOCKS; i; i--)
125 		spin_unlock(conf->hash_locks + i - 1);
126 	local_irq_enable();
127 }
128 
129 /* bios attached to a stripe+device for I/O are linked together in bi_sector
130  * order without overlap.  There may be several bios per stripe+device, and
131  * a bio could span several devices.
132  * When walking this list for a particular stripe+device, we must never proceed
133  * beyond a bio that extends past this device, as the next bio might no longer
134  * be valid.
135  * This function is used to determine the 'next' bio in the list, given the
136  * sector of the current stripe+device.
137  */
138 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
139 {
140 	int sectors = bio_sectors(bio);
141 	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
142 		return bio->bi_next;
143 	else
144 		return NULL;
145 }
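/*
 * Example (illustrative, assuming 4K pages so STRIPE_SECTORS == 8): given
 * dev sector 0, a 4-sector bio starting at sector 0 ends inside this
 * stripe, so r5_next_bio() returns bio->bi_next, which may still belong
 * to this stripe+device.  An 8-sector bio starting at sector 0 reaches
 * the end of the stripe, so NULL is returned and the walk stops.
 */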
146 
147 /*
148  * We maintain a biased count of active stripes in the bottom 16 bits of
149  * bi_phys_segments, and a count of processed stripes in the upper 16 bits
150  */
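/*
 * Purely as an example of the packing: a bi_phys_segments value of
 * 0x00030002 means the bio is counted against 2 active stripes (low 16
 * bits) and 3 stripes have already been processed for it (high 16 bits).
 */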
151 static inline int raid5_bi_processed_stripes(struct bio *bio)
152 {
153 	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
154 	return (atomic_read(segments) >> 16) & 0xffff;
155 }
156 
157 static inline int raid5_dec_bi_active_stripes(struct bio *bio)
158 {
159 	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
160 	return atomic_sub_return(1, segments) & 0xffff;
161 }
162 
163 static inline void raid5_inc_bi_active_stripes(struct bio *bio)
164 {
165 	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
166 	atomic_inc(segments);
167 }
168 
169 static inline void raid5_set_bi_processed_stripes(struct bio *bio,
170 	unsigned int cnt)
171 {
172 	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
173 	int old, new;
174 
175 	do {
176 		old = atomic_read(segments);
177 		new = (old & 0xffff) | (cnt << 16);
178 	} while (atomic_cmpxchg(segments, old, new) != old);
179 }
180 
181 static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
182 {
183 	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
184 	atomic_set(segments, cnt);
185 }
186 
187 /* Find first data disk in a raid6 stripe */
188 static inline int raid6_d0(struct stripe_head *sh)
189 {
190 	if (sh->ddf_layout)
191 		/* ddf always starts from the first device */
192 		return 0;
193 	/* md starts just after Q block */
194 	if (sh->qd_idx == sh->disks - 1)
195 		return 0;
196 	else
197 		return sh->qd_idx + 1;
198 }
199 static inline int raid6_next_disk(int disk, int raid_disks)
200 {
201 	disk++;
202 	return (disk < raid_disks) ? disk : 0;
203 }
204 
205 /* When walking through the disks in a raid6 stripe, starting at raid6_d0,
206  * we need to map each disk to a 'slot', where the data disks are slot
207  * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
208  * is raid_disks-1.  This helper does that mapping.
209  */
210 static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
211 			     int *count, int syndrome_disks)
212 {
213 	int slot = *count;
214 
215 	if (sh->ddf_layout)
216 		(*count)++;
217 	if (idx == sh->pd_idx)
218 		return syndrome_disks;
219 	if (idx == sh->qd_idx)
220 		return syndrome_disks + 1;
221 	if (!sh->ddf_layout)
222 		(*count)++;
223 	return slot;
224 }
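/*
 * Illustrative mapping (md layout, not ddf): for a 5-device RAID-6 stripe
 * with pd_idx == 3 and qd_idx == 4, syndrome_disks is 3 and raid6_d0()
 * returns 0.  Walking devices 0,1,2,3,4 yields slots 0,1,2 for the data
 * devices, slot 3 (syndrome_disks) for P and slot 4 (syndrome_disks + 1)
 * for Q.
 */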
225 
226 static void return_io(struct bio_list *return_bi)
227 {
228 	struct bio *bi;
229 	while ((bi = bio_list_pop(return_bi)) != NULL) {
230 		bi->bi_iter.bi_size = 0;
231 		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
232 					 bi, 0);
233 		bio_endio(bi);
234 	}
235 }
236 
237 static void print_raid5_conf (struct r5conf *conf);
238 
239 static int stripe_operations_active(struct stripe_head *sh)
240 {
241 	return sh->check_state || sh->reconstruct_state ||
242 	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
243 	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
244 }
245 
246 static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
247 {
248 	struct r5conf *conf = sh->raid_conf;
249 	struct r5worker_group *group;
250 	int thread_cnt;
251 	int i, cpu = sh->cpu;
252 
253 	if (!cpu_online(cpu)) {
254 		cpu = cpumask_any(cpu_online_mask);
255 		sh->cpu = cpu;
256 	}
257 
258 	if (list_empty(&sh->lru)) {
259 		struct r5worker_group *group;
260 		group = conf->worker_groups + cpu_to_group(cpu);
261 		list_add_tail(&sh->lru, &group->handle_list);
262 		group->stripes_cnt++;
263 		sh->group = group;
264 	}
265 
266 	if (conf->worker_cnt_per_group == 0) {
267 		md_wakeup_thread(conf->mddev->thread);
268 		return;
269 	}
270 
271 	group = conf->worker_groups + cpu_to_group(sh->cpu);
272 
273 	group->workers[0].working = true;
274 	/* at least one worker should run to avoid race */
275 	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
276 
277 	thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
278 	/* wakeup more workers */
279 	for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
280 		if (group->workers[i].working == false) {
281 			group->workers[i].working = true;
282 			queue_work_on(sh->cpu, raid5_wq,
283 				      &group->workers[i].work);
284 			thread_cnt--;
285 		}
286 	}
287 }
288 
289 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
290 			      struct list_head *temp_inactive_list)
291 {
292 	BUG_ON(!list_empty(&sh->lru));
293 	BUG_ON(atomic_read(&conf->active_stripes)==0);
294 	if (test_bit(STRIPE_HANDLE, &sh->state)) {
295 		if (test_bit(STRIPE_DELAYED, &sh->state) &&
296 		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
297 			list_add_tail(&sh->lru, &conf->delayed_list);
298 		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
299 			   sh->bm_seq - conf->seq_write > 0)
300 			list_add_tail(&sh->lru, &conf->bitmap_list);
301 		else {
302 			clear_bit(STRIPE_DELAYED, &sh->state);
303 			clear_bit(STRIPE_BIT_DELAY, &sh->state);
304 			if (conf->worker_cnt_per_group == 0) {
305 				list_add_tail(&sh->lru, &conf->handle_list);
306 			} else {
307 				raid5_wakeup_stripe_thread(sh);
308 				return;
309 			}
310 		}
311 		md_wakeup_thread(conf->mddev->thread);
312 	} else {
313 		BUG_ON(stripe_operations_active(sh));
314 		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
315 			if (atomic_dec_return(&conf->preread_active_stripes)
316 			    < IO_THRESHOLD)
317 				md_wakeup_thread(conf->mddev->thread);
318 		atomic_dec(&conf->active_stripes);
319 		if (!test_bit(STRIPE_EXPANDING, &sh->state))
320 			list_add_tail(&sh->lru, temp_inactive_list);
321 	}
322 }
323 
324 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
325 			     struct list_head *temp_inactive_list)
326 {
327 	if (atomic_dec_and_test(&sh->count))
328 		do_release_stripe(conf, sh, temp_inactive_list);
329 }
330 
331 /*
332  * If @hash is NR_STRIPE_HASH_LOCKS, temp_inactive_list is an array of
333  * NR_STRIPE_HASH_LOCKS lists (one per hash lock); otherwise it is a single list.
334  * Be careful: only one task can add/delete stripes from temp_inactive_list at
335  * a given time. Adding stripes only takes the device lock, while deleting
336  * stripes only takes the hash lock.
337  */
338 static void release_inactive_stripe_list(struct r5conf *conf,
339 					 struct list_head *temp_inactive_list,
340 					 int hash)
341 {
342 	int size;
343 	bool do_wakeup = false;
344 	unsigned long flags;
345 
346 	if (hash == NR_STRIPE_HASH_LOCKS) {
347 		size = NR_STRIPE_HASH_LOCKS;
348 		hash = NR_STRIPE_HASH_LOCKS - 1;
349 	} else
350 		size = 1;
351 	while (size) {
352 		struct list_head *list = &temp_inactive_list[size - 1];
353 
354 		/*
355 		 * We don't hold any lock here yet, raid5_get_active_stripe() might
356 		 * remove stripes from the list
357 		 */
358 		if (!list_empty_careful(list)) {
359 			spin_lock_irqsave(conf->hash_locks + hash, flags);
360 			if (list_empty(conf->inactive_list + hash) &&
361 			    !list_empty(list))
362 				atomic_dec(&conf->empty_inactive_list_nr);
363 			list_splice_tail_init(list, conf->inactive_list + hash);
364 			do_wakeup = true;
365 			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
366 		}
367 		size--;
368 		hash--;
369 	}
370 
371 	if (do_wakeup) {
372 		wake_up(&conf->wait_for_stripe);
373 		if (atomic_read(&conf->active_stripes) == 0)
374 			wake_up(&conf->wait_for_quiescent);
375 		if (conf->retry_read_aligned)
376 			md_wakeup_thread(conf->mddev->thread);
377 	}
378 }
379 
380 /* should hold conf->device_lock already */
381 static int release_stripe_list(struct r5conf *conf,
382 			       struct list_head *temp_inactive_list)
383 {
384 	struct stripe_head *sh;
385 	int count = 0;
386 	struct llist_node *head;
387 
388 	head = llist_del_all(&conf->released_stripes);
389 	head = llist_reverse_order(head);
390 	while (head) {
391 		int hash;
392 
393 		sh = llist_entry(head, struct stripe_head, release_list);
394 		head = llist_next(head);
395 		/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
396 		smp_mb();
397 		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
398 		/*
399 		 * Don't worry if the bit gets set again here: whenever it is
400 		 * set again, the stripe's count is always > 1. The same is
401 		 * true for the STRIPE_ON_UNPLUG_LIST bit.
402 		 */
403 		hash = sh->hash_lock_index;
404 		__release_stripe(conf, sh, &temp_inactive_list[hash]);
405 		count++;
406 	}
407 
408 	return count;
409 }
410 
411 void raid5_release_stripe(struct stripe_head *sh)
412 {
413 	struct r5conf *conf = sh->raid_conf;
414 	unsigned long flags;
415 	struct list_head list;
416 	int hash;
417 	bool wakeup;
418 
419 	/* Avoid release_list until the last reference.
420 	 */
421 	if (atomic_add_unless(&sh->count, -1, 1))
422 		return;
423 
424 	if (unlikely(!conf->mddev->thread) ||
425 		test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
426 		goto slow_path;
427 	wakeup = llist_add(&sh->release_list, &conf->released_stripes);
428 	if (wakeup)
429 		md_wakeup_thread(conf->mddev->thread);
430 	return;
431 slow_path:
432 	local_irq_save(flags);
433 	/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
434 	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
435 		INIT_LIST_HEAD(&list);
436 		hash = sh->hash_lock_index;
437 		do_release_stripe(conf, sh, &list);
438 		spin_unlock(&conf->device_lock);
439 		release_inactive_stripe_list(conf, &list, hash);
440 	}
441 	local_irq_restore(flags);
442 }
443 
444 static inline void remove_hash(struct stripe_head *sh)
445 {
446 	pr_debug("remove_hash(), stripe %llu\n",
447 		(unsigned long long)sh->sector);
448 
449 	hlist_del_init(&sh->hash);
450 }
451 
452 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
453 {
454 	struct hlist_head *hp = stripe_hash(conf, sh->sector);
455 
456 	pr_debug("insert_hash(), stripe %llu\n",
457 		(unsigned long long)sh->sector);
458 
459 	hlist_add_head(&sh->hash, hp);
460 }
461 
462 /* find an idle stripe, make sure it is unhashed, and return it. */
463 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
464 {
465 	struct stripe_head *sh = NULL;
466 	struct list_head *first;
467 
468 	if (list_empty(conf->inactive_list + hash))
469 		goto out;
470 	first = (conf->inactive_list + hash)->next;
471 	sh = list_entry(first, struct stripe_head, lru);
472 	list_del_init(first);
473 	remove_hash(sh);
474 	atomic_inc(&conf->active_stripes);
475 	BUG_ON(hash != sh->hash_lock_index);
476 	if (list_empty(conf->inactive_list + hash))
477 		atomic_inc(&conf->empty_inactive_list_nr);
478 out:
479 	return sh;
480 }
481 
482 static void shrink_buffers(struct stripe_head *sh)
483 {
484 	struct page *p;
485 	int i;
486 	int num = sh->raid_conf->pool_size;
487 
488 	for (i = 0; i < num ; i++) {
489 		WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
490 		p = sh->dev[i].page;
491 		if (!p)
492 			continue;
493 		sh->dev[i].page = NULL;
494 		put_page(p);
495 	}
496 }
497 
498 static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
499 {
500 	int i;
501 	int num = sh->raid_conf->pool_size;
502 
503 	for (i = 0; i < num; i++) {
504 		struct page *page;
505 
506 		if (!(page = alloc_page(gfp))) {
507 			return 1;
508 		}
509 		sh->dev[i].page = page;
510 		sh->dev[i].orig_page = page;
511 	}
512 	return 0;
513 }
514 
515 static void raid5_build_block(struct stripe_head *sh, int i, int previous);
516 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
517 			    struct stripe_head *sh);
518 
519 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
520 {
521 	struct r5conf *conf = sh->raid_conf;
522 	int i, seq;
523 
524 	BUG_ON(atomic_read(&sh->count) != 0);
525 	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
526 	BUG_ON(stripe_operations_active(sh));
527 	BUG_ON(sh->batch_head);
528 
529 	pr_debug("init_stripe called, stripe %llu\n",
530 		(unsigned long long)sector);
531 retry:
532 	seq = read_seqcount_begin(&conf->gen_lock);
533 	sh->generation = conf->generation - previous;
534 	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
535 	sh->sector = sector;
536 	stripe_set_idx(sector, conf, previous, sh);
537 	sh->state = 0;
538 
539 	for (i = sh->disks; i--; ) {
540 		struct r5dev *dev = &sh->dev[i];
541 
542 		if (dev->toread || dev->read || dev->towrite || dev->written ||
543 		    test_bit(R5_LOCKED, &dev->flags)) {
544 			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
545 			       (unsigned long long)sh->sector, i, dev->toread,
546 			       dev->read, dev->towrite, dev->written,
547 			       test_bit(R5_LOCKED, &dev->flags));
548 			WARN_ON(1);
549 		}
550 		dev->flags = 0;
551 		raid5_build_block(sh, i, previous);
552 	}
553 	if (read_seqcount_retry(&conf->gen_lock, seq))
554 		goto retry;
555 	sh->overwrite_disks = 0;
556 	insert_hash(conf, sh);
557 	sh->cpu = smp_processor_id();
558 	set_bit(STRIPE_BATCH_READY, &sh->state);
559 }
560 
561 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
562 					 short generation)
563 {
564 	struct stripe_head *sh;
565 
566 	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
567 	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
568 		if (sh->sector == sector && sh->generation == generation)
569 			return sh;
570 	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
571 	return NULL;
572 }
573 
574 /*
575  * Need to check if array has failed when deciding whether to:
576  *  - start an array
577  *  - remove non-faulty devices
578  *  - add a spare
579  *  - allow a reshape
580  * This determination is simple when no reshape is happening.
581  * However if there is a reshape, we need to carefully check
582  * both the before and after sections.
583  * This is because some failed devices may only affect one
584  * of the two sections, and some non-in_sync devices may
585  * be in_sync in the section most affected by failed devices.
586  */
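/*
 * Simple example with no reshape: a 4-device RAID-5 (max_degraded == 1)
 * with one Faulty member and no replacement gives degraded == 1, so the
 * array keeps running; losing a second device would make has_failed()
 * report failure.
 */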
587 static int calc_degraded(struct r5conf *conf)
588 {
589 	int degraded, degraded2;
590 	int i;
591 
592 	rcu_read_lock();
593 	degraded = 0;
594 	for (i = 0; i < conf->previous_raid_disks; i++) {
595 		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
596 		if (rdev && test_bit(Faulty, &rdev->flags))
597 			rdev = rcu_dereference(conf->disks[i].replacement);
598 		if (!rdev || test_bit(Faulty, &rdev->flags))
599 			degraded++;
600 		else if (test_bit(In_sync, &rdev->flags))
601 			;
602 		else
603 			/* not in-sync or faulty.
604 			 * If the reshape increases the number of devices,
605 			 * this is being recovered by the reshape, so
606 			 * this 'previous' section is not in_sync.
607 			 * If the number of devices is being reduced however,
608 			 * the device can only be part of the array if
609 			 * we are reverting a reshape, so this section will
610 			 * be in-sync.
611 			 */
612 			if (conf->raid_disks >= conf->previous_raid_disks)
613 				degraded++;
614 	}
615 	rcu_read_unlock();
616 	if (conf->raid_disks == conf->previous_raid_disks)
617 		return degraded;
618 	rcu_read_lock();
619 	degraded2 = 0;
620 	for (i = 0; i < conf->raid_disks; i++) {
621 		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
622 		if (rdev && test_bit(Faulty, &rdev->flags))
623 			rdev = rcu_dereference(conf->disks[i].replacement);
624 		if (!rdev || test_bit(Faulty, &rdev->flags))
625 			degraded2++;
626 		else if (test_bit(In_sync, &rdev->flags))
627 			;
628 		else
629 			/* not in-sync or faulty.
630 			 * If reshape increases the number of devices, this
631 			 * section has already been recovered, else it
632 			 * almost certainly hasn't.
633 			 */
634 			if (conf->raid_disks <= conf->previous_raid_disks)
635 				degraded2++;
636 	}
637 	rcu_read_unlock();
638 	if (degraded2 > degraded)
639 		return degraded2;
640 	return degraded;
641 }
642 
643 static int has_failed(struct r5conf *conf)
644 {
645 	int degraded;
646 
647 	if (conf->mddev->reshape_position == MaxSector)
648 		return conf->mddev->degraded > conf->max_degraded;
649 
650 	degraded = calc_degraded(conf);
651 	if (degraded > conf->max_degraded)
652 		return 1;
653 	return 0;
654 }
655 
656 struct stripe_head *
657 raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
658 			int previous, int noblock, int noquiesce)
659 {
660 	struct stripe_head *sh;
661 	int hash = stripe_hash_locks_hash(sector);
662 	int inc_empty_inactive_list_flag;
663 
664 	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
665 
666 	spin_lock_irq(conf->hash_locks + hash);
667 
668 	do {
669 		wait_event_lock_irq(conf->wait_for_quiescent,
670 				    conf->quiesce == 0 || noquiesce,
671 				    *(conf->hash_locks + hash));
672 		sh = __find_stripe(conf, sector, conf->generation - previous);
673 		if (!sh) {
674 			if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
675 				sh = get_free_stripe(conf, hash);
676 				if (!sh && !test_bit(R5_DID_ALLOC,
677 						     &conf->cache_state))
678 					set_bit(R5_ALLOC_MORE,
679 						&conf->cache_state);
680 			}
681 			if (noblock && sh == NULL)
682 				break;
683 			if (!sh) {
684 				set_bit(R5_INACTIVE_BLOCKED,
685 					&conf->cache_state);
686 				wait_event_lock_irq(
687 					conf->wait_for_stripe,
688 					!list_empty(conf->inactive_list + hash) &&
689 					(atomic_read(&conf->active_stripes)
690 					 < (conf->max_nr_stripes * 3 / 4)
691 					 || !test_bit(R5_INACTIVE_BLOCKED,
692 						      &conf->cache_state)),
693 					*(conf->hash_locks + hash));
694 				clear_bit(R5_INACTIVE_BLOCKED,
695 					  &conf->cache_state);
696 			} else {
697 				init_stripe(sh, sector, previous);
698 				atomic_inc(&sh->count);
699 			}
700 		} else if (!atomic_inc_not_zero(&sh->count)) {
701 			spin_lock(&conf->device_lock);
702 			if (!atomic_read(&sh->count)) {
703 				if (!test_bit(STRIPE_HANDLE, &sh->state))
704 					atomic_inc(&conf->active_stripes);
705 				BUG_ON(list_empty(&sh->lru) &&
706 				       !test_bit(STRIPE_EXPANDING, &sh->state));
707 				inc_empty_inactive_list_flag = 0;
708 				if (!list_empty(conf->inactive_list + hash))
709 					inc_empty_inactive_list_flag = 1;
710 				list_del_init(&sh->lru);
711 				if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
712 					atomic_inc(&conf->empty_inactive_list_nr);
713 				if (sh->group) {
714 					sh->group->stripes_cnt--;
715 					sh->group = NULL;
716 				}
717 			}
718 			atomic_inc(&sh->count);
719 			spin_unlock(&conf->device_lock);
720 		}
721 	} while (sh == NULL);
722 
723 	spin_unlock_irq(conf->hash_locks + hash);
724 	return sh;
725 }
726 
727 static bool is_full_stripe_write(struct stripe_head *sh)
728 {
729 	BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
730 	return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded);
731 }
732 
733 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
734 {
735 	local_irq_disable();
736 	if (sh1 > sh2) {
737 		spin_lock(&sh2->stripe_lock);
738 		spin_lock_nested(&sh1->stripe_lock, 1);
739 	} else {
740 		spin_lock(&sh1->stripe_lock);
741 		spin_lock_nested(&sh2->stripe_lock, 1);
742 	}
743 }
744 
745 static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
746 {
747 	spin_unlock(&sh1->stripe_lock);
748 	spin_unlock(&sh2->stripe_lock);
749 	local_irq_enable();
750 }
751 
752 /* Only a freshly initialized stripe doing a full-stripe write can be added to a batch list */
753 static bool stripe_can_batch(struct stripe_head *sh)
754 {
755 	struct r5conf *conf = sh->raid_conf;
756 
757 	if (conf->log)
758 		return false;
759 	return test_bit(STRIPE_BATCH_READY, &sh->state) &&
760 		!test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
761 		is_full_stripe_write(sh);
762 }
763 
764 /* We only search backwards, i.e. for the stripe immediately before this one in the chunk */
765 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh)
766 {
767 	struct stripe_head *head;
768 	sector_t head_sector, tmp_sec;
769 	int hash;
770 	int dd_idx;
771 	int inc_empty_inactive_list_flag;
772 
773 	/* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
774 	tmp_sec = sh->sector;
775 	if (!sector_div(tmp_sec, conf->chunk_sectors))
776 		return;
777 	head_sector = sh->sector - STRIPE_SECTORS;
778 
779 	hash = stripe_hash_locks_hash(head_sector);
780 	spin_lock_irq(conf->hash_locks + hash);
781 	head = __find_stripe(conf, head_sector, conf->generation);
782 	if (head && !atomic_inc_not_zero(&head->count)) {
783 		spin_lock(&conf->device_lock);
784 		if (!atomic_read(&head->count)) {
785 			if (!test_bit(STRIPE_HANDLE, &head->state))
786 				atomic_inc(&conf->active_stripes);
787 			BUG_ON(list_empty(&head->lru) &&
788 			       !test_bit(STRIPE_EXPANDING, &head->state));
789 			inc_empty_inactive_list_flag = 0;
790 			if (!list_empty(conf->inactive_list + hash))
791 				inc_empty_inactive_list_flag = 1;
792 			list_del_init(&head->lru);
793 			if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
794 				atomic_inc(&conf->empty_inactive_list_nr);
795 			if (head->group) {
796 				head->group->stripes_cnt--;
797 				head->group = NULL;
798 			}
799 		}
800 		atomic_inc(&head->count);
801 		spin_unlock(&conf->device_lock);
802 	}
803 	spin_unlock_irq(conf->hash_locks + hash);
804 
805 	if (!head)
806 		return;
807 	if (!stripe_can_batch(head))
808 		goto out;
809 
810 	lock_two_stripes(head, sh);
811 	/* clear_batch_ready clears the flag */
812 	if (!stripe_can_batch(head) || !stripe_can_batch(sh))
813 		goto unlock_out;
814 
815 	if (sh->batch_head)
816 		goto unlock_out;
817 
818 	dd_idx = 0;
819 	while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
820 		dd_idx++;
821 	if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
822 	    bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
823 		goto unlock_out;
824 
825 	if (head->batch_head) {
826 		spin_lock(&head->batch_head->batch_lock);
827 		/* This batch list is already running */
828 		if (!stripe_can_batch(head)) {
829 			spin_unlock(&head->batch_head->batch_lock);
830 			goto unlock_out;
831 		}
832 
833 		/*
834 		 * At this point, head's BATCH_READY could be cleared, but we
835 		 * can still add the stripe to the batch list
836 		 */
837 		list_add(&sh->batch_list, &head->batch_list);
838 		spin_unlock(&head->batch_head->batch_lock);
839 
840 		sh->batch_head = head->batch_head;
841 	} else {
842 		head->batch_head = head;
843 		sh->batch_head = head->batch_head;
844 		spin_lock(&head->batch_lock);
845 		list_add_tail(&sh->batch_list, &head->batch_list);
846 		spin_unlock(&head->batch_lock);
847 	}
848 
849 	if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
850 		if (atomic_dec_return(&conf->preread_active_stripes)
851 		    < IO_THRESHOLD)
852 			md_wakeup_thread(conf->mddev->thread);
853 
854 	if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) {
855 		int seq = sh->bm_seq;
856 		if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) &&
857 		    sh->batch_head->bm_seq > seq)
858 			seq = sh->batch_head->bm_seq;
859 		set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state);
860 		sh->batch_head->bm_seq = seq;
861 	}
862 
863 	atomic_inc(&sh->count);
864 unlock_out:
865 	unlock_two_stripes(head, sh);
866 out:
867 	raid5_release_stripe(head);
868 }
869 
870 /* Determine if 'data_offset' or 'new_data_offset' should be used
871  * in this stripe_head.
872  */
873 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
874 {
875 	sector_t progress = conf->reshape_progress;
876 	/* Need a memory barrier to make sure we see the value
877 	 * of conf->generation, or ->data_offset that was set before
878 	 * reshape_progress was updated.
879 	 */
880 	smp_rmb();
881 	if (progress == MaxSector)
882 		return 0;
883 	if (sh->generation == conf->generation - 1)
884 		return 0;
885 	/* We are in a reshape, and this is a new-generation stripe,
886 	 * so use new_data_offset.
887 	 */
888 	return 1;
889 }
890 
891 static void
892 raid5_end_read_request(struct bio *bi);
893 static void
894 raid5_end_write_request(struct bio *bi);
895 
896 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
897 {
898 	struct r5conf *conf = sh->raid_conf;
899 	int i, disks = sh->disks;
900 	struct stripe_head *head_sh = sh;
901 
902 	might_sleep();
903 
904 	if (r5l_write_stripe(conf->log, sh) == 0)
905 		return;
906 	for (i = disks; i--; ) {
907 		int op, op_flags = 0;
908 		int replace_only = 0;
909 		struct bio *bi, *rbi;
910 		struct md_rdev *rdev, *rrdev = NULL;
911 
912 		sh = head_sh;
913 		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
914 			op = REQ_OP_WRITE;
915 			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
916 				op_flags = WRITE_FUA;
917 			if (test_bit(R5_Discard, &sh->dev[i].flags))
918 				op = REQ_OP_DISCARD;
919 		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
920 			op = REQ_OP_READ;
921 		else if (test_and_clear_bit(R5_WantReplace,
922 					    &sh->dev[i].flags)) {
923 			op = REQ_OP_WRITE;
924 			replace_only = 1;
925 		} else
926 			continue;
927 		if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
928 			op_flags |= REQ_SYNC;
929 
930 again:
931 		bi = &sh->dev[i].req;
932 		rbi = &sh->dev[i].rreq; /* For writing to replacement */
933 
934 		rcu_read_lock();
935 		rrdev = rcu_dereference(conf->disks[i].replacement);
936 		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
937 		rdev = rcu_dereference(conf->disks[i].rdev);
938 		if (!rdev) {
939 			rdev = rrdev;
940 			rrdev = NULL;
941 		}
942 		if (op_is_write(op)) {
943 			if (replace_only)
944 				rdev = NULL;
945 			if (rdev == rrdev)
946 				/* We raced and saw duplicates */
947 				rrdev = NULL;
948 		} else {
949 			if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev)
950 				rdev = rrdev;
951 			rrdev = NULL;
952 		}
953 
954 		if (rdev && test_bit(Faulty, &rdev->flags))
955 			rdev = NULL;
956 		if (rdev)
957 			atomic_inc(&rdev->nr_pending);
958 		if (rrdev && test_bit(Faulty, &rrdev->flags))
959 			rrdev = NULL;
960 		if (rrdev)
961 			atomic_inc(&rrdev->nr_pending);
962 		rcu_read_unlock();
963 
964 		/* We have already checked bad blocks for reads.  Now
965 		 * need to check for writes.  We never accept write errors
966 		 * on the replacement, so we don't need to check rrdev.
967 		 */
968 		while (op_is_write(op) && rdev &&
969 		       test_bit(WriteErrorSeen, &rdev->flags)) {
970 			sector_t first_bad;
971 			int bad_sectors;
972 			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
973 					      &first_bad, &bad_sectors);
974 			if (!bad)
975 				break;
976 
977 			if (bad < 0) {
978 				set_bit(BlockedBadBlocks, &rdev->flags);
979 				if (!conf->mddev->external &&
980 				    conf->mddev->flags) {
981 					/* It is very unlikely, but we might
982 					 * still need to write out the
983 					 * bad block log - better give it
984 					 * a chance */
985 					md_check_recovery(conf->mddev);
986 				}
987 				/*
988 				 * Because md_wait_for_blocked_rdev
989 				 * will dec nr_pending, we must
990 				 * increment it first.
991 				 */
992 				atomic_inc(&rdev->nr_pending);
993 				md_wait_for_blocked_rdev(rdev, conf->mddev);
994 			} else {
995 				/* Acknowledged bad block - skip the write */
996 				rdev_dec_pending(rdev, conf->mddev);
997 				rdev = NULL;
998 			}
999 		}
1000 
1001 		if (rdev) {
1002 			if (s->syncing || s->expanding || s->expanded
1003 			    || s->replacing)
1004 				md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1005 
1006 			set_bit(STRIPE_IO_STARTED, &sh->state);
1007 
1008 			bi->bi_bdev = rdev->bdev;
1009 			bio_set_op_attrs(bi, op, op_flags);
1010 			bi->bi_end_io = op_is_write(op)
1011 				? raid5_end_write_request
1012 				: raid5_end_read_request;
1013 			bi->bi_private = sh;
1014 
1015 			pr_debug("%s: for %llu schedule op %d on disc %d\n",
1016 				__func__, (unsigned long long)sh->sector,
1017 				bi->bi_opf, i);
1018 			atomic_inc(&sh->count);
1019 			if (sh != head_sh)
1020 				atomic_inc(&head_sh->count);
1021 			if (use_new_offset(conf, sh))
1022 				bi->bi_iter.bi_sector = (sh->sector
1023 						 + rdev->new_data_offset);
1024 			else
1025 				bi->bi_iter.bi_sector = (sh->sector
1026 						 + rdev->data_offset);
1027 			if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
1028 				bi->bi_opf |= REQ_NOMERGE;
1029 
1030 			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
1031 				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
1032 			sh->dev[i].vec.bv_page = sh->dev[i].page;
1033 			bi->bi_vcnt = 1;
1034 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1035 			bi->bi_io_vec[0].bv_offset = 0;
1036 			bi->bi_iter.bi_size = STRIPE_SIZE;
1037 			/*
1038 			 * If this is a discard request, set bi_vcnt to 0. We don't
1039 			 * want to confuse SCSI because SCSI will replace the payload
1040 			 */
1041 			if (op == REQ_OP_DISCARD)
1042 				bi->bi_vcnt = 0;
1043 			if (rrdev)
1044 				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
1045 
1046 			if (conf->mddev->gendisk)
1047 				trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
1048 						      bi, disk_devt(conf->mddev->gendisk),
1049 						      sh->dev[i].sector);
1050 			generic_make_request(bi);
1051 		}
1052 		if (rrdev) {
1053 			if (s->syncing || s->expanding || s->expanded
1054 			    || s->replacing)
1055 				md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
1056 
1057 			set_bit(STRIPE_IO_STARTED, &sh->state);
1058 
1059 			rbi->bi_bdev = rrdev->bdev;
1060 			bio_set_op_attrs(rbi, op, op_flags);
1061 			BUG_ON(!op_is_write(op));
1062 			rbi->bi_end_io = raid5_end_write_request;
1063 			rbi->bi_private = sh;
1064 
1065 			pr_debug("%s: for %llu schedule op %d on "
1066 				 "replacement disc %d\n",
1067 				__func__, (unsigned long long)sh->sector,
1068 				rbi->bi_opf, i);
1069 			atomic_inc(&sh->count);
1070 			if (sh != head_sh)
1071 				atomic_inc(&head_sh->count);
1072 			if (use_new_offset(conf, sh))
1073 				rbi->bi_iter.bi_sector = (sh->sector
1074 						  + rrdev->new_data_offset);
1075 			else
1076 				rbi->bi_iter.bi_sector = (sh->sector
1077 						  + rrdev->data_offset);
1078 			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
1079 				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
1080 			sh->dev[i].rvec.bv_page = sh->dev[i].page;
1081 			rbi->bi_vcnt = 1;
1082 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1083 			rbi->bi_io_vec[0].bv_offset = 0;
1084 			rbi->bi_iter.bi_size = STRIPE_SIZE;
1085 			/*
1086 			 * If this is a discard request, set bi_vcnt to 0. We don't
1087 			 * want to confuse SCSI because SCSI will replace the payload
1088 			 */
1089 			if (op == REQ_OP_DISCARD)
1090 				rbi->bi_vcnt = 0;
1091 			if (conf->mddev->gendisk)
1092 				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
1093 						      rbi, disk_devt(conf->mddev->gendisk),
1094 						      sh->dev[i].sector);
1095 			generic_make_request(rbi);
1096 		}
1097 		if (!rdev && !rrdev) {
1098 			if (op_is_write(op))
1099 				set_bit(STRIPE_DEGRADED, &sh->state);
1100 			pr_debug("skip op %d on disc %d for sector %llu\n",
1101 				bi->bi_opf, i, (unsigned long long)sh->sector);
1102 			clear_bit(R5_LOCKED, &sh->dev[i].flags);
1103 			set_bit(STRIPE_HANDLE, &sh->state);
1104 		}
1105 
1106 		if (!head_sh->batch_head)
1107 			continue;
1108 		sh = list_first_entry(&sh->batch_list, struct stripe_head,
1109 				      batch_list);
1110 		if (sh != head_sh)
1111 			goto again;
1112 	}
1113 }
1114 
1115 static struct dma_async_tx_descriptor *
1116 async_copy_data(int frombio, struct bio *bio, struct page **page,
1117 	sector_t sector, struct dma_async_tx_descriptor *tx,
1118 	struct stripe_head *sh)
1119 {
1120 	struct bio_vec bvl;
1121 	struct bvec_iter iter;
1122 	struct page *bio_page;
1123 	int page_offset;
1124 	struct async_submit_ctl submit;
1125 	enum async_tx_flags flags = 0;
1126 
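	/*
	 * page_offset is the byte offset of the bio's start relative to the
	 * start of this stripe page; it is negative when the bio begins
	 * before 'sector', in which case the leading part is skipped below.
	 */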
1127 	if (bio->bi_iter.bi_sector >= sector)
1128 		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
1129 	else
1130 		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
1131 
1132 	if (frombio)
1133 		flags |= ASYNC_TX_FENCE;
1134 	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
1135 
1136 	bio_for_each_segment(bvl, bio, iter) {
1137 		int len = bvl.bv_len;
1138 		int clen;
1139 		int b_offset = 0;
1140 
1141 		if (page_offset < 0) {
1142 			b_offset = -page_offset;
1143 			page_offset += b_offset;
1144 			len -= b_offset;
1145 		}
1146 
1147 		if (len > 0 && page_offset + len > STRIPE_SIZE)
1148 			clen = STRIPE_SIZE - page_offset;
1149 		else
1150 			clen = len;
1151 
1152 		if (clen > 0) {
1153 			b_offset += bvl.bv_offset;
1154 			bio_page = bvl.bv_page;
1155 			if (frombio) {
1156 				if (sh->raid_conf->skip_copy &&
1157 				    b_offset == 0 && page_offset == 0 &&
1158 				    clen == STRIPE_SIZE)
1159 					*page = bio_page;
1160 				else
1161 					tx = async_memcpy(*page, bio_page, page_offset,
1162 						  b_offset, clen, &submit);
1163 			} else
1164 				tx = async_memcpy(bio_page, *page, b_offset,
1165 						  page_offset, clen, &submit);
1166 		}
1167 		/* chain the operations */
1168 		submit.depend_tx = tx;
1169 
1170 		if (clen < len) /* hit end of page */
1171 			break;
1172 		page_offset +=  len;
1173 	}
1174 
1175 	return tx;
1176 }
1177 
1178 static void ops_complete_biofill(void *stripe_head_ref)
1179 {
1180 	struct stripe_head *sh = stripe_head_ref;
1181 	struct bio_list return_bi = BIO_EMPTY_LIST;
1182 	int i;
1183 
1184 	pr_debug("%s: stripe %llu\n", __func__,
1185 		(unsigned long long)sh->sector);
1186 
1187 	/* clear completed biofills */
1188 	for (i = sh->disks; i--; ) {
1189 		struct r5dev *dev = &sh->dev[i];
1190 
1191 		/* acknowledge completion of a biofill operation */
1192 		/* and check if we need to reply to a read request,
1193 		 * new R5_Wantfill requests are held off until
1194 		 * !STRIPE_BIOFILL_RUN
1195 		 */
1196 		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
1197 			struct bio *rbi, *rbi2;
1198 
1199 			BUG_ON(!dev->read);
1200 			rbi = dev->read;
1201 			dev->read = NULL;
1202 			while (rbi && rbi->bi_iter.bi_sector <
1203 				dev->sector + STRIPE_SECTORS) {
1204 				rbi2 = r5_next_bio(rbi, dev->sector);
1205 				if (!raid5_dec_bi_active_stripes(rbi))
1206 					bio_list_add(&return_bi, rbi);
1207 				rbi = rbi2;
1208 			}
1209 		}
1210 	}
1211 	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
1212 
1213 	return_io(&return_bi);
1214 
1215 	set_bit(STRIPE_HANDLE, &sh->state);
1216 	raid5_release_stripe(sh);
1217 }
1218 
1219 static void ops_run_biofill(struct stripe_head *sh)
1220 {
1221 	struct dma_async_tx_descriptor *tx = NULL;
1222 	struct async_submit_ctl submit;
1223 	int i;
1224 
1225 	BUG_ON(sh->batch_head);
1226 	pr_debug("%s: stripe %llu\n", __func__,
1227 		(unsigned long long)sh->sector);
1228 
1229 	for (i = sh->disks; i--; ) {
1230 		struct r5dev *dev = &sh->dev[i];
1231 		if (test_bit(R5_Wantfill, &dev->flags)) {
1232 			struct bio *rbi;
1233 			spin_lock_irq(&sh->stripe_lock);
1234 			dev->read = rbi = dev->toread;
1235 			dev->toread = NULL;
1236 			spin_unlock_irq(&sh->stripe_lock);
1237 			while (rbi && rbi->bi_iter.bi_sector <
1238 				dev->sector + STRIPE_SECTORS) {
1239 				tx = async_copy_data(0, rbi, &dev->page,
1240 					dev->sector, tx, sh);
1241 				rbi = r5_next_bio(rbi, dev->sector);
1242 			}
1243 		}
1244 	}
1245 
1246 	atomic_inc(&sh->count);
1247 	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
1248 	async_trigger_callback(&submit);
1249 }
1250 
1251 static void mark_target_uptodate(struct stripe_head *sh, int target)
1252 {
1253 	struct r5dev *tgt;
1254 
1255 	if (target < 0)
1256 		return;
1257 
1258 	tgt = &sh->dev[target];
1259 	set_bit(R5_UPTODATE, &tgt->flags);
1260 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1261 	clear_bit(R5_Wantcompute, &tgt->flags);
1262 }
1263 
1264 static void ops_complete_compute(void *stripe_head_ref)
1265 {
1266 	struct stripe_head *sh = stripe_head_ref;
1267 
1268 	pr_debug("%s: stripe %llu\n", __func__,
1269 		(unsigned long long)sh->sector);
1270 
1271 	/* mark the computed target(s) as uptodate */
1272 	mark_target_uptodate(sh, sh->ops.target);
1273 	mark_target_uptodate(sh, sh->ops.target2);
1274 
1275 	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
1276 	if (sh->check_state == check_state_compute_run)
1277 		sh->check_state = check_state_compute_result;
1278 	set_bit(STRIPE_HANDLE, &sh->state);
1279 	raid5_release_stripe(sh);
1280 }
1281 
1282 /* return a pointer to the address conversion region of the scribble buffer */
1283 static addr_conv_t *to_addr_conv(struct stripe_head *sh,
1284 				 struct raid5_percpu *percpu, int i)
1285 {
1286 	void *addr;
1287 
1288 	addr = flex_array_get(percpu->scribble, i);
1289 	return addr + sizeof(struct page *) * (sh->disks + 2);
1290 }
1291 
1292 /* return a pointer to the address conversion region of the scribble buffer */
1293 static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
1294 {
1295 	void *addr;
1296 
1297 	addr = flex_array_get(percpu->scribble, i);
1298 	return addr;
1299 }
1300 
1301 static struct dma_async_tx_descriptor *
1302 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
1303 {
1304 	int disks = sh->disks;
1305 	struct page **xor_srcs = to_addr_page(percpu, 0);
1306 	int target = sh->ops.target;
1307 	struct r5dev *tgt = &sh->dev[target];
1308 	struct page *xor_dest = tgt->page;
1309 	int count = 0;
1310 	struct dma_async_tx_descriptor *tx;
1311 	struct async_submit_ctl submit;
1312 	int i;
1313 
1314 	BUG_ON(sh->batch_head);
1315 
1316 	pr_debug("%s: stripe %llu block: %d\n",
1317 		__func__, (unsigned long long)sh->sector, target);
1318 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1319 
1320 	for (i = disks; i--; )
1321 		if (i != target)
1322 			xor_srcs[count++] = sh->dev[i].page;
1323 
1324 	atomic_inc(&sh->count);
1325 
1326 	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
1327 			  ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
1328 	if (unlikely(count == 1))
1329 		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1330 	else
1331 		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1332 
1333 	return tx;
1334 }
1335 
1336 /* set_syndrome_sources - populate source buffers for gen_syndrome
1337  * @srcs - (struct page *) array of size sh->disks
1338  * @sh - stripe_head to parse
1339  *
1340  * Populates srcs in proper layout order for the stripe and returns the
1341  * 'count' of sources to be used in a call to async_gen_syndrome.  The P
1342  * destination buffer is recorded in srcs[count] and the Q destination
1343  * is recorded in srcs[count+1].
1344  */
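/*
 * As an illustration (md layout): for a 6-device array syndrome_disks is
 * 4, so srcs[0..3] hold the data pages in syndrome order, srcs[4] holds P
 * and srcs[5] holds Q; data slots not wanted for the requested srctype
 * are left NULL.
 */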
1345 static int set_syndrome_sources(struct page **srcs,
1346 				struct stripe_head *sh,
1347 				int srctype)
1348 {
1349 	int disks = sh->disks;
1350 	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
1351 	int d0_idx = raid6_d0(sh);
1352 	int count;
1353 	int i;
1354 
1355 	for (i = 0; i < disks; i++)
1356 		srcs[i] = NULL;
1357 
1358 	count = 0;
1359 	i = d0_idx;
1360 	do {
1361 		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1362 		struct r5dev *dev = &sh->dev[i];
1363 
1364 		if (i == sh->qd_idx || i == sh->pd_idx ||
1365 		    (srctype == SYNDROME_SRC_ALL) ||
1366 		    (srctype == SYNDROME_SRC_WANT_DRAIN &&
1367 		     test_bit(R5_Wantdrain, &dev->flags)) ||
1368 		    (srctype == SYNDROME_SRC_WRITTEN &&
1369 		     dev->written))
1370 			srcs[slot] = sh->dev[i].page;
1371 		i = raid6_next_disk(i, disks);
1372 	} while (i != d0_idx);
1373 
1374 	return syndrome_disks;
1375 }
1376 
1377 static struct dma_async_tx_descriptor *
1378 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
1379 {
1380 	int disks = sh->disks;
1381 	struct page **blocks = to_addr_page(percpu, 0);
1382 	int target;
1383 	int qd_idx = sh->qd_idx;
1384 	struct dma_async_tx_descriptor *tx;
1385 	struct async_submit_ctl submit;
1386 	struct r5dev *tgt;
1387 	struct page *dest;
1388 	int i;
1389 	int count;
1390 
1391 	BUG_ON(sh->batch_head);
1392 	if (sh->ops.target < 0)
1393 		target = sh->ops.target2;
1394 	else if (sh->ops.target2 < 0)
1395 		target = sh->ops.target;
1396 	else
1397 		/* we should only have one valid target */
1398 		BUG();
1399 	BUG_ON(target < 0);
1400 	pr_debug("%s: stripe %llu block: %d\n",
1401 		__func__, (unsigned long long)sh->sector, target);
1402 
1403 	tgt = &sh->dev[target];
1404 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1405 	dest = tgt->page;
1406 
1407 	atomic_inc(&sh->count);
1408 
1409 	if (target == qd_idx) {
1410 		count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
1411 		blocks[count] = NULL; /* regenerating p is not necessary */
1412 		BUG_ON(blocks[count+1] != dest); /* q should already be set */
1413 		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1414 				  ops_complete_compute, sh,
1415 				  to_addr_conv(sh, percpu, 0));
1416 		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
1417 	} else {
1418 		/* Compute any data- or p-drive using XOR */
1419 		count = 0;
1420 		for (i = disks; i-- ; ) {
1421 			if (i == target || i == qd_idx)
1422 				continue;
1423 			blocks[count++] = sh->dev[i].page;
1424 		}
1425 
1426 		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
1427 				  NULL, ops_complete_compute, sh,
1428 				  to_addr_conv(sh, percpu, 0));
1429 		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
1430 	}
1431 
1432 	return tx;
1433 }
1434 
1435 static struct dma_async_tx_descriptor *
1436 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
1437 {
1438 	int i, count, disks = sh->disks;
1439 	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
1440 	int d0_idx = raid6_d0(sh);
1441 	int faila = -1, failb = -1;
1442 	int target = sh->ops.target;
1443 	int target2 = sh->ops.target2;
1444 	struct r5dev *tgt = &sh->dev[target];
1445 	struct r5dev *tgt2 = &sh->dev[target2];
1446 	struct dma_async_tx_descriptor *tx;
1447 	struct page **blocks = to_addr_page(percpu, 0);
1448 	struct async_submit_ctl submit;
1449 
1450 	BUG_ON(sh->batch_head);
1451 	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
1452 		 __func__, (unsigned long long)sh->sector, target, target2);
1453 	BUG_ON(target < 0 || target2 < 0);
1454 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1455 	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
1456 
1457 	/* we need to open-code set_syndrome_sources to handle the
1458 	 * slot number conversion for 'faila' and 'failb'
1459 	 */
1460 	for (i = 0; i < disks ; i++)
1461 		blocks[i] = NULL;
1462 	count = 0;
1463 	i = d0_idx;
1464 	do {
1465 		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1466 
1467 		blocks[slot] = sh->dev[i].page;
1468 
1469 		if (i == target)
1470 			faila = slot;
1471 		if (i == target2)
1472 			failb = slot;
1473 		i = raid6_next_disk(i, disks);
1474 	} while (i != d0_idx);
1475 
1476 	BUG_ON(faila == failb);
1477 	if (failb < faila)
1478 		swap(faila, failb);
1479 	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
1480 		 __func__, (unsigned long long)sh->sector, faila, failb);
1481 
1482 	atomic_inc(&sh->count);
1483 
1484 	if (failb == syndrome_disks+1) {
1485 		/* Q disk is one of the missing disks */
1486 		if (faila == syndrome_disks) {
1487 			/* Missing P+Q, just recompute */
1488 			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1489 					  ops_complete_compute, sh,
1490 					  to_addr_conv(sh, percpu, 0));
1491 			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
1492 						  STRIPE_SIZE, &submit);
1493 		} else {
1494 			struct page *dest;
1495 			int data_target;
1496 			int qd_idx = sh->qd_idx;
1497 
1498 			/* Missing D+Q: recompute D from P, then recompute Q */
1499 			if (target == qd_idx)
1500 				data_target = target2;
1501 			else
1502 				data_target = target;
1503 
1504 			count = 0;
1505 			for (i = disks; i-- ; ) {
1506 				if (i == data_target || i == qd_idx)
1507 					continue;
1508 				blocks[count++] = sh->dev[i].page;
1509 			}
1510 			dest = sh->dev[data_target].page;
1511 			init_async_submit(&submit,
1512 					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
1513 					  NULL, NULL, NULL,
1514 					  to_addr_conv(sh, percpu, 0));
1515 			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
1516 				       &submit);
1517 
1518 			count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
1519 			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
1520 					  ops_complete_compute, sh,
1521 					  to_addr_conv(sh, percpu, 0));
1522 			return async_gen_syndrome(blocks, 0, count+2,
1523 						  STRIPE_SIZE, &submit);
1524 		}
1525 	} else {
1526 		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1527 				  ops_complete_compute, sh,
1528 				  to_addr_conv(sh, percpu, 0));
1529 		if (failb == syndrome_disks) {
1530 			/* We're missing D+P. */
1531 			return async_raid6_datap_recov(syndrome_disks+2,
1532 						       STRIPE_SIZE, faila,
1533 						       blocks, &submit);
1534 		} else {
1535 			/* We're missing D+D. */
1536 			return async_raid6_2data_recov(syndrome_disks+2,
1537 						       STRIPE_SIZE, faila, failb,
1538 						       blocks, &submit);
1539 		}
1540 	}
1541 }
1542 
1543 static void ops_complete_prexor(void *stripe_head_ref)
1544 {
1545 	struct stripe_head *sh = stripe_head_ref;
1546 
1547 	pr_debug("%s: stripe %llu\n", __func__,
1548 		(unsigned long long)sh->sector);
1549 }
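/*
 * Background for the prexor (read-modify-write) step, stated informally:
 * for RAID-5, new_parity = old_parity ^ old_data ^ new_data.  The xor
 * below folds the old contents of the blocks about to be rewritten into
 * the parity page, so the later reconstruct pass only has to xor in the
 * new data.
 */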
1550 
1551 static struct dma_async_tx_descriptor *
1552 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
1553 		struct dma_async_tx_descriptor *tx)
1554 {
1555 	int disks = sh->disks;
1556 	struct page **xor_srcs = to_addr_page(percpu, 0);
1557 	int count = 0, pd_idx = sh->pd_idx, i;
1558 	struct async_submit_ctl submit;
1559 
1560 	/* existing parity data subtracted */
1561 	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1562 
1563 	BUG_ON(sh->batch_head);
1564 	pr_debug("%s: stripe %llu\n", __func__,
1565 		(unsigned long long)sh->sector);
1566 
1567 	for (i = disks; i--; ) {
1568 		struct r5dev *dev = &sh->dev[i];
1569 		/* Only process blocks that are known to be uptodate */
1570 		if (test_bit(R5_Wantdrain, &dev->flags))
1571 			xor_srcs[count++] = dev->page;
1572 	}
1573 
1574 	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
1575 			  ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
1576 	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1577 
1578 	return tx;
1579 }
1580 
1581 static struct dma_async_tx_descriptor *
1582 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
1583 		struct dma_async_tx_descriptor *tx)
1584 {
1585 	struct page **blocks = to_addr_page(percpu, 0);
1586 	int count;
1587 	struct async_submit_ctl submit;
1588 
1589 	pr_debug("%s: stripe %llu\n", __func__,
1590 		(unsigned long long)sh->sector);
1591 
1592 	count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN);
1593 
1594 	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
1595 			  ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
1596 	tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
1597 
1598 	return tx;
1599 }
1600 
1601 static struct dma_async_tx_descriptor *
1602 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1603 {
1604 	int disks = sh->disks;
1605 	int i;
1606 	struct stripe_head *head_sh = sh;
1607 
1608 	pr_debug("%s: stripe %llu\n", __func__,
1609 		(unsigned long long)sh->sector);
1610 
1611 	for (i = disks; i--; ) {
1612 		struct r5dev *dev;
1613 		struct bio *chosen;
1614 
1615 		sh = head_sh;
1616 		if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) {
1617 			struct bio *wbi;
1618 
1619 again:
1620 			dev = &sh->dev[i];
1621 			spin_lock_irq(&sh->stripe_lock);
1622 			chosen = dev->towrite;
1623 			dev->towrite = NULL;
1624 			sh->overwrite_disks = 0;
1625 			BUG_ON(dev->written);
1626 			wbi = dev->written = chosen;
1627 			spin_unlock_irq(&sh->stripe_lock);
1628 			WARN_ON(dev->page != dev->orig_page);
1629 
1630 			while (wbi && wbi->bi_iter.bi_sector <
1631 				dev->sector + STRIPE_SECTORS) {
1632 				if (wbi->bi_opf & REQ_FUA)
1633 					set_bit(R5_WantFUA, &dev->flags);
1634 				if (wbi->bi_opf & REQ_SYNC)
1635 					set_bit(R5_SyncIO, &dev->flags);
1636 				if (bio_op(wbi) == REQ_OP_DISCARD)
1637 					set_bit(R5_Discard, &dev->flags);
1638 				else {
1639 					tx = async_copy_data(1, wbi, &dev->page,
1640 						dev->sector, tx, sh);
1641 					if (dev->page != dev->orig_page) {
1642 						set_bit(R5_SkipCopy, &dev->flags);
1643 						clear_bit(R5_UPTODATE, &dev->flags);
1644 						clear_bit(R5_OVERWRITE, &dev->flags);
1645 					}
1646 				}
1647 				wbi = r5_next_bio(wbi, dev->sector);
1648 			}
1649 
1650 			if (head_sh->batch_head) {
1651 				sh = list_first_entry(&sh->batch_list,
1652 						      struct stripe_head,
1653 						      batch_list);
1654 				if (sh == head_sh)
1655 					continue;
1656 				goto again;
1657 			}
1658 		}
1659 	}
1660 
1661 	return tx;
1662 }
1663 
1664 static void ops_complete_reconstruct(void *stripe_head_ref)
1665 {
1666 	struct stripe_head *sh = stripe_head_ref;
1667 	int disks = sh->disks;
1668 	int pd_idx = sh->pd_idx;
1669 	int qd_idx = sh->qd_idx;
1670 	int i;
1671 	bool fua = false, sync = false, discard = false;
1672 
1673 	pr_debug("%s: stripe %llu\n", __func__,
1674 		(unsigned long long)sh->sector);
1675 
1676 	for (i = disks; i--; ) {
1677 		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
1678 		sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
1679 		discard |= test_bit(R5_Discard, &sh->dev[i].flags);
1680 	}
1681 
1682 	for (i = disks; i--; ) {
1683 		struct r5dev *dev = &sh->dev[i];
1684 
1685 		if (dev->written || i == pd_idx || i == qd_idx) {
1686 			if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
1687 				set_bit(R5_UPTODATE, &dev->flags);
1688 			if (fua)
1689 				set_bit(R5_WantFUA, &dev->flags);
1690 			if (sync)
1691 				set_bit(R5_SyncIO, &dev->flags);
1692 		}
1693 	}
1694 
1695 	if (sh->reconstruct_state == reconstruct_state_drain_run)
1696 		sh->reconstruct_state = reconstruct_state_drain_result;
1697 	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
1698 		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
1699 	else {
1700 		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
1701 		sh->reconstruct_state = reconstruct_state_result;
1702 	}
1703 
1704 	set_bit(STRIPE_HANDLE, &sh->state);
1705 	raid5_release_stripe(sh);
1706 }
1707 
1708 static void
1709 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
1710 		     struct dma_async_tx_descriptor *tx)
1711 {
1712 	int disks = sh->disks;
1713 	struct page **xor_srcs;
1714 	struct async_submit_ctl submit;
1715 	int count, pd_idx = sh->pd_idx, i;
1716 	struct page *xor_dest;
1717 	int prexor = 0;
1718 	unsigned long flags;
1719 	int j = 0;
1720 	struct stripe_head *head_sh = sh;
1721 	int last_stripe;
1722 
1723 	pr_debug("%s: stripe %llu\n", __func__,
1724 		(unsigned long long)sh->sector);
1725 
1726 	for (i = 0; i < sh->disks; i++) {
1727 		if (pd_idx == i)
1728 			continue;
1729 		if (!test_bit(R5_Discard, &sh->dev[i].flags))
1730 			break;
1731 	}
1732 	if (i >= sh->disks) {
1733 		atomic_inc(&sh->count);
1734 		set_bit(R5_Discard, &sh->dev[pd_idx].flags);
1735 		ops_complete_reconstruct(sh);
1736 		return;
1737 	}
1738 again:
1739 	count = 0;
1740 	xor_srcs = to_addr_page(percpu, j);
1741 	/* Check if prexor is active, which means we only process blocks
1742 	 * that are part of a read-modify-write (i.e. blocks with dev->written set)
1743 	 */
1744 	if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1745 		prexor = 1;
1746 		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1747 		for (i = disks; i--; ) {
1748 			struct r5dev *dev = &sh->dev[i];
1749 			if (head_sh->dev[i].written)
1750 				xor_srcs[count++] = dev->page;
1751 		}
1752 	} else {
1753 		xor_dest = sh->dev[pd_idx].page;
1754 		for (i = disks; i--; ) {
1755 			struct r5dev *dev = &sh->dev[i];
1756 			if (i != pd_idx)
1757 				xor_srcs[count++] = dev->page;
1758 		}
1759 	}
1760 
1761 	/* 1/ if we prexor'd then the dest is reused as a source, so set
1762 	 *    ASYNC_TX_XOR_DROP_DST
1763 	 * 2/ if we did not prexor then we are redoing the parity, so set
1764 	 *    ASYNC_TX_XOR_ZERO_DST; both flags matter for the synchronous xor case
1765 	 */
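	/*
	 * When this stripe heads a batch, the whole batch is reconstructed in
	 * one pass: we walk the members via batch_list, and only the last one
	 * (the entry whose next link wraps back around to head_sh) is
	 * submitted with ASYNC_TX_ACK and the ops_complete_reconstruct
	 * callback; earlier members just chain their xor into 'tx', each
	 * using its own scribble page selected by 'j'.
	 */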
1766 	last_stripe = !head_sh->batch_head ||
1767 		list_first_entry(&sh->batch_list,
1768 				 struct stripe_head, batch_list) == head_sh;
1769 	if (last_stripe) {
1770 		flags = ASYNC_TX_ACK |
1771 			(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
1772 
1773 		atomic_inc(&head_sh->count);
1774 		init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh,
1775 				  to_addr_conv(sh, percpu, j));
1776 	} else {
1777 		flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST;
1778 		init_async_submit(&submit, flags, tx, NULL, NULL,
1779 				  to_addr_conv(sh, percpu, j));
1780 	}
1781 
1782 	if (unlikely(count == 1))
1783 		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1784 	else
1785 		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1786 	if (!last_stripe) {
1787 		j++;
1788 		sh = list_first_entry(&sh->batch_list, struct stripe_head,
1789 				      batch_list);
1790 		goto again;
1791 	}
1792 }
1793 
1794 static void
1795 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
1796 		     struct dma_async_tx_descriptor *tx)
1797 {
1798 	struct async_submit_ctl submit;
1799 	struct page **blocks;
1800 	int count, i, j = 0;
1801 	struct stripe_head *head_sh = sh;
1802 	int last_stripe;
1803 	int synflags;
1804 	unsigned long txflags;
1805 
1806 	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
1807 
1808 	for (i = 0; i < sh->disks; i++) {
1809 		if (sh->pd_idx == i || sh->qd_idx == i)
1810 			continue;
1811 		if (!test_bit(R5_Discard, &sh->dev[i].flags))
1812 			break;
1813 	}
1814 	if (i >= sh->disks) {
1815 		atomic_inc(&sh->count);
1816 		set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
1817 		set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
1818 		ops_complete_reconstruct(sh);
1819 		return;
1820 	}
1821 
1822 again:
1823 	blocks = to_addr_page(percpu, j);
1824 
1825 	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1826 		synflags = SYNDROME_SRC_WRITTEN;
1827 		txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST;
1828 	} else {
1829 		synflags = SYNDROME_SRC_ALL;
1830 		txflags = ASYNC_TX_ACK;
1831 	}
1832 
1833 	count = set_syndrome_sources(blocks, sh, synflags);
1834 	last_stripe = !head_sh->batch_head ||
1835 		list_first_entry(&sh->batch_list,
1836 				 struct stripe_head, batch_list) == head_sh;
1837 
1838 	if (last_stripe) {
1839 		atomic_inc(&head_sh->count);
1840 		init_async_submit(&submit, txflags, tx, ops_complete_reconstruct,
1841 				  head_sh, to_addr_conv(sh, percpu, j));
1842 	} else
1843 		init_async_submit(&submit, 0, tx, NULL, NULL,
1844 				  to_addr_conv(sh, percpu, j));
1845 	tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
1846 	if (!last_stripe) {
1847 		j++;
1848 		sh = list_first_entry(&sh->batch_list, struct stripe_head,
1849 				      batch_list);
1850 		goto again;
1851 	}
1852 }
1853 
1854 static void ops_complete_check(void *stripe_head_ref)
1855 {
1856 	struct stripe_head *sh = stripe_head_ref;
1857 
1858 	pr_debug("%s: stripe %llu\n", __func__,
1859 		(unsigned long long)sh->sector);
1860 
1861 	sh->check_state = check_state_check_result;
1862 	set_bit(STRIPE_HANDLE, &sh->state);
1863 	raid5_release_stripe(sh);
1864 }
1865 
1866 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
1867 {
1868 	int disks = sh->disks;
1869 	int pd_idx = sh->pd_idx;
1870 	int qd_idx = sh->qd_idx;
1871 	struct page *xor_dest;
1872 	struct page **xor_srcs = to_addr_page(percpu, 0);
1873 	struct dma_async_tx_descriptor *tx;
1874 	struct async_submit_ctl submit;
1875 	int count;
1876 	int i;
1877 
1878 	pr_debug("%s: stripe %llu\n", __func__,
1879 		(unsigned long long)sh->sector);
1880 
1881 	BUG_ON(sh->batch_head);
1882 	count = 0;
1883 	xor_dest = sh->dev[pd_idx].page;
1884 	xor_srcs[count++] = xor_dest;
1885 	for (i = disks; i--; ) {
1886 		if (i == pd_idx || i == qd_idx)
1887 			continue;
1888 		xor_srcs[count++] = sh->dev[i].page;
1889 	}
1890 
1891 	init_async_submit(&submit, 0, NULL, NULL, NULL,
1892 			  to_addr_conv(sh, percpu, 0));
1893 	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
1894 			   &sh->ops.zero_sum_result, &submit);
1895 
1896 	atomic_inc(&sh->count);
1897 	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
1898 	tx = async_trigger_callback(&submit);
1899 }
1900 
1901 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
1902 {
1903 	struct page **srcs = to_addr_page(percpu, 0);
1904 	struct async_submit_ctl submit;
1905 	int count;
1906 
1907 	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
1908 		(unsigned long long)sh->sector, checkp);
1909 
1910 	BUG_ON(sh->batch_head);
1911 	count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL);
1912 	if (!checkp)
1913 		srcs[count] = NULL;
1914 
1915 	atomic_inc(&sh->count);
1916 	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
1917 			  sh, to_addr_conv(sh, percpu, 0));
1918 	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
1919 			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
1920 }
1921 
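/*
 * raid_run_ops - run the asynchronous operations requested for this stripe.
 *
 * The stages below are chained through 'tx' in a fixed order.  As an
 * illustration (assuming a typical read-modify-write of a single data block
 * on RAID5): prexor xors the old data out of the parity block, biodrain
 * copies the new data out of the queued bios, and reconstruct xors the new
 * data back into parity, with ops_complete_reconstruct run once the chain
 * completes.
 */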
1922 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1923 {
1924 	int overlap_clear = 0, i, disks = sh->disks;
1925 	struct dma_async_tx_descriptor *tx = NULL;
1926 	struct r5conf *conf = sh->raid_conf;
1927 	int level = conf->level;
1928 	struct raid5_percpu *percpu;
1929 	unsigned long cpu;
1930 
1931 	cpu = get_cpu();
1932 	percpu = per_cpu_ptr(conf->percpu, cpu);
1933 	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
1934 		ops_run_biofill(sh);
1935 		overlap_clear++;
1936 	}
1937 
1938 	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
1939 		if (level < 6)
1940 			tx = ops_run_compute5(sh, percpu);
1941 		else {
1942 			if (sh->ops.target2 < 0 || sh->ops.target < 0)
1943 				tx = ops_run_compute6_1(sh, percpu);
1944 			else
1945 				tx = ops_run_compute6_2(sh, percpu);
1946 		}
1947 		/* terminate the chain if reconstruct is not set to be run */
1948 		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
1949 			async_tx_ack(tx);
1950 	}
1951 
1952 	if (test_bit(STRIPE_OP_PREXOR, &ops_request)) {
1953 		if (level < 6)
1954 			tx = ops_run_prexor5(sh, percpu, tx);
1955 		else
1956 			tx = ops_run_prexor6(sh, percpu, tx);
1957 	}
1958 
1959 	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
1960 		tx = ops_run_biodrain(sh, tx);
1961 		overlap_clear++;
1962 	}
1963 
1964 	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
1965 		if (level < 6)
1966 			ops_run_reconstruct5(sh, percpu, tx);
1967 		else
1968 			ops_run_reconstruct6(sh, percpu, tx);
1969 	}
1970 
1971 	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
1972 		if (sh->check_state == check_state_run)
1973 			ops_run_check_p(sh, percpu);
1974 		else if (sh->check_state == check_state_run_q)
1975 			ops_run_check_pq(sh, percpu, 0);
1976 		else if (sh->check_state == check_state_run_pq)
1977 			ops_run_check_pq(sh, percpu, 1);
1978 		else
1979 			BUG();
1980 	}
1981 
1982 	if (overlap_clear && !sh->batch_head)
1983 		for (i = disks; i--; ) {
1984 			struct r5dev *dev = &sh->dev[i];
1985 			if (test_and_clear_bit(R5_Overlap, &dev->flags))
1986 				wake_up(&sh->raid_conf->wait_for_overlap);
1987 		}
1988 	put_cpu();
1989 }
1990 
1991 static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
1992 	int disks)
1993 {
1994 	struct stripe_head *sh;
1995 	int i;
1996 
1997 	sh = kmem_cache_zalloc(sc, gfp);
1998 	if (sh) {
1999 		spin_lock_init(&sh->stripe_lock);
2000 		spin_lock_init(&sh->batch_lock);
2001 		INIT_LIST_HEAD(&sh->batch_list);
2002 		INIT_LIST_HEAD(&sh->lru);
2003 		atomic_set(&sh->count, 1);
2004 		for (i = 0; i < disks; i++) {
2005 			struct r5dev *dev = &sh->dev[i];
2006 
2007 			bio_init(&dev->req);
2008 			dev->req.bi_io_vec = &dev->vec;
2009 			dev->req.bi_max_vecs = 1;
2010 
2011 			bio_init(&dev->rreq);
2012 			dev->rreq.bi_io_vec = &dev->rvec;
2013 			dev->rreq.bi_max_vecs = 1;
2014 		}
2015 	}
2016 	return sh;
2017 }
2018 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
2019 {
2020 	struct stripe_head *sh;
2021 
2022 	sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size);
2023 	if (!sh)
2024 		return 0;
2025 
2026 	sh->raid_conf = conf;
2027 
2028 	if (grow_buffers(sh, gfp)) {
2029 		shrink_buffers(sh);
2030 		kmem_cache_free(conf->slab_cache, sh);
2031 		return 0;
2032 	}
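	/*
	 * New stripes are spread round-robin across the NR_STRIPE_HASH_LOCKS
	 * inactive lists; drop_one_stripe() below releases them in the
	 * reverse order, so growth and shrinkage stay balanced across the
	 * hash buckets.
	 */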
2033 	sh->hash_lock_index =
2034 		conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
2035 	/* we just created an active stripe so... */
2036 	atomic_inc(&conf->active_stripes);
2037 
2038 	raid5_release_stripe(sh);
2039 	conf->max_nr_stripes++;
2040 	return 1;
2041 }
2042 
2043 static int grow_stripes(struct r5conf *conf, int num)
2044 {
2045 	struct kmem_cache *sc;
2046 	int devs = max(conf->raid_disks, conf->previous_raid_disks);
2047 
2048 	if (conf->mddev->gendisk)
2049 		sprintf(conf->cache_name[0],
2050 			"raid%d-%s", conf->level, mdname(conf->mddev));
2051 	else
2052 		sprintf(conf->cache_name[0],
2053 			"raid%d-%p", conf->level, conf->mddev);
2054 	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
2055 
2056 	conf->active_name = 0;
2057 	sc = kmem_cache_create(conf->cache_name[conf->active_name],
2058 			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
2059 			       0, 0, NULL);
2060 	if (!sc)
2061 		return 1;
2062 	conf->slab_cache = sc;
2063 	conf->pool_size = devs;
2064 	while (num--)
2065 		if (!grow_one_stripe(conf, GFP_KERNEL))
2066 			return 1;
2067 
2068 	return 0;
2069 }
2070 
2071 /**
2072  * scribble_alloc - allocate the required scribble region
2073  * @num - total number of disks in the array
2074  *
2075  * The size must be enough to contain:
2076  * 1/ a struct page pointer for each device in the array +2
2077  * 2/ room to convert each entry in (1) to its corresponding dma
2078  *    (dma_map_page()) or page (page_address()) address.
2079  *
2080  * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
2081  * calculate over all devices (not just the data blocks), using zeros in place
2082  * of the P and Q blocks.
2083  */
2084 static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
2085 {
2086 	struct flex_array *ret;
2087 	size_t len;
2088 
2089 	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
2090 	ret = flex_array_alloc(len, cnt, flags);
2091 	if (!ret)
2092 		return NULL;
2093 	/* always prealloc all elements, so no locking is required */
2094 	if (flex_array_prealloc(ret, 0, cnt, flags)) {
2095 		flex_array_free(ret);
2096 		return NULL;
2097 	}
2098 	return ret;
2099 }
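/*
 * Size illustration (assuming a 64-bit build where both struct page * and
 * addr_conv_t are 8 bytes): for an 8-device array each flex_array element is
 *	len = 8 * (8 + 2) + 8 * (8 + 2) = 160 bytes,
 * and 'cnt' such elements are preallocated above so no allocation (and hence
 * no flex_array locking) is needed when the scribble space is used later.
 */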
2100 
2101 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
2102 {
2103 	unsigned long cpu;
2104 	int err = 0;
2105 
2106 	/*
2107 	 * Never shrink. And mddev_suspend() could deadlock if this is called
2108 	 * from raid5d. In that case, scribble_disks and scribble_sectors
2109 	 * should already equal new_disks and new_sectors
2110 	 */
2111 	if (conf->scribble_disks >= new_disks &&
2112 	    conf->scribble_sectors >= new_sectors)
2113 		return 0;
2114 	mddev_suspend(conf->mddev);
2115 	get_online_cpus();
2116 	for_each_present_cpu(cpu) {
2117 		struct raid5_percpu *percpu;
2118 		struct flex_array *scribble;
2119 
2120 		percpu = per_cpu_ptr(conf->percpu, cpu);
2121 		scribble = scribble_alloc(new_disks,
2122 					  new_sectors / STRIPE_SECTORS,
2123 					  GFP_NOIO);
2124 
2125 		if (scribble) {
2126 			flex_array_free(percpu->scribble);
2127 			percpu->scribble = scribble;
2128 		} else {
2129 			err = -ENOMEM;
2130 			break;
2131 		}
2132 	}
2133 	put_online_cpus();
2134 	mddev_resume(conf->mddev);
2135 	if (!err) {
2136 		conf->scribble_disks = new_disks;
2137 		conf->scribble_sectors = new_sectors;
2138 	}
2139 	return err;
2140 }
2141 
2142 static int resize_stripes(struct r5conf *conf, int newsize)
2143 {
2144 	/* Make all the stripes able to hold 'newsize' devices.
2145 	 * New slots in each stripe get 'page' set to a new page.
2146 	 *
2147 	 * This happens in stages:
2148 	 * 1/ create a new kmem_cache and allocate the required number of
2149 	 *    stripe_heads.
2150 	 * 2/ gather all the old stripe_heads and transfer the pages across
2151 	 *    to the new stripe_heads.  This will have the side effect of
2152 	 *    freezing the array as once all stripe_heads have been collected,
2153 	 *    no IO will be possible.  Old stripe heads are freed once their
2154 	 *    pages have been transferred over, and the old kmem_cache is
2155 	 *    freed when all stripes are done.
2156 	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
2157 	 *    we simply return a failure status - no need to clean anything up.
2158 	 * 4/ allocate new pages for the new slots in the new stripe_heads.
2159 	 *    If this fails, we don't bother trying to shrink the
2160 	 *    stripe_heads down again, we just leave them as they are.
2161 	 *    As each stripe_head is processed the new one is released into
2162 	 *    active service.
2163 	 *
2164 	 * Once step2 is started, we cannot afford to wait for a write,
2165 	 * so we use GFP_NOIO allocations.
2166 	 */
2167 	struct stripe_head *osh, *nsh;
2168 	LIST_HEAD(newstripes);
2169 	struct disk_info *ndisks;
2170 	int err;
2171 	struct kmem_cache *sc;
2172 	int i;
2173 	int hash, cnt;
2174 
2175 	if (newsize <= conf->pool_size)
2176 		return 0; /* never bother to shrink */
2177 
2178 	err = md_allow_write(conf->mddev);
2179 	if (err)
2180 		return err;
2181 
2182 	/* Step 1 */
2183 	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
2184 			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
2185 			       0, 0, NULL);
2186 	if (!sc)
2187 		return -ENOMEM;
2188 
2189 	/* Need to ensure auto-resizing doesn't interfere */
2190 	mutex_lock(&conf->cache_size_mutex);
2191 
2192 	for (i = conf->max_nr_stripes; i; i--) {
2193 		nsh = alloc_stripe(sc, GFP_KERNEL, newsize);
2194 		if (!nsh)
2195 			break;
2196 
2197 		nsh->raid_conf = conf;
2198 		list_add(&nsh->lru, &newstripes);
2199 	}
2200 	if (i) {
2201 		/* didn't get enough, give up */
2202 		while (!list_empty(&newstripes)) {
2203 			nsh = list_entry(newstripes.next, struct stripe_head, lru);
2204 			list_del(&nsh->lru);
2205 			kmem_cache_free(sc, nsh);
2206 		}
2207 		kmem_cache_destroy(sc);
2208 		mutex_unlock(&conf->cache_size_mutex);
2209 		return -ENOMEM;
2210 	}
2211 	/* Step 2 - Must use GFP_NOIO now.
2212 	 * OK, we have enough stripes, start collecting inactive
2213 	 * stripes and copying them over
2214 	 */
2215 	hash = 0;
2216 	cnt = 0;
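	/*
	 * Distribute the resized stripes evenly across the hash buckets: each
	 * bucket gets max_nr_stripes / NR_STRIPE_HASH_LOCKS stripes, and the
	 * first (max_nr_stripes % NR_STRIPE_HASH_LOCKS) buckets take one
	 * extra.  For example (assuming NR_STRIPE_HASH_LOCKS == 8), with 20
	 * stripes buckets 0-3 hold 3 stripes each and buckets 4-7 hold 2.
	 */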
2217 	list_for_each_entry(nsh, &newstripes, lru) {
2218 		lock_device_hash_lock(conf, hash);
2219 		wait_event_cmd(conf->wait_for_stripe,
2220 				    !list_empty(conf->inactive_list + hash),
2221 				    unlock_device_hash_lock(conf, hash),
2222 				    lock_device_hash_lock(conf, hash));
2223 		osh = get_free_stripe(conf, hash);
2224 		unlock_device_hash_lock(conf, hash);
2225 
2226 		for(i=0; i<conf->pool_size; i++) {
2227 			nsh->dev[i].page = osh->dev[i].page;
2228 			nsh->dev[i].orig_page = osh->dev[i].page;
2229 		}
2230 		nsh->hash_lock_index = hash;
2231 		kmem_cache_free(conf->slab_cache, osh);
2232 		cnt++;
2233 		if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
2234 		    !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
2235 			hash++;
2236 			cnt = 0;
2237 		}
2238 	}
2239 	kmem_cache_destroy(conf->slab_cache);
2240 
2241 	/* Step 3.
2242 	 * At this point, we are holding all the stripes so the array
2243 	 * is completely stalled, so now is a good time to resize
2244 	 * conf->disks and the scribble region
2245 	 */
2246 	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
2247 	if (ndisks) {
2248 		for (i=0; i<conf->raid_disks; i++)
2249 			ndisks[i] = conf->disks[i];
2250 		kfree(conf->disks);
2251 		conf->disks = ndisks;
2252 	} else
2253 		err = -ENOMEM;
2254 
2255 	mutex_unlock(&conf->cache_size_mutex);
2256 	/* Step 4, return new stripes to service */
2257 	while(!list_empty(&newstripes)) {
2258 		nsh = list_entry(newstripes.next, struct stripe_head, lru);
2259 		list_del_init(&nsh->lru);
2260 
2261 		for (i=conf->raid_disks; i < newsize; i++)
2262 			if (nsh->dev[i].page == NULL) {
2263 				struct page *p = alloc_page(GFP_NOIO);
2264 				nsh->dev[i].page = p;
2265 				nsh->dev[i].orig_page = p;
2266 				if (!p)
2267 					err = -ENOMEM;
2268 			}
2269 		raid5_release_stripe(nsh);
2270 	}
2271 	/* critical section passed, GFP_NOIO no longer needed */
2272 
2273 	conf->slab_cache = sc;
2274 	conf->active_name = 1-conf->active_name;
2275 	if (!err)
2276 		conf->pool_size = newsize;
2277 	return err;
2278 }
2279 
2280 static int drop_one_stripe(struct r5conf *conf)
2281 {
2282 	struct stripe_head *sh;
2283 	int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
2284 
2285 	spin_lock_irq(conf->hash_locks + hash);
2286 	sh = get_free_stripe(conf, hash);
2287 	spin_unlock_irq(conf->hash_locks + hash);
2288 	if (!sh)
2289 		return 0;
2290 	BUG_ON(atomic_read(&sh->count));
2291 	shrink_buffers(sh);
2292 	kmem_cache_free(conf->slab_cache, sh);
2293 	atomic_dec(&conf->active_stripes);
2294 	conf->max_nr_stripes--;
2295 	return 1;
2296 }
2297 
2298 static void shrink_stripes(struct r5conf *conf)
2299 {
2300 	while (conf->max_nr_stripes &&
2301 	       drop_one_stripe(conf))
2302 		;
2303 
2304 	kmem_cache_destroy(conf->slab_cache);
2305 	conf->slab_cache = NULL;
2306 }
2307 
2308 static void raid5_end_read_request(struct bio * bi)
2309 {
2310 	struct stripe_head *sh = bi->bi_private;
2311 	struct r5conf *conf = sh->raid_conf;
2312 	int disks = sh->disks, i;
2313 	char b[BDEVNAME_SIZE];
2314 	struct md_rdev *rdev = NULL;
2315 	sector_t s;
2316 
2317 	for (i=0 ; i<disks; i++)
2318 		if (bi == &sh->dev[i].req)
2319 			break;
2320 
2321 	pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
2322 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
2323 		bi->bi_error);
2324 	if (i == disks) {
2325 		bio_reset(bi);
2326 		BUG();
2327 		return;
2328 	}
2329 	if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
2330 		/* If replacement finished while this request was outstanding,
2331 		 * 'replacement' might be NULL already.
2332 		 * In that case it moved down to 'rdev'.
2333 		 * rdev is not removed until all requests are finished.
2334 		 */
2335 		rdev = conf->disks[i].replacement;
2336 	if (!rdev)
2337 		rdev = conf->disks[i].rdev;
2338 
2339 	if (use_new_offset(conf, sh))
2340 		s = sh->sector + rdev->new_data_offset;
2341 	else
2342 		s = sh->sector + rdev->data_offset;
2343 	if (!bi->bi_error) {
2344 		set_bit(R5_UPTODATE, &sh->dev[i].flags);
2345 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2346 			/* Note that this cannot happen on a
2347 			 * replacement device.  We just fail those on
2348 			 * any error
2349 			 */
2350 			printk_ratelimited(
2351 				KERN_INFO
2352 				"md/raid:%s: read error corrected"
2353 				" (%lu sectors at %llu on %s)\n",
2354 				mdname(conf->mddev), STRIPE_SECTORS,
2355 				(unsigned long long)s,
2356 				bdevname(rdev->bdev, b));
2357 			atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
2358 			clear_bit(R5_ReadError, &sh->dev[i].flags);
2359 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
2360 		} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2361 			clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2362 
2363 		if (atomic_read(&rdev->read_errors))
2364 			atomic_set(&rdev->read_errors, 0);
2365 	} else {
2366 		const char *bdn = bdevname(rdev->bdev, b);
2367 		int retry = 0;
2368 		int set_bad = 0;
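		/*
		 * Decide between retrying the read and giving up on the
		 * block/device: 'retry' requests another attempt (first with
		 * bio merging disabled via R5_ReadNoMerge), while 'set_bad'
		 * marks the block as a candidate for rdev_set_badblocks()
		 * below instead of failing the whole device.
		 */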
2369 
2370 		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
2371 		atomic_inc(&rdev->read_errors);
2372 		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
2373 			printk_ratelimited(
2374 				KERN_WARNING
2375 				"md/raid:%s: read error on replacement device "
2376 				"(sector %llu on %s).\n",
2377 				mdname(conf->mddev),
2378 				(unsigned long long)s,
2379 				bdn);
2380 		else if (conf->mddev->degraded >= conf->max_degraded) {
2381 			set_bad = 1;
2382 			printk_ratelimited(
2383 				KERN_WARNING
2384 				"md/raid:%s: read error not correctable "
2385 				"(sector %llu on %s).\n",
2386 				mdname(conf->mddev),
2387 				(unsigned long long)s,
2388 				bdn);
2389 		} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
2390 			/* Oh, no!!! */
2391 			set_bad = 1;
2392 			printk_ratelimited(
2393 				KERN_WARNING
2394 				"md/raid:%s: read error NOT corrected!! "
2395 				"(sector %llu on %s).\n",
2396 				mdname(conf->mddev),
2397 				(unsigned long long)s,
2398 				bdn);
2399 		} else if (atomic_read(&rdev->read_errors)
2400 			 > conf->max_nr_stripes)
2401 			printk(KERN_WARNING
2402 			       "md/raid:%s: Too many read errors, failing device %s.\n",
2403 			       mdname(conf->mddev), bdn);
2404 		else
2405 			retry = 1;
2406 		if (set_bad && test_bit(In_sync, &rdev->flags)
2407 		    && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2408 			retry = 1;
2409 		if (retry)
2410 			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
2411 				set_bit(R5_ReadError, &sh->dev[i].flags);
2412 				clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2413 			} else
2414 				set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2415 		else {
2416 			clear_bit(R5_ReadError, &sh->dev[i].flags);
2417 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
2418 			if (!(set_bad
2419 			      && test_bit(In_sync, &rdev->flags)
2420 			      && rdev_set_badblocks(
2421 				      rdev, sh->sector, STRIPE_SECTORS, 0)))
2422 				md_error(conf->mddev, rdev);
2423 		}
2424 	}
2425 	rdev_dec_pending(rdev, conf->mddev);
2426 	bio_reset(bi);
2427 	clear_bit(R5_LOCKED, &sh->dev[i].flags);
2428 	set_bit(STRIPE_HANDLE, &sh->state);
2429 	raid5_release_stripe(sh);
2430 }
2431 
2432 static void raid5_end_write_request(struct bio *bi)
2433 {
2434 	struct stripe_head *sh = bi->bi_private;
2435 	struct r5conf *conf = sh->raid_conf;
2436 	int disks = sh->disks, i;
2437 	struct md_rdev *uninitialized_var(rdev);
2438 	sector_t first_bad;
2439 	int bad_sectors;
2440 	int replacement = 0;
2441 
2442 	for (i = 0 ; i < disks; i++) {
2443 		if (bi == &sh->dev[i].req) {
2444 			rdev = conf->disks[i].rdev;
2445 			break;
2446 		}
2447 		if (bi == &sh->dev[i].rreq) {
2448 			rdev = conf->disks[i].replacement;
2449 			if (rdev)
2450 				replacement = 1;
2451 			else
2452 				/* rdev was removed and 'replacement'
2453 				 * replaced it.  rdev is not removed
2454 				 * until all requests are finished.
2455 				 */
2456 				rdev = conf->disks[i].rdev;
2457 			break;
2458 		}
2459 	}
2460 	pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
2461 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
2462 		bi->bi_error);
2463 	if (i == disks) {
2464 		bio_reset(bi);
2465 		BUG();
2466 		return;
2467 	}
2468 
2469 	if (replacement) {
2470 		if (bi->bi_error)
2471 			md_error(conf->mddev, rdev);
2472 		else if (is_badblock(rdev, sh->sector,
2473 				     STRIPE_SECTORS,
2474 				     &first_bad, &bad_sectors))
2475 			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
2476 	} else {
2477 		if (bi->bi_error) {
2478 			set_bit(STRIPE_DEGRADED, &sh->state);
2479 			set_bit(WriteErrorSeen, &rdev->flags);
2480 			set_bit(R5_WriteError, &sh->dev[i].flags);
2481 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
2482 				set_bit(MD_RECOVERY_NEEDED,
2483 					&rdev->mddev->recovery);
2484 		} else if (is_badblock(rdev, sh->sector,
2485 				       STRIPE_SECTORS,
2486 				       &first_bad, &bad_sectors)) {
2487 			set_bit(R5_MadeGood, &sh->dev[i].flags);
2488 			if (test_bit(R5_ReadError, &sh->dev[i].flags))
2489 				/* That was a successful write so make
2490 				 * sure it looks like we already did
2491 				 * a re-write.
2492 				 */
2493 				set_bit(R5_ReWrite, &sh->dev[i].flags);
2494 		}
2495 	}
2496 	rdev_dec_pending(rdev, conf->mddev);
2497 
2498 	if (sh->batch_head && bi->bi_error && !replacement)
2499 		set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
2500 
2501 	bio_reset(bi);
2502 	if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
2503 		clear_bit(R5_LOCKED, &sh->dev[i].flags);
2504 	set_bit(STRIPE_HANDLE, &sh->state);
2505 	raid5_release_stripe(sh);
2506 
2507 	if (sh->batch_head && sh != sh->batch_head)
2508 		raid5_release_stripe(sh->batch_head);
2509 }
2510 
2511 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
2512 {
2513 	struct r5dev *dev = &sh->dev[i];
2514 
2515 	dev->flags = 0;
2516 	dev->sector = raid5_compute_blocknr(sh, i, previous);
2517 }
2518 
2519 static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2520 {
2521 	char b[BDEVNAME_SIZE];
2522 	struct r5conf *conf = mddev->private;
2523 	unsigned long flags;
2524 	pr_debug("raid456: error called\n");
2525 
2526 	spin_lock_irqsave(&conf->device_lock, flags);
2527 	clear_bit(In_sync, &rdev->flags);
2528 	mddev->degraded = calc_degraded(conf);
2529 	spin_unlock_irqrestore(&conf->device_lock, flags);
2530 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2531 
2532 	set_bit(Blocked, &rdev->flags);
2533 	set_bit(Faulty, &rdev->flags);
2534 	set_mask_bits(&mddev->flags, 0,
2535 		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
2536 	printk(KERN_ALERT
2537 	       "md/raid:%s: Disk failure on %s, disabling device.\n"
2538 	       "md/raid:%s: Operation continuing on %d devices.\n",
2539 	       mdname(mddev),
2540 	       bdevname(rdev->bdev, b),
2541 	       mdname(mddev),
2542 	       conf->raid_disks - mddev->degraded);
2543 }
2544 
2545 /*
2546  * Input: a 'big' sector number,
2547  * Output: index of the data and parity disk, and the sector # in them.
2548  */
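/*
 * Worked example (illustrative numbers only): with raid_disks = 4,
 * max_degraded = 1, sectors_per_chunk = 8 and ALGORITHM_LEFT_SYMMETRIC,
 * logical sector 100 maps as follows:
 *	chunk_offset = 100 % 8 = 4,   chunk_number = 100 / 8 = 12
 *	dd_idx (pre-rotation) = 12 % 3 = 0,   stripe = 12 / 3 = 4
 *	pd_idx = 3 - (4 % 4) = 3,   dd_idx = (3 + 1 + 0) % 4 = 0
 *	new_sector = 4 * 8 + 4 = 36
 */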
2549 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
2550 			      int previous, int *dd_idx,
2551 			      struct stripe_head *sh)
2552 {
2553 	sector_t stripe, stripe2;
2554 	sector_t chunk_number;
2555 	unsigned int chunk_offset;
2556 	int pd_idx, qd_idx;
2557 	int ddf_layout = 0;
2558 	sector_t new_sector;
2559 	int algorithm = previous ? conf->prev_algo
2560 				 : conf->algorithm;
2561 	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
2562 					 : conf->chunk_sectors;
2563 	int raid_disks = previous ? conf->previous_raid_disks
2564 				  : conf->raid_disks;
2565 	int data_disks = raid_disks - conf->max_degraded;
2566 
2567 	/* First compute the information on this sector */
2568 
2569 	/*
2570 	 * Compute the chunk number and the sector offset inside the chunk
2571 	 */
2572 	chunk_offset = sector_div(r_sector, sectors_per_chunk);
2573 	chunk_number = r_sector;
2574 
2575 	/*
2576 	 * Compute the stripe number
2577 	 */
2578 	stripe = chunk_number;
2579 	*dd_idx = sector_div(stripe, data_disks);
2580 	stripe2 = stripe;
2581 	/*
2582 	 * Select the parity disk based on the user selected algorithm.
2583 	 */
2584 	pd_idx = qd_idx = -1;
2585 	switch(conf->level) {
2586 	case 4:
2587 		pd_idx = data_disks;
2588 		break;
2589 	case 5:
2590 		switch (algorithm) {
2591 		case ALGORITHM_LEFT_ASYMMETRIC:
2592 			pd_idx = data_disks - sector_div(stripe2, raid_disks);
2593 			if (*dd_idx >= pd_idx)
2594 				(*dd_idx)++;
2595 			break;
2596 		case ALGORITHM_RIGHT_ASYMMETRIC:
2597 			pd_idx = sector_div(stripe2, raid_disks);
2598 			if (*dd_idx >= pd_idx)
2599 				(*dd_idx)++;
2600 			break;
2601 		case ALGORITHM_LEFT_SYMMETRIC:
2602 			pd_idx = data_disks - sector_div(stripe2, raid_disks);
2603 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2604 			break;
2605 		case ALGORITHM_RIGHT_SYMMETRIC:
2606 			pd_idx = sector_div(stripe2, raid_disks);
2607 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2608 			break;
2609 		case ALGORITHM_PARITY_0:
2610 			pd_idx = 0;
2611 			(*dd_idx)++;
2612 			break;
2613 		case ALGORITHM_PARITY_N:
2614 			pd_idx = data_disks;
2615 			break;
2616 		default:
2617 			BUG();
2618 		}
2619 		break;
2620 	case 6:
2621 
2622 		switch (algorithm) {
2623 		case ALGORITHM_LEFT_ASYMMETRIC:
2624 			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2625 			qd_idx = pd_idx + 1;
2626 			if (pd_idx == raid_disks-1) {
2627 				(*dd_idx)++;	/* Q D D D P */
2628 				qd_idx = 0;
2629 			} else if (*dd_idx >= pd_idx)
2630 				(*dd_idx) += 2; /* D D P Q D */
2631 			break;
2632 		case ALGORITHM_RIGHT_ASYMMETRIC:
2633 			pd_idx = sector_div(stripe2, raid_disks);
2634 			qd_idx = pd_idx + 1;
2635 			if (pd_idx == raid_disks-1) {
2636 				(*dd_idx)++;	/* Q D D D P */
2637 				qd_idx = 0;
2638 			} else if (*dd_idx >= pd_idx)
2639 				(*dd_idx) += 2; /* D D P Q D */
2640 			break;
2641 		case ALGORITHM_LEFT_SYMMETRIC:
2642 			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2643 			qd_idx = (pd_idx + 1) % raid_disks;
2644 			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
2645 			break;
2646 		case ALGORITHM_RIGHT_SYMMETRIC:
2647 			pd_idx = sector_div(stripe2, raid_disks);
2648 			qd_idx = (pd_idx + 1) % raid_disks;
2649 			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
2650 			break;
2651 
2652 		case ALGORITHM_PARITY_0:
2653 			pd_idx = 0;
2654 			qd_idx = 1;
2655 			(*dd_idx) += 2;
2656 			break;
2657 		case ALGORITHM_PARITY_N:
2658 			pd_idx = data_disks;
2659 			qd_idx = data_disks + 1;
2660 			break;
2661 
2662 		case ALGORITHM_ROTATING_ZERO_RESTART:
2663 			/* Exactly the same as RIGHT_ASYMMETRIC, but the
2664 			 * order of blocks for computing Q is different.
2665 			 */
2666 			pd_idx = sector_div(stripe2, raid_disks);
2667 			qd_idx = pd_idx + 1;
2668 			if (pd_idx == raid_disks-1) {
2669 				(*dd_idx)++;	/* Q D D D P */
2670 				qd_idx = 0;
2671 			} else if (*dd_idx >= pd_idx)
2672 				(*dd_idx) += 2; /* D D P Q D */
2673 			ddf_layout = 1;
2674 			break;
2675 
2676 		case ALGORITHM_ROTATING_N_RESTART:
2677 			/* Same as left_asymmetric, but the first stripe is
2678 			 * D D D P Q  rather than
2679 			 * Q D D D P
2680 			 */
2681 			stripe2 += 1;
2682 			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2683 			qd_idx = pd_idx + 1;
2684 			if (pd_idx == raid_disks-1) {
2685 				(*dd_idx)++;	/* Q D D D P */
2686 				qd_idx = 0;
2687 			} else if (*dd_idx >= pd_idx)
2688 				(*dd_idx) += 2; /* D D P Q D */
2689 			ddf_layout = 1;
2690 			break;
2691 
2692 		case ALGORITHM_ROTATING_N_CONTINUE:
2693 			/* Same as left_symmetric but Q is before P */
2694 			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2695 			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
2696 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2697 			ddf_layout = 1;
2698 			break;
2699 
2700 		case ALGORITHM_LEFT_ASYMMETRIC_6:
2701 			/* RAID5 left_asymmetric, with Q on last device */
2702 			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
2703 			if (*dd_idx >= pd_idx)
2704 				(*dd_idx)++;
2705 			qd_idx = raid_disks - 1;
2706 			break;
2707 
2708 		case ALGORITHM_RIGHT_ASYMMETRIC_6:
2709 			pd_idx = sector_div(stripe2, raid_disks-1);
2710 			if (*dd_idx >= pd_idx)
2711 				(*dd_idx)++;
2712 			qd_idx = raid_disks - 1;
2713 			break;
2714 
2715 		case ALGORITHM_LEFT_SYMMETRIC_6:
2716 			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
2717 			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
2718 			qd_idx = raid_disks - 1;
2719 			break;
2720 
2721 		case ALGORITHM_RIGHT_SYMMETRIC_6:
2722 			pd_idx = sector_div(stripe2, raid_disks-1);
2723 			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
2724 			qd_idx = raid_disks - 1;
2725 			break;
2726 
2727 		case ALGORITHM_PARITY_0_6:
2728 			pd_idx = 0;
2729 			(*dd_idx)++;
2730 			qd_idx = raid_disks - 1;
2731 			break;
2732 
2733 		default:
2734 			BUG();
2735 		}
2736 		break;
2737 	}
2738 
2739 	if (sh) {
2740 		sh->pd_idx = pd_idx;
2741 		sh->qd_idx = qd_idx;
2742 		sh->ddf_layout = ddf_layout;
2743 	}
2744 	/*
2745 	 * Finally, compute the new sector number
2746 	 */
2747 	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
2748 	return new_sector;
2749 }
2750 
2751 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
2752 {
2753 	struct r5conf *conf = sh->raid_conf;
2754 	int raid_disks = sh->disks;
2755 	int data_disks = raid_disks - conf->max_degraded;
2756 	sector_t new_sector = sh->sector, check;
2757 	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
2758 					 : conf->chunk_sectors;
2759 	int algorithm = previous ? conf->prev_algo
2760 				 : conf->algorithm;
2761 	sector_t stripe;
2762 	int chunk_offset;
2763 	sector_t chunk_number;
2764 	int dummy1, dd_idx = i;
2765 	sector_t r_sector;
2766 	struct stripe_head sh2;
2767 
2768 	chunk_offset = sector_div(new_sector, sectors_per_chunk);
2769 	stripe = new_sector;
2770 
2771 	if (i == sh->pd_idx)
2772 		return 0;
2773 	switch(conf->level) {
2774 	case 4: break;
2775 	case 5:
2776 		switch (algorithm) {
2777 		case ALGORITHM_LEFT_ASYMMETRIC:
2778 		case ALGORITHM_RIGHT_ASYMMETRIC:
2779 			if (i > sh->pd_idx)
2780 				i--;
2781 			break;
2782 		case ALGORITHM_LEFT_SYMMETRIC:
2783 		case ALGORITHM_RIGHT_SYMMETRIC:
2784 			if (i < sh->pd_idx)
2785 				i += raid_disks;
2786 			i -= (sh->pd_idx + 1);
2787 			break;
2788 		case ALGORITHM_PARITY_0:
2789 			i -= 1;
2790 			break;
2791 		case ALGORITHM_PARITY_N:
2792 			break;
2793 		default:
2794 			BUG();
2795 		}
2796 		break;
2797 	case 6:
2798 		if (i == sh->qd_idx)
2799 			return 0; /* It is the Q disk */
2800 		switch (algorithm) {
2801 		case ALGORITHM_LEFT_ASYMMETRIC:
2802 		case ALGORITHM_RIGHT_ASYMMETRIC:
2803 		case ALGORITHM_ROTATING_ZERO_RESTART:
2804 		case ALGORITHM_ROTATING_N_RESTART:
2805 			if (sh->pd_idx == raid_disks-1)
2806 				i--;	/* Q D D D P */
2807 			else if (i > sh->pd_idx)
2808 				i -= 2; /* D D P Q D */
2809 			break;
2810 		case ALGORITHM_LEFT_SYMMETRIC:
2811 		case ALGORITHM_RIGHT_SYMMETRIC:
2812 			if (sh->pd_idx == raid_disks-1)
2813 				i--; /* Q D D D P */
2814 			else {
2815 				/* D D P Q D */
2816 				if (i < sh->pd_idx)
2817 					i += raid_disks;
2818 				i -= (sh->pd_idx + 2);
2819 			}
2820 			break;
2821 		case ALGORITHM_PARITY_0:
2822 			i -= 2;
2823 			break;
2824 		case ALGORITHM_PARITY_N:
2825 			break;
2826 		case ALGORITHM_ROTATING_N_CONTINUE:
2827 			/* Like left_symmetric, but P is before Q */
2828 			if (sh->pd_idx == 0)
2829 				i--;	/* P D D D Q */
2830 			else {
2831 				/* D D Q P D */
2832 				if (i < sh->pd_idx)
2833 					i += raid_disks;
2834 				i -= (sh->pd_idx + 1);
2835 			}
2836 			break;
2837 		case ALGORITHM_LEFT_ASYMMETRIC_6:
2838 		case ALGORITHM_RIGHT_ASYMMETRIC_6:
2839 			if (i > sh->pd_idx)
2840 				i--;
2841 			break;
2842 		case ALGORITHM_LEFT_SYMMETRIC_6:
2843 		case ALGORITHM_RIGHT_SYMMETRIC_6:
2844 			if (i < sh->pd_idx)
2845 				i += data_disks + 1;
2846 			i -= (sh->pd_idx + 1);
2847 			break;
2848 		case ALGORITHM_PARITY_0_6:
2849 			i -= 1;
2850 			break;
2851 		default:
2852 			BUG();
2853 		}
2854 		break;
2855 	}
2856 
2857 	chunk_number = stripe * data_disks + i;
2858 	r_sector = chunk_number * sectors_per_chunk + chunk_offset;
2859 
2860 	check = raid5_compute_sector(conf, r_sector,
2861 				     previous, &dummy1, &sh2);
2862 	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
2863 		|| sh2.qd_idx != sh->qd_idx) {
2864 		printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
2865 		       mdname(conf->mddev));
2866 		return 0;
2867 	}
2868 	return r_sector;
2869 }
2870 
2871 static void
2872 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2873 			 int rcw, int expand)
2874 {
2875 	int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks;
2876 	struct r5conf *conf = sh->raid_conf;
2877 	int level = conf->level;
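	/*
	 * 'rcw' selects reconstruct-write: the blocks being written are
	 * drained into the stripe and parity is recomputed from all data
	 * blocks.  The alternative path (prexor + drain, i.e.
	 * read-modify-write) xors the old contents of the written blocks out
	 * of parity and the new contents back in.  As a cost illustration, on
	 * an 8-drive RAID5 a 2-block write pre-reads 3 blocks for rmw (2 old
	 * data + parity) but 5 for rcw (the untouched data blocks); the
	 * choice between the two is made by the caller.
	 */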
2878 
2879 	if (rcw) {
2880 
2881 		for (i = disks; i--; ) {
2882 			struct r5dev *dev = &sh->dev[i];
2883 
2884 			if (dev->towrite) {
2885 				set_bit(R5_LOCKED, &dev->flags);
2886 				set_bit(R5_Wantdrain, &dev->flags);
2887 				if (!expand)
2888 					clear_bit(R5_UPTODATE, &dev->flags);
2889 				s->locked++;
2890 			}
2891 		}
2892 		/* if we are not expanding this is a proper write request, and
2893 		 * there will be bios with new data to be drained into the
2894 		 * stripe cache
2895 		 */
2896 		if (!expand) {
2897 			if (!s->locked)
2898 				/* False alarm, nothing to do */
2899 				return;
2900 			sh->reconstruct_state = reconstruct_state_drain_run;
2901 			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2902 		} else
2903 			sh->reconstruct_state = reconstruct_state_run;
2904 
2905 		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2906 
2907 		if (s->locked + conf->max_degraded == disks)
2908 			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
2909 				atomic_inc(&conf->pending_full_writes);
2910 	} else {
2911 		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
2912 			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
2913 		BUG_ON(level == 6 &&
2914 			(!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) ||
2915 			   test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags))));
2916 
2917 		for (i = disks; i--; ) {
2918 			struct r5dev *dev = &sh->dev[i];
2919 			if (i == pd_idx || i == qd_idx)
2920 				continue;
2921 
2922 			if (dev->towrite &&
2923 			    (test_bit(R5_UPTODATE, &dev->flags) ||
2924 			     test_bit(R5_Wantcompute, &dev->flags))) {
2925 				set_bit(R5_Wantdrain, &dev->flags);
2926 				set_bit(R5_LOCKED, &dev->flags);
2927 				clear_bit(R5_UPTODATE, &dev->flags);
2928 				s->locked++;
2929 			}
2930 		}
2931 		if (!s->locked)
2932 			/* False alarm - nothing to do */
2933 			return;
2934 		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
2935 		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
2936 		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2937 		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2938 	}
2939 
2940 	/* keep the parity disk(s) locked while asynchronous operations
2941 	 * are in flight
2942 	 */
2943 	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2944 	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2945 	s->locked++;
2946 
2947 	if (level == 6) {
2948 		int qd_idx = sh->qd_idx;
2949 		struct r5dev *dev = &sh->dev[qd_idx];
2950 
2951 		set_bit(R5_LOCKED, &dev->flags);
2952 		clear_bit(R5_UPTODATE, &dev->flags);
2953 		s->locked++;
2954 	}
2955 
2956 	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2957 		__func__, (unsigned long long)sh->sector,
2958 		s->locked, s->ops_request);
2959 }
2960 
2961 /*
2962  * Each stripe/dev can have one or more bios attached.
2963  * toread/towrite point to the first in a chain.
2964  * The bi_next chain must be in order.
2965  */
2966 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
2967 			  int forwrite, int previous)
2968 {
2969 	struct bio **bip;
2970 	struct r5conf *conf = sh->raid_conf;
2971 	int firstwrite=0;
2972 
2973 	pr_debug("adding bi b#%llu to stripe s#%llu\n",
2974 		(unsigned long long)bi->bi_iter.bi_sector,
2975 		(unsigned long long)sh->sector);
2976 
2977 	/*
2978 	 * If several bios share a stripe, the bio bi_phys_segments acts as a
2979 	 * reference count to avoid races. The reference count should already be
2980 	 * increased before this function is called (for example, in
2981 	 * raid5_make_request()), so other bios sharing this stripe will not free
2982 	 * the stripe. If a stripe is used by only one bio, the stripe lock will
2983 	 * protect it.
2984 	 */
2985 	spin_lock_irq(&sh->stripe_lock);
2986 	/* Don't allow new IO added to stripes in batch list */
2987 	if (sh->batch_head)
2988 		goto overlap;
2989 	if (forwrite) {
2990 		bip = &sh->dev[dd_idx].towrite;
2991 		if (*bip == NULL)
2992 			firstwrite = 1;
2993 	} else
2994 		bip = &sh->dev[dd_idx].toread;
2995 	while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
2996 		if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
2997 			goto overlap;
2998 		bip = & (*bip)->bi_next;
2999 	}
3000 	if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
3001 		goto overlap;
3002 
3003 	if (!forwrite || previous)
3004 		clear_bit(STRIPE_BATCH_READY, &sh->state);
3005 
3006 	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
3007 	if (*bip)
3008 		bi->bi_next = *bip;
3009 	*bip = bi;
3010 	raid5_inc_bi_active_stripes(bi);
3011 
3012 	if (forwrite) {
3013 		/* check if page is covered */
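		/*
		 * Example (assuming 4K pages, so STRIPE_SECTORS == 8): with
		 * dev->sector == 1000, queued writes covering sectors
		 * 1000-1003 and 1004-1007 advance 'sector' to 1008, so the
		 * whole page is overwritten and R5_OVERWRITE is set below.
		 */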
3014 		sector_t sector = sh->dev[dd_idx].sector;
3015 		for (bi=sh->dev[dd_idx].towrite;
3016 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
3017 			     bi && bi->bi_iter.bi_sector <= sector;
3018 		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
3019 			if (bio_end_sector(bi) >= sector)
3020 				sector = bio_end_sector(bi);
3021 		}
3022 		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
3023 			if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags))
3024 				sh->overwrite_disks++;
3025 	}
3026 
3027 	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
3028 		(unsigned long long)(*bip)->bi_iter.bi_sector,
3029 		(unsigned long long)sh->sector, dd_idx);
3030 
3031 	if (conf->mddev->bitmap && firstwrite) {
3032 		/* Cannot hold spinlock over bitmap_startwrite,
3033 		 * but must ensure this isn't added to a batch until
3034 		 * we have added to the bitmap and set bm_seq.
3035 		 * So set STRIPE_BITMAP_PENDING to prevent
3036 		 * batching.
3037 		 * If multiple add_stripe_bio() calls race here they
3038 		 * must all set STRIPE_BITMAP_PENDING.  So only the first one
3039 		 * to complete "bitmap_startwrite" gets to set
3040 		 * STRIPE_BIT_DELAY.  This is important as once a stripe
3041 		 * is added to a batch, STRIPE_BIT_DELAY cannot be changed
3042 		 * any more.
3043 		 */
3044 		set_bit(STRIPE_BITMAP_PENDING, &sh->state);
3045 		spin_unlock_irq(&sh->stripe_lock);
3046 		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
3047 				  STRIPE_SECTORS, 0);
3048 		spin_lock_irq(&sh->stripe_lock);
3049 		clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
3050 		if (!sh->batch_head) {
3051 			sh->bm_seq = conf->seq_flush+1;
3052 			set_bit(STRIPE_BIT_DELAY, &sh->state);
3053 		}
3054 	}
3055 	spin_unlock_irq(&sh->stripe_lock);
3056 
3057 	if (stripe_can_batch(sh))
3058 		stripe_add_to_batch_list(conf, sh);
3059 	return 1;
3060 
3061  overlap:
3062 	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
3063 	spin_unlock_irq(&sh->stripe_lock);
3064 	return 0;
3065 }
3066 
3067 static void end_reshape(struct r5conf *conf);
3068 
3069 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
3070 			    struct stripe_head *sh)
3071 {
3072 	int sectors_per_chunk =
3073 		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
3074 	int dd_idx;
3075 	int chunk_offset = sector_div(stripe, sectors_per_chunk);
3076 	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
3077 
3078 	raid5_compute_sector(conf,
3079 			     stripe * (disks - conf->max_degraded)
3080 			     *sectors_per_chunk + chunk_offset,
3081 			     previous,
3082 			     &dd_idx, sh);
3083 }
3084 
3085 static void
3086 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3087 				struct stripe_head_state *s, int disks,
3088 				struct bio_list *return_bi)
3089 {
3090 	int i;
3091 	BUG_ON(sh->batch_head);
3092 	for (i = disks; i--; ) {
3093 		struct bio *bi;
3094 		int bitmap_end = 0;
3095 
3096 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
3097 			struct md_rdev *rdev;
3098 			rcu_read_lock();
3099 			rdev = rcu_dereference(conf->disks[i].rdev);
3100 			if (rdev && test_bit(In_sync, &rdev->flags) &&
3101 			    !test_bit(Faulty, &rdev->flags))
3102 				atomic_inc(&rdev->nr_pending);
3103 			else
3104 				rdev = NULL;
3105 			rcu_read_unlock();
3106 			if (rdev) {
3107 				if (!rdev_set_badblocks(
3108 					    rdev,
3109 					    sh->sector,
3110 					    STRIPE_SECTORS, 0))
3111 					md_error(conf->mddev, rdev);
3112 				rdev_dec_pending(rdev, conf->mddev);
3113 			}
3114 		}
3115 		spin_lock_irq(&sh->stripe_lock);
3116 		/* fail all writes first */
3117 		bi = sh->dev[i].towrite;
3118 		sh->dev[i].towrite = NULL;
3119 		sh->overwrite_disks = 0;
3120 		spin_unlock_irq(&sh->stripe_lock);
3121 		if (bi)
3122 			bitmap_end = 1;
3123 
3124 		r5l_stripe_write_finished(sh);
3125 
3126 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3127 			wake_up(&conf->wait_for_overlap);
3128 
3129 		while (bi && bi->bi_iter.bi_sector <
3130 			sh->dev[i].sector + STRIPE_SECTORS) {
3131 			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
3132 
3133 			bi->bi_error = -EIO;
3134 			if (!raid5_dec_bi_active_stripes(bi)) {
3135 				md_write_end(conf->mddev);
3136 				bio_list_add(return_bi, bi);
3137 			}
3138 			bi = nextbi;
3139 		}
3140 		if (bitmap_end)
3141 			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3142 				STRIPE_SECTORS, 0, 0);
3143 		bitmap_end = 0;
3144 		/* and fail all 'written' */
3145 		bi = sh->dev[i].written;
3146 		sh->dev[i].written = NULL;
3147 		if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) {
3148 			WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
3149 			sh->dev[i].page = sh->dev[i].orig_page;
3150 		}
3151 
3152 		if (bi) bitmap_end = 1;
3153 		while (bi && bi->bi_iter.bi_sector <
3154 		       sh->dev[i].sector + STRIPE_SECTORS) {
3155 			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
3156 
3157 			bi->bi_error = -EIO;
3158 			if (!raid5_dec_bi_active_stripes(bi)) {
3159 				md_write_end(conf->mddev);
3160 				bio_list_add(return_bi, bi);
3161 			}
3162 			bi = bi2;
3163 		}
3164 
3165 		/* fail any reads if this device is non-operational and
3166 		 * the data has not reached the cache yet.
3167 		 */
3168 		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
3169 		    s->failed > conf->max_degraded &&
3170 		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
3171 		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
3172 			spin_lock_irq(&sh->stripe_lock);
3173 			bi = sh->dev[i].toread;
3174 			sh->dev[i].toread = NULL;
3175 			spin_unlock_irq(&sh->stripe_lock);
3176 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3177 				wake_up(&conf->wait_for_overlap);
3178 			if (bi)
3179 				s->to_read--;
3180 			while (bi && bi->bi_iter.bi_sector <
3181 			       sh->dev[i].sector + STRIPE_SECTORS) {
3182 				struct bio *nextbi =
3183 					r5_next_bio(bi, sh->dev[i].sector);
3184 
3185 				bi->bi_error = -EIO;
3186 				if (!raid5_dec_bi_active_stripes(bi))
3187 					bio_list_add(return_bi, bi);
3188 				bi = nextbi;
3189 			}
3190 		}
3191 		if (bitmap_end)
3192 			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3193 					STRIPE_SECTORS, 0, 0);
3194 		/* If we were in the middle of a write the parity block might
3195 		 * still be locked - so just clear all R5_LOCKED flags
3196 		 */
3197 		clear_bit(R5_LOCKED, &sh->dev[i].flags);
3198 	}
3199 	s->to_write = 0;
3200 	s->written = 0;
3201 
3202 	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
3203 		if (atomic_dec_and_test(&conf->pending_full_writes))
3204 			md_wakeup_thread(conf->mddev->thread);
3205 }
3206 
3207 static void
3208 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
3209 		   struct stripe_head_state *s)
3210 {
3211 	int abort = 0;
3212 	int i;
3213 
3214 	BUG_ON(sh->batch_head);
3215 	clear_bit(STRIPE_SYNCING, &sh->state);
3216 	if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
3217 		wake_up(&conf->wait_for_overlap);
3218 	s->syncing = 0;
3219 	s->replacing = 0;
3220 	/* There is nothing more to do for sync/check/repair.
3221 	 * Don't even need to abort as that is handled elsewhere
3222 	 * if needed, and not always wanted e.g. if there is a known
3223 	 * bad block here.
3224 	 * For recover/replace we need to record a bad block on all
3225 	 * non-sync devices, or abort the recovery
3226 	 */
3227 	if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
3228 		/* During recovery devices cannot be removed, so
3229 		 * locking and refcounting of rdevs is not needed
3230 		 */
3231 		rcu_read_lock();
3232 		for (i = 0; i < conf->raid_disks; i++) {
3233 			struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
3234 			if (rdev
3235 			    && !test_bit(Faulty, &rdev->flags)
3236 			    && !test_bit(In_sync, &rdev->flags)
3237 			    && !rdev_set_badblocks(rdev, sh->sector,
3238 						   STRIPE_SECTORS, 0))
3239 				abort = 1;
3240 			rdev = rcu_dereference(conf->disks[i].replacement);
3241 			if (rdev
3242 			    && !test_bit(Faulty, &rdev->flags)
3243 			    && !test_bit(In_sync, &rdev->flags)
3244 			    && !rdev_set_badblocks(rdev, sh->sector,
3245 						   STRIPE_SECTORS, 0))
3246 				abort = 1;
3247 		}
3248 		rcu_read_unlock();
3249 		if (abort)
3250 			conf->recovery_disabled =
3251 				conf->mddev->recovery_disabled;
3252 	}
3253 	md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
3254 }
3255 
3256 static int want_replace(struct stripe_head *sh, int disk_idx)
3257 {
3258 	struct md_rdev *rdev;
3259 	int rv = 0;
3260 
3261 	rcu_read_lock();
3262 	rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement);
3263 	if (rdev
3264 	    && !test_bit(Faulty, &rdev->flags)
3265 	    && !test_bit(In_sync, &rdev->flags)
3266 	    && (rdev->recovery_offset <= sh->sector
3267 		|| rdev->mddev->recovery_cp <= sh->sector))
3268 		rv = 1;
3269 	rcu_read_unlock();
3270 	return rv;
3271 }
3272 
3273 /* fetch_block - checks the given member device to see if its data needs
3274  * to be read or computed to satisfy a request.
3275  *
3276  * Returns 1 when no more member devices need to be checked, otherwise returns
3277  * 0 to tell the loop in handle_stripe_fill to continue
3278  */
3279 
3280 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3281 			   int disk_idx, int disks)
3282 {
3283 	struct r5dev *dev = &sh->dev[disk_idx];
3284 	struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
3285 				  &sh->dev[s->failed_num[1]] };
3286 	int i;
3287 
3288 
3289 	if (test_bit(R5_LOCKED, &dev->flags) ||
3290 	    test_bit(R5_UPTODATE, &dev->flags))
3291 		/* No point reading this as we already have it or have
3292 		 * decided to get it.
3293 		 */
3294 		return 0;
3295 
3296 	if (dev->toread ||
3297 	    (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)))
3298 		/* We need this block to directly satisfy a request */
3299 		return 1;
3300 
3301 	if (s->syncing || s->expanding ||
3302 	    (s->replacing && want_replace(sh, disk_idx)))
3303 		/* When syncing, or expanding we read everything.
3304 		 * When replacing, we need the replaced block.
3305 		 */
3306 		return 1;
3307 
3308 	if ((s->failed >= 1 && fdev[0]->toread) ||
3309 	    (s->failed >= 2 && fdev[1]->toread))
3310 		/* If we want to read from a failed device, then
3311 		 * we need to actually read every other device.
3312 		 */
3313 		return 1;
3314 
3315 	/* Sometimes neither read-modify-write nor reconstruct-write
3316 	 * cycles can work.  In those cases we read every block we
3317 	 * can.  Then the parity-update is certain to have enough to
3318 	 * work with.
3319 	 * This can only be a problem when we need to write something,
3320 	 * and some device has failed.  If either of those tests
3321 	 * fail we need look no further.
3322 	 */
3323 	if (!s->failed || !s->to_write)
3324 		return 0;
3325 
3326 	if (test_bit(R5_Insync, &dev->flags) &&
3327 	    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3328 		/* Pre-reads are not permitted until after a short delay
3329 		 * to gather multiple requests.  However if this
3330 		 * device is not Insync, the block can only be computed
3331 		 * and there is no need to delay that.
3332 		 */
3333 		return 0;
3334 
3335 	for (i = 0; i < s->failed && i < 2; i++) {
3336 		if (fdev[i]->towrite &&
3337 		    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
3338 		    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
3339 			/* If we have a partial write to a failed
3340 			 * device, then we will need to reconstruct
3341 			 * the content of that device, so all other
3342 			 * devices must be read.
3343 			 */
3344 			return 1;
3345 	}
3346 
3347 	/* If we are forced to do a reconstruct-write, either because
3348 	 * the current RAID6 implementation only supports that, or
3349 	 * because parity cannot be trusted and we are currently
3350 	 * recovering it, there is extra need to be careful.
3351 	 * If one of the devices that we would need to read, because
3352 	 * it is not being overwritten (and maybe not written at all)
3353 	 * is missing/faulty, then we need to read everything we can.
3354 	 */
3355 	if (sh->raid_conf->level != 6 &&
3356 	    sh->sector < sh->raid_conf->mddev->recovery_cp)
3357 		/* reconstruct-write isn't being forced */
3358 		return 0;
3359 	for (i = 0; i < s->failed && i < 2; i++) {
3360 		if (s->failed_num[i] != sh->pd_idx &&
3361 		    s->failed_num[i] != sh->qd_idx &&
3362 		    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
3363 		    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
3364 			return 1;
3365 	}
3366 
3367 	return 0;
3368 }
3369 
3370 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
3371 		       int disk_idx, int disks)
3372 {
3373 	struct r5dev *dev = &sh->dev[disk_idx];
3374 
3375 	/* is the data in this block needed, and can we get it? */
3376 	if (need_this_block(sh, s, disk_idx, disks)) {
3377 		/* we would like to get this block, possibly by computing it,
3378 		 * otherwise read it if the backing disk is insync
3379 		 */
3380 		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
3381 		BUG_ON(test_bit(R5_Wantread, &dev->flags));
3382 		BUG_ON(sh->batch_head);
3383 		if ((s->uptodate == disks - 1) &&
3384 		    (s->failed && (disk_idx == s->failed_num[0] ||
3385 				   disk_idx == s->failed_num[1]))) {
3386 			/* the disk has failed and we have been asked to fetch
3387 			 * this block - compute it instead
3388 			 */
3389 			pr_debug("Computing stripe %llu block %d\n",
3390 			       (unsigned long long)sh->sector, disk_idx);
3391 			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3392 			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3393 			set_bit(R5_Wantcompute, &dev->flags);
3394 			sh->ops.target = disk_idx;
3395 			sh->ops.target2 = -1; /* no 2nd target */
3396 			s->req_compute = 1;
3397 			/* Careful: from this point on 'uptodate' is in the eye
3398 			 * of raid_run_ops which services 'compute' operations
3399 			 * before writes. R5_Wantcompute flags a block that will
3400 			 * be R5_UPTODATE by the time it is needed for a
3401 			 * subsequent operation.
3402 			 */
3403 			s->uptodate++;
3404 			return 1;
3405 		} else if (s->uptodate == disks-2 && s->failed >= 2) {
3406 			/* Computing 2-failure is *very* expensive; only
3407 			 * do it if failed >= 2
3408 			 */
3409 			int other;
3410 			for (other = disks; other--; ) {
3411 				if (other == disk_idx)
3412 					continue;
3413 				if (!test_bit(R5_UPTODATE,
3414 				      &sh->dev[other].flags))
3415 					break;
3416 			}
3417 			BUG_ON(other < 0);
3418 			pr_debug("Computing stripe %llu blocks %d,%d\n",
3419 			       (unsigned long long)sh->sector,
3420 			       disk_idx, other);
3421 			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3422 			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3423 			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
3424 			set_bit(R5_Wantcompute, &sh->dev[other].flags);
3425 			sh->ops.target = disk_idx;
3426 			sh->ops.target2 = other;
3427 			s->uptodate += 2;
3428 			s->req_compute = 1;
3429 			return 1;
3430 		} else if (test_bit(R5_Insync, &dev->flags)) {
3431 			set_bit(R5_LOCKED, &dev->flags);
3432 			set_bit(R5_Wantread, &dev->flags);
3433 			s->locked++;
3434 			pr_debug("Reading block %d (sync=%d)\n",
3435 				disk_idx, s->syncing);
3436 		}
3437 	}
3438 
3439 	return 0;
3440 }
3441 
3442 /**
3443  * handle_stripe_fill - read or compute data to satisfy pending requests.
3444  */
3445 static void handle_stripe_fill(struct stripe_head *sh,
3446 			       struct stripe_head_state *s,
3447 			       int disks)
3448 {
3449 	int i;
3450 
3451 	/* look for blocks to read/compute, skip this if a compute
3452 	 * is already in flight, or if the stripe contents are in the
3453 	 * midst of changing due to a write
3454 	 */
3455 	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
3456 	    !sh->reconstruct_state)
3457 		for (i = disks; i--; )
3458 			if (fetch_block(sh, s, i, disks))
3459 				break;
3460 	set_bit(STRIPE_HANDLE, &sh->state);
3461 }
3462 
3463 static void break_stripe_batch_list(struct stripe_head *head_sh,
3464 				    unsigned long handle_flags);
3465 /* handle_stripe_clean_event
3466  * any written block on an uptodate or failed drive can be returned.
3467  * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
3468  * never LOCKED, so we don't need to test 'failed' directly.
3469  */
3470 static void handle_stripe_clean_event(struct r5conf *conf,
3471 	struct stripe_head *sh, int disks, struct bio_list *return_bi)
3472 {
3473 	int i;
3474 	struct r5dev *dev;
3475 	int discard_pending = 0;
3476 	struct stripe_head *head_sh = sh;
3477 	bool do_endio = false;
3478 
3479 	for (i = disks; i--; )
3480 		if (sh->dev[i].written) {
3481 			dev = &sh->dev[i];
3482 			if (!test_bit(R5_LOCKED, &dev->flags) &&
3483 			    (test_bit(R5_UPTODATE, &dev->flags) ||
3484 			     test_bit(R5_Discard, &dev->flags) ||
3485 			     test_bit(R5_SkipCopy, &dev->flags))) {
3486 				/* We can return any write requests */
3487 				struct bio *wbi, *wbi2;
3488 				pr_debug("Return write for disc %d\n", i);
3489 				if (test_and_clear_bit(R5_Discard, &dev->flags))
3490 					clear_bit(R5_UPTODATE, &dev->flags);
3491 				if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
3492 					WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
3493 				}
3494 				do_endio = true;
3495 
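				/*
				 * For a batched stripe the label below is
				 * revisited once per batch member, so the
				 * written bios of every member get returned
				 * for this device index.
				 */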
3496 returnbi:
3497 				dev->page = dev->orig_page;
3498 				wbi = dev->written;
3499 				dev->written = NULL;
3500 				while (wbi && wbi->bi_iter.bi_sector <
3501 					dev->sector + STRIPE_SECTORS) {
3502 					wbi2 = r5_next_bio(wbi, dev->sector);
3503 					if (!raid5_dec_bi_active_stripes(wbi)) {
3504 						md_write_end(conf->mddev);
3505 						bio_list_add(return_bi, wbi);
3506 					}
3507 					wbi = wbi2;
3508 				}
3509 				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3510 						STRIPE_SECTORS,
3511 					 !test_bit(STRIPE_DEGRADED, &sh->state),
3512 						0);
3513 				if (head_sh->batch_head) {
3514 					sh = list_first_entry(&sh->batch_list,
3515 							      struct stripe_head,
3516 							      batch_list);
3517 					if (sh != head_sh) {
3518 						dev = &sh->dev[i];
3519 						goto returnbi;
3520 					}
3521 				}
3522 				sh = head_sh;
3523 				dev = &sh->dev[i];
3524 			} else if (test_bit(R5_Discard, &dev->flags))
3525 				discard_pending = 1;
3526 		}
3527 
3528 	r5l_stripe_write_finished(sh);
3529 
3530 	if (!discard_pending &&
3531 	    test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
3532 		int hash;
3533 		clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
3534 		clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
3535 		if (sh->qd_idx >= 0) {
3536 			clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
3537 			clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
3538 		}
3539 		/* now that discard is done we can proceed with any sync */
3540 		clear_bit(STRIPE_DISCARD, &sh->state);
3541 		/*
3542 		 * SCSI discard will change some bio fields and the stripe has
3543 		 * no updated data, so remove it from the hash list; the stripe
3544 		 * will be reinitialized.
3545 		 */
3546 unhash:
3547 		hash = sh->hash_lock_index;
3548 		spin_lock_irq(conf->hash_locks + hash);
3549 		remove_hash(sh);
3550 		spin_unlock_irq(conf->hash_locks + hash);
3551 		if (head_sh->batch_head) {
3552 			sh = list_first_entry(&sh->batch_list,
3553 					      struct stripe_head, batch_list);
3554 			if (sh != head_sh)
3555 				goto unhash;
3556 		}
3557 		sh = head_sh;
3558 
3559 		if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
3560 			set_bit(STRIPE_HANDLE, &sh->state);
3561 
3562 	}
3563 
3564 	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
3565 		if (atomic_dec_and_test(&conf->pending_full_writes))
3566 			md_wakeup_thread(conf->mddev->thread);
3567 
3568 	if (head_sh->batch_head && do_endio)
3569 		break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
3570 }
3571 
3572 static void handle_stripe_dirtying(struct r5conf *conf,
3573 				   struct stripe_head *sh,
3574 				   struct stripe_head_state *s,
3575 				   int disks)
3576 {
3577 	int rmw = 0, rcw = 0, i;
3578 	sector_t recovery_cp = conf->mddev->recovery_cp;
3579 
3580 	/* Check whether resync is now happening or should start.
3581 	 * If yes, then the array is dirty (after unclean shutdown or
3582 	 * initial creation), so parity in some stripes might be inconsistent.
3583 	 * In this case, we need to always do reconstruct-write, to ensure
3584 	 * that in case of drive failure or read-error correction, we
3585 	 * generate correct data from the parity.
3586 	 */
3587 	if (conf->rmw_level == PARITY_DISABLE_RMW ||
3588 	    (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
3589 	     s->failed == 0)) {
3590 		/* Calculate the real rcw later - for now make it
3591 		 * look like rcw is cheaper
3592 		 */
3593 		rcw = 1; rmw = 2;
3594 		pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
3595 			 conf->rmw_level, (unsigned long long)recovery_cp,
3596 			 (unsigned long long)sh->sector);
3597 	} else for (i = disks; i--; ) {
3598 		/* would I have to read this buffer for read_modify_write */
3599 		struct r5dev *dev = &sh->dev[i];
3600 		if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
3601 		    !test_bit(R5_LOCKED, &dev->flags) &&
3602 		    !(test_bit(R5_UPTODATE, &dev->flags) ||
3603 		      test_bit(R5_Wantcompute, &dev->flags))) {
3604 			if (test_bit(R5_Insync, &dev->flags))
3605 				rmw++;
3606 			else
3607 				rmw += 2*disks;  /* cannot read it */
3608 		}
3609 		/* Would I have to read this buffer for reconstruct_write */
3610 		if (!test_bit(R5_OVERWRITE, &dev->flags) &&
3611 		    i != sh->pd_idx && i != sh->qd_idx &&
3612 		    !test_bit(R5_LOCKED, &dev->flags) &&
3613 		    !(test_bit(R5_UPTODATE, &dev->flags) ||
3614 		    test_bit(R5_Wantcompute, &dev->flags))) {
3615 			if (test_bit(R5_Insync, &dev->flags))
3616 				rcw++;
3617 			else
3618 				rcw += 2*disks;
3619 		}
3620 	}
3621 	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
3622 		(unsigned long long)sh->sector, rmw, rcw);
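	/*
	 * Worked example with illustrative numbers: on a 5-device RAID-5
	 * (4 data + parity) with nothing cached and a write that fully
	 * overwrites a single data block, rmw counts the old data block plus
	 * parity (2 reads) while rcw counts the three untouched data blocks
	 * (3 reads), so read-modify-write wins.  If three of the four data
	 * blocks are being overwritten, rmw needs 4 reads but rcw only 1,
	 * so reconstruct-write wins.
	 */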
3623 	set_bit(STRIPE_HANDLE, &sh->state);
3624 	if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) {
3625 		/* prefer read-modify-write, but need to get some data */
3626 		if (conf->mddev->queue)
3627 			blk_add_trace_msg(conf->mddev->queue,
3628 					  "raid5 rmw %llu %d",
3629 					  (unsigned long long)sh->sector, rmw);
3630 		for (i = disks; i--; ) {
3631 			struct r5dev *dev = &sh->dev[i];
3632 			if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
3633 			    !test_bit(R5_LOCKED, &dev->flags) &&
3634 			    !(test_bit(R5_UPTODATE, &dev->flags) ||
3635 			    test_bit(R5_Wantcompute, &dev->flags)) &&
3636 			    test_bit(R5_Insync, &dev->flags)) {
3637 				if (test_bit(STRIPE_PREREAD_ACTIVE,
3638 					     &sh->state)) {
3639 					pr_debug("Read_old block %d for r-m-w\n",
3640 						 i);
3641 					set_bit(R5_LOCKED, &dev->flags);
3642 					set_bit(R5_Wantread, &dev->flags);
3643 					s->locked++;
3644 				} else {
3645 					set_bit(STRIPE_DELAYED, &sh->state);
3646 					set_bit(STRIPE_HANDLE, &sh->state);
3647 				}
3648 			}
3649 		}
3650 	}
3651 	if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) {
3652 		/* want reconstruct write, but need to get some data */
3653 		int qread = 0;
3654 		rcw = 0;
3655 		for (i = disks; i--; ) {
3656 			struct r5dev *dev = &sh->dev[i];
3657 			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
3658 			    i != sh->pd_idx && i != sh->qd_idx &&
3659 			    !test_bit(R5_LOCKED, &dev->flags) &&
3660 			    !(test_bit(R5_UPTODATE, &dev->flags) ||
3661 			      test_bit(R5_Wantcompute, &dev->flags))) {
3662 				rcw++;
3663 				if (test_bit(R5_Insync, &dev->flags) &&
3664 				    test_bit(STRIPE_PREREAD_ACTIVE,
3665 					     &sh->state)) {
3666 					pr_debug("Read_old block "
3667 						"%d for Reconstruct\n", i);
3668 					set_bit(R5_LOCKED, &dev->flags);
3669 					set_bit(R5_Wantread, &dev->flags);
3670 					s->locked++;
3671 					qread++;
3672 				} else {
3673 					set_bit(STRIPE_DELAYED, &sh->state);
3674 					set_bit(STRIPE_HANDLE, &sh->state);
3675 				}
3676 			}
3677 		}
3678 		if (rcw && conf->mddev->queue)
3679 			blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
3680 					  (unsigned long long)sh->sector,
3681 					  rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
3682 	}
3683 
3684 	if (rcw > disks && rmw > disks &&
3685 	    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3686 		set_bit(STRIPE_DELAYED, &sh->state);
3687 
3688 	/* now if nothing is locked, and if we have enough data,
3689 	 * we can start a write request
3690 	 */
3691 	/* since handle_stripe can be called at any time we need to handle the
3692 	 * case where a compute block operation has been submitted and then a
3693 	 * subsequent call wants to start a write request.  raid_run_ops only
3694 	 * handles the case where compute block and reconstruct are requested
3695 	 * simultaneously.  If this is not the case then new writes need to be
3696 	 * held off until the compute completes.
3697 	 */
3698 	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
3699 	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
3700 	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
3701 		schedule_reconstruction(sh, s, rcw == 0, 0);
3702 }
3703 
3704 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
3705 				struct stripe_head_state *s, int disks)
3706 {
3707 	struct r5dev *dev = NULL;
3708 
3709 	BUG_ON(sh->batch_head);
3710 	set_bit(STRIPE_HANDLE, &sh->state);
3711 
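	/*
	 * Rough sketch of the RAID-4/5 check state machine, summarising the
	 * cases below:
	 *
	 *   idle --(no failures)--> run --> check_result
	 *       parity matches                  -> STRIPE_INSYNC
	 *       mismatch, MD_RECOVERY_CHECK set -> STRIPE_INSYNC (no repair)
	 *       mismatch, repair allowed        -> compute_run
	 *                                          -> compute_result -> write P
	 *
	 *   With a failed device, 'idle' falls straight through to
	 *   compute_result and the already-computed block is written out.
	 */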
3712 	switch (sh->check_state) {
3713 	case check_state_idle:
3714 		/* start a new check operation if there are no failures */
3715 		if (s->failed == 0) {
3716 			BUG_ON(s->uptodate != disks);
3717 			sh->check_state = check_state_run;
3718 			set_bit(STRIPE_OP_CHECK, &s->ops_request);
3719 			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
3720 			s->uptodate--;
3721 			break;
3722 		}
3723 		dev = &sh->dev[s->failed_num[0]];
3724 		/* fall through */
3725 	case check_state_compute_result:
3726 		sh->check_state = check_state_idle;
3727 		if (!dev)
3728 			dev = &sh->dev[sh->pd_idx];
3729 
3730 		/* check that a write has not made the stripe insync */
3731 		if (test_bit(STRIPE_INSYNC, &sh->state))
3732 			break;
3733 
3734 		/* either failed parity check, or recovery is happening */
3735 		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
3736 		BUG_ON(s->uptodate != disks);
3737 
3738 		set_bit(R5_LOCKED, &dev->flags);
3739 		s->locked++;
3740 		set_bit(R5_Wantwrite, &dev->flags);
3741 
3742 		clear_bit(STRIPE_DEGRADED, &sh->state);
3743 		set_bit(STRIPE_INSYNC, &sh->state);
3744 		break;
3745 	case check_state_run:
3746 		break; /* we will be called again upon completion */
3747 	case check_state_check_result:
3748 		sh->check_state = check_state_idle;
3749 
3750 		/* if a failure occurred during the check operation, leave
3751 		 * STRIPE_INSYNC not set and let the stripe be handled again
3752 		 */
3753 		if (s->failed)
3754 			break;
3755 
3756 		/* handle a successful check operation, if parity is correct
3757 		 * we are done.  Otherwise update the mismatch count and repair
3758 		 * parity if !MD_RECOVERY_CHECK
3759 		 */
3760 		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
3761 			/* parity is correct (on disc,
3762 			 * not in buffer any more)
3763 			 */
3764 			set_bit(STRIPE_INSYNC, &sh->state);
3765 		else {
3766 			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
3767 			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
3768 				/* don't try to repair!! */
3769 				set_bit(STRIPE_INSYNC, &sh->state);
3770 			else {
3771 				sh->check_state = check_state_compute_run;
3772 				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3773 				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3774 				set_bit(R5_Wantcompute,
3775 					&sh->dev[sh->pd_idx].flags);
3776 				sh->ops.target = sh->pd_idx;
3777 				sh->ops.target2 = -1;
3778 				s->uptodate++;
3779 			}
3780 		}
3781 		break;
3782 	case check_state_compute_run:
3783 		break;
3784 	default:
3785 		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
3786 		       __func__, sh->check_state,
3787 		       (unsigned long long) sh->sector);
3788 		BUG();
3789 	}
3790 }
3791 
3792 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
3793 				  struct stripe_head_state *s,
3794 				  int disks)
3795 {
3796 	int pd_idx = sh->pd_idx;
3797 	int qd_idx = sh->qd_idx;
3798 	struct r5dev *dev;
3799 
3800 	BUG_ON(sh->batch_head);
3801 	set_bit(STRIPE_HANDLE, &sh->state);
3802 
3803 	BUG_ON(s->failed > 2);
3804 
3805 	/* Want to check and possibly repair P and Q.
3806 	 * However there could be one 'failed' device, in which
3807 	 * case we can only check one of them, possibly using the
3808 	 * other to generate missing data
3809 	 */
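	/*
	 * Illustration of how the initial state is chosen below (assuming at
	 * most two failed devices, as asserted above):
	 *   - no failures:               check both P and Q (check_state_run_pq)
	 *   - only the Q device failed:  check P            (check_state_run)
	 *   - one failed device other than Q: Q was not used to regenerate
	 *                                it, so check Q      (check_state_run_q)
	 *   - two failures:              nothing to check; fall through and
	 *                                write out the recomputed blocks
	 */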
3810 
3811 	switch (sh->check_state) {
3812 	case check_state_idle:
3813 		/* start a new check operation if there are < 2 failures */
3814 		if (s->failed == s->q_failed) {
3815 			/* The only possible failed device holds Q, so it
3816 			 * makes sense to check P (if any other device had failed,
3817 			 * we would have used P to recreate it).
3818 			 */
3819 			sh->check_state = check_state_run;
3820 		}
3821 		if (!s->q_failed && s->failed < 2) {
3822 			/* Q is not failed, and we didn't use it to generate
3823 			 * anything, so it makes sense to check it
3824 			 */
3825 			if (sh->check_state == check_state_run)
3826 				sh->check_state = check_state_run_pq;
3827 			else
3828 				sh->check_state = check_state_run_q;
3829 		}
3830 
3831 		/* discard potentially stale zero_sum_result */
3832 		sh->ops.zero_sum_result = 0;
3833 
3834 		if (sh->check_state == check_state_run) {
3835 			/* async_xor_zero_sum destroys the contents of P */
3836 			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
3837 			s->uptodate--;
3838 		}
3839 		if (sh->check_state >= check_state_run &&
3840 		    sh->check_state <= check_state_run_pq) {
3841 			/* async_syndrome_zero_sum preserves P and Q, so
3842 			 * no need to mark them !uptodate here
3843 			 */
3844 			set_bit(STRIPE_OP_CHECK, &s->ops_request);
3845 			break;
3846 		}
3847 
3848 		/* we have a 2-disk failure */
3849 		BUG_ON(s->failed != 2);
3850 		/* fall through */
3851 	case check_state_compute_result:
3852 		sh->check_state = check_state_idle;
3853 
3854 		/* check that a write has not made the stripe insync */
3855 		if (test_bit(STRIPE_INSYNC, &sh->state))
3856 			break;
3857 
3858 		/* now write out any block on a failed drive,
3859 		 * or P or Q if they were recomputed
3860 		 */
3861 		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
3862 		if (s->failed == 2) {
3863 			dev = &sh->dev[s->failed_num[1]];
3864 			s->locked++;
3865 			set_bit(R5_LOCKED, &dev->flags);
3866 			set_bit(R5_Wantwrite, &dev->flags);
3867 		}
3868 		if (s->failed >= 1) {
3869 			dev = &sh->dev[s->failed_num[0]];
3870 			s->locked++;
3871 			set_bit(R5_LOCKED, &dev->flags);
3872 			set_bit(R5_Wantwrite, &dev->flags);
3873 		}
3874 		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
3875 			dev = &sh->dev[pd_idx];
3876 			s->locked++;
3877 			set_bit(R5_LOCKED, &dev->flags);
3878 			set_bit(R5_Wantwrite, &dev->flags);
3879 		}
3880 		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
3881 			dev = &sh->dev[qd_idx];
3882 			s->locked++;
3883 			set_bit(R5_LOCKED, &dev->flags);
3884 			set_bit(R5_Wantwrite, &dev->flags);
3885 		}
3886 		clear_bit(STRIPE_DEGRADED, &sh->state);
3887 
3888 		set_bit(STRIPE_INSYNC, &sh->state);
3889 		break;
3890 	case check_state_run:
3891 	case check_state_run_q:
3892 	case check_state_run_pq:
3893 		break; /* we will be called again upon completion */
3894 	case check_state_check_result:
3895 		sh->check_state = check_state_idle;
3896 
3897 		/* handle a successful check operation, if parity is correct
3898 		 * we are done.  Otherwise update the mismatch count and repair
3899 		 * parity if !MD_RECOVERY_CHECK
3900 		 */
3901 		if (sh->ops.zero_sum_result == 0) {
3902 			/* both parities are correct */
3903 			if (!s->failed)
3904 				set_bit(STRIPE_INSYNC, &sh->state);
3905 			else {
3906 				/* in contrast to the raid5 case we can validate
3907 				 * parity, but still have a failure to write
3908 				 * back
3909 				 */
3910 				sh->check_state = check_state_compute_result;
3911 				/* Returning at this point means that we may go
3912 				 * off and bring p and/or q uptodate again so
3913 				 * we make sure to check zero_sum_result again
3914 				 * to verify if p or q need writeback
3915 				 */
3916 			}
3917 		} else {
3918 			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
3919 			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
3920 				/* don't try to repair!! */
3921 				set_bit(STRIPE_INSYNC, &sh->state);
3922 			else {
3923 				int *target = &sh->ops.target;
3924 
3925 				sh->ops.target = -1;
3926 				sh->ops.target2 = -1;
3927 				sh->check_state = check_state_compute_run;
3928 				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3929 				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3930 				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
3931 					set_bit(R5_Wantcompute,
3932 						&sh->dev[pd_idx].flags);
3933 					*target = pd_idx;
3934 					target = &sh->ops.target2;
3935 					s->uptodate++;
3936 				}
3937 				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
3938 					set_bit(R5_Wantcompute,
3939 						&sh->dev[qd_idx].flags);
3940 					*target = qd_idx;
3941 					s->uptodate++;
3942 				}
3943 			}
3944 		}
3945 		break;
3946 	case check_state_compute_run:
3947 		break;
3948 	default:
3949 		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
3950 		       __func__, sh->check_state,
3951 		       (unsigned long long) sh->sector);
3952 		BUG();
3953 	}
3954 }
3955 
3956 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
3957 {
3958 	int i;
3959 
3960 	/* We have read all the blocks in this stripe and now we need to
3961 	 * copy some of them into a target stripe for expand.
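	 *
	 * Roughly: for each data block, raid5_compute_blocknr() recovers its
	 * array-wide block number under the pre-reshape geometry, and
	 * raid5_compute_sector() maps that number onto the post-reshape
	 * layout to find the target stripe and device slot that should
	 * receive the copy.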
3962 	 */
3963 	struct dma_async_tx_descriptor *tx = NULL;
3964 	BUG_ON(sh->batch_head);
3965 	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3966 	for (i = 0; i < sh->disks; i++)
3967 		if (i != sh->pd_idx && i != sh->qd_idx) {
3968 			int dd_idx, j;
3969 			struct stripe_head *sh2;
3970 			struct async_submit_ctl submit;
3971 
3972 			sector_t bn = raid5_compute_blocknr(sh, i, 1);
3973 			sector_t s = raid5_compute_sector(conf, bn, 0,
3974 							  &dd_idx, NULL);
3975 			sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1);
3976 			if (sh2 == NULL)
3977 				/* so far only the early blocks of this stripe
3978 				 * have been requested.  When later blocks
3979 				 * get requested, we will try again
3980 				 */
3981 				continue;
3982 			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
3983 			   test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
3984 				/* must have already done this block */
3985 				raid5_release_stripe(sh2);
3986 				continue;
3987 			}
3988 
3989 			/* place all the copies on one channel */
3990 			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
3991 			tx = async_memcpy(sh2->dev[dd_idx].page,
3992 					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
3993 					  &submit);
3994 
3995 			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
3996 			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
3997 			for (j = 0; j < conf->raid_disks; j++)
3998 				if (j != sh2->pd_idx &&
3999 				    j != sh2->qd_idx &&
4000 				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
4001 					break;
4002 			if (j == conf->raid_disks) {
4003 				set_bit(STRIPE_EXPAND_READY, &sh2->state);
4004 				set_bit(STRIPE_HANDLE, &sh2->state);
4005 			}
4006 			raid5_release_stripe(sh2);
4007 
4008 		}
4009 	/* done submitting copies, wait for them to complete */
4010 	async_tx_quiesce(&tx);
4011 }
4012 
4013 /*
4014  * handle_stripe - do things to a stripe.
4015  *
4016  * We lock the stripe by setting STRIPE_ACTIVE and then examine the
4017  * state of various bits to see what needs to be done.
4018  * Possible results:
4019  *    return some read requests which now have data
4020  *    return some write requests which are safely on storage
4021  *    schedule a read on some buffers
4022  *    schedule a write of some buffers
4023  *    return confirmation of parity correctness
4024  *
4025  */
4026 
4027 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
4028 {
4029 	struct r5conf *conf = sh->raid_conf;
4030 	int disks = sh->disks;
4031 	struct r5dev *dev;
4032 	int i;
4033 	int do_recovery = 0;
4034 
4035 	memset(s, 0, sizeof(*s));
4036 
4037 	s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head;
4038 	s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head;
4039 	s->failed_num[0] = -1;
4040 	s->failed_num[1] = -1;
4041 	s->log_failed = r5l_log_disk_error(conf);
4042 
4043 	/* Now to look around and see what can be done */
4044 	rcu_read_lock();
4045 	for (i=disks; i--; ) {
4046 		struct md_rdev *rdev;
4047 		sector_t first_bad;
4048 		int bad_sectors;
4049 		int is_bad = 0;
4050 
4051 		dev = &sh->dev[i];
4052 
4053 		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
4054 			 i, dev->flags,
4055 			 dev->toread, dev->towrite, dev->written);
4056 		/* maybe we can reply to a read
4057 		 *
4058 		 * new wantfill requests are only permitted while
4059 		 * ops_complete_biofill is guaranteed to be inactive
4060 		 */
4061 		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
4062 		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
4063 			set_bit(R5_Wantfill, &dev->flags);
4064 
4065 		/* now count some things */
4066 		if (test_bit(R5_LOCKED, &dev->flags))
4067 			s->locked++;
4068 		if (test_bit(R5_UPTODATE, &dev->flags))
4069 			s->uptodate++;
4070 		if (test_bit(R5_Wantcompute, &dev->flags)) {
4071 			s->compute++;
4072 			BUG_ON(s->compute > 2);
4073 		}
4074 
4075 		if (test_bit(R5_Wantfill, &dev->flags))
4076 			s->to_fill++;
4077 		else if (dev->toread)
4078 			s->to_read++;
4079 		if (dev->towrite) {
4080 			s->to_write++;
4081 			if (!test_bit(R5_OVERWRITE, &dev->flags))
4082 				s->non_overwrite++;
4083 		}
4084 		if (dev->written)
4085 			s->written++;
4086 		/* Prefer to use the replacement for reads, but only
4087 		 * if it is recovered enough and has no bad blocks.
4088 		 */
4089 		rdev = rcu_dereference(conf->disks[i].replacement);
4090 		if (rdev && !test_bit(Faulty, &rdev->flags) &&
4091 		    rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
4092 		    !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
4093 				 &first_bad, &bad_sectors))
4094 			set_bit(R5_ReadRepl, &dev->flags);
4095 		else {
4096 			if (rdev && !test_bit(Faulty, &rdev->flags))
4097 				set_bit(R5_NeedReplace, &dev->flags);
4098 			else
4099 				clear_bit(R5_NeedReplace, &dev->flags);
4100 			rdev = rcu_dereference(conf->disks[i].rdev);
4101 			clear_bit(R5_ReadRepl, &dev->flags);
4102 		}
4103 		if (rdev && test_bit(Faulty, &rdev->flags))
4104 			rdev = NULL;
4105 		if (rdev) {
4106 			is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
4107 					     &first_bad, &bad_sectors);
4108 			if (s->blocked_rdev == NULL
4109 			    && (test_bit(Blocked, &rdev->flags)
4110 				|| is_bad < 0)) {
4111 				if (is_bad < 0)
4112 					set_bit(BlockedBadBlocks,
4113 						&rdev->flags);
4114 				s->blocked_rdev = rdev;
4115 				atomic_inc(&rdev->nr_pending);
4116 			}
4117 		}
4118 		clear_bit(R5_Insync, &dev->flags);
4119 		if (!rdev)
4120 			/* Not in-sync */;
4121 		else if (is_bad) {
4122 			/* also not in-sync */
4123 			if (!test_bit(WriteErrorSeen, &rdev->flags) &&
4124 			    test_bit(R5_UPTODATE, &dev->flags)) {
4125 				/* treat as in-sync, but with a read error
4126 				 * which we can now try to correct
4127 				 */
4128 				set_bit(R5_Insync, &dev->flags);
4129 				set_bit(R5_ReadError, &dev->flags);
4130 			}
4131 		} else if (test_bit(In_sync, &rdev->flags))
4132 			set_bit(R5_Insync, &dev->flags);
4133 		else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
4134 			/* in sync if before recovery_offset */
4135 			set_bit(R5_Insync, &dev->flags);
4136 		else if (test_bit(R5_UPTODATE, &dev->flags) &&
4137 			 test_bit(R5_Expanded, &dev->flags))
4138 			/* If we've reshaped into here, we assume it is Insync.
4139 			 * We will shortly update recovery_offset to make
4140 			 * it official.
4141 			 */
4142 			set_bit(R5_Insync, &dev->flags);
4143 
4144 		if (test_bit(R5_WriteError, &dev->flags)) {
4145 			/* This flag does not apply to '.replacement',
4146 			 * only to '.rdev', so make sure to check that. */
4147 			struct md_rdev *rdev2 = rcu_dereference(
4148 				conf->disks[i].rdev);
4149 			if (rdev2 == rdev)
4150 				clear_bit(R5_Insync, &dev->flags);
4151 			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4152 				s->handle_bad_blocks = 1;
4153 				atomic_inc(&rdev2->nr_pending);
4154 			} else
4155 				clear_bit(R5_WriteError, &dev->flags);
4156 		}
4157 		if (test_bit(R5_MadeGood, &dev->flags)) {
4158 			/* This flag does not apply to '.replacement',
4159 			 * only to '.rdev', so make sure to check that. */
4160 			struct md_rdev *rdev2 = rcu_dereference(
4161 				conf->disks[i].rdev);
4162 			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4163 				s->handle_bad_blocks = 1;
4164 				atomic_inc(&rdev2->nr_pending);
4165 			} else
4166 				clear_bit(R5_MadeGood, &dev->flags);
4167 		}
4168 		if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
4169 			struct md_rdev *rdev2 = rcu_dereference(
4170 				conf->disks[i].replacement);
4171 			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4172 				s->handle_bad_blocks = 1;
4173 				atomic_inc(&rdev2->nr_pending);
4174 			} else
4175 				clear_bit(R5_MadeGoodRepl, &dev->flags);
4176 		}
4177 		if (!test_bit(R5_Insync, &dev->flags)) {
4178 			/* The ReadError flag will just be confusing now */
4179 			clear_bit(R5_ReadError, &dev->flags);
4180 			clear_bit(R5_ReWrite, &dev->flags);
4181 		}
4182 		if (test_bit(R5_ReadError, &dev->flags))
4183 			clear_bit(R5_Insync, &dev->flags);
4184 		if (!test_bit(R5_Insync, &dev->flags)) {
4185 			if (s->failed < 2)
4186 				s->failed_num[s->failed] = i;
4187 			s->failed++;
4188 			if (rdev && !test_bit(Faulty, &rdev->flags))
4189 				do_recovery = 1;
4190 		}
4191 	}
4192 	if (test_bit(STRIPE_SYNCING, &sh->state)) {
4193 		/* If there is a failed device being replaced,
4194 		 *     we must be recovering.
4195 		 * else if we are after recovery_cp, we must be syncing
4196 		 * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
4197 		 * else we can only be replacing.
4198 		 * sync and recovery both need to read all devices, and so
4199 		 * use the same flag.
4200 		 */
4201 		if (do_recovery ||
4202 		    sh->sector >= conf->mddev->recovery_cp ||
4203 		    test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
4204 			s->syncing = 1;
4205 		else
4206 			s->replacing = 1;
4207 	}
4208 	rcu_read_unlock();
4209 }
4210 
4211 static int clear_batch_ready(struct stripe_head *sh)
4212 {
4213 	/* Return '1' if this is a member of a batch, or
4214 	 * '0' if it is a lone stripe or a head which can now be
4215 	 * handled.
4216 	 */
4217 	struct stripe_head *tmp;
4218 	if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
4219 		return (sh->batch_head && sh->batch_head != sh);
4220 	spin_lock(&sh->stripe_lock);
4221 	if (!sh->batch_head) {
4222 		spin_unlock(&sh->stripe_lock);
4223 		return 0;
4224 	}
4225 
4226 	/*
4227 	 * this stripe could be added to a batch list before we check
4228 	 * BATCH_READY, so skip it
4229 	 */
4230 	if (sh->batch_head != sh) {
4231 		spin_unlock(&sh->stripe_lock);
4232 		return 1;
4233 	}
4234 	spin_lock(&sh->batch_lock);
4235 	list_for_each_entry(tmp, &sh->batch_list, batch_list)
4236 		clear_bit(STRIPE_BATCH_READY, &tmp->state);
4237 	spin_unlock(&sh->batch_lock);
4238 	spin_unlock(&sh->stripe_lock);
4239 
4240 	/*
4241 	 * BATCH_READY is cleared, no new stripes can be added.
4242 	 * batch_list can be accessed without lock
4243 	 */
4244 	return 0;
4245 }
4246 
4247 static void break_stripe_batch_list(struct stripe_head *head_sh,
4248 				    unsigned long handle_flags)
4249 {
4250 	struct stripe_head *sh, *next;
4251 	int i;
4252 	int do_wakeup = 0;
4253 
4254 	list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
4255 
4256 		list_del_init(&sh->batch_list);
4257 
4258 		WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
4259 					  (1 << STRIPE_SYNCING) |
4260 					  (1 << STRIPE_REPLACED) |
4261 					  (1 << STRIPE_DELAYED) |
4262 					  (1 << STRIPE_BIT_DELAY) |
4263 					  (1 << STRIPE_FULL_WRITE) |
4264 					  (1 << STRIPE_BIOFILL_RUN) |
4265 					  (1 << STRIPE_COMPUTE_RUN)  |
4266 					  (1 << STRIPE_OPS_REQ_PENDING) |
4267 					  (1 << STRIPE_DISCARD) |
4268 					  (1 << STRIPE_BATCH_READY) |
4269 					  (1 << STRIPE_BATCH_ERR) |
4270 					  (1 << STRIPE_BITMAP_PENDING)),
4271 			"stripe state: %lx\n", sh->state);
4272 		WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
4273 					      (1 << STRIPE_REPLACED)),
4274 			"head stripe state: %lx\n", head_sh->state);
4275 
4276 		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
4277 					    (1 << STRIPE_PREREAD_ACTIVE) |
4278 					    (1 << STRIPE_DEGRADED)),
4279 			      head_sh->state & (1 << STRIPE_INSYNC));
4280 
4281 		sh->check_state = head_sh->check_state;
4282 		sh->reconstruct_state = head_sh->reconstruct_state;
4283 		for (i = 0; i < sh->disks; i++) {
4284 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
4285 				do_wakeup = 1;
4286 			sh->dev[i].flags = head_sh->dev[i].flags &
4287 				(~((1 << R5_WriteError) | (1 << R5_Overlap)));
4288 		}
4289 		spin_lock_irq(&sh->stripe_lock);
4290 		sh->batch_head = NULL;
4291 		spin_unlock_irq(&sh->stripe_lock);
4292 		if (handle_flags == 0 ||
4293 		    sh->state & handle_flags)
4294 			set_bit(STRIPE_HANDLE, &sh->state);
4295 		raid5_release_stripe(sh);
4296 	}
4297 	spin_lock_irq(&head_sh->stripe_lock);
4298 	head_sh->batch_head = NULL;
4299 	spin_unlock_irq(&head_sh->stripe_lock);
4300 	for (i = 0; i < head_sh->disks; i++)
4301 		if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
4302 			do_wakeup = 1;
4303 	if (head_sh->state & handle_flags)
4304 		set_bit(STRIPE_HANDLE, &head_sh->state);
4305 
4306 	if (do_wakeup)
4307 		wake_up(&head_sh->raid_conf->wait_for_overlap);
4308 }
4309 
4310 static void handle_stripe(struct stripe_head *sh)
4311 {
4312 	struct stripe_head_state s;
4313 	struct r5conf *conf = sh->raid_conf;
4314 	int i;
4315 	int prexor;
4316 	int disks = sh->disks;
4317 	struct r5dev *pdev, *qdev;
4318 
4319 	clear_bit(STRIPE_HANDLE, &sh->state);
4320 	if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
4321 		/* already being handled, ensure it gets handled
4322 		 * again when current action finishes */
4323 		set_bit(STRIPE_HANDLE, &sh->state);
4324 		return;
4325 	}
4326 
4327 	if (clear_batch_ready(sh)) {
4328 		clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
4329 		return;
4330 	}
4331 
4332 	if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
4333 		break_stripe_batch_list(sh, 0);
4334 
4335 	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
4336 		spin_lock(&sh->stripe_lock);
4337 		/* Cannot process 'sync' concurrently with 'discard' */
4338 		if (!test_bit(STRIPE_DISCARD, &sh->state) &&
4339 		    test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
4340 			set_bit(STRIPE_SYNCING, &sh->state);
4341 			clear_bit(STRIPE_INSYNC, &sh->state);
4342 			clear_bit(STRIPE_REPLACED, &sh->state);
4343 		}
4344 		spin_unlock(&sh->stripe_lock);
4345 	}
4346 	clear_bit(STRIPE_DELAYED, &sh->state);
4347 
4348 	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
4349 		"pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
4350 	       (unsigned long long)sh->sector, sh->state,
4351 	       atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
4352 	       sh->check_state, sh->reconstruct_state);
4353 
4354 	analyse_stripe(sh, &s);
4355 
4356 	if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
4357 		goto finish;
4358 
4359 	if (s.handle_bad_blocks) {
4360 		set_bit(STRIPE_HANDLE, &sh->state);
4361 		goto finish;
4362 	}
4363 
4364 	if (unlikely(s.blocked_rdev)) {
4365 		if (s.syncing || s.expanding || s.expanded ||
4366 		    s.replacing || s.to_write || s.written) {
4367 			set_bit(STRIPE_HANDLE, &sh->state);
4368 			goto finish;
4369 		}
4370 		/* There is nothing for the blocked_rdev to block */
4371 		rdev_dec_pending(s.blocked_rdev, conf->mddev);
4372 		s.blocked_rdev = NULL;
4373 	}
4374 
4375 	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
4376 		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
4377 		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
4378 	}
4379 
4380 	pr_debug("locked=%d uptodate=%d to_read=%d"
4381 	       " to_write=%d failed=%d failed_num=%d,%d\n",
4382 	       s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
4383 	       s.failed_num[0], s.failed_num[1]);
4384 	/* check if the array has lost more than max_degraded devices and,
4385 	 * if so, some requests might need to be failed.
4386 	 */
4387 	if (s.failed > conf->max_degraded || s.log_failed) {
4388 		sh->check_state = 0;
4389 		sh->reconstruct_state = 0;
4390 		break_stripe_batch_list(sh, 0);
4391 		if (s.to_read+s.to_write+s.written)
4392 			handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
4393 		if (s.syncing + s.replacing)
4394 			handle_failed_sync(conf, sh, &s);
4395 	}
4396 
4397 	/* Now we check to see if any write operations have recently
4398 	 * completed
4399 	 */
4400 	prexor = 0;
4401 	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
4402 		prexor = 1;
4403 	if (sh->reconstruct_state == reconstruct_state_drain_result ||
4404 	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
4405 		sh->reconstruct_state = reconstruct_state_idle;
4406 
4407 		/* All the 'written' buffers and the parity block are ready to
4408 		 * be written back to disk
4409 		 */
4410 		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
4411 		       !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
4412 		BUG_ON(sh->qd_idx >= 0 &&
4413 		       !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
4414 		       !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
4415 		for (i = disks; i--; ) {
4416 			struct r5dev *dev = &sh->dev[i];
4417 			if (test_bit(R5_LOCKED, &dev->flags) &&
4418 				(i == sh->pd_idx || i == sh->qd_idx ||
4419 				 dev->written)) {
4420 				pr_debug("Writing block %d\n", i);
4421 				set_bit(R5_Wantwrite, &dev->flags);
4422 				if (prexor)
4423 					continue;
4424 				if (s.failed > 1)
4425 					continue;
4426 				if (!test_bit(R5_Insync, &dev->flags) ||
4427 				    ((i == sh->pd_idx || i == sh->qd_idx)  &&
4428 				     s.failed == 0))
4429 					set_bit(STRIPE_INSYNC, &sh->state);
4430 			}
4431 		}
4432 		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4433 			s.dec_preread_active = 1;
4434 	}
4435 
4436 	/*
4437 	 * might be able to return some write requests if the parity blocks
4438 	 * are safe, or on a failed drive
4439 	 */
4440 	pdev = &sh->dev[sh->pd_idx];
4441 	s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
4442 		|| (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
4443 	qdev = &sh->dev[sh->qd_idx];
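	/*
	 * Note that for RAID-4/5 (level < 6) there is no Q block, so
	 * q_failed is forced true below and the Q-device tests in the
	 * clean-up condition are skipped.
	 */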
4444 	s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
4445 		|| (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
4446 		|| conf->level < 6;
4447 
4448 	if (s.written &&
4449 	    (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
4450 			     && !test_bit(R5_LOCKED, &pdev->flags)
4451 			     && (test_bit(R5_UPTODATE, &pdev->flags) ||
4452 				 test_bit(R5_Discard, &pdev->flags))))) &&
4453 	    (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
4454 			     && !test_bit(R5_LOCKED, &qdev->flags)
4455 			     && (test_bit(R5_UPTODATE, &qdev->flags) ||
4456 				 test_bit(R5_Discard, &qdev->flags))))))
4457 		handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
4458 
4459 	/* Now we might consider reading some blocks, either to check/generate
4460 	 * parity, or to satisfy requests
4461 	 * or to load a block that is being partially written.
4462 	 */
4463 	if (s.to_read || s.non_overwrite
4464 	    || (conf->level == 6 && s.to_write && s.failed)
4465 	    || (s.syncing && (s.uptodate + s.compute < disks))
4466 	    || s.replacing
4467 	    || s.expanding)
4468 		handle_stripe_fill(sh, &s, disks);
4469 
4470 	/* Now to consider new write requests and what else, if anything
4471 	 * should be read.  We do not handle new writes when:
4472 	 * 1/ A 'write' operation (copy+xor) is already in flight.
4473 	 * 2/ A 'check' operation is in flight, as it may clobber the parity
4474 	 *    block.
4475 	 */
4476 	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
4477 		handle_stripe_dirtying(conf, sh, &s, disks);
4478 
4479 	/* maybe we need to check and possibly fix the parity for this stripe
4480 	 * Any reads will already have been scheduled, so we just see if enough
4481 	 * data is available.  The parity check is held off while parity
4482 	 * dependent operations are in flight.
4483 	 */
4484 	if (sh->check_state ||
4485 	    (s.syncing && s.locked == 0 &&
4486 	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
4487 	     !test_bit(STRIPE_INSYNC, &sh->state))) {
4488 		if (conf->level == 6)
4489 			handle_parity_checks6(conf, sh, &s, disks);
4490 		else
4491 			handle_parity_checks5(conf, sh, &s, disks);
4492 	}
4493 
4494 	if ((s.replacing || s.syncing) && s.locked == 0
4495 	    && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
4496 	    && !test_bit(STRIPE_REPLACED, &sh->state)) {
4497 		/* Write out to replacement devices where possible */
4498 		for (i = 0; i < conf->raid_disks; i++)
4499 			if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
4500 				WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
4501 				set_bit(R5_WantReplace, &sh->dev[i].flags);
4502 				set_bit(R5_LOCKED, &sh->dev[i].flags);
4503 				s.locked++;
4504 			}
4505 		if (s.replacing)
4506 			set_bit(STRIPE_INSYNC, &sh->state);
4507 		set_bit(STRIPE_REPLACED, &sh->state);
4508 	}
4509 	if ((s.syncing || s.replacing) && s.locked == 0 &&
4510 	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
4511 	    test_bit(STRIPE_INSYNC, &sh->state)) {
4512 		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
4513 		clear_bit(STRIPE_SYNCING, &sh->state);
4514 		if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
4515 			wake_up(&conf->wait_for_overlap);
4516 	}
4517 
4518 	/* If the failed drives are just a ReadError, then we might need
4519 	 * to progress the repair/check process
4520 	 */
4521 	if (s.failed <= conf->max_degraded && !conf->mddev->ro)
4522 		for (i = 0; i < s.failed; i++) {
4523 			struct r5dev *dev = &sh->dev[s.failed_num[i]];
4524 			if (test_bit(R5_ReadError, &dev->flags)
4525 			    && !test_bit(R5_LOCKED, &dev->flags)
4526 			    && test_bit(R5_UPTODATE, &dev->flags)
4527 				) {
4528 				if (!test_bit(R5_ReWrite, &dev->flags)) {
4529 					set_bit(R5_Wantwrite, &dev->flags);
4530 					set_bit(R5_ReWrite, &dev->flags);
4531 					set_bit(R5_LOCKED, &dev->flags);
4532 					s.locked++;
4533 				} else {
4534 					/* let's read it back */
4535 					set_bit(R5_Wantread, &dev->flags);
4536 					set_bit(R5_LOCKED, &dev->flags);
4537 					s.locked++;
4538 				}
4539 			}
4540 		}
4541 
4542 	/* Finish reconstruct operations initiated by the expansion process */
4543 	if (sh->reconstruct_state == reconstruct_state_result) {
4544 		struct stripe_head *sh_src
4545 			= raid5_get_active_stripe(conf, sh->sector, 1, 1, 1);
4546 		if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
4547 			/* sh cannot be written until sh_src has been read.
4548 			 * so arrange for sh to be delayed a little
4549 			 */
4550 			set_bit(STRIPE_DELAYED, &sh->state);
4551 			set_bit(STRIPE_HANDLE, &sh->state);
4552 			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
4553 					      &sh_src->state))
4554 				atomic_inc(&conf->preread_active_stripes);
4555 			raid5_release_stripe(sh_src);
4556 			goto finish;
4557 		}
4558 		if (sh_src)
4559 			raid5_release_stripe(sh_src);
4560 
4561 		sh->reconstruct_state = reconstruct_state_idle;
4562 		clear_bit(STRIPE_EXPANDING, &sh->state);
4563 		for (i = conf->raid_disks; i--; ) {
4564 			set_bit(R5_Wantwrite, &sh->dev[i].flags);
4565 			set_bit(R5_LOCKED, &sh->dev[i].flags);
4566 			s.locked++;
4567 		}
4568 	}
4569 
4570 	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
4571 	    !sh->reconstruct_state) {
4572 		/* Need to write out all blocks after computing parity */
4573 		sh->disks = conf->raid_disks;
4574 		stripe_set_idx(sh->sector, conf, 0, sh);
4575 		schedule_reconstruction(sh, &s, 1, 1);
4576 	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
4577 		clear_bit(STRIPE_EXPAND_READY, &sh->state);
4578 		atomic_dec(&conf->reshape_stripes);
4579 		wake_up(&conf->wait_for_overlap);
4580 		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
4581 	}
4582 
4583 	if (s.expanding && s.locked == 0 &&
4584 	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
4585 		handle_stripe_expansion(conf, sh);
4586 
4587 finish:
4588 	/* wait for this device to become unblocked */
4589 	if (unlikely(s.blocked_rdev)) {
4590 		if (conf->mddev->external)
4591 			md_wait_for_blocked_rdev(s.blocked_rdev,
4592 						 conf->mddev);
4593 		else
4594 			/* Internal metadata will immediately
4595 			 * be written by raid5d, so we don't
4596 			 * need to wait here.
4597 			 */
4598 			rdev_dec_pending(s.blocked_rdev,
4599 					 conf->mddev);
4600 	}
4601 
4602 	if (s.handle_bad_blocks)
4603 		for (i = disks; i--; ) {
4604 			struct md_rdev *rdev;
4605 			struct r5dev *dev = &sh->dev[i];
4606 			if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
4607 				/* We own a safe reference to the rdev */
4608 				rdev = conf->disks[i].rdev;
4609 				if (!rdev_set_badblocks(rdev, sh->sector,
4610 							STRIPE_SECTORS, 0))
4611 					md_error(conf->mddev, rdev);
4612 				rdev_dec_pending(rdev, conf->mddev);
4613 			}
4614 			if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
4615 				rdev = conf->disks[i].rdev;
4616 				rdev_clear_badblocks(rdev, sh->sector,
4617 						     STRIPE_SECTORS, 0);
4618 				rdev_dec_pending(rdev, conf->mddev);
4619 			}
4620 			if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
4621 				rdev = conf->disks[i].replacement;
4622 				if (!rdev)
4623 					/* rdev has been moved down */
4624 					rdev = conf->disks[i].rdev;
4625 				rdev_clear_badblocks(rdev, sh->sector,
4626 						     STRIPE_SECTORS, 0);
4627 				rdev_dec_pending(rdev, conf->mddev);
4628 			}
4629 		}
4630 
4631 	if (s.ops_request)
4632 		raid_run_ops(sh, s.ops_request);
4633 
4634 	ops_run_io(sh, &s);
4635 
4636 	if (s.dec_preread_active) {
4637 		/* We delay this until after ops_run_io so that if make_request
4638 		 * is waiting on a flush, it won't continue until the writes
4639 		 * have actually been submitted.
4640 		 */
4641 		atomic_dec(&conf->preread_active_stripes);
4642 		if (atomic_read(&conf->preread_active_stripes) <
4643 		    IO_THRESHOLD)
4644 			md_wakeup_thread(conf->mddev->thread);
4645 	}
4646 
4647 	if (!bio_list_empty(&s.return_bi)) {
4648 		if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
4649 				(s.failed <= conf->max_degraded ||
4650 					conf->mddev->external == 0)) {
4651 			spin_lock_irq(&conf->device_lock);
4652 			bio_list_merge(&conf->return_bi, &s.return_bi);
4653 			spin_unlock_irq(&conf->device_lock);
4654 			md_wakeup_thread(conf->mddev->thread);
4655 		} else
4656 			return_io(&s.return_bi);
4657 	}
4658 
4659 	clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
4660 }
4661 
4662 static void raid5_activate_delayed(struct r5conf *conf)
4663 {
4664 	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
4665 		while (!list_empty(&conf->delayed_list)) {
4666 			struct list_head *l = conf->delayed_list.next;
4667 			struct stripe_head *sh;
4668 			sh = list_entry(l, struct stripe_head, lru);
4669 			list_del_init(l);
4670 			clear_bit(STRIPE_DELAYED, &sh->state);
4671 			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4672 				atomic_inc(&conf->preread_active_stripes);
4673 			list_add_tail(&sh->lru, &conf->hold_list);
4674 			raid5_wakeup_stripe_thread(sh);
4675 		}
4676 	}
4677 }
4678 
4679 static void activate_bit_delay(struct r5conf *conf,
4680 	struct list_head *temp_inactive_list)
4681 {
4682 	/* device_lock is held */
4683 	struct list_head head;
4684 	list_add(&head, &conf->bitmap_list);
4685 	list_del_init(&conf->bitmap_list);
4686 	while (!list_empty(&head)) {
4687 		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
4688 		int hash;
4689 		list_del_init(&sh->lru);
4690 		atomic_inc(&sh->count);
4691 		hash = sh->hash_lock_index;
4692 		__release_stripe(conf, sh, &temp_inactive_list[hash]);
4693 	}
4694 }
4695 
4696 static int raid5_congested(struct mddev *mddev, int bits)
4697 {
4698 	struct r5conf *conf = mddev->private;
4699 
4700 	/* No difference between reads and writes.  Just check
4701 	 * how busy the stripe_cache is
4702 	 */
4703 
4704 	if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
4705 		return 1;
4706 	if (conf->quiesce)
4707 		return 1;
4708 	if (atomic_read(&conf->empty_inactive_list_nr))
4709 		return 1;
4710 
4711 	return 0;
4712 }
4713 
4714 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
4715 {
4716 	struct r5conf *conf = mddev->private;
4717 	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
4718 	unsigned int chunk_sectors;
4719 	unsigned int bio_sectors = bio_sectors(bio);
4720 
4721 	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
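	/*
	 * The bio stays inside one chunk iff its offset within the chunk
	 * plus its length does not exceed the chunk size.  Illustrative
	 * numbers: chunk_sectors = 128, sector = 1000, bio_sectors = 24
	 * gives (1000 & 127) + 24 = 104 + 24 = 128 <= 128, so it fits;
	 * with bio_sectors = 32 it would spill into the next chunk.
	 */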
4722 	return  chunk_sectors >=
4723 		((sector & (chunk_sectors - 1)) + bio_sectors);
4724 }
4725 
4726 /*
4727  *  add bio to the retry LIFO (O(1); we are in interrupt context),
4728  *  later sampled by raid5d.
4729  */
4730 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
4731 {
4732 	unsigned long flags;
4733 
4734 	spin_lock_irqsave(&conf->device_lock, flags);
4735 
4736 	bi->bi_next = conf->retry_read_aligned_list;
4737 	conf->retry_read_aligned_list = bi;
4738 
4739 	spin_unlock_irqrestore(&conf->device_lock, flags);
4740 	md_wakeup_thread(conf->mddev->thread);
4741 }
4742 
4743 static struct bio *remove_bio_from_retry(struct r5conf *conf)
4744 {
4745 	struct bio *bi;
4746 
4747 	bi = conf->retry_read_aligned;
4748 	if (bi) {
4749 		conf->retry_read_aligned = NULL;
4750 		return bi;
4751 	}
4752 	bi = conf->retry_read_aligned_list;
4753 	if (bi) {
4754 		conf->retry_read_aligned_list = bi->bi_next;
4755 		bi->bi_next = NULL;
4756 		/*
4757 		 * this sets the active stripe count to 1 and the processed
4758 		 * stripe count to zero (upper 8 bits)
4759 		 */
4760 		raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
4761 	}
4762 
4763 	return bi;
4764 }
4765 
4766 /*
4767  *  raid5_align_endio() checks whether the read succeeded and, if it did,
4768  *  calls bio_endio on the original bio (having bio_put the cloned bio
4769  *  first).
4770  *  If the read failed, the original bio is queued for retry by raid5d.
4771  */
4772 static void raid5_align_endio(struct bio *bi)
4773 {
4774 	struct bio* raid_bi  = bi->bi_private;
4775 	struct mddev *mddev;
4776 	struct r5conf *conf;
4777 	struct md_rdev *rdev;
4778 	int error = bi->bi_error;
4779 
4780 	bio_put(bi);
4781 
4782 	rdev = (void*)raid_bi->bi_next;
4783 	raid_bi->bi_next = NULL;
4784 	mddev = rdev->mddev;
4785 	conf = mddev->private;
4786 
4787 	rdev_dec_pending(rdev, conf->mddev);
4788 
4789 	if (!error) {
4790 		trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
4791 					 raid_bi, 0);
4792 		bio_endio(raid_bi);
4793 		if (atomic_dec_and_test(&conf->active_aligned_reads))
4794 			wake_up(&conf->wait_for_quiescent);
4795 		return;
4796 	}
4797 
4798 	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
4799 
4800 	add_bio_to_retry(raid_bi, conf);
4801 }
4802 
4803 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
4804 {
4805 	struct r5conf *conf = mddev->private;
4806 	int dd_idx;
4807 	struct bio* align_bi;
4808 	struct md_rdev *rdev;
4809 	sector_t end_sector;
4810 
4811 	if (!in_chunk_boundary(mddev, raid_bio)) {
4812 		pr_debug("%s: non aligned\n", __func__);
4813 		return 0;
4814 	}
4815 	/*
4816 	 * use bio_clone_mddev to make a copy of the bio
4817 	 */
4818 	align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
4819 	if (!align_bi)
4820 		return 0;
4821 	/*
4822 	 * set bi_end_io to a new function, and set bi_private to the
4823 	 * original bio.
4824 	 */
4825 	align_bi->bi_end_io  = raid5_align_endio;
4826 	align_bi->bi_private = raid_bio;
4827 	/*
4828 	 *	compute position
4829 	 */
4830 	align_bi->bi_iter.bi_sector =
4831 		raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
4832 				     0, &dd_idx, NULL);
4833 
4834 	end_sector = bio_end_sector(align_bi);
4835 	rcu_read_lock();
4836 	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
4837 	if (!rdev || test_bit(Faulty, &rdev->flags) ||
4838 	    rdev->recovery_offset < end_sector) {
4839 		rdev = rcu_dereference(conf->disks[dd_idx].rdev);
4840 		if (rdev &&
4841 		    (test_bit(Faulty, &rdev->flags) ||
4842 		    !(test_bit(In_sync, &rdev->flags) ||
4843 		      rdev->recovery_offset >= end_sector)))
4844 			rdev = NULL;
4845 	}
4846 	if (rdev) {
4847 		sector_t first_bad;
4848 		int bad_sectors;
4849 
4850 		atomic_inc(&rdev->nr_pending);
4851 		rcu_read_unlock();
4852 		raid_bio->bi_next = (void*)rdev;
4853 		align_bi->bi_bdev =  rdev->bdev;
4854 		bio_clear_flag(align_bi, BIO_SEG_VALID);
4855 
4856 		if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
4857 				bio_sectors(align_bi),
4858 				&first_bad, &bad_sectors)) {
4859 			bio_put(align_bi);
4860 			rdev_dec_pending(rdev, mddev);
4861 			return 0;
4862 		}
4863 
4864 		/* No reshape active, so we can trust rdev->data_offset */
4865 		align_bi->bi_iter.bi_sector += rdev->data_offset;
4866 
4867 		spin_lock_irq(&conf->device_lock);
4868 		wait_event_lock_irq(conf->wait_for_quiescent,
4869 				    conf->quiesce == 0,
4870 				    conf->device_lock);
4871 		atomic_inc(&conf->active_aligned_reads);
4872 		spin_unlock_irq(&conf->device_lock);
4873 
4874 		if (mddev->gendisk)
4875 			trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
4876 					      align_bi, disk_devt(mddev->gendisk),
4877 					      raid_bio->bi_iter.bi_sector);
4878 		generic_make_request(align_bi);
4879 		return 1;
4880 	} else {
4881 		rcu_read_unlock();
4882 		bio_put(align_bi);
4883 		return 0;
4884 	}
4885 }
4886 
4887 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
4888 {
4889 	struct bio *split;
4890 
4891 	do {
4892 		sector_t sector = raid_bio->bi_iter.bi_sector;
4893 		unsigned chunk_sects = mddev->chunk_sectors;
4894 		unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
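		/*
		 * 'sectors' is the distance to the next chunk boundary.
		 * Illustrative numbers: with chunk_sects = 128 and
		 * sector = 200, (200 & 127) = 72, so sectors = 56 and the
		 * split ends exactly at sector 256.
		 */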
4895 
4896 		if (sectors < bio_sectors(raid_bio)) {
4897 			split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set);
4898 			bio_chain(split, raid_bio);
4899 		} else
4900 			split = raid_bio;
4901 
4902 		if (!raid5_read_one_chunk(mddev, split)) {
4903 			if (split != raid_bio)
4904 				generic_make_request(raid_bio);
4905 			return split;
4906 		}
4907 	} while (split != raid_bio);
4908 
4909 	return NULL;
4910 }
4911 
4912 /* __get_priority_stripe - get the next stripe to process
4913  *
4914  * Full stripe writes are allowed to pass preread active stripes up until
4915  * the bypass_threshold is exceeded.  In general the bypass_count
4916  * increments when the handle_list is handled before the hold_list; however, it
4917  * will not be incremented when STRIPE_IO_STARTED is sampled set, signifying a
4918  * stripe with in-flight i/o.  The bypass_count will be reset when the
4919  * head of the hold_list has changed, i.e. the head was promoted to the
4920  * handle_list.
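 *
 * A simplified illustration: full-stripe writes go straight onto the
 * handle_list while stripes that still need pre-reads wait on the
 * hold_list.  Each time the head of the handle_list is serviced while the
 * same stripe sits at the head of the hold_list, bypass_count is bumped;
 * once the handle_list drains, a stripe is taken from the hold_list if
 * bypass_count has exceeded bypass_threshold (or no full-stripe writes are
 * pending), at which point bypass_threshold is subtracted from bypass_count.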
4921  */
4922 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
4923 {
4924 	struct stripe_head *sh = NULL, *tmp;
4925 	struct list_head *handle_list = NULL;
4926 	struct r5worker_group *wg = NULL;
4927 
4928 	if (conf->worker_cnt_per_group == 0) {
4929 		handle_list = &conf->handle_list;
4930 	} else if (group != ANY_GROUP) {
4931 		handle_list = &conf->worker_groups[group].handle_list;
4932 		wg = &conf->worker_groups[group];
4933 	} else {
4934 		int i;
4935 		for (i = 0; i < conf->group_cnt; i++) {
4936 			handle_list = &conf->worker_groups[i].handle_list;
4937 			wg = &conf->worker_groups[i];
4938 			if (!list_empty(handle_list))
4939 				break;
4940 		}
4941 	}
4942 
4943 	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
4944 		  __func__,
4945 		  list_empty(handle_list) ? "empty" : "busy",
4946 		  list_empty(&conf->hold_list) ? "empty" : "busy",
4947 		  atomic_read(&conf->pending_full_writes), conf->bypass_count);
4948 
4949 	if (!list_empty(handle_list)) {
4950 		sh = list_entry(handle_list->next, typeof(*sh), lru);
4951 
4952 		if (list_empty(&conf->hold_list))
4953 			conf->bypass_count = 0;
4954 		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
4955 			if (conf->hold_list.next == conf->last_hold)
4956 				conf->bypass_count++;
4957 			else {
4958 				conf->last_hold = conf->hold_list.next;
4959 				conf->bypass_count -= conf->bypass_threshold;
4960 				if (conf->bypass_count < 0)
4961 					conf->bypass_count = 0;
4962 			}
4963 		}
4964 	} else if (!list_empty(&conf->hold_list) &&
4965 		   ((conf->bypass_threshold &&
4966 		     conf->bypass_count > conf->bypass_threshold) ||
4967 		    atomic_read(&conf->pending_full_writes) == 0)) {
4968 
4969 		list_for_each_entry(tmp, &conf->hold_list, lru) {
4970 			if (conf->worker_cnt_per_group == 0 ||
4971 			    group == ANY_GROUP ||
4972 			    !cpu_online(tmp->cpu) ||
4973 			    cpu_to_group(tmp->cpu) == group) {
4974 				sh = tmp;
4975 				break;
4976 			}
4977 		}
4978 
4979 		if (sh) {
4980 			conf->bypass_count -= conf->bypass_threshold;
4981 			if (conf->bypass_count < 0)
4982 				conf->bypass_count = 0;
4983 		}
4984 		wg = NULL;
4985 	}
4986 
4987 	if (!sh)
4988 		return NULL;
4989 
4990 	if (wg) {
4991 		wg->stripes_cnt--;
4992 		sh->group = NULL;
4993 	}
4994 	list_del_init(&sh->lru);
4995 	BUG_ON(atomic_inc_return(&sh->count) != 1);
4996 	return sh;
4997 }
4998 
4999 struct raid5_plug_cb {
5000 	struct blk_plug_cb	cb;
5001 	struct list_head	list;
5002 	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
5003 };
5004 
5005 static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
5006 {
5007 	struct raid5_plug_cb *cb = container_of(
5008 		blk_cb, struct raid5_plug_cb, cb);
5009 	struct stripe_head *sh;
5010 	struct mddev *mddev = cb->cb.data;
5011 	struct r5conf *conf = mddev->private;
5012 	int cnt = 0;
5013 	int hash;
5014 
5015 	if (cb->list.next && !list_empty(&cb->list)) {
5016 		spin_lock_irq(&conf->device_lock);
5017 		while (!list_empty(&cb->list)) {
5018 			sh = list_first_entry(&cb->list, struct stripe_head, lru);
5019 			list_del_init(&sh->lru);
5020 			/*
5021 			 * avoid a race where release_stripe_plug() sees
5022 			 * STRIPE_ON_UNPLUG_LIST clear but the stripe
5023 			 * is still in our list
5024 			 */
5025 			smp_mb__before_atomic();
5026 			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
5027 			/*
5028 			 * STRIPE_ON_RELEASE_LIST could be set here. In that
5029 			 * case, the count is always > 1 here
5030 			 */
5031 			hash = sh->hash_lock_index;
5032 			__release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
5033 			cnt++;
5034 		}
5035 		spin_unlock_irq(&conf->device_lock);
5036 	}
5037 	release_inactive_stripe_list(conf, cb->temp_inactive_list,
5038 				     NR_STRIPE_HASH_LOCKS);
5039 	if (mddev->queue)
5040 		trace_block_unplug(mddev->queue, cnt, !from_schedule);
5041 	kfree(cb);
5042 }
5043 
5044 static void release_stripe_plug(struct mddev *mddev,
5045 				struct stripe_head *sh)
5046 {
5047 	struct blk_plug_cb *blk_cb = blk_check_plugged(
5048 		raid5_unplug, mddev,
5049 		sizeof(struct raid5_plug_cb));
5050 	struct raid5_plug_cb *cb;
5051 
5052 	if (!blk_cb) {
5053 		raid5_release_stripe(sh);
5054 		return;
5055 	}
5056 
5057 	cb = container_of(blk_cb, struct raid5_plug_cb, cb);
5058 
5059 	if (cb->list.next == NULL) {
5060 		int i;
5061 		INIT_LIST_HEAD(&cb->list);
5062 		for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5063 			INIT_LIST_HEAD(cb->temp_inactive_list + i);
5064 	}
5065 
5066 	if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
5067 		list_add_tail(&sh->lru, &cb->list);
5068 	else
5069 		raid5_release_stripe(sh);
5070 }
5071 
5072 static void make_discard_request(struct mddev *mddev, struct bio *bi)
5073 {
5074 	struct r5conf *conf = mddev->private;
5075 	sector_t logical_sector, last_sector;
5076 	struct stripe_head *sh;
5077 	int remaining;
5078 	int stripe_sectors;
5079 
5080 	if (mddev->reshape_position != MaxSector)
5081 		/* Skip discard while reshape is happening */
5082 		return;
5083 
5084 	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
5085 	last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
5086 
5087 	bi->bi_next = NULL;
5088 	bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
5089 
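	/*
	 * A full stripe spans chunk_sectors on each data disk, i.e.
	 * stripe_sectors logical sectors.  Round the start of the bio up and
	 * the end down to full-stripe boundaries so only complete stripes
	 * are discarded, then convert to per-device stripe addresses.
	 */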
5090 	stripe_sectors = conf->chunk_sectors *
5091 		(conf->raid_disks - conf->max_degraded);
5092 	logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
5093 					       stripe_sectors);
5094 	sector_div(last_sector, stripe_sectors);
5095 
5096 	logical_sector *= conf->chunk_sectors;
5097 	last_sector *= conf->chunk_sectors;
5098 
5099 	for (; logical_sector < last_sector;
5100 	     logical_sector += STRIPE_SECTORS) {
5101 		DEFINE_WAIT(w);
5102 		int d;
5103 	again:
5104 		sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0);
5105 		prepare_to_wait(&conf->wait_for_overlap, &w,
5106 				TASK_UNINTERRUPTIBLE);
5107 		set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
5108 		if (test_bit(STRIPE_SYNCING, &sh->state)) {
5109 			raid5_release_stripe(sh);
5110 			schedule();
5111 			goto again;
5112 		}
5113 		clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
5114 		spin_lock_irq(&sh->stripe_lock);
5115 		for (d = 0; d < conf->raid_disks; d++) {
5116 			if (d == sh->pd_idx || d == sh->qd_idx)
5117 				continue;
5118 			if (sh->dev[d].towrite || sh->dev[d].toread) {
5119 				set_bit(R5_Overlap, &sh->dev[d].flags);
5120 				spin_unlock_irq(&sh->stripe_lock);
5121 				raid5_release_stripe(sh);
5122 				schedule();
5123 				goto again;
5124 			}
5125 		}
5126 		set_bit(STRIPE_DISCARD, &sh->state);
5127 		finish_wait(&conf->wait_for_overlap, &w);
5128 		sh->overwrite_disks = 0;
5129 		for (d = 0; d < conf->raid_disks; d++) {
5130 			if (d == sh->pd_idx || d == sh->qd_idx)
5131 				continue;
5132 			sh->dev[d].towrite = bi;
5133 			set_bit(R5_OVERWRITE, &sh->dev[d].flags);
5134 			raid5_inc_bi_active_stripes(bi);
5135 			sh->overwrite_disks++;
5136 		}
5137 		spin_unlock_irq(&sh->stripe_lock);
5138 		if (conf->mddev->bitmap) {
5139 			for (d = 0;
5140 			     d < conf->raid_disks - conf->max_degraded;
5141 			     d++)
5142 				bitmap_startwrite(mddev->bitmap,
5143 						  sh->sector,
5144 						  STRIPE_SECTORS,
5145 						  0);
5146 			sh->bm_seq = conf->seq_flush + 1;
5147 			set_bit(STRIPE_BIT_DELAY, &sh->state);
5148 		}
5149 
5150 		set_bit(STRIPE_HANDLE, &sh->state);
5151 		clear_bit(STRIPE_DELAYED, &sh->state);
5152 		if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5153 			atomic_inc(&conf->preread_active_stripes);
5154 		release_stripe_plug(mddev, sh);
5155 	}
5156 
5157 	remaining = raid5_dec_bi_active_stripes(bi);
5158 	if (remaining == 0) {
5159 		md_write_end(mddev);
5160 		bio_endio(bi);
5161 	}
5162 }
5163 
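/*
 * Main entry point for bios submitted to the array.  Flushes and discards
 * are handled specially, and reads that fit within a single chunk on one
 * device may be passed straight through as chunk-aligned reads.  Everything
 * else is split into STRIPE_SECTORS-sized pieces, each attached to its
 * stripe_head and handled by raid5d or the worker threads.
 */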
5164 static void raid5_make_request(struct mddev *mddev, struct bio * bi)
5165 {
5166 	struct r5conf *conf = mddev->private;
5167 	int dd_idx;
5168 	sector_t new_sector;
5169 	sector_t logical_sector, last_sector;
5170 	struct stripe_head *sh;
5171 	const int rw = bio_data_dir(bi);
5172 	int remaining;
5173 	DEFINE_WAIT(w);
5174 	bool do_prepare;
5175 
5176 	if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
5177 		int ret = r5l_handle_flush_request(conf->log, bi);
5178 
5179 		if (ret == 0)
5180 			return;
5181 		if (ret == -ENODEV) {
5182 			md_flush_request(mddev, bi);
5183 			return;
5184 		}
5185 		/* ret == -EAGAIN, fallback */
5186 	}
5187 
5188 	md_write_start(mddev, bi);
5189 
5190 	/*
5191 	 * If array is degraded, better not do chunk aligned read because
5192 	 * later we might have to read it again in order to reconstruct
5193 	 * data on failed drives.
5194 	 */
5195 	if (rw == READ && mddev->degraded == 0 &&
5196 	    mddev->reshape_position == MaxSector) {
5197 		bi = chunk_aligned_read(mddev, bi);
5198 		if (!bi)
5199 			return;
5200 	}
5201 
5202 	if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
5203 		make_discard_request(mddev, bi);
5204 		return;
5205 	}
5206 
5207 	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
5208 	last_sector = bio_end_sector(bi);
5209 	bi->bi_next = NULL;
5210 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
5211 
5212 	prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
5213 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
5214 		int previous;
5215 		int seq;
5216 
5217 		do_prepare = false;
5218 	retry:
5219 		seq = read_seqcount_begin(&conf->gen_lock);
5220 		previous = 0;
5221 		if (do_prepare)
5222 			prepare_to_wait(&conf->wait_for_overlap, &w,
5223 				TASK_UNINTERRUPTIBLE);
5224 		if (unlikely(conf->reshape_progress != MaxSector)) {
5225 			/* spinlock is needed as reshape_progress may be
5226 			 * 64bit on a 32bit platform, and so it might be
5227 			 * possible to see a half-updated value.
5228 			 * Of course reshape_progress could change after
5229 			 * the lock is dropped, so once we get a reference
5230 			 * to the stripe that we think it is, we will have
5231 			 * to check again.
5232 			 */
5233 			spin_lock_irq(&conf->device_lock);
5234 			if (mddev->reshape_backwards
5235 			    ? logical_sector < conf->reshape_progress
5236 			    : logical_sector >= conf->reshape_progress) {
5237 				previous = 1;
5238 			} else {
5239 				if (mddev->reshape_backwards
5240 				    ? logical_sector < conf->reshape_safe
5241 				    : logical_sector >= conf->reshape_safe) {
5242 					spin_unlock_irq(&conf->device_lock);
5243 					schedule();
5244 					do_prepare = true;
5245 					goto retry;
5246 				}
5247 			}
5248 			spin_unlock_irq(&conf->device_lock);
5249 		}
5250 
5251 		new_sector = raid5_compute_sector(conf, logical_sector,
5252 						  previous,
5253 						  &dd_idx, NULL);
5254 		pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n",
5255 			(unsigned long long)new_sector,
5256 			(unsigned long long)logical_sector);
5257 
5258 		sh = raid5_get_active_stripe(conf, new_sector, previous,
5259 				       (bi->bi_opf & REQ_RAHEAD), 0);
5260 		if (sh) {
5261 			if (unlikely(previous)) {
5262 				/* expansion might have moved on while waiting for a
5263 				 * stripe, so we must do the range check again.
5264 				 * Expansion could still move past after this
5265 				 * test, but as we are holding a reference to
5266 				 * 'sh', we know that if that happens,
5267 				 *  STRIPE_EXPANDING will get set and the expansion
5268 				 * won't proceed until we finish with the stripe.
5269 				 */
5270 				int must_retry = 0;
5271 				spin_lock_irq(&conf->device_lock);
5272 				if (mddev->reshape_backwards
5273 				    ? logical_sector >= conf->reshape_progress
5274 				    : logical_sector < conf->reshape_progress)
5275 					/* mismatch, need to try again */
5276 					must_retry = 1;
5277 				spin_unlock_irq(&conf->device_lock);
5278 				if (must_retry) {
5279 					raid5_release_stripe(sh);
5280 					schedule();
5281 					do_prepare = true;
5282 					goto retry;
5283 				}
5284 			}
5285 			if (read_seqcount_retry(&conf->gen_lock, seq)) {
5286 				/* Might have got the wrong stripe_head
5287 				 * by accident
5288 				 */
5289 				raid5_release_stripe(sh);
5290 				goto retry;
5291 			}
5292 
5293 			if (rw == WRITE &&
5294 			    logical_sector >= mddev->suspend_lo &&
5295 			    logical_sector < mddev->suspend_hi) {
5296 				raid5_release_stripe(sh);
5297 				/* As the suspend_* range is controlled by
5298 				 * userspace, we want an interruptible
5299 				 * wait.
5300 				 */
5301 				flush_signals(current);
5302 				prepare_to_wait(&conf->wait_for_overlap,
5303 						&w, TASK_INTERRUPTIBLE);
5304 				if (logical_sector >= mddev->suspend_lo &&
5305 				    logical_sector < mddev->suspend_hi) {
5306 					schedule();
5307 					do_prepare = true;
5308 				}
5309 				goto retry;
5310 			}
5311 
5312 			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
5313 			    !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
5314 				/* Stripe is busy expanding or
5315 				 * add failed due to overlap.  Flush everything
5316 				 * and wait a while
5317 				 */
5318 				md_wakeup_thread(mddev->thread);
5319 				raid5_release_stripe(sh);
5320 				schedule();
5321 				do_prepare = true;
5322 				goto retry;
5323 			}
5324 			set_bit(STRIPE_HANDLE, &sh->state);
5325 			clear_bit(STRIPE_DELAYED, &sh->state);
5326 			if ((!sh->batch_head || sh == sh->batch_head) &&
5327 			    (bi->bi_opf & REQ_SYNC) &&
5328 			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5329 				atomic_inc(&conf->preread_active_stripes);
5330 			release_stripe_plug(mddev, sh);
5331 		} else {
5332 			/* cannot get stripe for read-ahead, just give-up */
5333 			bi->bi_error = -EIO;
5334 			break;
5335 		}
5336 	}
5337 	finish_wait(&conf->wait_for_overlap, &w);
5338 
5339 	remaining = raid5_dec_bi_active_stripes(bi);
5340 	if (remaining == 0) {
5341 
5342 		if ( rw == WRITE )
5343 			md_write_end(mddev);
5344 
5345 		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
5346 					 bi, 0);
5347 		bio_endio(bi);
5348 	}
5349 }
5350 
5351 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
5352 
5353 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
5354 {
5355 	/* reshaping is quite different to recovery/resync so it is
5356 	 * handled quite separately ... here.
5357 	 *
5358 	 * On each call to sync_request, we gather one chunk worth of
5359 	 * destination stripes and flag them as expanding.
5360 	 * Then we find all the source stripes and request reads.
5361 	 * As the reads complete, handle_stripe will copy the data
5362 	 * into the destination stripe and release that stripe.
5363 	 */
5364 	struct r5conf *conf = mddev->private;
5365 	struct stripe_head *sh;
5366 	sector_t first_sector, last_sector;
5367 	int raid_disks = conf->previous_raid_disks;
5368 	int data_disks = raid_disks - conf->max_degraded;
5369 	int new_data_disks = conf->raid_disks - conf->max_degraded;
5370 	int i;
5371 	int dd_idx;
5372 	sector_t writepos, readpos, safepos;
5373 	sector_t stripe_addr;
5374 	int reshape_sectors;
5375 	struct list_head stripes;
5376 	sector_t retn;
5377 
5378 	if (sector_nr == 0) {
5379 		/* If restarting in the middle, skip the initial sectors */
5380 		if (mddev->reshape_backwards &&
5381 		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
5382 			sector_nr = raid5_size(mddev, 0, 0)
5383 				- conf->reshape_progress;
5384 		} else if (mddev->reshape_backwards &&
5385 			   conf->reshape_progress == MaxSector) {
5386 			/* shouldn't happen, but just in case, finish up.*/
5387 			sector_nr = MaxSector;
5388 		} else if (!mddev->reshape_backwards &&
5389 			   conf->reshape_progress > 0)
5390 			sector_nr = conf->reshape_progress;
5391 		sector_div(sector_nr, new_data_disks);
5392 		if (sector_nr) {
5393 			mddev->curr_resync_completed = sector_nr;
5394 			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5395 			*skipped = 1;
5396 			retn = sector_nr;
5397 			goto finish;
5398 		}
5399 	}
5400 
5401 	/* We need to process a full chunk at a time.
5402 	 * If old and new chunk sizes differ, we need to process the
5403 	 * larger of the two.
5404 	 */
5405 
5406 	reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors);
5407 
5408 	/* We update the metadata at least every 10 seconds, or when
5409 	 * the data about to be copied would over-write the source of
5410 	 * the data at the front of the range.  i.e. when the stripe one
5411 	 * reshape chunk along from reshape_progress, mapped with the new
5412 	 * layout, lands after where reshape_safe maps with the old layout.
5413 	 */
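	/*
	 * reshape_progress/reshape_safe are array (logical) addresses;
	 * dividing by the number of data disks converts them into per-device
	 * addresses so they can be compared against each other below.
	 */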
5414 	writepos = conf->reshape_progress;
5415 	sector_div(writepos, new_data_disks);
5416 	readpos = conf->reshape_progress;
5417 	sector_div(readpos, data_disks);
5418 	safepos = conf->reshape_safe;
5419 	sector_div(safepos, data_disks);
5420 	if (mddev->reshape_backwards) {
5421 		BUG_ON(writepos < reshape_sectors);
5422 		writepos -= reshape_sectors;
5423 		readpos += reshape_sectors;
5424 		safepos += reshape_sectors;
5425 	} else {
5426 		writepos += reshape_sectors;
5427 		/* readpos and safepos are worst-case calculations.
5428 		 * A negative number is overly pessimistic, and causes
5429 		 * obvious problems for unsigned storage.  So clip to 0.
5430 		 */
5431 		readpos -= min_t(sector_t, reshape_sectors, readpos);
5432 		safepos -= min_t(sector_t, reshape_sectors, safepos);
5433 	}
5434 
5435 	/* Having calculated 'writepos', possibly use it
5436 	 * to set 'stripe_addr', which is where we will write to.
5437 	 */
5438 	if (mddev->reshape_backwards) {
5439 		BUG_ON(conf->reshape_progress == 0);
5440 		stripe_addr = writepos;
5441 		BUG_ON((mddev->dev_sectors &
5442 			~((sector_t)reshape_sectors - 1))
5443 		       - reshape_sectors - stripe_addr
5444 		       != sector_nr);
5445 	} else {
5446 		BUG_ON(writepos != sector_nr + reshape_sectors);
5447 		stripe_addr = sector_nr;
5448 	}
5449 
5450 	/* 'writepos' is the most advanced device address we might write.
5451 	 * 'readpos' is the least advanced device address we might read.
5452 	 * 'safepos' is the least address recorded in the metadata as having
5453 	 *     been reshaped.
5454 	 * If there is a min_offset_diff, these are adjusted either by
5455 	 * increasing the safepos/readpos if diff is negative, or
5456 	 * increasing writepos if diff is positive.
5457 	 * If 'readpos' is then behind 'writepos', there is no way that we can
5458 	 * ensure safety in the face of a crash - that must be done by userspace
5459 	 * making a backup of the data.  So in that case there is no particular
5460 	 * rush to update metadata.
5461 	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
5462 	 * update the metadata to advance 'safepos' to match 'readpos' so that
5463 	 * we can be safe in the event of a crash.
5464 	 * So we insist on updating metadata if safepos is behind writepos and
5465 	 * readpos is beyond writepos.
5466 	 * In any case, update the metadata every 10 seconds.
5467 	 * Maybe that number should be configurable, but I'm not sure it is
5468 	 * worth it.... maybe it could be a multiple of safemode_delay???
5469 	 */
5470 	if (conf->min_offset_diff < 0) {
5471 		safepos += -conf->min_offset_diff;
5472 		readpos += -conf->min_offset_diff;
5473 	} else
5474 		writepos += conf->min_offset_diff;
5475 
5476 	if ((mddev->reshape_backwards
5477 	     ? (safepos > writepos && readpos < writepos)
5478 	     : (safepos < writepos && readpos > writepos)) ||
5479 	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
5480 		/* Cannot proceed until we've updated the superblock... */
5481 		wait_event(conf->wait_for_overlap,
5482 			   atomic_read(&conf->reshape_stripes)==0
5483 			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
5484 		if (atomic_read(&conf->reshape_stripes) != 0)
5485 			return 0;
5486 		mddev->reshape_position = conf->reshape_progress;
5487 		mddev->curr_resync_completed = sector_nr;
5488 		conf->reshape_checkpoint = jiffies;
5489 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
5490 		md_wakeup_thread(mddev->thread);
5491 		wait_event(mddev->sb_wait, mddev->flags == 0 ||
5492 			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
5493 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5494 			return 0;
5495 		spin_lock_irq(&conf->device_lock);
5496 		conf->reshape_safe = mddev->reshape_position;
5497 		spin_unlock_irq(&conf->device_lock);
5498 		wake_up(&conf->wait_for_overlap);
5499 		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5500 	}
5501 
5502 	INIT_LIST_HEAD(&stripes);
5503 	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
5504 		int j;
5505 		int skipped_disk = 0;
5506 		sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
5507 		set_bit(STRIPE_EXPANDING, &sh->state);
5508 		atomic_inc(&conf->reshape_stripes);
5509 		/* If any of this stripe is beyond the end of the old
5510 		 * array, then we need to zero those blocks
5511 		 */
5512 		for (j=sh->disks; j--;) {
5513 			sector_t s;
5514 			if (j == sh->pd_idx)
5515 				continue;
5516 			if (conf->level == 6 &&
5517 			    j == sh->qd_idx)
5518 				continue;
5519 			s = raid5_compute_blocknr(sh, j, 0);
5520 			if (s < raid5_size(mddev, 0, 0)) {
5521 				skipped_disk = 1;
5522 				continue;
5523 			}
5524 			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
5525 			set_bit(R5_Expanded, &sh->dev[j].flags);
5526 			set_bit(R5_UPTODATE, &sh->dev[j].flags);
5527 		}
5528 		if (!skipped_disk) {
5529 			set_bit(STRIPE_EXPAND_READY, &sh->state);
5530 			set_bit(STRIPE_HANDLE, &sh->state);
5531 		}
5532 		list_add(&sh->lru, &stripes);
5533 	}
5534 	spin_lock_irq(&conf->device_lock);
5535 	if (mddev->reshape_backwards)
5536 		conf->reshape_progress -= reshape_sectors * new_data_disks;
5537 	else
5538 		conf->reshape_progress += reshape_sectors * new_data_disks;
5539 	spin_unlock_irq(&conf->device_lock);
5540 	/* OK, those stripes are ready. We can start scheduling
5541 	 * reads on the source stripes.
5542 	 * The source stripes are determined by mapping the first and last
5543 	 * block on the destination stripes.
5544 	 */
5545 	first_sector =
5546 		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
5547 				     1, &dd_idx, NULL);
5548 	last_sector =
5549 		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
5550 					    * new_data_disks - 1),
5551 				     1, &dd_idx, NULL);
5552 	if (last_sector >= mddev->dev_sectors)
5553 		last_sector = mddev->dev_sectors - 1;
5554 	while (first_sector <= last_sector) {
5555 		sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1);
5556 		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
5557 		set_bit(STRIPE_HANDLE, &sh->state);
5558 		raid5_release_stripe(sh);
5559 		first_sector += STRIPE_SECTORS;
5560 	}
5561 	/* Now that the sources are clearly marked, we can release
5562 	 * the destination stripes
5563 	 */
5564 	while (!list_empty(&stripes)) {
5565 		sh = list_entry(stripes.next, struct stripe_head, lru);
5566 		list_del_init(&sh->lru);
5567 		raid5_release_stripe(sh);
5568 	}
5569 	/* If this takes us to the resync_max point where we have to pause,
5570 	 * then we need to write out the superblock.
5571 	 */
5572 	sector_nr += reshape_sectors;
5573 	retn = reshape_sectors;
5574 finish:
5575 	if (mddev->curr_resync_completed > mddev->resync_max ||
5576 	    (sector_nr - mddev->curr_resync_completed) * 2
5577 	    >= mddev->resync_max - mddev->curr_resync_completed) {
5578 		/* Cannot proceed until we've updated the superblock... */
5579 		wait_event(conf->wait_for_overlap,
5580 			   atomic_read(&conf->reshape_stripes) == 0
5581 			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
5582 		if (atomic_read(&conf->reshape_stripes) != 0)
5583 			goto ret;
5584 		mddev->reshape_position = conf->reshape_progress;
5585 		mddev->curr_resync_completed = sector_nr;
5586 		conf->reshape_checkpoint = jiffies;
5587 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
5588 		md_wakeup_thread(mddev->thread);
5589 		wait_event(mddev->sb_wait,
5590 			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
5591 			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
5592 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5593 			goto ret;
5594 		spin_lock_irq(&conf->device_lock);
5595 		conf->reshape_safe = mddev->reshape_position;
5596 		spin_unlock_irq(&conf->device_lock);
5597 		wake_up(&conf->wait_for_overlap);
5598 		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5599 	}
5600 ret:
5601 	return retn;
5602 }
5603 
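/*
 * Resync/recovery entry point, called repeatedly by md_do_sync().  Handles
 * one stripe (STRIPE_SECTORS) per call, hands over to reshape_request()
 * while a reshape is in progress, and uses the bitmap to skip ranges that
 * are already known to be in sync.
 */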
5604 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
5605 					  int *skipped)
5606 {
5607 	struct r5conf *conf = mddev->private;
5608 	struct stripe_head *sh;
5609 	sector_t max_sector = mddev->dev_sectors;
5610 	sector_t sync_blocks;
5611 	int still_degraded = 0;
5612 	int i;
5613 
5614 	if (sector_nr >= max_sector) {
5615 		/* just being told to finish up .. nothing much to do */
5616 
5617 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
5618 			end_reshape(conf);
5619 			return 0;
5620 		}
5621 
5622 		if (mddev->curr_resync < max_sector) /* aborted */
5623 			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
5624 					&sync_blocks, 1);
5625 		else /* completed sync */
5626 			conf->fullsync = 0;
5627 		bitmap_close_sync(mddev->bitmap);
5628 
5629 		return 0;
5630 	}
5631 
5632 	/* Allow raid5_quiesce to complete */
5633 	wait_event(conf->wait_for_overlap, conf->quiesce != 2);
5634 
5635 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5636 		return reshape_request(mddev, sector_nr, skipped);
5637 
5638 	/* No need to check resync_max as we never do more than one
5639 	 * stripe, and as resync_max will always be on a chunk boundary,
5640 	 * if the check in md_do_sync didn't fire, there is no chance
5641 	 * of overstepping resync_max here
5642 	 */
5643 
5644 	/* if there are too many failed drives and we are trying
5645 	 * to resync, then assert that we are finished, because there is
5646 	 * nothing we can do.
5647 	 */
5648 	if (mddev->degraded >= conf->max_degraded &&
5649 	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5650 		sector_t rv = mddev->dev_sectors - sector_nr;
5651 		*skipped = 1;
5652 		return rv;
5653 	}
5654 	if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
5655 	    !conf->fullsync &&
5656 	    !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
5657 	    sync_blocks >= STRIPE_SECTORS) {
5658 		/* we can skip this block, and probably more */
5659 		sync_blocks /= STRIPE_SECTORS;
5660 		*skipped = 1;
5661 		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
5662 	}
5663 
5664 	bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
5665 
5666 	sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
5667 	if (sh == NULL) {
5668 		sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0);
5669 		/* make sure we don't swamp the stripe cache if someone else
5670 		 * is trying to get access
5671 		 */
5672 		schedule_timeout_uninterruptible(1);
5673 	}
5674 	/* Need to check if array will still be degraded after recovery/resync
5675 	 * Note that in case of > 1 drive failures it's possible we're rebuilding
5676 	 * one drive while leaving another faulty drive in the array.
5677 	 */
5678 	rcu_read_lock();
5679 	for (i = 0; i < conf->raid_disks; i++) {
5680 		struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
5681 
5682 		if (rdev == NULL || test_bit(Faulty, &rdev->flags))
5683 			still_degraded = 1;
5684 	}
5685 	rcu_read_unlock();
5686 
5687 	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
5688 
5689 	set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
5690 	set_bit(STRIPE_HANDLE, &sh->state);
5691 
5692 	raid5_release_stripe(sh);
5693 
5694 	return STRIPE_SECTORS;
5695 }
5696 
5697 static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
5698 {
5699 	/* We may not be able to submit a whole bio at once as there
5700 	 * may not be enough stripe_heads available.
5701 	 * We cannot pre-allocate enough stripe_heads as we may need
5702 	 * more than exist in the cache (if we allow ever larger chunks).
5703 	 * So we do one stripe head at a time and record in
5704 	 * ->bi_phys_segments how many have been done.
5705 	 *
5706 	 * We *know* that this entire raid_bio is in one chunk, so
5707 	 * there will be only one 'dd_idx' and we only need one call to raid5_compute_sector.
5708 	 */
5709 	struct stripe_head *sh;
5710 	int dd_idx;
5711 	sector_t sector, logical_sector, last_sector;
5712 	int scnt = 0;
5713 	int remaining;
5714 	int handled = 0;
5715 
5716 	logical_sector = raid_bio->bi_iter.bi_sector &
5717 		~((sector_t)STRIPE_SECTORS-1);
5718 	sector = raid5_compute_sector(conf, logical_sector,
5719 				      0, &dd_idx, NULL);
5720 	last_sector = bio_end_sector(raid_bio);
5721 
5722 	for (; logical_sector < last_sector;
5723 	     logical_sector += STRIPE_SECTORS,
5724 		     sector += STRIPE_SECTORS,
5725 		     scnt++) {
5726 
5727 		if (scnt < raid5_bi_processed_stripes(raid_bio))
5728 			/* already done this stripe */
5729 			continue;
5730 
5731 		sh = raid5_get_active_stripe(conf, sector, 0, 1, 1);
5732 
5733 		if (!sh) {
5734 			/* failed to get a stripe - must wait */
5735 			raid5_set_bi_processed_stripes(raid_bio, scnt);
5736 			conf->retry_read_aligned = raid_bio;
5737 			return handled;
5738 		}
5739 
5740 		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
5741 			raid5_release_stripe(sh);
5742 			raid5_set_bi_processed_stripes(raid_bio, scnt);
5743 			conf->retry_read_aligned = raid_bio;
5744 			return handled;
5745 		}
5746 
5747 		set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
5748 		handle_stripe(sh);
5749 		raid5_release_stripe(sh);
5750 		handled++;
5751 	}
5752 	remaining = raid5_dec_bi_active_stripes(raid_bio);
5753 	if (remaining == 0) {
5754 		trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
5755 					 raid_bio, 0);
5756 		bio_endio(raid_bio);
5757 	}
5758 	if (atomic_dec_and_test(&conf->active_aligned_reads))
5759 		wake_up(&conf->wait_for_quiescent);
5760 	return handled;
5761 }
5762 
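/*
 * Pull up to MAX_STRIPE_BATCH stripes off the priority lists (called with
 * device_lock held), handle them with the lock dropped, then re-take the
 * lock and queue them on the temporary inactive lists.  Returns the number
 * of stripes handled.
 */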
5763 static int handle_active_stripes(struct r5conf *conf, int group,
5764 				 struct r5worker *worker,
5765 				 struct list_head *temp_inactive_list)
5766 {
5767 	struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
5768 	int i, batch_size = 0, hash;
5769 	bool release_inactive = false;
5770 
5771 	while (batch_size < MAX_STRIPE_BATCH &&
5772 			(sh = __get_priority_stripe(conf, group)) != NULL)
5773 		batch[batch_size++] = sh;
5774 
5775 	if (batch_size == 0) {
5776 		for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5777 			if (!list_empty(temp_inactive_list + i))
5778 				break;
5779 		if (i == NR_STRIPE_HASH_LOCKS) {
5780 			spin_unlock_irq(&conf->device_lock);
5781 			r5l_flush_stripe_to_raid(conf->log);
5782 			spin_lock_irq(&conf->device_lock);
5783 			return batch_size;
5784 		}
5785 		release_inactive = true;
5786 	}
5787 	spin_unlock_irq(&conf->device_lock);
5788 
5789 	release_inactive_stripe_list(conf, temp_inactive_list,
5790 				     NR_STRIPE_HASH_LOCKS);
5791 
5792 	r5l_flush_stripe_to_raid(conf->log);
5793 	if (release_inactive) {
5794 		spin_lock_irq(&conf->device_lock);
5795 		return 0;
5796 	}
5797 
5798 	for (i = 0; i < batch_size; i++)
5799 		handle_stripe(batch[i]);
5800 	r5l_write_stripe_run(conf->log);
5801 
5802 	cond_resched();
5803 
5804 	spin_lock_irq(&conf->device_lock);
5805 	for (i = 0; i < batch_size; i++) {
5806 		hash = batch[i]->hash_lock_index;
5807 		__release_stripe(conf, batch[i], &temp_inactive_list[hash]);
5808 	}
5809 	return batch_size;
5810 }
5811 
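/*
 * Work function for the raid5_wq worker threads: drain any stripes released
 * from other contexts and handle batches of active stripes for this
 * worker's group until no work remains.
 */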
5812 static void raid5_do_work(struct work_struct *work)
5813 {
5814 	struct r5worker *worker = container_of(work, struct r5worker, work);
5815 	struct r5worker_group *group = worker->group;
5816 	struct r5conf *conf = group->conf;
5817 	int group_id = group - conf->worker_groups;
5818 	int handled;
5819 	struct blk_plug plug;
5820 
5821 	pr_debug("+++ raid5worker active\n");
5822 
5823 	blk_start_plug(&plug);
5824 	handled = 0;
5825 	spin_lock_irq(&conf->device_lock);
5826 	while (1) {
5827 		int batch_size, released;
5828 
5829 		released = release_stripe_list(conf, worker->temp_inactive_list);
5830 
5831 		batch_size = handle_active_stripes(conf, group_id, worker,
5832 						   worker->temp_inactive_list);
5833 		worker->working = false;
5834 		if (!batch_size && !released)
5835 			break;
5836 		handled += batch_size;
5837 	}
5838 	pr_debug("%d stripes handled\n", handled);
5839 
5840 	spin_unlock_irq(&conf->device_lock);
5841 	blk_finish_plug(&plug);
5842 
5843 	pr_debug("--- raid5worker inactive\n");
5844 }
5845 
5846 /*
5847  * This is our raid5 kernel thread.
5848  *
5849  * We scan the hash table for stripes which can be handled now.
5850  * During the scan, completed stripes are saved for us by the interrupt
5851  * handler, so that they will not have to wait for our next wakeup.
5852  */
5853 static void raid5d(struct md_thread *thread)
5854 {
5855 	struct mddev *mddev = thread->mddev;
5856 	struct r5conf *conf = mddev->private;
5857 	int handled;
5858 	struct blk_plug plug;
5859 
5860 	pr_debug("+++ raid5d active\n");
5861 
5862 	md_check_recovery(mddev);
5863 
5864 	if (!bio_list_empty(&conf->return_bi) &&
5865 	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
5866 		struct bio_list tmp = BIO_EMPTY_LIST;
5867 		spin_lock_irq(&conf->device_lock);
5868 		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
5869 			bio_list_merge(&tmp, &conf->return_bi);
5870 			bio_list_init(&conf->return_bi);
5871 		}
5872 		spin_unlock_irq(&conf->device_lock);
5873 		return_io(&tmp);
5874 	}
5875 
5876 	blk_start_plug(&plug);
5877 	handled = 0;
5878 	spin_lock_irq(&conf->device_lock);
5879 	while (1) {
5880 		struct bio *bio;
5881 		int batch_size, released;
5882 
5883 		released = release_stripe_list(conf, conf->temp_inactive_list);
5884 		if (released)
5885 			clear_bit(R5_DID_ALLOC, &conf->cache_state);
5886 
5887 		if (!list_empty(&conf->bitmap_list)) {
5889 			/* Now is a good time to flush some bitmap updates */
5890 			conf->seq_flush++;
5891 			spin_unlock_irq(&conf->device_lock);
5892 			bitmap_unplug(mddev->bitmap);
5893 			spin_lock_irq(&conf->device_lock);
5894 			conf->seq_write = conf->seq_flush;
5895 			activate_bit_delay(conf, conf->temp_inactive_list);
5896 		}
5897 		raid5_activate_delayed(conf);
5898 
5899 		while ((bio = remove_bio_from_retry(conf))) {
5900 			int ok;
5901 			spin_unlock_irq(&conf->device_lock);
5902 			ok = retry_aligned_read(conf, bio);
5903 			spin_lock_irq(&conf->device_lock);
5904 			if (!ok)
5905 				break;
5906 			handled++;
5907 		}
5908 
5909 		batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
5910 						   conf->temp_inactive_list);
5911 		if (!batch_size && !released)
5912 			break;
5913 		handled += batch_size;
5914 
5915 		if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
5916 			spin_unlock_irq(&conf->device_lock);
5917 			md_check_recovery(mddev);
5918 			spin_lock_irq(&conf->device_lock);
5919 		}
5920 	}
5921 	pr_debug("%d stripes handled\n", handled);
5922 
5923 	spin_unlock_irq(&conf->device_lock);
5924 	if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
5925 	    mutex_trylock(&conf->cache_size_mutex)) {
5926 		grow_one_stripe(conf, __GFP_NOWARN);
5927 		/* Set flag even if allocation failed.  This helps
5928 		 * slow down allocation requests when memory is short
5929 		 */
5930 		set_bit(R5_DID_ALLOC, &conf->cache_state);
5931 		mutex_unlock(&conf->cache_size_mutex);
5932 	}
5933 
5934 	r5l_flush_stripe_to_raid(conf->log);
5935 
5936 	async_tx_issue_pending_all();
5937 	blk_finish_plug(&plug);
5938 
5939 	pr_debug("--- raid5d inactive\n");
5940 }
5941 
5942 static ssize_t
5943 raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
5944 {
5945 	struct r5conf *conf;
5946 	int ret = 0;
5947 	spin_lock(&mddev->lock);
5948 	conf = mddev->private;
5949 	if (conf)
5950 		ret = sprintf(page, "%d\n", conf->min_nr_stripes);
5951 	spin_unlock(&mddev->lock);
5952 	return ret;
5953 }
5954 
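/*
 * Resize the stripe cache: record the new minimum and grow or shrink
 * max_nr_stripes towards it.  Exported so that other users of this
 * personality (e.g. dm-raid) can resize the cache as well.
 */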
5955 int
5956 raid5_set_cache_size(struct mddev *mddev, int size)
5957 {
5958 	struct r5conf *conf = mddev->private;
5959 	int err;
5960 
5961 	if (size <= 16 || size > 32768)
5962 		return -EINVAL;
5963 
5964 	conf->min_nr_stripes = size;
5965 	mutex_lock(&conf->cache_size_mutex);
5966 	while (size < conf->max_nr_stripes &&
5967 	       drop_one_stripe(conf))
5968 		;
5969 	mutex_unlock(&conf->cache_size_mutex);
5970 
5972 	err = md_allow_write(mddev);
5973 	if (err)
5974 		return err;
5975 
5976 	mutex_lock(&conf->cache_size_mutex);
5977 	while (size > conf->max_nr_stripes)
5978 		if (!grow_one_stripe(conf, GFP_KERNEL))
5979 			break;
5980 	mutex_unlock(&conf->cache_size_mutex);
5981 
5982 	return 0;
5983 }
5984 EXPORT_SYMBOL(raid5_set_cache_size);
5985 
5986 static ssize_t
5987 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
5988 {
5989 	struct r5conf *conf;
5990 	unsigned long new;
5991 	int err;
5992 
5993 	if (len >= PAGE_SIZE)
5994 		return -EINVAL;
5995 	if (kstrtoul(page, 10, &new))
5996 		return -EINVAL;
5997 	err = mddev_lock(mddev);
5998 	if (err)
5999 		return err;
6000 	conf = mddev->private;
6001 	if (!conf)
6002 		err = -ENODEV;
6003 	else
6004 		err = raid5_set_cache_size(mddev, new);
6005 	mddev_unlock(mddev);
6006 
6007 	return err ?: len;
6008 }
6009 
6010 static struct md_sysfs_entry
6011 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
6012 				raid5_show_stripe_cache_size,
6013 				raid5_store_stripe_cache_size);
6014 
6015 static ssize_t
6016 raid5_show_rmw_level(struct mddev  *mddev, char *page)
6017 {
6018 	struct r5conf *conf = mddev->private;
6019 	if (conf)
6020 		return sprintf(page, "%d\n", conf->rmw_level);
6021 	else
6022 		return 0;
6023 }
6024 
6025 static ssize_t
6026 raid5_store_rmw_level(struct mddev  *mddev, const char *page, size_t len)
6027 {
6028 	struct r5conf *conf = mddev->private;
6029 	unsigned long new;
6030 
6031 	if (!conf)
6032 		return -ENODEV;
6033 
6034 	if (len >= PAGE_SIZE)
6035 		return -EINVAL;
6036 
6037 	if (kstrtoul(page, 10, &new))
6038 		return -EINVAL;
6039 
6040 	if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
6041 		return -EINVAL;
6042 
6043 	if (new != PARITY_DISABLE_RMW &&
6044 	    new != PARITY_ENABLE_RMW &&
6045 	    new != PARITY_PREFER_RMW)
6046 		return -EINVAL;
6047 
6048 	conf->rmw_level = new;
6049 	return len;
6050 }
6051 
6052 static struct md_sysfs_entry
6053 raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
6054 			 raid5_show_rmw_level,
6055 			 raid5_store_rmw_level);
6056 
6058 static ssize_t
6059 raid5_show_preread_threshold(struct mddev *mddev, char *page)
6060 {
6061 	struct r5conf *conf;
6062 	int ret = 0;
6063 	spin_lock(&mddev->lock);
6064 	conf = mddev->private;
6065 	if (conf)
6066 		ret = sprintf(page, "%d\n", conf->bypass_threshold);
6067 	spin_unlock(&mddev->lock);
6068 	return ret;
6069 }
6070 
6071 static ssize_t
6072 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
6073 {
6074 	struct r5conf *conf;
6075 	unsigned long new;
6076 	int err;
6077 
6078 	if (len >= PAGE_SIZE)
6079 		return -EINVAL;
6080 	if (kstrtoul(page, 10, &new))
6081 		return -EINVAL;
6082 
6083 	err = mddev_lock(mddev);
6084 	if (err)
6085 		return err;
6086 	conf = mddev->private;
6087 	if (!conf)
6088 		err = -ENODEV;
6089 	else if (new > conf->min_nr_stripes)
6090 		err = -EINVAL;
6091 	else
6092 		conf->bypass_threshold = new;
6093 	mddev_unlock(mddev);
6094 	return err ?: len;
6095 }
6096 
6097 static struct md_sysfs_entry
6098 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
6099 					S_IRUGO | S_IWUSR,
6100 					raid5_show_preread_threshold,
6101 					raid5_store_preread_threshold);
6102 
6103 static ssize_t
6104 raid5_show_skip_copy(struct mddev *mddev, char *page)
6105 {
6106 	struct r5conf *conf;
6107 	int ret = 0;
6108 	spin_lock(&mddev->lock);
6109 	conf = mddev->private;
6110 	if (conf)
6111 		ret = sprintf(page, "%d\n", conf->skip_copy);
6112 	spin_unlock(&mddev->lock);
6113 	return ret;
6114 }
6115 
6116 static ssize_t
6117 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
6118 {
6119 	struct r5conf *conf;
6120 	unsigned long new;
6121 	int err;
6122 
6123 	if (len >= PAGE_SIZE)
6124 		return -EINVAL;
6125 	if (kstrtoul(page, 10, &new))
6126 		return -EINVAL;
6127 	new = !!new;
6128 
6129 	err = mddev_lock(mddev);
6130 	if (err)
6131 		return err;
6132 	conf = mddev->private;
6133 	if (!conf)
6134 		err = -ENODEV;
6135 	else if (new != conf->skip_copy) {
6136 		mddev_suspend(mddev);
6137 		conf->skip_copy = new;
6138 		if (new)
6139 			mddev->queue->backing_dev_info.capabilities |=
6140 				BDI_CAP_STABLE_WRITES;
6141 		else
6142 			mddev->queue->backing_dev_info.capabilities &=
6143 				~BDI_CAP_STABLE_WRITES;
6144 		mddev_resume(mddev);
6145 	}
6146 	mddev_unlock(mddev);
6147 	return err ?: len;
6148 }
6149 
6150 static struct md_sysfs_entry
6151 raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
6152 					raid5_show_skip_copy,
6153 					raid5_store_skip_copy);
6154 
6155 static ssize_t
6156 stripe_cache_active_show(struct mddev *mddev, char *page)
6157 {
6158 	struct r5conf *conf = mddev->private;
6159 	if (conf)
6160 		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
6161 	else
6162 		return 0;
6163 }
6164 
6165 static struct md_sysfs_entry
6166 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
6167 
6168 static ssize_t
6169 raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
6170 {
6171 	struct r5conf *conf;
6172 	int ret = 0;
6173 	spin_lock(&mddev->lock);
6174 	conf = mddev->private;
6175 	if (conf)
6176 		ret = sprintf(page, "%d\n", conf->worker_cnt_per_group);
6177 	spin_unlock(&mddev->lock);
6178 	return ret;
6179 }
6180 
6181 static int alloc_thread_groups(struct r5conf *conf, int cnt,
6182 			       int *group_cnt,
6183 			       int *worker_cnt_per_group,
6184 			       struct r5worker_group **worker_groups);
6185 static ssize_t
6186 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
6187 {
6188 	struct r5conf *conf;
6189 	unsigned long new;
6190 	int err;
6191 	struct r5worker_group *new_groups, *old_groups;
6192 	int group_cnt, worker_cnt_per_group;
6193 
6194 	if (len >= PAGE_SIZE)
6195 		return -EINVAL;
6196 	if (kstrtoul(page, 10, &new))
6197 		return -EINVAL;
6198 
6199 	err = mddev_lock(mddev);
6200 	if (err)
6201 		return err;
6202 	conf = mddev->private;
6203 	if (!conf)
6204 		err = -ENODEV;
6205 	else if (new != conf->worker_cnt_per_group) {
6206 		mddev_suspend(mddev);
6207 
6208 		old_groups = conf->worker_groups;
6209 		if (old_groups)
6210 			flush_workqueue(raid5_wq);
6211 
6212 		err = alloc_thread_groups(conf, new,
6213 					  &group_cnt, &worker_cnt_per_group,
6214 					  &new_groups);
6215 		if (!err) {
6216 			spin_lock_irq(&conf->device_lock);
6217 			conf->group_cnt = group_cnt;
6218 			conf->worker_cnt_per_group = worker_cnt_per_group;
6219 			conf->worker_groups = new_groups;
6220 			spin_unlock_irq(&conf->device_lock);
6221 
6222 			if (old_groups)
6223 				kfree(old_groups[0].workers);
6224 			kfree(old_groups);
6225 		}
6226 		mddev_resume(mddev);
6227 	}
6228 	mddev_unlock(mddev);
6229 
6230 	return err ?: len;
6231 }
6232 
6233 static struct md_sysfs_entry
6234 raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
6235 				raid5_show_group_thread_cnt,
6236 				raid5_store_group_thread_cnt);
6237 
6238 static struct attribute *raid5_attrs[] =  {
6239 	&raid5_stripecache_size.attr,
6240 	&raid5_stripecache_active.attr,
6241 	&raid5_preread_bypass_threshold.attr,
6242 	&raid5_group_thread_cnt.attr,
6243 	&raid5_skip_copy.attr,
6244 	&raid5_rmw_level.attr,
6245 	NULL,
6246 };
6247 static struct attribute_group raid5_attrs_group = {
6248 	.name = NULL,
6249 	.attrs = raid5_attrs,
6250 };
6251 
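/*
 * Allocate one r5worker_group per NUMA node, each with 'cnt' workers.
 * cnt == 0 disables the worker threads and leaves all stripe handling to
 * raid5d.
 */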
6252 static int alloc_thread_groups(struct r5conf *conf, int cnt,
6253 			       int *group_cnt,
6254 			       int *worker_cnt_per_group,
6255 			       struct r5worker_group **worker_groups)
6256 {
6257 	int i, j, k;
6258 	ssize_t size;
6259 	struct r5worker *workers;
6260 
6261 	*worker_cnt_per_group = cnt;
6262 	if (cnt == 0) {
6263 		*group_cnt = 0;
6264 		*worker_groups = NULL;
6265 		return 0;
6266 	}
6267 	*group_cnt = num_possible_nodes();
6268 	size = sizeof(struct r5worker) * cnt;
6269 	workers = kzalloc(size * *group_cnt, GFP_NOIO);
6270 	*worker_groups = kzalloc(sizeof(struct r5worker_group) *
6271 				*group_cnt, GFP_NOIO);
6272 	if (!*worker_groups || !workers) {
6273 		kfree(workers);
6274 		kfree(*worker_groups);
6275 		return -ENOMEM;
6276 	}
6277 
6278 	for (i = 0; i < *group_cnt; i++) {
6279 		struct r5worker_group *group;
6280 
6281 		group = &(*worker_groups)[i];
6282 		INIT_LIST_HEAD(&group->handle_list);
6283 		group->conf = conf;
6284 		group->workers = workers + i * cnt;
6285 
6286 		for (j = 0; j < cnt; j++) {
6287 			struct r5worker *worker = group->workers + j;
6288 			worker->group = group;
6289 			INIT_WORK(&worker->work, raid5_do_work);
6290 
6291 			for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
6292 				INIT_LIST_HEAD(worker->temp_inactive_list + k);
6293 		}
6294 	}
6295 
6296 	return 0;
6297 }
6298 
6299 static void free_thread_groups(struct r5conf *conf)
6300 {
6301 	if (conf->worker_groups)
6302 		kfree(conf->worker_groups[0].workers);
6303 	kfree(conf->worker_groups);
6304 	conf->worker_groups = NULL;
6305 }
6306 
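/*
 * Report the array capacity for the given per-device size and disk count,
 * rounded down to a whole number of chunks in both the old and new
 * geometry.
 */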
6307 static sector_t
6308 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
6309 {
6310 	struct r5conf *conf = mddev->private;
6311 
6312 	if (!sectors)
6313 		sectors = mddev->dev_sectors;
6314 	if (!raid_disks)
6315 		/* size is defined by the smaller of the previous and new sizes */
6316 		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
6317 
6318 	sectors &= ~((sector_t)conf->chunk_sectors - 1);
6319 	sectors &= ~((sector_t)conf->prev_chunk_sectors - 1);
6320 	return sectors * (raid_disks - conf->max_degraded);
6321 }
6322 
6323 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
6324 {
6325 	safe_put_page(percpu->spare_page);
6326 	if (percpu->scribble)
6327 		flex_array_free(percpu->scribble);
6328 	percpu->spare_page = NULL;
6329 	percpu->scribble = NULL;
6330 }
6331 
6332 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
6333 {
6334 	if (conf->level == 6 && !percpu->spare_page)
6335 		percpu->spare_page = alloc_page(GFP_KERNEL);
6336 	if (!percpu->scribble)
6337 		percpu->scribble = scribble_alloc(max(conf->raid_disks,
6338 						      conf->previous_raid_disks),
6339 						  max(conf->chunk_sectors,
6340 						      conf->prev_chunk_sectors)
6341 						   / STRIPE_SECTORS,
6342 						  GFP_KERNEL);
6343 
6344 	if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
6345 		free_scratch_buffer(conf, percpu);
6346 		return -ENOMEM;
6347 	}
6348 
6349 	return 0;
6350 }
6351 
6352 static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node)
6353 {
6354 	struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
6355 
6356 	free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6357 	return 0;
6358 }
6359 
6360 static void raid5_free_percpu(struct r5conf *conf)
6361 {
6362 	if (!conf->percpu)
6363 		return;
6364 
6365 	cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
6366 	free_percpu(conf->percpu);
6367 }
6368 
6369 static void free_conf(struct r5conf *conf)
6370 {
6371 	if (conf->log)
6372 		r5l_exit_log(conf->log);
6373 	if (conf->shrinker.nr_deferred)
6374 		unregister_shrinker(&conf->shrinker);
6375 
6376 	free_thread_groups(conf);
6377 	shrink_stripes(conf);
6378 	raid5_free_percpu(conf);
6379 	kfree(conf->disks);
6380 	kfree(conf->stripe_hashtbl);
6381 	kfree(conf);
6382 }
6383 
6384 static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
6385 {
6386 	struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
6387 	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
6388 
6389 	if (alloc_scratch_buffer(conf, percpu)) {
6390 		pr_err("%s: failed memory allocation for cpu%u\n",
6391 		       __func__, cpu);
6392 		return -ENOMEM;
6393 	}
6394 	return 0;
6395 }
6396 
6397 static int raid5_alloc_percpu(struct r5conf *conf)
6398 {
6399 	int err = 0;
6400 
6401 	conf->percpu = alloc_percpu(struct raid5_percpu);
6402 	if (!conf->percpu)
6403 		return -ENOMEM;
6404 
6405 	err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
6406 	if (!err) {
6407 		conf->scribble_disks = max(conf->raid_disks,
6408 			conf->previous_raid_disks);
6409 		conf->scribble_sectors = max(conf->chunk_sectors,
6410 			conf->prev_chunk_sectors);
6411 	}
6412 	return err;
6413 }
6414 
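/*
 * Shrinker callbacks: raid5_cache_scan() frees inactive stripe_heads until
 * the cache is back down to min_nr_stripes (or nothing more can be freed),
 * while raid5_cache_count() reports how many stripes could be reclaimed.
 */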
6415 static unsigned long raid5_cache_scan(struct shrinker *shrink,
6416 				      struct shrink_control *sc)
6417 {
6418 	struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
6419 	unsigned long ret = SHRINK_STOP;
6420 
6421 	if (mutex_trylock(&conf->cache_size_mutex)) {
6422 		ret= 0;
6423 		while (ret < sc->nr_to_scan &&
6424 		       conf->max_nr_stripes > conf->min_nr_stripes) {
6425 			if (drop_one_stripe(conf) == 0) {
6426 				ret = SHRINK_STOP;
6427 				break;
6428 			}
6429 			ret++;
6430 		}
6431 		mutex_unlock(&conf->cache_size_mutex);
6432 	}
6433 	return ret;
6434 }
6435 
6436 static unsigned long raid5_cache_count(struct shrinker *shrink,
6437 				       struct shrink_control *sc)
6438 {
6439 	struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
6440 
6441 	if (conf->max_nr_stripes < conf->min_nr_stripes)
6442 		/* unlikely, but not impossible */
6443 		return 0;
6444 	return conf->max_nr_stripes - conf->min_nr_stripes;
6445 }
6446 
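/*
 * Allocate and initialise the r5conf for this array from the mddev
 * geometry: per-cpu scratch buffers, the stripe cache, worker groups and
 * the raid5d thread.  Returns an ERR_PTR on failure.
 */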
6447 static struct r5conf *setup_conf(struct mddev *mddev)
6448 {
6449 	struct r5conf *conf;
6450 	int raid_disk, memory, max_disks;
6451 	struct md_rdev *rdev;
6452 	struct disk_info *disk;
6453 	char pers_name[6];
6454 	int i;
6455 	int group_cnt, worker_cnt_per_group;
6456 	struct r5worker_group *new_group;
6457 
6458 	if (mddev->new_level != 5
6459 	    && mddev->new_level != 4
6460 	    && mddev->new_level != 6) {
6461 		printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
6462 		       mdname(mddev), mddev->new_level);
6463 		return ERR_PTR(-EIO);
6464 	}
6465 	if ((mddev->new_level == 5
6466 	     && !algorithm_valid_raid5(mddev->new_layout)) ||
6467 	    (mddev->new_level == 6
6468 	     && !algorithm_valid_raid6(mddev->new_layout))) {
6469 		printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
6470 		       mdname(mddev), mddev->new_layout);
6471 		return ERR_PTR(-EIO);
6472 	}
6473 	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
6474 		printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
6475 		       mdname(mddev), mddev->raid_disks);
6476 		return ERR_PTR(-EINVAL);
6477 	}
6478 
6479 	if (!mddev->new_chunk_sectors ||
6480 	    (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
6481 	    !is_power_of_2(mddev->new_chunk_sectors)) {
6482 		printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
6483 		       mdname(mddev), mddev->new_chunk_sectors << 9);
6484 		return ERR_PTR(-EINVAL);
6485 	}
6486 
6487 	conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
6488 	if (conf == NULL)
6489 		goto abort;
6490 	/* Don't enable multi-threading by default */
6491 	if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
6492 				 &new_group)) {
6493 		conf->group_cnt = group_cnt;
6494 		conf->worker_cnt_per_group = worker_cnt_per_group;
6495 		conf->worker_groups = new_group;
6496 	} else
6497 		goto abort;
6498 	spin_lock_init(&conf->device_lock);
6499 	seqcount_init(&conf->gen_lock);
6500 	mutex_init(&conf->cache_size_mutex);
6501 	init_waitqueue_head(&conf->wait_for_quiescent);
6502 	init_waitqueue_head(&conf->wait_for_stripe);
6503 	init_waitqueue_head(&conf->wait_for_overlap);
6504 	INIT_LIST_HEAD(&conf->handle_list);
6505 	INIT_LIST_HEAD(&conf->hold_list);
6506 	INIT_LIST_HEAD(&conf->delayed_list);
6507 	INIT_LIST_HEAD(&conf->bitmap_list);
6508 	bio_list_init(&conf->return_bi);
6509 	init_llist_head(&conf->released_stripes);
6510 	atomic_set(&conf->active_stripes, 0);
6511 	atomic_set(&conf->preread_active_stripes, 0);
6512 	atomic_set(&conf->active_aligned_reads, 0);
6513 	conf->bypass_threshold = BYPASS_THRESHOLD;
6514 	conf->recovery_disabled = mddev->recovery_disabled - 1;
6515 
6516 	conf->raid_disks = mddev->raid_disks;
6517 	if (mddev->reshape_position == MaxSector)
6518 		conf->previous_raid_disks = mddev->raid_disks;
6519 	else
6520 		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
6521 	max_disks = max(conf->raid_disks, conf->previous_raid_disks);
6522 
6523 	conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
6524 			      GFP_KERNEL);
6525 	if (!conf->disks)
6526 		goto abort;
6527 
6528 	conf->mddev = mddev;
6529 
6530 	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
6531 		goto abort;
6532 
6533 	/* We init hash_locks[0] separately so that it can be used
6534 	 * as the reference lock in the spin_lock_nest_lock() call
6535 	 * in lock_all_device_hash_locks_irq in order to convince
6536 	 * lockdep that we know what we are doing.
6537 	 */
6538 	spin_lock_init(conf->hash_locks);
6539 	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
6540 		spin_lock_init(conf->hash_locks + i);
6541 
6542 	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
6543 		INIT_LIST_HEAD(conf->inactive_list + i);
6544 
6545 	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
6546 		INIT_LIST_HEAD(conf->temp_inactive_list + i);
6547 
6548 	conf->level = mddev->new_level;
6549 	conf->chunk_sectors = mddev->new_chunk_sectors;
6550 	if (raid5_alloc_percpu(conf) != 0)
6551 		goto abort;
6552 
6553 	pr_debug("raid456: run(%s) called.\n", mdname(mddev));
6554 
6555 	rdev_for_each(rdev, mddev) {
6556 		raid_disk = rdev->raid_disk;
6557 		if (raid_disk >= max_disks
6558 		    || raid_disk < 0 || test_bit(Journal, &rdev->flags))
6559 			continue;
6560 		disk = conf->disks + raid_disk;
6561 
6562 		if (test_bit(Replacement, &rdev->flags)) {
6563 			if (disk->replacement)
6564 				goto abort;
6565 			disk->replacement = rdev;
6566 		} else {
6567 			if (disk->rdev)
6568 				goto abort;
6569 			disk->rdev = rdev;
6570 		}
6571 
6572 		if (test_bit(In_sync, &rdev->flags)) {
6573 			char b[BDEVNAME_SIZE];
6574 			printk(KERN_INFO "md/raid:%s: device %s operational as raid"
6575 			       " disk %d\n",
6576 			       mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
6577 		} else if (rdev->saved_raid_disk != raid_disk)
6578 			/* Cannot rely on bitmap to complete recovery */
6579 			conf->fullsync = 1;
6580 	}
6581 
6582 	conf->level = mddev->new_level;
6583 	if (conf->level == 6) {
6584 		conf->max_degraded = 2;
6585 		if (raid6_call.xor_syndrome)
6586 			conf->rmw_level = PARITY_ENABLE_RMW;
6587 		else
6588 			conf->rmw_level = PARITY_DISABLE_RMW;
6589 	} else {
6590 		conf->max_degraded = 1;
6591 		conf->rmw_level = PARITY_ENABLE_RMW;
6592 	}
6593 	conf->algorithm = mddev->new_layout;
6594 	conf->reshape_progress = mddev->reshape_position;
6595 	if (conf->reshape_progress != MaxSector) {
6596 		conf->prev_chunk_sectors = mddev->chunk_sectors;
6597 		conf->prev_algo = mddev->layout;
6598 	} else {
6599 		conf->prev_chunk_sectors = conf->chunk_sectors;
6600 		conf->prev_algo = conf->algorithm;
6601 	}
6602 
6603 	conf->min_nr_stripes = NR_STRIPES;
6604 	if (mddev->reshape_position != MaxSector) {
6605 		int stripes = max_t(int,
6606 			((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4,
6607 			((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
6608 		conf->min_nr_stripes = max(NR_STRIPES, stripes);
6609 		if (conf->min_nr_stripes != NR_STRIPES)
6610 			printk(KERN_INFO
6611 				"md/raid:%s: force stripe size %d for reshape\n",
6612 				mdname(mddev), conf->min_nr_stripes);
6613 	}
6614 	memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
6615 		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
6616 	atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
6617 	if (grow_stripes(conf, conf->min_nr_stripes)) {
6618 		printk(KERN_ERR
6619 		       "md/raid:%s: couldn't allocate %dkB for buffers\n",
6620 		       mdname(mddev), memory);
6621 		goto abort;
6622 	} else
6623 		printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
6624 		       mdname(mddev), memory);
6625 	/*
6626 	 * Losing a stripe head costs more than the time to refill it,
6627 	 * as it reduces the queue depth and so can hurt throughput.
6628 	 * So set it rather large, scaled by number of devices.
6629 	 */
6630 	conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
6631 	conf->shrinker.scan_objects = raid5_cache_scan;
6632 	conf->shrinker.count_objects = raid5_cache_count;
6633 	conf->shrinker.batch = 128;
6634 	conf->shrinker.flags = 0;
6635 	if (register_shrinker(&conf->shrinker)) {
6636 		printk(KERN_ERR
6637 		       "md/raid:%s: couldn't register shrinker.\n",
6638 		       mdname(mddev));
6639 		goto abort;
6640 	}
6641 
6642 	sprintf(pers_name, "raid%d", mddev->new_level);
6643 	conf->thread = md_register_thread(raid5d, mddev, pers_name);
6644 	if (!conf->thread) {
6645 		printk(KERN_ERR
6646 		       "md/raid:%s: couldn't allocate thread.\n",
6647 		       mdname(mddev));
6648 		goto abort;
6649 	}
6650 
6651 	return conf;
6652 
6653  abort:
6654 	if (conf) {
6655 		free_conf(conf);
6656 		return ERR_PTR(-EIO);
6657 	} else
6658 		return ERR_PTR(-ENOMEM);
6659 }
6660 
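/*
 * Return 1 if the device in slot 'raid_disk' holds only parity blocks in
 * the given layout, 0 otherwise.
 */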
6661 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
6662 {
6663 	switch (algo) {
6664 	case ALGORITHM_PARITY_0:
6665 		if (raid_disk < max_degraded)
6666 			return 1;
6667 		break;
6668 	case ALGORITHM_PARITY_N:
6669 		if (raid_disk >= raid_disks - max_degraded)
6670 			return 1;
6671 		break;
6672 	case ALGORITHM_PARITY_0_6:
6673 		if (raid_disk == 0 ||
6674 		    raid_disk == raid_disks - 1)
6675 			return 1;
6676 		break;
6677 	case ALGORITHM_LEFT_ASYMMETRIC_6:
6678 	case ALGORITHM_RIGHT_ASYMMETRIC_6:
6679 	case ALGORITHM_LEFT_SYMMETRIC_6:
6680 	case ALGORITHM_RIGHT_SYMMETRIC_6:
6681 		if (raid_disk == raid_disks - 1)
6682 			return 1;
6683 	}
6684 	return 0;
6685 }
6686 
6687 static int raid5_run(struct mddev *mddev)
6688 {
6689 	struct r5conf *conf;
6690 	int working_disks = 0;
6691 	int dirty_parity_disks = 0;
6692 	struct md_rdev *rdev;
6693 	struct md_rdev *journal_dev = NULL;
6694 	sector_t reshape_offset = 0;
6695 	int i;
6696 	long long min_offset_diff = 0;
6697 	int first = 1;
6698 
6699 	if (mddev->recovery_cp != MaxSector)
6700 		printk(KERN_NOTICE "md/raid:%s: not clean"
6701 		       " -- starting background reconstruction\n",
6702 		       mdname(mddev));
6703 
6704 	rdev_for_each(rdev, mddev) {
6705 		long long diff;
6706 
6707 		if (test_bit(Journal, &rdev->flags)) {
6708 			journal_dev = rdev;
6709 			continue;
6710 		}
6711 		if (rdev->raid_disk < 0)
6712 			continue;
6713 		diff = (rdev->new_data_offset - rdev->data_offset);
6714 		if (first) {
6715 			min_offset_diff = diff;
6716 			first = 0;
6717 		} else if (mddev->reshape_backwards &&
6718 			 diff < min_offset_diff)
6719 			min_offset_diff = diff;
6720 		else if (!mddev->reshape_backwards &&
6721 			 diff > min_offset_diff)
6722 			min_offset_diff = diff;
6723 	}
6724 
6725 	if (mddev->reshape_position != MaxSector) {
6726 		/* Check that we can continue the reshape.
6727 		 * Difficulties arise if the stripe we would write to
6728 		 * next is at or after the stripe we would read from next.
6729 		 * For a reshape that changes the number of devices, this
6730 		 * is only possible for a very short time, and mdadm makes
6731 		 * sure that time appears to have passed before assembling
6732 		 * the array.  So we fail if that time hasn't passed.
6733 		 * For a reshape that keeps the number of devices the same
6734 		 * mdadm must be monitoring the reshape and keeping the
6735 		 * critical areas read-only and backed up.  It will start
6736 		 * the array in read-only mode, so we check for that.
6737 		 */
6738 		sector_t here_new, here_old;
6739 		int old_disks;
6740 		int max_degraded = (mddev->level == 6 ? 2 : 1);
6741 		int chunk_sectors;
6742 		int new_data_disks;
6743 
6744 		if (journal_dev) {
6745 			printk(KERN_ERR "md/raid:%s: don't support reshape with journal - aborting.\n",
6746 			       mdname(mddev));
6747 			return -EINVAL;
6748 		}
6749 
6750 		if (mddev->new_level != mddev->level) {
6751 			printk(KERN_ERR "md/raid:%s: unsupported reshape "
6752 			       "required - aborting.\n",
6753 			       mdname(mddev));
6754 			return -EINVAL;
6755 		}
6756 		old_disks = mddev->raid_disks - mddev->delta_disks;
6757 		/* reshape_position must be on a new-stripe boundary, and one
6758 		 * further up in new geometry must map after here in old
6759 		 * geometry.
6760 		 * If the chunk sizes are different, then as we perform reshape
6761 		 * in units of the largest of the two, reshape_position needs
6762 		 * be a multiple of the largest chunk size times new data disks.
6763 		 */
6764 		here_new = mddev->reshape_position;
6765 		chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
6766 		new_data_disks = mddev->raid_disks - max_degraded;
6767 		if (sector_div(here_new, chunk_sectors * new_data_disks)) {
6768 			printk(KERN_ERR "md/raid:%s: reshape_position not "
6769 			       "on a stripe boundary\n", mdname(mddev));
6770 			return -EINVAL;
6771 		}
6772 		reshape_offset = here_new * chunk_sectors;
6773 		/* here_new is the stripe we will write to */
6774 		here_old = mddev->reshape_position;
6775 		sector_div(here_old, chunk_sectors * (old_disks-max_degraded));
6776 		/* here_old is the first stripe that we might need to read
6777 		 * from */
6778 		if (mddev->delta_disks == 0) {
6779 			/* We cannot be sure it is safe to start an in-place
6780 			 * reshape.  It is only safe if user-space is monitoring
6781 			 * and taking constant backups.
6782 			 * mdadm always starts a situation like this in
6783 			 * readonly mode so it can take control before
6784 			 * allowing any writes.  So just check for that.
6785 			 */
6786 			if (abs(min_offset_diff) >= mddev->chunk_sectors &&
6787 			    abs(min_offset_diff) >= mddev->new_chunk_sectors)
6788 				/* not really in-place - so OK */;
6789 			else if (mddev->ro == 0) {
6790 				printk(KERN_ERR "md/raid:%s: in-place reshape "
6791 				       "must be started in read-only mode "
6792 				       "- aborting\n",
6793 				       mdname(mddev));
6794 				return -EINVAL;
6795 			}
6796 		} else if (mddev->reshape_backwards
6797 		    ? (here_new * chunk_sectors + min_offset_diff <=
6798 		       here_old * chunk_sectors)
6799 		    : (here_new * chunk_sectors >=
6800 		       here_old * chunk_sectors + (-min_offset_diff))) {
6801 			/* Reading from the same stripe as writing to - bad */
6802 			printk(KERN_ERR "md/raid:%s: reshape_position too early for "
6803 			       "auto-recovery - aborting.\n",
6804 			       mdname(mddev));
6805 			return -EINVAL;
6806 		}
6807 		printk(KERN_INFO "md/raid:%s: reshape will continue\n",
6808 		       mdname(mddev));
6809 		/* OK, we should be able to continue; */
6810 	} else {
6811 		BUG_ON(mddev->level != mddev->new_level);
6812 		BUG_ON(mddev->layout != mddev->new_layout);
6813 		BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
6814 		BUG_ON(mddev->delta_disks != 0);
6815 	}
6816 
6817 	if (mddev->private == NULL)
6818 		conf = setup_conf(mddev);
6819 	else
6820 		conf = mddev->private;
6821 
6822 	if (IS_ERR(conf))
6823 		return PTR_ERR(conf);
6824 
6825 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
6826 		if (!journal_dev) {
6827 			pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
6828 			       mdname(mddev));
6829 			mddev->ro = 1;
6830 			set_disk_ro(mddev->gendisk, 1);
6831 		} else if (mddev->recovery_cp == MaxSector)
6832 			set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
6833 	}
6834 
6835 	conf->min_offset_diff = min_offset_diff;
6836 	mddev->thread = conf->thread;
6837 	conf->thread = NULL;
6838 	mddev->private = conf;
6839 
6840 	for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
6841 	     i++) {
6842 		rdev = conf->disks[i].rdev;
6843 		if (!rdev && conf->disks[i].replacement) {
6844 			/* The replacement is all we have so far */
6845 			rdev = conf->disks[i].replacement;
6846 			conf->disks[i].replacement = NULL;
6847 			clear_bit(Replacement, &rdev->flags);
6848 			conf->disks[i].rdev = rdev;
6849 		}
6850 		if (!rdev)
6851 			continue;
6852 		if (conf->disks[i].replacement &&
6853 		    conf->reshape_progress != MaxSector) {
6854 			/* replacements and reshape simply do not mix. */
6855 			printk(KERN_ERR "md: cannot handle concurrent "
6856 			       "replacement and reshape.\n");
6857 			goto abort;
6858 		}
6859 		if (test_bit(In_sync, &rdev->flags)) {
6860 			working_disks++;
6861 			continue;
6862 		}
6863 		/* This disk is not fully in-sync.  However if it
6864 		 * just stored parity (beyond the recovery_offset),
6865 		 * then we don't need to be concerned about the
6866 		 * array being dirty.
6867 		 * When reshape goes 'backwards', we never have
6868 		 * partially completed devices, so we only need
6869 		 * to worry about reshape going forwards.
6870 		 */
6871 		/* Hack because v0.91 doesn't store recovery_offset properly. */
6872 		if (mddev->major_version == 0 &&
6873 		    mddev->minor_version > 90)
6874 			rdev->recovery_offset = reshape_offset;
6875 
6876 		if (rdev->recovery_offset < reshape_offset) {
6877 			/* We need to check old and new layout */
6878 			if (!only_parity(rdev->raid_disk,
6879 					 conf->algorithm,
6880 					 conf->raid_disks,
6881 					 conf->max_degraded))
6882 				continue;
6883 		}
6884 		if (!only_parity(rdev->raid_disk,
6885 				 conf->prev_algo,
6886 				 conf->previous_raid_disks,
6887 				 conf->max_degraded))
6888 			continue;
6889 		dirty_parity_disks++;
6890 	}
6891 
6892 	/*
6893 	 * 0 for a fully functional array, 1 or 2 for a degraded array.
6894 	 */
6895 	mddev->degraded = calc_degraded(conf);
6896 
6897 	if (has_failed(conf)) {
6898 		printk(KERN_ERR "md/raid:%s: not enough operational devices"
6899 			" (%d/%d failed)\n",
6900 			mdname(mddev), mddev->degraded, conf->raid_disks);
6901 		goto abort;
6902 	}
6903 
6904 	/* device size must be a multiple of chunk size */
6905 	mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
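	/*
	 * Illustrative example (hypothetical numbers): with a 512K chunk
	 * (chunk_sectors = 1024, a power of two) the mask above clears the
	 * low 10 bits, so dev_sectors = 1953525168 rounds down to
	 * 1953524736, the largest multiple of 1024 not exceeding it.
	 */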
6906 	mddev->resync_max_sectors = mddev->dev_sectors;
6907 
6908 	if (mddev->degraded > dirty_parity_disks &&
6909 	    mddev->recovery_cp != MaxSector) {
6910 		if (mddev->ok_start_degraded)
6911 			printk(KERN_WARNING
6912 			       "md/raid:%s: starting dirty degraded array"
6913 			       " - data corruption possible.\n",
6914 			       mdname(mddev));
6915 		else {
6916 			printk(KERN_ERR
6917 			       "md/raid:%s: cannot start dirty degraded array.\n",
6918 			       mdname(mddev));
6919 			goto abort;
6920 		}
6921 	}
6922 
6923 	if (mddev->degraded == 0)
6924 		printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
6925 		       " devices, algorithm %d\n", mdname(mddev), conf->level,
6926 		       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
6927 		       mddev->new_layout);
6928 	else
6929 		printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
6930 		       " out of %d devices, algorithm %d\n",
6931 		       mdname(mddev), conf->level,
6932 		       mddev->raid_disks - mddev->degraded,
6933 		       mddev->raid_disks, mddev->new_layout);
6934 
6935 	print_raid5_conf(conf);
6936 
6937 	if (conf->reshape_progress != MaxSector) {
6938 		conf->reshape_safe = conf->reshape_progress;
6939 		atomic_set(&conf->reshape_stripes, 0);
6940 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6941 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
6942 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
6943 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6944 		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
6945 							"reshape");
6946 	}
6947 
6948 	/* Ok, everything is just fine now */
6949 	if (mddev->to_remove == &raid5_attrs_group)
6950 		mddev->to_remove = NULL;
6951 	else if (mddev->kobj.sd &&
6952 	    sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
6953 		printk(KERN_WARNING
6954 		       "raid5: failed to create sysfs attributes for %s\n",
6955 		       mdname(mddev));
6956 	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
6957 
6958 	if (mddev->queue) {
6959 		int chunk_size;
6960 		bool discard_supported = true;
6961 		/* read-ahead size must cover two whole stripes, which
6962 		 * is 2 * (data disks) * chunksize, where data disks is the
6963 		 * number of raid devices minus the number of parity devices
6964 		 */
6965 		int data_disks = conf->previous_raid_disks - conf->max_degraded;
6966 		int stripe = data_disks *
6967 			((mddev->chunk_sectors << 9) / PAGE_SIZE);
6968 		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
6969 			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
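		/*
		 * Illustrative example (hypothetical numbers): a 4-disk RAID5
		 * with a 64K chunk has data_disks = 3 and, with 4K pages,
		 * stripe = 3 * (65536 / 4096) = 48 pages, so read-ahead is
		 * raised to at least 96 pages (384K) to cover two stripes.
		 */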
6970 
6971 		chunk_size = mddev->chunk_sectors << 9;
6972 		blk_queue_io_min(mddev->queue, chunk_size);
6973 		blk_queue_io_opt(mddev->queue, chunk_size *
6974 				 (conf->raid_disks - conf->max_degraded));
6975 		mddev->queue->limits.raid_partial_stripes_expensive = 1;
6976 		/*
6977 		 * We can only discard a whole stripe. It doesn't make sense to
6978 		 * discard the data disks but still write the parity disk
6979 		 */
6980 		stripe = stripe * PAGE_SIZE;
6981 		/* Round up to a power of 2, as discard handling
6982 		 * currently assumes that */
6983 		while ((stripe-1) & stripe)
6984 			stripe = (stripe | (stripe-1)) + 1;
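		/*
		 * Illustrative example (hypothetical numbers): three data
		 * disks with a 64K chunk give stripe = 3 * 65536 = 196608
		 * (0x30000).  One pass of the loop above computes
		 * (0x30000 | 0x2ffff) + 1 = 0x40000 (256K), a power of two,
		 * so the loop terminates.
		 */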
6985 		mddev->queue->limits.discard_alignment = stripe;
6986 		mddev->queue->limits.discard_granularity = stripe;
6987 		/*
6988 		 * unaligned part of discard request will be ignored, so can't
6989 		 * guarantee discard_zeroes_data
6990 		 */
6991 		mddev->queue->limits.discard_zeroes_data = 0;
6992 
6993 		blk_queue_max_write_same_sectors(mddev->queue, 0);
6994 
6995 		rdev_for_each(rdev, mddev) {
6996 			disk_stack_limits(mddev->gendisk, rdev->bdev,
6997 					  rdev->data_offset << 9);
6998 			disk_stack_limits(mddev->gendisk, rdev->bdev,
6999 					  rdev->new_data_offset << 9);
7000 			/*
7001 			 * discard_zeroes_data is required, otherwise data
7002 			 * could be lost. Consider a scenario: discard a stripe
7003 			 * (the stripe could be inconsistent if
7004 			 * discard_zeroes_data is 0); write one disk of the
7005 			 * stripe (the stripe could be inconsistent again
7006 			 * depending on which disks are used to calculate
7007 			 * parity); then the disk fails, and the stripe data on
7008 			 * that disk is lost.
7009 			 */
7010 			if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
7011 			    !bdev_get_queue(rdev->bdev)->
7012 						limits.discard_zeroes_data)
7013 				discard_supported = false;
7014 			/* Unfortunately, discard_zeroes_data is not currently
7015 			 * a guarantee - just a hint.  So we only allow DISCARD
7016 			 * if the sysadmin has confirmed that only safe devices
7017 			 * are in use by setting a module parameter.
7018 			 */
7019 			if (!devices_handle_discard_safely) {
7020 				if (discard_supported) {
7021 					pr_info("md/raid456: discard support disabled due to uncertainty.\n");
7022 					pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
7023 				}
7024 				discard_supported = false;
7025 			}
7026 		}
7027 
7028 		if (discard_supported &&
7029 		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
7030 		    mddev->queue->limits.discard_granularity >= stripe)
7031 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
7032 						mddev->queue);
7033 		else
7034 			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
7035 						mddev->queue);
7036 
7037 		blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
7038 	}
7039 
7040 	if (journal_dev) {
7041 		char b[BDEVNAME_SIZE];
7042 
7043 		printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
7044 		       mdname(mddev), bdevname(journal_dev->bdev, b));
7045 		r5l_init_log(conf, journal_dev);
7046 	}
7047 
7048 	return 0;
7049 abort:
7050 	md_unregister_thread(&mddev->thread);
7051 	print_raid5_conf(conf);
7052 	free_conf(conf);
7053 	mddev->private = NULL;
7054 	printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
7055 	return -EIO;
7056 }
7057 
7058 static void raid5_free(struct mddev *mddev, void *priv)
7059 {
7060 	struct r5conf *conf = priv;
7061 
7062 	free_conf(conf);
7063 	mddev->to_remove = &raid5_attrs_group;
7064 }
7065 
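/*
 * For reference (derived from the seq_printf calls below): a 4-device
 * RAID5 with a 512K chunk, the default left-symmetric layout and one
 * failed member contributes something like
 * " level 5, 512k chunk, algorithm 2 [4/3] [UUU_]" to the /proc/mdstat
 * line for the array.
 */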
7066 static void raid5_status(struct seq_file *seq, struct mddev *mddev)
7067 {
7068 	struct r5conf *conf = mddev->private;
7069 	int i;
7070 
7071 	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
7072 		conf->chunk_sectors / 2, mddev->layout);
7073 	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
7074 	rcu_read_lock();
7075 	for (i = 0; i < conf->raid_disks; i++) {
7076 		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
7077 		seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
7078 	}
7079 	rcu_read_unlock();
7080 	seq_printf (seq, "]");
7081 }
7082 
7083 static void print_raid5_conf (struct r5conf *conf)
7084 {
7085 	int i;
7086 	struct disk_info *tmp;
7087 
7088 	printk(KERN_DEBUG "RAID conf printout:\n");
7089 	if (!conf) {
7090 		printk("(conf==NULL)\n");
7091 		return;
7092 	}
7093 	printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
7094 	       conf->raid_disks,
7095 	       conf->raid_disks - conf->mddev->degraded);
7096 
7097 	for (i = 0; i < conf->raid_disks; i++) {
7098 		char b[BDEVNAME_SIZE];
7099 		tmp = conf->disks + i;
7100 		if (tmp->rdev)
7101 			printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
7102 			       i, !test_bit(Faulty, &tmp->rdev->flags),
7103 			       bdevname(tmp->rdev->bdev, b));
7104 	}
7105 }
7106 
7107 static int raid5_spare_active(struct mddev *mddev)
7108 {
7109 	int i;
7110 	struct r5conf *conf = mddev->private;
7111 	struct disk_info *tmp;
7112 	int count = 0;
7113 	unsigned long flags;
7114 
7115 	for (i = 0; i < conf->raid_disks; i++) {
7116 		tmp = conf->disks + i;
7117 		if (tmp->replacement
7118 		    && tmp->replacement->recovery_offset == MaxSector
7119 		    && !test_bit(Faulty, &tmp->replacement->flags)
7120 		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
7121 			/* Replacement has just become active. */
7122 			if (!tmp->rdev
7123 			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
7124 				count++;
7125 			if (tmp->rdev) {
7126 				/* Replaced device not technically faulty,
7127 				 * but we need to be sure it gets removed
7128 				 * and never re-added.
7129 				 */
7130 				set_bit(Faulty, &tmp->rdev->flags);
7131 				sysfs_notify_dirent_safe(
7132 					tmp->rdev->sysfs_state);
7133 			}
7134 			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
7135 		} else if (tmp->rdev
7136 		    && tmp->rdev->recovery_offset == MaxSector
7137 		    && !test_bit(Faulty, &tmp->rdev->flags)
7138 		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
7139 			count++;
7140 			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
7141 		}
7142 	}
7143 	spin_lock_irqsave(&conf->device_lock, flags);
7144 	mddev->degraded = calc_degraded(conf);
7145 	spin_unlock_irqrestore(&conf->device_lock, flags);
7146 	print_raid5_conf(conf);
7147 	return count;
7148 }
7149 
7150 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
7151 {
7152 	struct r5conf *conf = mddev->private;
7153 	int err = 0;
7154 	int number = rdev->raid_disk;
7155 	struct md_rdev **rdevp;
7156 	struct disk_info *p = conf->disks + number;
7157 
7158 	print_raid5_conf(conf);
7159 	if (test_bit(Journal, &rdev->flags) && conf->log) {
7160 		struct r5l_log *log;
7161 		/*
7162 		 * we can't wait for pending writes here, as this is called
7163 		 * from raid5d; waiting would deadlock.
7164 		 */
7165 		if (atomic_read(&mddev->writes_pending))
7166 			return -EBUSY;
7167 		log = conf->log;
7168 		conf->log = NULL;
7169 		synchronize_rcu();
7170 		r5l_exit_log(log);
7171 		return 0;
7172 	}
7173 	if (rdev == p->rdev)
7174 		rdevp = &p->rdev;
7175 	else if (rdev == p->replacement)
7176 		rdevp = &p->replacement;
7177 	else
7178 		return 0;
7179 
7180 	if (number >= conf->raid_disks &&
7181 	    conf->reshape_progress == MaxSector)
7182 		clear_bit(In_sync, &rdev->flags);
7183 
7184 	if (test_bit(In_sync, &rdev->flags) ||
7185 	    atomic_read(&rdev->nr_pending)) {
7186 		err = -EBUSY;
7187 		goto abort;
7188 	}
7189 	/* Only remove non-faulty devices if recovery
7190 	 * isn't possible.
7191 	 */
7192 	if (!test_bit(Faulty, &rdev->flags) &&
7193 	    mddev->recovery_disabled != conf->recovery_disabled &&
7194 	    !has_failed(conf) &&
7195 	    (!p->replacement || p->replacement == rdev) &&
7196 	    number < conf->raid_disks) {
7197 		err = -EBUSY;
7198 		goto abort;
7199 	}
7200 	*rdevp = NULL;
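	/*
	 * The pointer has been cleared.  Unless RemoveSynchronized shows
	 * that an RCU grace period has already elapsed for this device,
	 * wait for one here so that any reader that picked up the old
	 * pointer under rcu_read_lock() has finished.  If I/O was issued
	 * in that window (nr_pending != 0) we lost the race: restore the
	 * pointer and report -EBUSY so the removal can be retried later.
	 */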
7201 	if (!test_bit(RemoveSynchronized, &rdev->flags)) {
7202 		synchronize_rcu();
7203 		if (atomic_read(&rdev->nr_pending)) {
7204 			/* lost the race, try later */
7205 			err = -EBUSY;
7206 			*rdevp = rdev;
7207 		}
7208 	}
7209 	if (p->replacement) {
7210 		/* We must have just cleared 'rdev' */
7211 		p->rdev = p->replacement;
7212 		clear_bit(Replacement, &p->replacement->flags);
7213 		smp_mb(); /* Make sure other CPUs may see both as identical
7214 			   * but will never see neither - if they are careful
7215 			   */
7216 		p->replacement = NULL;
7217 		clear_bit(WantReplacement, &rdev->flags);
7218 	} else
7219 		/* We might have just removed the Replacement as faulty-
7220 		/* We might have just removed the Replacement as faulty -
7221 		 */
7222 		clear_bit(WantReplacement, &rdev->flags);
7223 abort:
7224 
7225 	print_raid5_conf(conf);
7226 	return err;
7227 }
7228 
7229 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
7230 {
7231 	struct r5conf *conf = mddev->private;
7232 	int err = -EEXIST;
7233 	int disk;
7234 	struct disk_info *p;
7235 	int first = 0;
7236 	int last = conf->raid_disks - 1;
7237 
7238 	if (test_bit(Journal, &rdev->flags)) {
7239 		char b[BDEVNAME_SIZE];
7240 		if (conf->log)
7241 			return -EBUSY;
7242 
7243 		rdev->raid_disk = 0;
7244 		/*
7245 		 * The array is in read-only mode if the journal is missing, so
7246 		 * no write requests are running.  We should be safe.
7247 		 */
7248 		r5l_init_log(conf, rdev);
7249 		printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
7250 		       mdname(mddev), bdevname(rdev->bdev, b));
7251 		return 0;
7252 	}
7253 	if (mddev->recovery_disabled == conf->recovery_disabled)
7254 		return -EBUSY;
7255 
7256 	if (rdev->saved_raid_disk < 0 && has_failed(conf))
7257 		/* no point adding a device */
7258 		return -EINVAL;
7259 
7260 	if (rdev->raid_disk >= 0)
7261 		first = last = rdev->raid_disk;
7262 
7263 	/*
7264 	 * find the disk ... but prefer rdev->saved_raid_disk
7265 	 * if possible.
7266 	 */
7267 	if (rdev->saved_raid_disk >= 0 &&
7268 	    rdev->saved_raid_disk >= first &&
7269 	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
7270 		first = rdev->saved_raid_disk;
7271 
7272 	for (disk = first; disk <= last; disk++) {
7273 		p = conf->disks + disk;
7274 		if (p->rdev == NULL) {
7275 			clear_bit(In_sync, &rdev->flags);
7276 			rdev->raid_disk = disk;
7277 			err = 0;
7278 			if (rdev->saved_raid_disk != disk)
7279 				conf->fullsync = 1;
7280 			rcu_assign_pointer(p->rdev, rdev);
7281 			goto out;
7282 		}
7283 	}
7284 	for (disk = first; disk <= last; disk++) {
7285 		p = conf->disks + disk;
7286 		if (test_bit(WantReplacement, &p->rdev->flags) &&
7287 		    p->replacement == NULL) {
7288 			clear_bit(In_sync, &rdev->flags);
7289 			set_bit(Replacement, &rdev->flags);
7290 			rdev->raid_disk = disk;
7291 			err = 0;
7292 			conf->fullsync = 1;
7293 			rcu_assign_pointer(p->replacement, rdev);
7294 			break;
7295 		}
7296 	}
7297 out:
7298 	print_raid5_conf(conf);
7299 	return err;
7300 }
7301 
7302 static int raid5_resize(struct mddev *mddev, sector_t sectors)
7303 {
7304 	/* no resync is happening, and there is enough space
7305 	 * on all devices, so we can resize.
7306 	 * We need to make sure resync covers any new space.
7307 	 * If the array is shrinking we should possibly wait until
7308 	 * any io in the removed space completes, but it hardly seems
7309 	 * worth it.
7310 	 */
7311 	sector_t newsize;
7312 	struct r5conf *conf = mddev->private;
7313 
7314 	if (conf->log)
7315 		return -EINVAL;
7316 	sectors &= ~((sector_t)conf->chunk_sectors - 1);
7317 	newsize = raid5_size(mddev, sectors, mddev->raid_disks);
7318 	if (mddev->external_size &&
7319 	    mddev->array_sectors > newsize)
7320 		return -EINVAL;
7321 	if (mddev->bitmap) {
7322 		int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
7323 		if (ret)
7324 			return ret;
7325 	}
7326 	md_set_array_sectors(mddev, newsize);
7327 	set_capacity(mddev->gendisk, mddev->array_sectors);
7328 	revalidate_disk(mddev->gendisk);
7329 	if (sectors > mddev->dev_sectors &&
7330 	    mddev->recovery_cp > mddev->dev_sectors) {
7331 		mddev->recovery_cp = mddev->dev_sectors;
7332 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7333 	}
7334 	mddev->dev_sectors = sectors;
7335 	mddev->resync_max_sectors = sectors;
7336 	return 0;
7337 }
7338 
7339 static int check_stripe_cache(struct mddev *mddev)
7340 {
7341 	/* Can only proceed if there are plenty of stripe_heads.
7342 	 * We need a minimum of one full stripe, and for sensible progress
7343 	 * it is best to have about 4 times that.
7344 	 * If we require 4 times, then the default 256 4K stripe_heads will
7345 	 * allow for chunk sizes up to 256K, which is probably OK.
7346 	 * If the chunk size is greater, user-space should request more
7347 	 * stripe_heads first.
7348 	 */
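	/*
	 * Worked example (hypothetical numbers): with the default 256
	 * stripe_heads and 4K STRIPE_SIZE, a 256K chunk needs
	 * (256K / 4K) * 4 = 256 stripes, which just fits; a 512K chunk
	 * would need 512, and the reshape is refused until the stripe
	 * cache is grown from user-space.
	 */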
7349 	struct r5conf *conf = mddev->private;
7350 	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
7351 	    > conf->min_nr_stripes ||
7352 	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
7353 	    > conf->min_nr_stripes) {
7354 		printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
7355 		       mdname(mddev),
7356 		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
7357 			/ STRIPE_SIZE)*4);
7358 		return 0;
7359 	}
7360 	return 1;
7361 }
7362 
7363 static int check_reshape(struct mddev *mddev)
7364 {
7365 	struct r5conf *conf = mddev->private;
7366 
7367 	if (conf->log)
7368 		return -EINVAL;
7369 	if (mddev->delta_disks == 0 &&
7370 	    mddev->new_layout == mddev->layout &&
7371 	    mddev->new_chunk_sectors == mddev->chunk_sectors)
7372 		return 0; /* nothing to do */
7373 	if (has_failed(conf))
7374 		return -EINVAL;
7375 	if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) {
7376 		/* We might be able to shrink, but the devices must
7377 		 * be made bigger first.
7378 		 * For raid6, 4 is the minimum number of devices.
7379 		 * Otherwise 2 is the minimum.
7380 		 */
7381 		int min = 2;
7382 		if (mddev->level == 6)
7383 			min = 4;
7384 		if (mddev->raid_disks + mddev->delta_disks < min)
7385 			return -EINVAL;
7386 	}
7387 
7388 	if (!check_stripe_cache(mddev))
7389 		return -ENOSPC;
7390 
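	/*
	 * When the chunk size grows or devices are added, the per-cpu
	 * scratch ("scribble") buffers used by the async parity code must
	 * be large enough for the bigger of the old and new geometry, so
	 * resize them before committing to the reshape.
	 */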
7391 	if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
7392 	    mddev->delta_disks > 0)
7393 		if (resize_chunks(conf,
7394 				  conf->previous_raid_disks
7395 				  + max(0, mddev->delta_disks),
7396 				  max(mddev->new_chunk_sectors,
7397 				      mddev->chunk_sectors)
7398 			    ) < 0)
7399 			return -ENOMEM;
7400 	return resize_stripes(conf, (conf->previous_raid_disks
7401 				     + mddev->delta_disks));
7402 }
7403 
7404 static int raid5_start_reshape(struct mddev *mddev)
7405 {
7406 	struct r5conf *conf = mddev->private;
7407 	struct md_rdev *rdev;
7408 	int spares = 0;
7409 	unsigned long flags;
7410 
7411 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
7412 		return -EBUSY;
7413 
7414 	if (!check_stripe_cache(mddev))
7415 		return -ENOSPC;
7416 
7417 	if (has_failed(conf))
7418 		return -EINVAL;
7419 
7420 	rdev_for_each(rdev, mddev) {
7421 		if (!test_bit(In_sync, &rdev->flags)
7422 		    && !test_bit(Faulty, &rdev->flags))
7423 			spares++;
7424 	}
7425 
7426 	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
7427 		/* Not enough devices even to make a degraded array
7428 		 * of that size
7429 		 */
7430 		return -EINVAL;
7431 
7432 	/* Refuse to reduce size of the array.  Any reductions in
7433 	 * array size must be through explicit setting of array_size
7434 	 * attribute.
7435 	 */
7436 	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
7437 	    < mddev->array_sectors) {
7438 		printk(KERN_ERR "md/raid:%s: array size must be reduced "
7439 		       "before number of disks\n", mdname(mddev));
7440 		return -EINVAL;
7441 	}
7442 
7443 	atomic_set(&conf->reshape_stripes, 0);
7444 	spin_lock_irq(&conf->device_lock);
7445 	write_seqcount_begin(&conf->gen_lock);
7446 	conf->previous_raid_disks = conf->raid_disks;
7447 	conf->raid_disks += mddev->delta_disks;
7448 	conf->prev_chunk_sectors = conf->chunk_sectors;
7449 	conf->chunk_sectors = mddev->new_chunk_sectors;
7450 	conf->prev_algo = conf->algorithm;
7451 	conf->algorithm = mddev->new_layout;
7452 	conf->generation++;
7453 	/* Code that selects data_offset needs to see the generation update
7454 	 * if reshape_progress has been set - so a memory barrier needed.
7455 	 * if reshape_progress has been set - so a memory barrier is needed.
7456 	smp_mb();
7457 	if (mddev->reshape_backwards)
7458 		conf->reshape_progress = raid5_size(mddev, 0, 0);
7459 	else
7460 		conf->reshape_progress = 0;
7461 	conf->reshape_safe = conf->reshape_progress;
7462 	write_seqcount_end(&conf->gen_lock);
7463 	spin_unlock_irq(&conf->device_lock);
7464 
7465 	/* Now make sure any requests that proceeded on the assumption
7466 	 * the reshape wasn't running - like Discard or Read - have
7467 	 * completed.
7468 	 */
7469 	mddev_suspend(mddev);
7470 	mddev_resume(mddev);
7471 
7472 	/* Add some new drives, as many as will fit.
7473 	 * We know there are enough to make the newly sized array work.
7474 	 * Don't add devices if we are reducing the number of
7475 	 * devices in the array.  This is because it is not possible
7476 	 * to correctly record the "partially reconstructed" state of
7477 	 * such devices during the reshape and confusion could result.
7478 	 */
7479 	if (mddev->delta_disks >= 0) {
7480 		rdev_for_each(rdev, mddev)
7481 			if (rdev->raid_disk < 0 &&
7482 			    !test_bit(Faulty, &rdev->flags)) {
7483 				if (raid5_add_disk(mddev, rdev) == 0) {
7484 					if (rdev->raid_disk
7485 					    >= conf->previous_raid_disks)
7486 						set_bit(In_sync, &rdev->flags);
7487 					else
7488 						rdev->recovery_offset = 0;
7489 
7490 					if (sysfs_link_rdev(mddev, rdev))
7491 						/* Failure here is OK */;
7492 				}
7493 			} else if (rdev->raid_disk >= conf->previous_raid_disks
7494 				   && !test_bit(Faulty, &rdev->flags)) {
7495 				/* This is a spare that was manually added */
7496 				set_bit(In_sync, &rdev->flags);
7497 			}
7498 
7499 		/* When a reshape changes the number of devices,
7500 		 * ->degraded is measured against the larger of the
7501 		 * pre and post number of devices.
7502 		 */
7503 		spin_lock_irqsave(&conf->device_lock, flags);
7504 		mddev->degraded = calc_degraded(conf);
7505 		spin_unlock_irqrestore(&conf->device_lock, flags);
7506 	}
7507 	mddev->raid_disks = conf->raid_disks;
7508 	mddev->reshape_position = conf->reshape_progress;
7509 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
7510 
7511 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7512 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7513 	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
7514 	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7515 	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7516 	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
7517 						"reshape");
7518 	if (!mddev->sync_thread) {
7519 		mddev->recovery = 0;
7520 		spin_lock_irq(&conf->device_lock);
7521 		write_seqcount_begin(&conf->gen_lock);
7522 		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
7523 		mddev->new_chunk_sectors =
7524 			conf->chunk_sectors = conf->prev_chunk_sectors;
7525 		mddev->new_layout = conf->algorithm = conf->prev_algo;
7526 		rdev_for_each(rdev, mddev)
7527 			rdev->new_data_offset = rdev->data_offset;
7528 		smp_wmb();
7529 		conf->generation --;
7530 		conf->reshape_progress = MaxSector;
7531 		mddev->reshape_position = MaxSector;
7532 		write_seqcount_end(&conf->gen_lock);
7533 		spin_unlock_irq(&conf->device_lock);
7534 		return -EAGAIN;
7535 	}
7536 	conf->reshape_checkpoint = jiffies;
7537 	md_wakeup_thread(mddev->sync_thread);
7538 	md_new_event(mddev);
7539 	return 0;
7540 }
7541 
7542 /* This is called from the reshape thread and should make any
7543  * changes needed in 'conf'
7544  */
7545 static void end_reshape(struct r5conf *conf)
7546 {
7547 
7548 	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
7549 		struct md_rdev *rdev;
7550 
7551 		spin_lock_irq(&conf->device_lock);
7552 		conf->previous_raid_disks = conf->raid_disks;
7553 		rdev_for_each(rdev, conf->mddev)
7554 			rdev->data_offset = rdev->new_data_offset;
7555 		smp_wmb();
7556 		conf->reshape_progress = MaxSector;
7557 		conf->mddev->reshape_position = MaxSector;
7558 		spin_unlock_irq(&conf->device_lock);
7559 		wake_up(&conf->wait_for_overlap);
7560 
7561 		/* read-ahead size must cover two whole stripes, which is
7562 		 * 2 * (data disks) * chunksize, where data disks is raid devices minus parity devices
7563 		 */
7564 		if (conf->mddev->queue) {
7565 			int data_disks = conf->raid_disks - conf->max_degraded;
7566 			int stripe = data_disks * ((conf->chunk_sectors << 9)
7567 						   / PAGE_SIZE);
7568 			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
7569 				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
7570 		}
7571 	}
7572 }
7573 
7574 /* This is called from the raid5d thread with mddev_lock held.
7575  * It makes config changes to the device.
7576  */
7577 static void raid5_finish_reshape(struct mddev *mddev)
7578 {
7579 	struct r5conf *conf = mddev->private;
7580 
7581 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7582 
7583 		if (mddev->delta_disks > 0) {
7584 			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
7585 			if (mddev->queue) {
7586 				set_capacity(mddev->gendisk, mddev->array_sectors);
7587 				revalidate_disk(mddev->gendisk);
7588 			}
7589 		} else {
7590 			int d;
7591 			spin_lock_irq(&conf->device_lock);
7592 			mddev->degraded = calc_degraded(conf);
7593 			spin_unlock_irq(&conf->device_lock);
7594 			for (d = conf->raid_disks ;
7595 			     d < conf->raid_disks - mddev->delta_disks;
7596 			     d++) {
7597 				struct md_rdev *rdev = conf->disks[d].rdev;
7598 				if (rdev)
7599 					clear_bit(In_sync, &rdev->flags);
7600 				rdev = conf->disks[d].replacement;
7601 				if (rdev)
7602 					clear_bit(In_sync, &rdev->flags);
7603 			}
7604 		}
7605 		mddev->layout = conf->algorithm;
7606 		mddev->chunk_sectors = conf->chunk_sectors;
7607 		mddev->reshape_position = MaxSector;
7608 		mddev->delta_disks = 0;
7609 		mddev->reshape_backwards = 0;
7610 	}
7611 }
7612 
7613 static void raid5_quiesce(struct mddev *mddev, int state)
7614 {
7615 	struct r5conf *conf = mddev->private;
7616 
7617 	switch(state) {
7618 	case 2: /* resume for a suspend */
7619 		wake_up(&conf->wait_for_overlap);
7620 		break;
7621 
7622 	case 1: /* stop all writes */
7623 		lock_all_device_hash_locks_irq(conf);
7624 		/* '2' tells resync/reshape to pause so that all
7625 		 * active stripes can drain
7626 		 */
7627 		conf->quiesce = 2;
7628 		wait_event_cmd(conf->wait_for_quiescent,
7629 				    atomic_read(&conf->active_stripes) == 0 &&
7630 				    atomic_read(&conf->active_aligned_reads) == 0,
7631 				    unlock_all_device_hash_locks_irq(conf),
7632 				    lock_all_device_hash_locks_irq(conf));
7633 		conf->quiesce = 1;
7634 		unlock_all_device_hash_locks_irq(conf);
7635 		/* allow reshape to continue */
7636 		wake_up(&conf->wait_for_overlap);
7637 		break;
7638 
7639 	case 0: /* re-enable writes */
7640 		lock_all_device_hash_locks_irq(conf);
7641 		conf->quiesce = 0;
7642 		wake_up(&conf->wait_for_quiescent);
7643 		wake_up(&conf->wait_for_overlap);
7644 		unlock_all_device_hash_locks_irq(conf);
7645 		break;
7646 	}
7647 	r5l_quiesce(conf->log, state);
7648 }
7649 
7650 static void *raid45_takeover_raid0(struct mddev *mddev, int level)
7651 {
7652 	struct r0conf *raid0_conf = mddev->private;
7653 	sector_t sectors;
7654 
7655 	/* for raid0 takeover only one zone is supported */
7656 	if (raid0_conf->nr_strip_zones > 1) {
7657 		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
7658 		       mdname(mddev));
7659 		return ERR_PTR(-EINVAL);
7660 	}
7661 
7662 	sectors = raid0_conf->strip_zone[0].zone_end;
7663 	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
7664 	mddev->dev_sectors = sectors;
7665 	mddev->new_level = level;
7666 	mddev->new_layout = ALGORITHM_PARITY_N;
7667 	mddev->new_chunk_sectors = mddev->chunk_sectors;
7668 	mddev->raid_disks += 1;
7669 	mddev->delta_disks = 1;
7670 	/* make sure it will be not marked as dirty */
7671 	/* make sure it will not be marked as dirty */
7672 
7673 	return setup_conf(mddev);
7674 }
7675 
7676 static void *raid5_takeover_raid1(struct mddev *mddev)
7677 {
7678 	int chunksect;
7679 
7680 	if (mddev->raid_disks != 2 ||
7681 	    mddev->degraded > 1)
7682 		return ERR_PTR(-EINVAL);
7683 
7684 	/* Should check if there are write-behind devices? */
7685 
7686 	chunksect = 64*2; /* 64K by default */
7687 
7688 	/* The array must be an exact multiple of chunksize */
7689 	while (chunksect && (mddev->array_sectors & (chunksect-1)))
7690 		chunksect >>= 1;
7691 
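	/*
	 * Illustrative example (hypothetical numbers): an array of
	 * 1048640 sectors is a multiple of 64 sectors but not of 128, so
	 * the loop above halves chunksect from 128 (64K) to 64 (32K),
	 * the largest power-of-two chunk size that divides the array size.
	 */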
7692 	if ((chunksect<<9) < STRIPE_SIZE)
7693 		/* array size does not allow a suitable chunk size */
7694 		return ERR_PTR(-EINVAL);
7695 
7696 	mddev->new_level = 5;
7697 	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
7698 	mddev->new_chunk_sectors = chunksect;
7699 
7700 	return setup_conf(mddev);
7701 }
7702 
7703 static void *raid5_takeover_raid6(struct mddev *mddev)
7704 {
7705 	int new_layout;
7706 
7707 	switch (mddev->layout) {
7708 	case ALGORITHM_LEFT_ASYMMETRIC_6:
7709 		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
7710 		break;
7711 	case ALGORITHM_RIGHT_ASYMMETRIC_6:
7712 		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
7713 		break;
7714 	case ALGORITHM_LEFT_SYMMETRIC_6:
7715 		new_layout = ALGORITHM_LEFT_SYMMETRIC;
7716 		break;
7717 	case ALGORITHM_RIGHT_SYMMETRIC_6:
7718 		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
7719 		break;
7720 	case ALGORITHM_PARITY_0_6:
7721 		new_layout = ALGORITHM_PARITY_0;
7722 		break;
7723 	case ALGORITHM_PARITY_N:
7724 		new_layout = ALGORITHM_PARITY_N;
7725 		break;
7726 	default:
7727 		return ERR_PTR(-EINVAL);
7728 	}
7729 	mddev->new_level = 5;
7730 	mddev->new_layout = new_layout;
7731 	mddev->delta_disks = -1;
7732 	mddev->raid_disks -= 1;
7733 	return setup_conf(mddev);
7734 }
7735 
7736 static int raid5_check_reshape(struct mddev *mddev)
7737 {
7738 	/* For a 2-drive array, the layout and chunk size can be changed
7739 	 * immediately as no restriping is needed.
7740 	 * For larger arrays we record the new value - after validation
7741 	 * to be used by a reshape pass.
7742 	 */
7743 	struct r5conf *conf = mddev->private;
7744 	int new_chunk = mddev->new_chunk_sectors;
7745 
7746 	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
7747 		return -EINVAL;
7748 	if (new_chunk > 0) {
7749 		if (!is_power_of_2(new_chunk))
7750 			return -EINVAL;
7751 		if (new_chunk < (PAGE_SIZE>>9))
7752 			return -EINVAL;
7753 		if (mddev->array_sectors & (new_chunk-1))
7754 			/* not factor of array size */
7755 			return -EINVAL;
7756 	}
7757 
7758 	/* They look valid */
7759 
7760 	if (mddev->raid_disks == 2) {
7761 		/* can make the change immediately */
7762 		if (mddev->new_layout >= 0) {
7763 			conf->algorithm = mddev->new_layout;
7764 			mddev->layout = mddev->new_layout;
7765 		}
7766 		if (new_chunk > 0) {
7767 			conf->chunk_sectors = new_chunk ;
7768 			mddev->chunk_sectors = new_chunk;
7769 		}
7770 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
7771 		md_wakeup_thread(mddev->thread);
7772 	}
7773 	return check_reshape(mddev);
7774 }
7775 
7776 static int raid6_check_reshape(struct mddev *mddev)
7777 {
7778 	int new_chunk = mddev->new_chunk_sectors;
7779 
7780 	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
7781 		return -EINVAL;
7782 	if (new_chunk > 0) {
7783 		if (!is_power_of_2(new_chunk))
7784 			return -EINVAL;
7785 		if (new_chunk < (PAGE_SIZE >> 9))
7786 			return -EINVAL;
7787 		if (mddev->array_sectors & (new_chunk-1))
7788 			/* not factor of array size */
7789 			return -EINVAL;
7790 	}
7791 
7792 	/* They look valid */
7793 	return check_reshape(mddev);
7794 }
7795 
7796 static void *raid5_takeover(struct mddev *mddev)
7797 {
7798 	/* raid5 can take over:
7799 	 *  raid0 - if there is only one strip zone - make it a raid4 layout
7800 	 *  raid1 - if there are two drives.  We need to know the chunk size
7801 	 *  raid4 - trivial - just use a raid4 layout.
7802 	 *  raid6 - Provided it is a *_6 layout
7803 	 */
7804 	if (mddev->level == 0)
7805 		return raid45_takeover_raid0(mddev, 5);
7806 	if (mddev->level == 1)
7807 		return raid5_takeover_raid1(mddev);
7808 	if (mddev->level == 4) {
7809 		mddev->new_layout = ALGORITHM_PARITY_N;
7810 		mddev->new_level = 5;
7811 		return setup_conf(mddev);
7812 	}
7813 	if (mddev->level == 6)
7814 		return raid5_takeover_raid6(mddev);
7815 
7816 	return ERR_PTR(-EINVAL);
7817 }
7818 
7819 static void *raid4_takeover(struct mddev *mddev)
7820 {
7821 	/* raid4 can take over:
7822 	 *  raid0 - if there is only one strip zone
7823 	 *  raid5 - if layout is right
7824 	 */
7825 	if (mddev->level == 0)
7826 		return raid45_takeover_raid0(mddev, 4);
7827 	if (mddev->level == 5 &&
7828 	    mddev->layout == ALGORITHM_PARITY_N) {
7829 		mddev->new_layout = 0;
7830 		mddev->new_level = 4;
7831 		return setup_conf(mddev);
7832 	}
7833 	return ERR_PTR(-EINVAL);
7834 }
7835 
7836 static struct md_personality raid5_personality;
7837 
7838 static void *raid6_takeover(struct mddev *mddev)
7839 {
7840 	/* Currently can only take over a raid5.  We map the
7841 	 * personality to an equivalent raid6 personality
7842 	 * with the Q block at the end.
7843 	 */
7844 	int new_layout;
7845 
7846 	if (mddev->pers != &raid5_personality)
7847 		return ERR_PTR(-EINVAL);
7848 	if (mddev->degraded > 1)
7849 		return ERR_PTR(-EINVAL);
7850 	if (mddev->raid_disks > 253)
7851 		return ERR_PTR(-EINVAL);
7852 	if (mddev->raid_disks < 3)
7853 		return ERR_PTR(-EINVAL);
7854 
7855 	switch (mddev->layout) {
7856 	case ALGORITHM_LEFT_ASYMMETRIC:
7857 		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
7858 		break;
7859 	case ALGORITHM_RIGHT_ASYMMETRIC:
7860 		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
7861 		break;
7862 	case ALGORITHM_LEFT_SYMMETRIC:
7863 		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
7864 		break;
7865 	case ALGORITHM_RIGHT_SYMMETRIC:
7866 		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
7867 		break;
7868 	case ALGORITHM_PARITY_0:
7869 		new_layout = ALGORITHM_PARITY_0_6;
7870 		break;
7871 	case ALGORITHM_PARITY_N:
7872 		new_layout = ALGORITHM_PARITY_N;
7873 		break;
7874 	default:
7875 		return ERR_PTR(-EINVAL);
7876 	}
7877 	mddev->new_level = 6;
7878 	mddev->new_layout = new_layout;
7879 	mddev->delta_disks = 1;
7880 	mddev->raid_disks += 1;
7881 	return setup_conf(mddev);
7882 }
7883 
7884 static struct md_personality raid6_personality =
7885 {
7886 	.name		= "raid6",
7887 	.level		= 6,
7888 	.owner		= THIS_MODULE,
7889 	.make_request	= raid5_make_request,
7890 	.run		= raid5_run,
7891 	.free		= raid5_free,
7892 	.status		= raid5_status,
7893 	.error_handler	= raid5_error,
7894 	.hot_add_disk	= raid5_add_disk,
7895 	.hot_remove_disk= raid5_remove_disk,
7896 	.spare_active	= raid5_spare_active,
7897 	.sync_request	= raid5_sync_request,
7898 	.resize		= raid5_resize,
7899 	.size		= raid5_size,
7900 	.check_reshape	= raid6_check_reshape,
7901 	.start_reshape  = raid5_start_reshape,
7902 	.finish_reshape = raid5_finish_reshape,
7903 	.quiesce	= raid5_quiesce,
7904 	.takeover	= raid6_takeover,
7905 	.congested	= raid5_congested,
7906 };
7907 static struct md_personality raid5_personality =
7908 {
7909 	.name		= "raid5",
7910 	.level		= 5,
7911 	.owner		= THIS_MODULE,
7912 	.make_request	= raid5_make_request,
7913 	.run		= raid5_run,
7914 	.free		= raid5_free,
7915 	.status		= raid5_status,
7916 	.error_handler	= raid5_error,
7917 	.hot_add_disk	= raid5_add_disk,
7918 	.hot_remove_disk= raid5_remove_disk,
7919 	.spare_active	= raid5_spare_active,
7920 	.sync_request	= raid5_sync_request,
7921 	.resize		= raid5_resize,
7922 	.size		= raid5_size,
7923 	.check_reshape	= raid5_check_reshape,
7924 	.start_reshape  = raid5_start_reshape,
7925 	.finish_reshape = raid5_finish_reshape,
7926 	.quiesce	= raid5_quiesce,
7927 	.takeover	= raid5_takeover,
7928 	.congested	= raid5_congested,
7929 };
7930 
7931 static struct md_personality raid4_personality =
7932 {
7933 	.name		= "raid4",
7934 	.level		= 4,
7935 	.owner		= THIS_MODULE,
7936 	.make_request	= raid5_make_request,
7937 	.run		= raid5_run,
7938 	.free		= raid5_free,
7939 	.status		= raid5_status,
7940 	.error_handler	= raid5_error,
7941 	.hot_add_disk	= raid5_add_disk,
7942 	.hot_remove_disk= raid5_remove_disk,
7943 	.spare_active	= raid5_spare_active,
7944 	.sync_request	= raid5_sync_request,
7945 	.resize		= raid5_resize,
7946 	.size		= raid5_size,
7947 	.check_reshape	= raid5_check_reshape,
7948 	.start_reshape  = raid5_start_reshape,
7949 	.finish_reshape = raid5_finish_reshape,
7950 	.quiesce	= raid5_quiesce,
7951 	.takeover	= raid4_takeover,
7952 	.congested	= raid5_congested,
7953 };
7954 
7955 static int __init raid5_init(void)
7956 {
7957 	int ret;
7958 
7959 	raid5_wq = alloc_workqueue("raid5wq",
7960 		WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
7961 	if (!raid5_wq)
7962 		return -ENOMEM;
7963 
7964 	ret = cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE,
7965 				      "md/raid5:prepare",
7966 				      raid456_cpu_up_prepare,
7967 				      raid456_cpu_dead);
7968 	if (ret) {
7969 		destroy_workqueue(raid5_wq);
7970 		return ret;
7971 	}
7972 	register_md_personality(&raid6_personality);
7973 	register_md_personality(&raid5_personality);
7974 	register_md_personality(&raid4_personality);
7975 	return 0;
7976 }
7977 
7978 static void raid5_exit(void)
7979 {
7980 	unregister_md_personality(&raid6_personality);
7981 	unregister_md_personality(&raid5_personality);
7982 	unregister_md_personality(&raid4_personality);
7983 	cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
7984 	destroy_workqueue(raid5_wq);
7985 }
7986 
7987 module_init(raid5_init);
7988 module_exit(raid5_exit);
7989 MODULE_LICENSE("GPL");
7990 MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
7991 MODULE_ALIAS("md-personality-4"); /* RAID5 */
7992 MODULE_ALIAS("md-raid5");
7993 MODULE_ALIAS("md-raid4");
7994 MODULE_ALIAS("md-level-5");
7995 MODULE_ALIAS("md-level-4");
7996 MODULE_ALIAS("md-personality-8"); /* RAID6 */
7997 MODULE_ALIAS("md-raid6");
7998 MODULE_ALIAS("md-level-6");
7999 
8000 /* This used to be two separate modules, they were: */
8001 MODULE_ALIAS("raid5");
8002 MODULE_ALIAS("raid6");
8003