xref: /linux/drivers/md/raid5.c (revision 7f9f7c697474268d9ef9479df3ddfe7cdcfbbffc)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * raid5.c : Multiple Devices driver for Linux
4  *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
5  *	   Copyright (C) 1999, 2000 Ingo Molnar
6  *	   Copyright (C) 2002, 2003 H. Peter Anvin
7  *
8  * RAID-4/5/6 management functions.
9  * Thanks to Penguin Computing for making the RAID-6 development possible
10  * by donating a test server!
11  */
12 
13 /*
14  * BITMAP UNPLUGGING:
15  *
16  * The sequencing for updating the bitmap reliably is a little
17  * subtle (and I got it wrong the first time) so it deserves some
18  * explanation.
19  *
20  * We group bitmap updates into batches.  Each batch has a number.
21  * We may write out several batches at once, but that isn't very important.
22  * conf->seq_write is the number of the last batch successfully written.
23  * conf->seq_flush is the number of the last batch that was closed to
24  *    new additions.
25  * When we discover that we will need to write to any block in a stripe
26  * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
27  * the number of the batch it will be in. This is seq_flush+1.
28  * When we are ready to do a write, if that batch hasn't been written yet,
29  *   we plug the array and queue the stripe for later.
30  * When an unplug happens, we increment seq_flush, thus closing the current
31  *   batch.
32  * When we notice that seq_flush > seq_write, we write out all pending updates
33  * to the bitmap, and advance seq_write to where seq_flush was.
34  * This may occasionally write a bit out twice, but is sure never to
35  * miss any bits.
36  */
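/*
 * Concretely: a stripe that receives its first write while seq_flush == 5
 * is tagged with bm_seq = 6.  Its writes are held back (the array is
 * plugged and the stripe queued) until an unplug closes batch 6 and the
 * bitmap updates for that batch reach disk, i.e. until seq_write >= 6.
 */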
37 
38 #include <linux/blkdev.h>
39 #include <linux/kthread.h>
40 #include <linux/raid/pq.h>
41 #include <linux/async_tx.h>
42 #include <linux/module.h>
43 #include <linux/async.h>
44 #include <linux/seq_file.h>
45 #include <linux/cpu.h>
46 #include <linux/slab.h>
47 #include <linux/ratelimit.h>
48 #include <linux/nodemask.h>
49 
50 #include <trace/events/block.h>
51 #include <linux/list_sort.h>
52 
53 #include "md.h"
54 #include "raid5.h"
55 #include "raid0.h"
56 #include "md-bitmap.h"
57 #include "raid5-log.h"
58 
59 #define UNSUPPORTED_MDDEV_FLAGS	(1L << MD_FAILFAST_SUPPORTED)
63 
64 
65 #define cpu_to_group(cpu) cpu_to_node(cpu)
66 #define ANY_GROUP NUMA_NO_NODE
67 
68 #define RAID5_MAX_REQ_STRIPES 256
69 
70 static bool devices_handle_discard_safely = false;
71 module_param(devices_handle_discard_safely, bool, 0644);
72 MODULE_PARM_DESC(devices_handle_discard_safely,
73 		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
74 static struct workqueue_struct *raid5_wq;
75 
76 static void raid5_quiesce(struct mddev *mddev, int quiesce);
77 
78 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
79 {
80 	int hash = (sect >> RAID5_STRIPE_SHIFT(conf)) & HASH_MASK;
81 	return &conf->stripe_hashtbl[hash];
82 }
83 
84 static inline int stripe_hash_locks_hash(struct r5conf *conf, sector_t sect)
85 {
86 	return (sect >> RAID5_STRIPE_SHIFT(conf)) & STRIPE_HASH_LOCKS_MASK;
87 }
88 
89 static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
90 	__acquires(&conf->device_lock)
91 {
92 	spin_lock_irq(conf->hash_locks + hash);
93 	spin_lock(&conf->device_lock);
94 }
95 
96 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
97 	__releases(&conf->device_lock)
98 {
99 	spin_unlock(&conf->device_lock);
100 	spin_unlock_irq(conf->hash_locks + hash);
101 }
102 
103 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
104 	__acquires(&conf->device_lock)
105 {
106 	int i;
107 	spin_lock_irq(conf->hash_locks);
108 	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
109 		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
110 	spin_lock(&conf->device_lock);
111 }
112 
113 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
114 	__releases(&conf->device_lock)
115 {
116 	int i;
117 	spin_unlock(&conf->device_lock);
118 	for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
119 		spin_unlock(conf->hash_locks + i);
120 	spin_unlock_irq(conf->hash_locks);
121 }
122 
123 /* Find first data disk in a raid6 stripe */
124 static inline int raid6_d0(struct stripe_head *sh)
125 {
126 	if (sh->ddf_layout)
127 		/* ddf always starts from the first device */
128 		return 0;
129 	/* md starts just after Q block */
130 	if (sh->qd_idx == sh->disks - 1)
131 		return 0;
132 	else
133 		return sh->qd_idx + 1;
134 }
135 static inline int raid6_next_disk(int disk, int raid_disks)
136 {
137 	disk++;
138 	return (disk < raid_disks) ? disk : 0;
139 }
140 
141 /* When walking through the disks in a raid5, starting at raid6_d0,
142  * we need to map each disk to a 'slot', where the data disks are slots
143  * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
144  * is raid_disks-1.  This helper does that mapping.
145  */
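/* For example, with the md layout on a 6-device array (pd_idx = 4,
 * qd_idx = 5): raid6_d0() returns 0, and walking the disks maps devices
 * 0..3 to slots 0..3, the P disk (4) to slot 4 (syndrome_disks) and the
 * Q disk (5) to slot 5 (syndrome_disks + 1).
 */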
146 static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
147 			     int *count, int syndrome_disks)
148 {
149 	int slot = *count;
150 
151 	if (sh->ddf_layout)
152 		(*count)++;
153 	if (idx == sh->pd_idx)
154 		return syndrome_disks;
155 	if (idx == sh->qd_idx)
156 		return syndrome_disks + 1;
157 	if (!sh->ddf_layout)
158 		(*count)++;
159 	return slot;
160 }
161 
162 static void print_raid5_conf(struct r5conf *conf);
163 
164 static int stripe_operations_active(struct stripe_head *sh)
165 {
166 	return sh->check_state || sh->reconstruct_state ||
167 	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
168 	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
169 }
170 
171 static bool stripe_is_lowprio(struct stripe_head *sh)
172 {
173 	return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) ||
174 		test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) &&
175 	       !test_bit(STRIPE_R5C_CACHING, &sh->state);
176 }
177 
178 static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
179 	__must_hold(&sh->raid_conf->device_lock)
180 {
181 	struct r5conf *conf = sh->raid_conf;
182 	struct r5worker_group *group;
183 	int thread_cnt;
184 	int i, cpu = sh->cpu;
185 
186 	if (!cpu_online(cpu)) {
187 		cpu = cpumask_any(cpu_online_mask);
188 		sh->cpu = cpu;
189 	}
190 
191 	if (list_empty(&sh->lru)) {
192 		struct r5worker_group *group;
193 		group = conf->worker_groups + cpu_to_group(cpu);
194 		if (stripe_is_lowprio(sh))
195 			list_add_tail(&sh->lru, &group->loprio_list);
196 		else
197 			list_add_tail(&sh->lru, &group->handle_list);
198 		group->stripes_cnt++;
199 		sh->group = group;
200 	}
201 
202 	if (conf->worker_cnt_per_group == 0) {
203 		md_wakeup_thread(conf->mddev->thread);
204 		return;
205 	}
206 
207 	group = conf->worker_groups + cpu_to_group(sh->cpu);
208 
209 	group->workers[0].working = true;
210 	/* at least one worker should run to avoid race */
211 	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
212 
213 	thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
214 	/* wakeup more workers */
215 	for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
216 		if (group->workers[i].working == false) {
217 			group->workers[i].working = true;
218 			queue_work_on(sh->cpu, raid5_wq,
219 				      &group->workers[i].work);
220 			thread_cnt--;
221 		}
222 	}
223 }
224 
225 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
226 			      struct list_head *temp_inactive_list)
227 	__must_hold(&conf->device_lock)
228 {
229 	int i;
230 	int injournal = 0;	/* number of data pages with R5_InJournal */
231 
232 	BUG_ON(!list_empty(&sh->lru));
233 	BUG_ON(atomic_read(&conf->active_stripes)==0);
234 
235 	if (r5c_is_writeback(conf->log))
236 		for (i = sh->disks; i--; )
237 			if (test_bit(R5_InJournal, &sh->dev[i].flags))
238 				injournal++;
239 	/*
240 	 * In the following cases, the stripe cannot be released to cached
241 	 * lists. Therefore, we make the stripe write out and set
242 	 * STRIPE_HANDLE:
243 	 *   1. when the array is quiesced in r5c write-back mode;
244 	 *   2. when resync is requested for the stripe.
245 	 */
246 	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) ||
247 	    (conf->quiesce && r5c_is_writeback(conf->log) &&
248 	     !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) {
249 		if (test_bit(STRIPE_R5C_CACHING, &sh->state))
250 			r5c_make_stripe_write_out(sh);
251 		set_bit(STRIPE_HANDLE, &sh->state);
252 	}
253 
254 	if (test_bit(STRIPE_HANDLE, &sh->state)) {
255 		if (test_bit(STRIPE_DELAYED, &sh->state) &&
256 		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
257 			list_add_tail(&sh->lru, &conf->delayed_list);
258 		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
259 			   sh->bm_seq - conf->seq_write > 0)
260 			list_add_tail(&sh->lru, &conf->bitmap_list);
261 		else {
262 			clear_bit(STRIPE_DELAYED, &sh->state);
263 			clear_bit(STRIPE_BIT_DELAY, &sh->state);
264 			if (conf->worker_cnt_per_group == 0) {
265 				if (stripe_is_lowprio(sh))
266 					list_add_tail(&sh->lru,
267 							&conf->loprio_list);
268 				else
269 					list_add_tail(&sh->lru,
270 							&conf->handle_list);
271 			} else {
272 				raid5_wakeup_stripe_thread(sh);
273 				return;
274 			}
275 		}
276 		md_wakeup_thread(conf->mddev->thread);
277 	} else {
278 		BUG_ON(stripe_operations_active(sh));
279 		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
280 			if (atomic_dec_return(&conf->preread_active_stripes)
281 			    < IO_THRESHOLD)
282 				md_wakeup_thread(conf->mddev->thread);
283 		atomic_dec(&conf->active_stripes);
284 		if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
285 			if (!r5c_is_writeback(conf->log))
286 				list_add_tail(&sh->lru, temp_inactive_list);
287 			else {
288 				WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags));
289 				if (injournal == 0)
290 					list_add_tail(&sh->lru, temp_inactive_list);
291 				else if (injournal == conf->raid_disks - conf->max_degraded) {
292 					/* full stripe */
293 					if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state))
294 						atomic_inc(&conf->r5c_cached_full_stripes);
295 					if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
296 						atomic_dec(&conf->r5c_cached_partial_stripes);
297 					list_add_tail(&sh->lru, &conf->r5c_full_stripe_list);
298 					r5c_check_cached_full_stripe(conf);
299 				} else
300 					/*
301 					 * STRIPE_R5C_PARTIAL_STRIPE is set in
302 					 * r5c_try_caching_write(). No need to
303 					 * set it again.
304 					 */
305 					list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list);
306 			}
307 		}
308 	}
309 }
310 
311 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
312 			     struct list_head *temp_inactive_list)
313 	__must_hold(&conf->device_lock)
314 {
315 	if (atomic_dec_and_test(&sh->count))
316 		do_release_stripe(conf, sh, temp_inactive_list);
317 }
318 
319 /*
320  * @hash could be NR_STRIPE_HASH_LOCKS, in which case temp_inactive_list is
321  * an array with one inactive list per hash lock.
322  * Be careful: only one task may add/delete stripes from temp_inactive_list at
323  * a given time. Adding stripes only takes the device lock, while deleting
324  * stripes only takes the hash lock.
325  */
326 static void release_inactive_stripe_list(struct r5conf *conf,
327 					 struct list_head *temp_inactive_list,
328 					 int hash)
329 {
330 	int size;
331 	bool do_wakeup = false;
332 	unsigned long flags;
333 
334 	if (hash == NR_STRIPE_HASH_LOCKS) {
335 		size = NR_STRIPE_HASH_LOCKS;
336 		hash = NR_STRIPE_HASH_LOCKS - 1;
337 	} else
338 		size = 1;
339 	while (size) {
340 		struct list_head *list = &temp_inactive_list[size - 1];
341 
342 		/*
343 		 * We don't hold any lock here yet, raid5_get_active_stripe() might
344 		 * remove stripes from the list
345 		 */
346 		if (!list_empty_careful(list)) {
347 			spin_lock_irqsave(conf->hash_locks + hash, flags);
348 			if (list_empty(conf->inactive_list + hash) &&
349 			    !list_empty(list))
350 				atomic_dec(&conf->empty_inactive_list_nr);
351 			list_splice_tail_init(list, conf->inactive_list + hash);
352 			do_wakeup = true;
353 			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
354 		}
355 		size--;
356 		hash--;
357 	}
358 
359 	if (do_wakeup) {
360 		wake_up(&conf->wait_for_stripe);
361 		if (atomic_read(&conf->active_stripes) == 0)
362 			wake_up(&conf->wait_for_quiescent);
363 		if (conf->retry_read_aligned)
364 			md_wakeup_thread(conf->mddev->thread);
365 	}
366 }
367 
368 static int release_stripe_list(struct r5conf *conf,
369 			       struct list_head *temp_inactive_list)
370 	__must_hold(&conf->device_lock)
371 {
372 	struct stripe_head *sh, *t;
373 	int count = 0;
374 	struct llist_node *head;
375 
376 	head = llist_del_all(&conf->released_stripes);
377 	head = llist_reverse_order(head);
378 	llist_for_each_entry_safe(sh, t, head, release_list) {
379 		int hash;
380 
381 		/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
382 		smp_mb();
383 		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
384 		/*
385 		 * Don't worry that the bit is set here, because if the bit is set
386 		 * again, the count is always > 1. This is true for
387 		 * STRIPE_ON_UNPLUG_LIST bit too.
388 		 */
389 		hash = sh->hash_lock_index;
390 		__release_stripe(conf, sh, &temp_inactive_list[hash]);
391 		count++;
392 	}
393 
394 	return count;
395 }
396 
397 void raid5_release_stripe(struct stripe_head *sh)
398 {
399 	struct r5conf *conf = sh->raid_conf;
400 	unsigned long flags;
401 	struct list_head list;
402 	int hash;
403 	bool wakeup;
404 
405 	/* Avoid release_list until the last reference.
406 	 */
407 	if (atomic_add_unless(&sh->count, -1, 1))
408 		return;
409 
410 	if (unlikely(!conf->mddev->thread) ||
411 		test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
412 		goto slow_path;
413 	wakeup = llist_add(&sh->release_list, &conf->released_stripes);
414 	if (wakeup)
415 		md_wakeup_thread(conf->mddev->thread);
416 	return;
417 slow_path:
418 	/* we are ok here whether STRIPE_ON_RELEASE_LIST is set or not */
419 	if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) {
420 		INIT_LIST_HEAD(&list);
421 		hash = sh->hash_lock_index;
422 		do_release_stripe(conf, sh, &list);
423 		spin_unlock_irqrestore(&conf->device_lock, flags);
424 		release_inactive_stripe_list(conf, &list, hash);
425 	}
426 }
427 
428 static inline void remove_hash(struct stripe_head *sh)
429 {
430 	pr_debug("remove_hash(), stripe %llu\n",
431 		(unsigned long long)sh->sector);
432 
433 	hlist_del_init(&sh->hash);
434 }
435 
436 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
437 {
438 	struct hlist_head *hp = stripe_hash(conf, sh->sector);
439 
440 	pr_debug("insert_hash(), stripe %llu\n",
441 		(unsigned long long)sh->sector);
442 
443 	hlist_add_head(&sh->hash, hp);
444 }
445 
446 /* find an idle stripe, make sure it is unhashed, and return it. */
447 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
448 {
449 	struct stripe_head *sh = NULL;
450 	struct list_head *first;
451 
452 	if (list_empty(conf->inactive_list + hash))
453 		goto out;
454 	first = (conf->inactive_list + hash)->next;
455 	sh = list_entry(first, struct stripe_head, lru);
456 	list_del_init(first);
457 	remove_hash(sh);
458 	atomic_inc(&conf->active_stripes);
459 	BUG_ON(hash != sh->hash_lock_index);
460 	if (list_empty(conf->inactive_list + hash))
461 		atomic_inc(&conf->empty_inactive_list_nr);
462 out:
463 	return sh;
464 }
465 
466 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
467 static void free_stripe_pages(struct stripe_head *sh)
468 {
469 	int i;
470 	struct page *p;
471 
472 	/* The page pool has not been allocated yet */
473 	if (!sh->pages)
474 		return;
475 
476 	for (i = 0; i < sh->nr_pages; i++) {
477 		p = sh->pages[i];
478 		if (p)
479 			put_page(p);
480 		sh->pages[i] = NULL;
481 	}
482 }
483 
484 static int alloc_stripe_pages(struct stripe_head *sh, gfp_t gfp)
485 {
486 	int i;
487 	struct page *p;
488 
489 	for (i = 0; i < sh->nr_pages; i++) {
490 		/* The page has already been allocated. */
491 		if (sh->pages[i])
492 			continue;
493 
494 		p = alloc_page(gfp);
495 		if (!p) {
496 			free_stripe_pages(sh);
497 			return -ENOMEM;
498 		}
499 		sh->pages[i] = p;
500 	}
501 	return 0;
502 }
503 
504 static int
505 init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks)
506 {
507 	int nr_pages, cnt;
508 
509 	if (sh->pages)
510 		return 0;
511 
512 	/* Each of the sh->dev[i] needs one conf->stripe_size */
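	/* e.g. with 16K pages and a 4K stripe_size, cnt = 4, so a 10-device
	 * stripe fits in nr_pages = 3 shared pages */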
513 	cnt = PAGE_SIZE / conf->stripe_size;
514 	nr_pages = (disks + cnt - 1) / cnt;
515 
516 	sh->pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
517 	if (!sh->pages)
518 		return -ENOMEM;
519 	sh->nr_pages = nr_pages;
520 	sh->stripes_per_page = cnt;
521 	return 0;
522 }
523 #endif
524 
525 static void shrink_buffers(struct stripe_head *sh)
526 {
527 	int i;
528 	int num = sh->raid_conf->pool_size;
529 
530 #if PAGE_SIZE == DEFAULT_STRIPE_SIZE
531 	for (i = 0; i < num ; i++) {
532 		struct page *p;
533 
534 		WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
535 		p = sh->dev[i].page;
536 		if (!p)
537 			continue;
538 		sh->dev[i].page = NULL;
539 		put_page(p);
540 	}
541 #else
542 	for (i = 0; i < num; i++)
543 		sh->dev[i].page = NULL;
544 	free_stripe_pages(sh); /* Free pages */
545 #endif
546 }
547 
548 static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
549 {
550 	int i;
551 	int num = sh->raid_conf->pool_size;
552 
553 #if PAGE_SIZE == DEFAULT_STRIPE_SIZE
554 	for (i = 0; i < num; i++) {
555 		struct page *page;
556 
557 		if (!(page = alloc_page(gfp))) {
558 			return 1;
559 		}
560 		sh->dev[i].page = page;
561 		sh->dev[i].orig_page = page;
562 		sh->dev[i].offset = 0;
563 	}
564 #else
565 	if (alloc_stripe_pages(sh, gfp))
566 		return -ENOMEM;
567 
568 	for (i = 0; i < num; i++) {
569 		sh->dev[i].page = raid5_get_dev_page(sh, i);
570 		sh->dev[i].orig_page = sh->dev[i].page;
571 		sh->dev[i].offset = raid5_get_page_offset(sh, i);
572 	}
573 #endif
574 	return 0;
575 }
576 
577 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
578 			    struct stripe_head *sh);
579 
580 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
581 {
582 	struct r5conf *conf = sh->raid_conf;
583 	int i, seq;
584 
585 	BUG_ON(atomic_read(&sh->count) != 0);
586 	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
587 	BUG_ON(stripe_operations_active(sh));
588 	BUG_ON(sh->batch_head);
589 
590 	pr_debug("init_stripe called, stripe %llu\n",
591 		(unsigned long long)sector);
592 retry:
593 	seq = read_seqcount_begin(&conf->gen_lock);
594 	sh->generation = conf->generation - previous;
595 	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
596 	sh->sector = sector;
597 	stripe_set_idx(sector, conf, previous, sh);
598 	sh->state = 0;
599 
600 	for (i = sh->disks; i--; ) {
601 		struct r5dev *dev = &sh->dev[i];
602 
603 		if (dev->toread || dev->read || dev->towrite || dev->written ||
604 		    test_bit(R5_LOCKED, &dev->flags)) {
605 			pr_err("sector=%llx i=%d %p %p %p %p %d\n",
606 			       (unsigned long long)sh->sector, i, dev->toread,
607 			       dev->read, dev->towrite, dev->written,
608 			       test_bit(R5_LOCKED, &dev->flags));
609 			WARN_ON(1);
610 		}
611 		dev->flags = 0;
612 		dev->sector = raid5_compute_blocknr(sh, i, previous);
613 	}
614 	if (read_seqcount_retry(&conf->gen_lock, seq))
615 		goto retry;
616 	sh->overwrite_disks = 0;
617 	insert_hash(conf, sh);
618 	sh->cpu = smp_processor_id();
619 	set_bit(STRIPE_BATCH_READY, &sh->state);
620 }
621 
622 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
623 					 short generation)
624 {
625 	struct stripe_head *sh;
626 
627 	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
628 	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
629 		if (sh->sector == sector && sh->generation == generation)
630 			return sh;
631 	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
632 	return NULL;
633 }
634 
635 static struct stripe_head *find_get_stripe(struct r5conf *conf,
636 		sector_t sector, short generation, int hash)
637 {
638 	int inc_empty_inactive_list_flag;
639 	struct stripe_head *sh;
640 
641 	sh = __find_stripe(conf, sector, generation);
642 	if (!sh)
643 		return NULL;
644 
645 	if (atomic_inc_not_zero(&sh->count))
646 		return sh;
647 
648 	/*
649 	 * Slow path. The reference count is zero which means the stripe must
650 	 * be on a list (sh->lru). Must remove the stripe from the list that
651 	 * references it with the device_lock held.
652 	 */
653 
654 	spin_lock(&conf->device_lock);
655 	if (!atomic_read(&sh->count)) {
656 		if (!test_bit(STRIPE_HANDLE, &sh->state))
657 			atomic_inc(&conf->active_stripes);
658 		BUG_ON(list_empty(&sh->lru) &&
659 		       !test_bit(STRIPE_EXPANDING, &sh->state));
660 		inc_empty_inactive_list_flag = 0;
661 		if (!list_empty(conf->inactive_list + hash))
662 			inc_empty_inactive_list_flag = 1;
663 		list_del_init(&sh->lru);
664 		if (list_empty(conf->inactive_list + hash) &&
665 		    inc_empty_inactive_list_flag)
666 			atomic_inc(&conf->empty_inactive_list_nr);
667 		if (sh->group) {
668 			sh->group->stripes_cnt--;
669 			sh->group = NULL;
670 		}
671 	}
672 	atomic_inc(&sh->count);
673 	spin_unlock(&conf->device_lock);
674 
675 	return sh;
676 }
677 
678 /*
679  * Need to check if array has failed when deciding whether to:
680  *  - start an array
681  *  - remove non-faulty devices
682  *  - add a spare
683  *  - allow a reshape
684  * This determination is simple when no reshape is happening.
685  * However if there is a reshape, we need to carefully check
686  * both the before and after sections.
687  * This is because some failed devices may only affect one
688  * of the two sections, and some non-in_sync devices may
689  * be insync in the section most affected by failed devices.
690  *
691  * Most calls to this function hold &conf->device_lock. Calls
692  * in raid5_run() do not require the lock as no other threads
693  * have been started yet.
694  */
695 int raid5_calc_degraded(struct r5conf *conf)
696 {
697 	int degraded, degraded2;
698 	int i;
699 
700 	degraded = 0;
701 	for (i = 0; i < conf->previous_raid_disks; i++) {
702 		struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
703 
704 		if (rdev && test_bit(Faulty, &rdev->flags))
705 			rdev = READ_ONCE(conf->disks[i].replacement);
706 		if (!rdev || test_bit(Faulty, &rdev->flags))
707 			degraded++;
708 		else if (test_bit(In_sync, &rdev->flags))
709 			;
710 		else
711 			/* not in-sync or faulty.
712 			 * If the reshape increases the number of devices,
713 			 * this is being recovered by the reshape, so
714 			 * this 'previous' section is not in_sync.
715 			 * If the number of devices is being reduced however,
716 			 * the device can only be part of the array if
717 			 * we are reverting a reshape, so this section will
718 			 * be in-sync.
719 			 */
720 			if (conf->raid_disks >= conf->previous_raid_disks)
721 				degraded++;
722 	}
723 	if (conf->raid_disks == conf->previous_raid_disks)
724 		return degraded;
725 	degraded2 = 0;
726 	for (i = 0; i < conf->raid_disks; i++) {
727 		struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
728 
729 		if (rdev && test_bit(Faulty, &rdev->flags))
730 			rdev = READ_ONCE(conf->disks[i].replacement);
731 		if (!rdev || test_bit(Faulty, &rdev->flags))
732 			degraded2++;
733 		else if (test_bit(In_sync, &rdev->flags))
734 			;
735 		else
736 			/* not in-sync or faulty.
737 			 * If reshape increases the number of devices, this
738 			 * section has already been recovered, else it
739 			 * almost certainly hasn't.
740 			 */
741 			if (conf->raid_disks <= conf->previous_raid_disks)
742 				degraded2++;
743 	}
744 	if (degraded2 > degraded)
745 		return degraded2;
746 	return degraded;
747 }
748 
749 static bool has_failed(struct r5conf *conf)
750 {
751 	int degraded = conf->mddev->degraded;
752 
753 	if (test_bit(MD_BROKEN, &conf->mddev->flags))
754 		return true;
755 
756 	if (conf->mddev->reshape_position != MaxSector)
757 		degraded = raid5_calc_degraded(conf);
758 
759 	return degraded > conf->max_degraded;
760 }
761 
762 enum stripe_result {
763 	STRIPE_SUCCESS = 0,
764 	STRIPE_RETRY,
765 	STRIPE_SCHEDULE_AND_RETRY,
766 	STRIPE_FAIL,
767 	STRIPE_WAIT_RESHAPE,
768 };
769 
770 struct stripe_request_ctx {
771 	/* a reference to the last stripe_head for batching */
772 	struct stripe_head *batch_last;
773 
774 	/* first sector in the request */
775 	sector_t first_sector;
776 
777 	/* last sector in the request */
778 	sector_t last_sector;
779 
780 	/* the request had REQ_PREFLUSH, cleared after the first stripe_head */
781 	bool do_flush;
782 
783 	/*
784 	 * bitmap to track stripe sectors that have been added to stripes
785 	 * add one to account for unaligned requests
786 	 */
787 	unsigned long sectors_to_do[];
788 };
789 
790 /*
791  * Block until another thread clears R5_INACTIVE_BLOCKED or
792  * there are fewer than 3/4 the maximum number of active stripes
793  * and there is an inactive stripe available.
794  */
795 static bool is_inactive_blocked(struct r5conf *conf, int hash)
796 {
797 	if (list_empty(conf->inactive_list + hash))
798 		return false;
799 
800 	if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
801 		return true;
802 
803 	return (atomic_read(&conf->active_stripes) <
804 		(conf->max_nr_stripes * 3 / 4));
805 }
806 
807 struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
808 		struct stripe_request_ctx *ctx, sector_t sector,
809 		unsigned int flags)
810 {
811 	struct stripe_head *sh;
812 	int hash = stripe_hash_locks_hash(conf, sector);
813 	int previous = !!(flags & R5_GAS_PREVIOUS);
814 
815 	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
816 
817 	spin_lock_irq(conf->hash_locks + hash);
818 
819 	for (;;) {
820 		if (!(flags & R5_GAS_NOQUIESCE) && conf->quiesce) {
821 			/*
822 			 * Must release the reference to batch_last before
823 			 * waiting on quiesce, otherwise batch_last will
824 			 * hold a reference to a stripe and raid5_quiesce()
825 			 * will deadlock waiting for active_stripes to go to
826 			 * zero.
827 			 */
828 			if (ctx && ctx->batch_last) {
829 				raid5_release_stripe(ctx->batch_last);
830 				ctx->batch_last = NULL;
831 			}
832 
833 			wait_event_lock_irq(conf->wait_for_quiescent,
834 					    !conf->quiesce,
835 					    *(conf->hash_locks + hash));
836 		}
837 
838 		sh = find_get_stripe(conf, sector, conf->generation - previous,
839 				     hash);
840 		if (sh)
841 			break;
842 
843 		if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
844 			sh = get_free_stripe(conf, hash);
845 			if (sh) {
846 				r5c_check_stripe_cache_usage(conf);
847 				init_stripe(sh, sector, previous);
848 				atomic_inc(&sh->count);
849 				break;
850 			}
851 
852 			if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
853 				set_bit(R5_ALLOC_MORE, &conf->cache_state);
854 		}
855 
856 		if (flags & R5_GAS_NOBLOCK)
857 			break;
858 
859 		set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
860 		r5l_wake_reclaim(conf->log, 0);
861 
862 		/* release batch_last before wait to avoid risk of deadlock */
863 		if (ctx && ctx->batch_last) {
864 			raid5_release_stripe(ctx->batch_last);
865 			ctx->batch_last = NULL;
866 		}
867 
868 		wait_event_lock_irq(conf->wait_for_stripe,
869 				    is_inactive_blocked(conf, hash),
870 				    *(conf->hash_locks + hash));
871 		clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
872 	}
873 
874 	spin_unlock_irq(conf->hash_locks + hash);
875 	return sh;
876 }
877 
878 static bool is_full_stripe_write(struct stripe_head *sh)
879 {
880 	BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
881 	return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded);
882 }
883 
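/* Take both stripe locks in a fixed (address) order to avoid ABBA deadlock. */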
884 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
885 		__acquires(&sh1->stripe_lock)
886 		__acquires(&sh2->stripe_lock)
887 {
888 	if (sh1 > sh2) {
889 		spin_lock_irq(&sh2->stripe_lock);
890 		spin_lock_nested(&sh1->stripe_lock, 1);
891 	} else {
892 		spin_lock_irq(&sh1->stripe_lock);
893 		spin_lock_nested(&sh2->stripe_lock, 1);
894 	}
895 }
896 
897 static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
898 		__releases(&sh1->stripe_lock)
899 		__releases(&sh2->stripe_lock)
900 {
901 	spin_unlock(&sh1->stripe_lock);
902 	spin_unlock_irq(&sh2->stripe_lock);
903 }
904 
905 /* Only a freshly initialized, full-stripe normal write can be added to a batch list */
906 static bool stripe_can_batch(struct stripe_head *sh)
907 {
908 	struct r5conf *conf = sh->raid_conf;
909 
910 	if (raid5_has_log(conf) || raid5_has_ppl(conf))
911 		return false;
912 	return test_bit(STRIPE_BATCH_READY, &sh->state) &&
913 	       is_full_stripe_write(sh);
914 }
915 
916 /* we only search backwards for a stripe to batch with */
917 static void stripe_add_to_batch_list(struct r5conf *conf,
918 		struct stripe_head *sh, struct stripe_head *last_sh)
919 {
920 	struct stripe_head *head;
921 	sector_t head_sector, tmp_sec;
922 	int hash;
923 	int dd_idx;
924 
925 	/* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
926 	tmp_sec = sh->sector;
927 	if (!sector_div(tmp_sec, conf->chunk_sectors))
928 		return;
929 	head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf);
930 
931 	if (last_sh && head_sector == last_sh->sector) {
932 		head = last_sh;
933 		atomic_inc(&head->count);
934 	} else {
935 		hash = stripe_hash_locks_hash(conf, head_sector);
936 		spin_lock_irq(conf->hash_locks + hash);
937 		head = find_get_stripe(conf, head_sector, conf->generation,
938 				       hash);
939 		spin_unlock_irq(conf->hash_locks + hash);
940 		if (!head)
941 			return;
942 		if (!stripe_can_batch(head))
943 			goto out;
944 	}
945 
946 	lock_two_stripes(head, sh);
947 	/* clear_batch_ready clears the flag */
948 	if (!stripe_can_batch(head) || !stripe_can_batch(sh))
949 		goto unlock_out;
950 
951 	if (sh->batch_head)
952 		goto unlock_out;
953 
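	/* Batch only writes of the same kind: compare the op and flags of the
	 * queued write on the first data device of each stripe. */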
954 	dd_idx = 0;
955 	while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
956 		dd_idx++;
957 	if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
958 	    bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
959 		goto unlock_out;
960 
961 	if (head->batch_head) {
962 		spin_lock(&head->batch_head->batch_lock);
963 		/* This batch list is already running */
964 		if (!stripe_can_batch(head)) {
965 			spin_unlock(&head->batch_head->batch_lock);
966 			goto unlock_out;
967 		}
968 		/*
969 		 * We must assign batch_head of this stripe within the
970 		 * batch_lock, otherwise clear_batch_ready of batch head
971 		 * stripe could clear BATCH_READY bit of this stripe and
972 		 * this stripe->batch_head doesn't get assigned, which
973 		 * could confuse clear_batch_ready for this stripe
974 		 */
975 		sh->batch_head = head->batch_head;
976 
977 		/*
978 		 * at this point, head's BATCH_READY could be cleared, but we
979 		 * can still add the stripe to batch list
980 		 */
981 		list_add(&sh->batch_list, &head->batch_list);
982 		spin_unlock(&head->batch_head->batch_lock);
983 	} else {
984 		head->batch_head = head;
985 		sh->batch_head = head->batch_head;
986 		spin_lock(&head->batch_lock);
987 		list_add_tail(&sh->batch_list, &head->batch_list);
988 		spin_unlock(&head->batch_lock);
989 	}
990 
991 	if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
992 		if (atomic_dec_return(&conf->preread_active_stripes)
993 		    < IO_THRESHOLD)
994 			md_wakeup_thread(conf->mddev->thread);
995 
996 	if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) {
997 		int seq = sh->bm_seq;
998 		if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) &&
999 		    sh->batch_head->bm_seq > seq)
1000 			seq = sh->batch_head->bm_seq;
1001 		set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state);
1002 		sh->batch_head->bm_seq = seq;
1003 	}
1004 
1005 	atomic_inc(&sh->count);
1006 unlock_out:
1007 	unlock_two_stripes(head, sh);
1008 out:
1009 	raid5_release_stripe(head);
1010 }
1011 
1012 /* Determine if 'data_offset' or 'new_data_offset' should be used
1013  * in this stripe_head.
1014  */
1015 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
1016 {
1017 	sector_t progress = conf->reshape_progress;
1018 	/* Need a memory barrier to make sure we see the value
1019 	 * of conf->generation, or ->data_offset that was set before
1020 	 * reshape_progress was updated.
1021 	 */
1022 	smp_rmb();
1023 	if (progress == MaxSector)
1024 		return 0;
1025 	if (sh->generation == conf->generation - 1)
1026 		return 0;
1027 	/* We are in a reshape, and this is a new-generation stripe,
1028 	 * so use new_data_offset.
1029 	 */
1030 	return 1;
1031 }
1032 
1033 static void dispatch_bio_list(struct bio_list *tmp)
1034 {
1035 	struct bio *bio;
1036 
1037 	while ((bio = bio_list_pop(tmp)))
1038 		submit_bio_noacct(bio);
1039 }
1040 
1041 static int cmp_stripe(void *priv, const struct list_head *a,
1042 		      const struct list_head *b)
1043 {
1044 	const struct r5pending_data *da = list_entry(a,
1045 				struct r5pending_data, sibling);
1046 	const struct r5pending_data *db = list_entry(b,
1047 				struct r5pending_data, sibling);
1048 	if (da->sector > db->sector)
1049 		return 1;
1050 	if (da->sector < db->sector)
1051 		return -1;
1052 	return 0;
1053 }
1054 
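/*
 * Collect the bios of @target pending stripes into @list and return their
 * r5pending_data entries to conf->free_list.  The pending list is kept
 * sorted by sector, and dispatch resumes after conf->next_pending_data so
 * successive calls rotate through it.
 */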
1055 static void dispatch_defer_bios(struct r5conf *conf, int target,
1056 				struct bio_list *list)
1057 {
1058 	struct r5pending_data *data;
1059 	struct list_head *first, *next = NULL;
1060 	int cnt = 0;
1061 
1062 	if (conf->pending_data_cnt == 0)
1063 		return;
1064 
1065 	list_sort(NULL, &conf->pending_list, cmp_stripe);
1066 
1067 	first = conf->pending_list.next;
1068 
1069 	/* temporarily move the head */
1070 	if (conf->next_pending_data)
1071 		list_move_tail(&conf->pending_list,
1072 				&conf->next_pending_data->sibling);
1073 
1074 	while (!list_empty(&conf->pending_list)) {
1075 		data = list_first_entry(&conf->pending_list,
1076 			struct r5pending_data, sibling);
1077 		if (&data->sibling == first)
1078 			first = data->sibling.next;
1079 		next = data->sibling.next;
1080 
1081 		bio_list_merge(list, &data->bios);
1082 		list_move(&data->sibling, &conf->free_list);
1083 		cnt++;
1084 		if (cnt >= target)
1085 			break;
1086 	}
1087 	conf->pending_data_cnt -= cnt;
1088 	BUG_ON(conf->pending_data_cnt < 0 || cnt < target);
1089 
1090 	if (next != &conf->pending_list)
1091 		conf->next_pending_data = list_entry(next,
1092 				struct r5pending_data, sibling);
1093 	else
1094 		conf->next_pending_data = NULL;
1095 	/* list isn't empty */
1096 	if (first != &conf->pending_list)
1097 		list_move_tail(&conf->pending_list, first);
1098 }
1099 
1100 static void flush_deferred_bios(struct r5conf *conf)
1101 {
1102 	struct bio_list tmp = BIO_EMPTY_LIST;
1103 
1104 	if (conf->pending_data_cnt == 0)
1105 		return;
1106 
1107 	spin_lock(&conf->pending_bios_lock);
1108 	dispatch_defer_bios(conf, conf->pending_data_cnt, &tmp);
1109 	BUG_ON(conf->pending_data_cnt != 0);
1110 	spin_unlock(&conf->pending_bios_lock);
1111 
1112 	dispatch_bio_list(&tmp);
1113 }
1114 
1115 static void defer_issue_bios(struct r5conf *conf, sector_t sector,
1116 				struct bio_list *bios)
1117 {
1118 	struct bio_list tmp = BIO_EMPTY_LIST;
1119 	struct r5pending_data *ent;
1120 
1121 	spin_lock(&conf->pending_bios_lock);
1122 	ent = list_first_entry(&conf->free_list, struct r5pending_data,
1123 							sibling);
1124 	list_move_tail(&ent->sibling, &conf->pending_list);
1125 	ent->sector = sector;
1126 	bio_list_init(&ent->bios);
1127 	bio_list_merge(&ent->bios, bios);
1128 	conf->pending_data_cnt++;
1129 	if (conf->pending_data_cnt >= PENDING_IO_MAX)
1130 		dispatch_defer_bios(conf, PENDING_IO_ONE_FLUSH, &tmp);
1131 
1132 	spin_unlock(&conf->pending_bios_lock);
1133 
1134 	dispatch_bio_list(&tmp);
1135 }
1136 
1137 static void
1138 raid5_end_read_request(struct bio *bi);
1139 static void
1140 raid5_end_write_request(struct bio *bi);
1141 
1142 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
1143 {
1144 	struct r5conf *conf = sh->raid_conf;
1145 	int i, disks = sh->disks;
1146 	struct stripe_head *head_sh = sh;
1147 	struct bio_list pending_bios = BIO_EMPTY_LIST;
1148 	struct r5dev *dev;
1149 	bool should_defer;
1150 
1151 	might_sleep();
1152 
1153 	if (log_stripe(sh, s) == 0)
1154 		return;
1155 
1156 	should_defer = conf->batch_bio_dispatch && conf->group_cnt;
1157 
1158 	for (i = disks; i--; ) {
1159 		enum req_op op;
1160 		blk_opf_t op_flags = 0;
1161 		int replace_only = 0;
1162 		struct bio *bi, *rbi;
1163 		struct md_rdev *rdev, *rrdev = NULL;
1164 
1165 		sh = head_sh;
1166 		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
1167 			op = REQ_OP_WRITE;
1168 			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
1169 				op_flags = REQ_FUA;
1170 			if (test_bit(R5_Discard, &sh->dev[i].flags))
1171 				op = REQ_OP_DISCARD;
1172 		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1173 			op = REQ_OP_READ;
1174 		else if (test_and_clear_bit(R5_WantReplace,
1175 					    &sh->dev[i].flags)) {
1176 			op = REQ_OP_WRITE;
1177 			replace_only = 1;
1178 		} else
1179 			continue;
1180 		if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
1181 			op_flags |= REQ_SYNC;
1182 
1183 again:
1184 		dev = &sh->dev[i];
1185 		bi = &dev->req;
1186 		rbi = &dev->rreq; /* For writing to replacement */
1187 
1188 		rdev = conf->disks[i].rdev;
1189 		rrdev = conf->disks[i].replacement;
1190 		if (op_is_write(op)) {
1191 			if (replace_only)
1192 				rdev = NULL;
1193 			if (rdev == rrdev)
1194 				/* We raced and saw duplicates */
1195 				rrdev = NULL;
1196 		} else {
1197 			if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev)
1198 				rdev = rrdev;
1199 			rrdev = NULL;
1200 		}
1201 
1202 		if (rdev && test_bit(Faulty, &rdev->flags))
1203 			rdev = NULL;
1204 		if (rdev)
1205 			atomic_inc(&rdev->nr_pending);
1206 		if (rrdev && test_bit(Faulty, &rrdev->flags))
1207 			rrdev = NULL;
1208 		if (rrdev)
1209 			atomic_inc(&rrdev->nr_pending);
1210 
1211 		/* We have already checked bad blocks for reads.  Now
1212 		 * need to check for writes.  We never accept write errors
1213 		 * on the replacement, so we don't need to check rrdev.
1214 		 */
1215 		while (op_is_write(op) && rdev &&
1216 		       test_bit(WriteErrorSeen, &rdev->flags)) {
1217 			int bad = rdev_has_badblock(rdev, sh->sector,
1218 						    RAID5_STRIPE_SECTORS(conf));
1219 			if (!bad)
1220 				break;
1221 
1222 			if (bad < 0) {
1223 				set_bit(BlockedBadBlocks, &rdev->flags);
1224 				if (!conf->mddev->external &&
1225 				    conf->mddev->sb_flags) {
1226 					/* It is very unlikely, but we might
1227 					 * still need to write out the
1228 					 * bad block log - better give it
1229 					 * a chance */
1230 					md_check_recovery(conf->mddev);
1231 				}
1232 				/*
1233 				 * Because md_wait_for_blocked_rdev
1234 				 * will dec nr_pending, we must
1235 				 * increment it first.
1236 				 */
1237 				atomic_inc(&rdev->nr_pending);
1238 				md_wait_for_blocked_rdev(rdev, conf->mddev);
1239 			} else {
1240 				/* Acknowledged bad block - skip the write */
1241 				rdev_dec_pending(rdev, conf->mddev);
1242 				rdev = NULL;
1243 			}
1244 		}
1245 
1246 		if (rdev) {
1247 			set_bit(STRIPE_IO_STARTED, &sh->state);
1248 
1249 			bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags);
1250 			bi->bi_end_io = op_is_write(op)
1251 				? raid5_end_write_request
1252 				: raid5_end_read_request;
1253 			bi->bi_private = sh;
1254 
1255 			pr_debug("%s: for %llu schedule op %d on disc %d\n",
1256 				__func__, (unsigned long long)sh->sector,
1257 				bi->bi_opf, i);
1258 			atomic_inc(&sh->count);
1259 			if (sh != head_sh)
1260 				atomic_inc(&head_sh->count);
1261 			if (use_new_offset(conf, sh))
1262 				bi->bi_iter.bi_sector = (sh->sector
1263 						 + rdev->new_data_offset);
1264 			else
1265 				bi->bi_iter.bi_sector = (sh->sector
1266 						 + rdev->data_offset);
1267 			if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
1268 				bi->bi_opf |= REQ_NOMERGE;
1269 
1270 			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
1271 				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
1272 
1273 			if (!op_is_write(op) &&
1274 			    test_bit(R5_InJournal, &sh->dev[i].flags))
1275 				/*
1276 				 * issuing read for a page in journal, this
1277 				 * must be preparing for prexor in rmw; read
1278 				 * the data into orig_page
1279 				 */
1280 				sh->dev[i].vec.bv_page = sh->dev[i].orig_page;
1281 			else
1282 				sh->dev[i].vec.bv_page = sh->dev[i].page;
1283 			bi->bi_vcnt = 1;
1284 			bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
1285 			bi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
1286 			bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
1287 			/*
1288 			 * If this is a discard request, set bi_vcnt to 0. We don't
1289 			 * want to confuse SCSI because SCSI will replace the payload.
1290 			 */
1291 			if (op == REQ_OP_DISCARD)
1292 				bi->bi_vcnt = 0;
1293 			if (rrdev)
1294 				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
1295 
1296 			mddev_trace_remap(conf->mddev, bi, sh->dev[i].sector);
1297 			if (should_defer && op_is_write(op))
1298 				bio_list_add(&pending_bios, bi);
1299 			else
1300 				submit_bio_noacct(bi);
1301 		}
1302 		if (rrdev) {
1303 			set_bit(STRIPE_IO_STARTED, &sh->state);
1304 
1305 			bio_init(rbi, rrdev->bdev, &dev->rvec, 1, op | op_flags);
1306 			BUG_ON(!op_is_write(op));
1307 			rbi->bi_end_io = raid5_end_write_request;
1308 			rbi->bi_private = sh;
1309 
1310 			pr_debug("%s: for %llu schedule op %d on "
1311 				 "replacement disc %d\n",
1312 				__func__, (unsigned long long)sh->sector,
1313 				rbi->bi_opf, i);
1314 			atomic_inc(&sh->count);
1315 			if (sh != head_sh)
1316 				atomic_inc(&head_sh->count);
1317 			if (use_new_offset(conf, sh))
1318 				rbi->bi_iter.bi_sector = (sh->sector
1319 						  + rrdev->new_data_offset);
1320 			else
1321 				rbi->bi_iter.bi_sector = (sh->sector
1322 						  + rrdev->data_offset);
1323 			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
1324 				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
1325 			sh->dev[i].rvec.bv_page = sh->dev[i].page;
1326 			rbi->bi_vcnt = 1;
1327 			rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
1328 			rbi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
1329 			rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
1330 			/*
1331 			 * If this is a discard request, set bi_vcnt to 0. We don't
1332 			 * want to confuse SCSI because SCSI will replace the payload.
1333 			 */
1334 			if (op == REQ_OP_DISCARD)
1335 				rbi->bi_vcnt = 0;
1336 			mddev_trace_remap(conf->mddev, rbi, sh->dev[i].sector);
1337 			if (should_defer && op_is_write(op))
1338 				bio_list_add(&pending_bios, rbi);
1339 			else
1340 				submit_bio_noacct(rbi);
1341 		}
1342 		if (!rdev && !rrdev) {
1343 			pr_debug("skip op %d on disc %d for sector %llu\n",
1344 				bi->bi_opf, i, (unsigned long long)sh->sector);
1345 			clear_bit(R5_LOCKED, &sh->dev[i].flags);
1346 			set_bit(STRIPE_HANDLE, &sh->state);
1347 		}
1348 
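		/* If this stripe heads a batch, issue the same I/O for every
		 * batch member on this device before moving to the next disk. */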
1349 		if (!head_sh->batch_head)
1350 			continue;
1351 		sh = list_first_entry(&sh->batch_list, struct stripe_head,
1352 				      batch_list);
1353 		if (sh != head_sh)
1354 			goto again;
1355 	}
1356 
1357 	if (should_defer && !bio_list_empty(&pending_bios))
1358 		defer_issue_bios(conf, head_sh->sector, &pending_bios);
1359 }
1360 
1361 static struct dma_async_tx_descriptor *
1362 async_copy_data(int frombio, struct bio *bio, struct page **page,
1363 	unsigned int poff, sector_t sector, struct dma_async_tx_descriptor *tx,
1364 	struct stripe_head *sh, int no_skipcopy)
1365 {
1366 	struct bio_vec bvl;
1367 	struct bvec_iter iter;
1368 	struct page *bio_page;
1369 	int page_offset;
1370 	struct async_submit_ctl submit;
1371 	enum async_tx_flags flags = 0;
1372 	struct r5conf *conf = sh->raid_conf;
1373 
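	/* byte offset within the stripe block at which the bio's data starts
	 * (negative when the bio begins before the block) */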
1374 	if (bio->bi_iter.bi_sector >= sector)
1375 		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
1376 	else
1377 		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
1378 
1379 	if (frombio)
1380 		flags |= ASYNC_TX_FENCE;
1381 	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
1382 
1383 	bio_for_each_segment(bvl, bio, iter) {
1384 		int len = bvl.bv_len;
1385 		int clen;
1386 		int b_offset = 0;
1387 
1388 		if (page_offset < 0) {
1389 			b_offset = -page_offset;
1390 			page_offset += b_offset;
1391 			len -= b_offset;
1392 		}
1393 
1394 		if (len > 0 && page_offset + len > RAID5_STRIPE_SIZE(conf))
1395 			clen = RAID5_STRIPE_SIZE(conf) - page_offset;
1396 		else
1397 			clen = len;
1398 
1399 		if (clen > 0) {
1400 			b_offset += bvl.bv_offset;
1401 			bio_page = bvl.bv_page;
1402 			if (frombio) {
1403 				if (conf->skip_copy &&
1404 				    b_offset == 0 && page_offset == 0 &&
1405 				    clen == RAID5_STRIPE_SIZE(conf) &&
1406 				    !no_skipcopy)
1407 					*page = bio_page;
1408 				else
1409 					tx = async_memcpy(*page, bio_page, page_offset + poff,
1410 						  b_offset, clen, &submit);
1411 			} else
1412 				tx = async_memcpy(bio_page, *page, b_offset,
1413 						  page_offset + poff, clen, &submit);
1414 		}
1415 		/* chain the operations */
1416 		submit.depend_tx = tx;
1417 
1418 		if (clen < len) /* hit end of page */
1419 			break;
1420 		page_offset +=  len;
1421 	}
1422 
1423 	return tx;
1424 }
1425 
1426 static void ops_complete_biofill(void *stripe_head_ref)
1427 {
1428 	struct stripe_head *sh = stripe_head_ref;
1429 	int i;
1430 	struct r5conf *conf = sh->raid_conf;
1431 
1432 	pr_debug("%s: stripe %llu\n", __func__,
1433 		(unsigned long long)sh->sector);
1434 
1435 	/* clear completed biofills */
1436 	for (i = sh->disks; i--; ) {
1437 		struct r5dev *dev = &sh->dev[i];
1438 
1439 		/* acknowledge completion of a biofill operation */
1440 		/* and check if we need to reply to a read request,
1441 		 * new R5_Wantfill requests are held off until
1442 		 * !STRIPE_BIOFILL_RUN
1443 		 */
1444 		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
1445 			struct bio *rbi, *rbi2;
1446 
1447 			BUG_ON(!dev->read);
1448 			rbi = dev->read;
1449 			dev->read = NULL;
1450 			while (rbi && rbi->bi_iter.bi_sector <
1451 				dev->sector + RAID5_STRIPE_SECTORS(conf)) {
1452 				rbi2 = r5_next_bio(conf, rbi, dev->sector);
1453 				bio_endio(rbi);
1454 				rbi = rbi2;
1455 			}
1456 		}
1457 	}
1458 	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
1459 
1460 	set_bit(STRIPE_HANDLE, &sh->state);
1461 	raid5_release_stripe(sh);
1462 }
1463 
1464 static void ops_run_biofill(struct stripe_head *sh)
1465 {
1466 	struct dma_async_tx_descriptor *tx = NULL;
1467 	struct async_submit_ctl submit;
1468 	int i;
1469 	struct r5conf *conf = sh->raid_conf;
1470 
1471 	BUG_ON(sh->batch_head);
1472 	pr_debug("%s: stripe %llu\n", __func__,
1473 		(unsigned long long)sh->sector);
1474 
1475 	for (i = sh->disks; i--; ) {
1476 		struct r5dev *dev = &sh->dev[i];
1477 		if (test_bit(R5_Wantfill, &dev->flags)) {
1478 			struct bio *rbi;
1479 			spin_lock_irq(&sh->stripe_lock);
1480 			dev->read = rbi = dev->toread;
1481 			dev->toread = NULL;
1482 			spin_unlock_irq(&sh->stripe_lock);
1483 			while (rbi && rbi->bi_iter.bi_sector <
1484 				dev->sector + RAID5_STRIPE_SECTORS(conf)) {
1485 				tx = async_copy_data(0, rbi, &dev->page,
1486 						     dev->offset,
1487 						     dev->sector, tx, sh, 0);
1488 				rbi = r5_next_bio(conf, rbi, dev->sector);
1489 			}
1490 		}
1491 	}
1492 
1493 	atomic_inc(&sh->count);
1494 	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
1495 	async_trigger_callback(&submit);
1496 }
1497 
1498 static void mark_target_uptodate(struct stripe_head *sh, int target)
1499 {
1500 	struct r5dev *tgt;
1501 
1502 	if (target < 0)
1503 		return;
1504 
1505 	tgt = &sh->dev[target];
1506 	set_bit(R5_UPTODATE, &tgt->flags);
1507 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1508 	clear_bit(R5_Wantcompute, &tgt->flags);
1509 }
1510 
1511 static void ops_complete_compute(void *stripe_head_ref)
1512 {
1513 	struct stripe_head *sh = stripe_head_ref;
1514 
1515 	pr_debug("%s: stripe %llu\n", __func__,
1516 		(unsigned long long)sh->sector);
1517 
1518 	/* mark the computed target(s) as uptodate */
1519 	mark_target_uptodate(sh, sh->ops.target);
1520 	mark_target_uptodate(sh, sh->ops.target2);
1521 
1522 	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
1523 	if (sh->check_state == check_state_compute_run)
1524 		sh->check_state = check_state_compute_result;
1525 	set_bit(STRIPE_HANDLE, &sh->state);
1526 	raid5_release_stripe(sh);
1527 }
1528 
1529 /* return a pointer to the page list region of the i'th scribble buffer */
1530 static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
1531 {
1532 	return percpu->scribble + i * percpu->scribble_obj_size;
1533 }
1534 
1535 /* return a pointer to the address conversion region of the scribble buffer */
1536 static addr_conv_t *to_addr_conv(struct stripe_head *sh,
1537 				 struct raid5_percpu *percpu, int i)
1538 {
1539 	return (void *) (to_addr_page(percpu, i) + sh->disks + 2);
1540 }
1541 
1542 /*
1543  * Return a pointer to the per-device page offset array in the scribble buffer.
1544  */
1545 static unsigned int *
1546 to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu)
1547 {
1548 	return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2);
1549 }
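
/*
 * Per the accessors above, each per-cpu scribble region is laid out as:
 *   struct page *[disks + 2]  - source/destination page list (to_addr_page)
 *   addr_conv_t  [disks + 2]  - async_tx address conversion (to_addr_conv)
 *   unsigned int [disks + 2]  - per-device page offsets (to_addr_offs)
 */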
1550 
1551 static struct dma_async_tx_descriptor *
1552 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
1553 {
1554 	int disks = sh->disks;
1555 	struct page **xor_srcs = to_addr_page(percpu, 0);
1556 	unsigned int *off_srcs = to_addr_offs(sh, percpu);
1557 	int target = sh->ops.target;
1558 	struct r5dev *tgt = &sh->dev[target];
1559 	struct page *xor_dest = tgt->page;
1560 	unsigned int off_dest = tgt->offset;
1561 	int count = 0;
1562 	struct dma_async_tx_descriptor *tx;
1563 	struct async_submit_ctl submit;
1564 	int i;
1565 
1566 	BUG_ON(sh->batch_head);
1567 
1568 	pr_debug("%s: stripe %llu block: %d\n",
1569 		__func__, (unsigned long long)sh->sector, target);
1570 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1571 
1572 	for (i = disks; i--; ) {
1573 		if (i != target) {
1574 			off_srcs[count] = sh->dev[i].offset;
1575 			xor_srcs[count++] = sh->dev[i].page;
1576 		}
1577 	}
1578 
1579 	atomic_inc(&sh->count);
1580 
1581 	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
1582 			  ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
1583 	if (unlikely(count == 1))
1584 		tx = async_memcpy(xor_dest, xor_srcs[0], off_dest, off_srcs[0],
1585 				RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
1586 	else
1587 		tx = async_xor_offs(xor_dest, off_dest, xor_srcs, off_srcs, count,
1588 				RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
1589 
1590 	return tx;
1591 }
1592 
1593 /* set_syndrome_sources - populate source buffers for gen_syndrome
1594  * @srcs - (struct page *) array of size sh->disks
1595  * @offs - (unsigned int) array of offset for each page
1596  * @sh - stripe_head to parse
1597  *
1598  * Populates srcs in proper layout order for the stripe and returns the
1599  * 'count' of sources to be used in a call to async_gen_syndrome.  The P
1600  * destination buffer is recorded in srcs[count] and the Q destination
1601  * is recorded in srcs[count+1].
1602  */
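/*
 * With the md (non-ddf) layout this means srcs[0..disks-3] hold the data
 * blocks in syndrome order, srcs[disks-2] holds P and srcs[disks-1] holds Q;
 * slots filtered out by @srctype are left NULL.
 */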
1603 static int set_syndrome_sources(struct page **srcs,
1604 				unsigned int *offs,
1605 				struct stripe_head *sh,
1606 				int srctype)
1607 {
1608 	int disks = sh->disks;
1609 	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
1610 	int d0_idx = raid6_d0(sh);
1611 	int count;
1612 	int i;
1613 
1614 	for (i = 0; i < disks; i++)
1615 		srcs[i] = NULL;
1616 
1617 	count = 0;
1618 	i = d0_idx;
1619 	do {
1620 		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1621 		struct r5dev *dev = &sh->dev[i];
1622 
1623 		if (i == sh->qd_idx || i == sh->pd_idx ||
1624 		    (srctype == SYNDROME_SRC_ALL) ||
1625 		    (srctype == SYNDROME_SRC_WANT_DRAIN &&
1626 		     (test_bit(R5_Wantdrain, &dev->flags) ||
1627 		      test_bit(R5_InJournal, &dev->flags))) ||
1628 		    (srctype == SYNDROME_SRC_WRITTEN &&
1629 		     (dev->written ||
1630 		      test_bit(R5_InJournal, &dev->flags)))) {
1631 			if (test_bit(R5_InJournal, &dev->flags))
1632 				srcs[slot] = sh->dev[i].orig_page;
1633 			else
1634 				srcs[slot] = sh->dev[i].page;
1635 			/*
1636 			 * For R5_InJournal, PAGE_SIZE must be 4KB and pages
1637 			 * are not shared. In that case, dev[i].offset
1638 			 * is 0.
1639 			 */
1640 			offs[slot] = sh->dev[i].offset;
1641 		}
1642 		i = raid6_next_disk(i, disks);
1643 	} while (i != d0_idx);
1644 
1645 	return syndrome_disks;
1646 }
1647 
1648 static struct dma_async_tx_descriptor *
1649 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
1650 {
1651 	int disks = sh->disks;
1652 	struct page **blocks = to_addr_page(percpu, 0);
1653 	unsigned int *offs = to_addr_offs(sh, percpu);
1654 	int target;
1655 	int qd_idx = sh->qd_idx;
1656 	struct dma_async_tx_descriptor *tx;
1657 	struct async_submit_ctl submit;
1658 	struct r5dev *tgt;
1659 	struct page *dest;
1660 	unsigned int dest_off;
1661 	int i;
1662 	int count;
1663 
1664 	BUG_ON(sh->batch_head);
1665 	if (sh->ops.target < 0)
1666 		target = sh->ops.target2;
1667 	else if (sh->ops.target2 < 0)
1668 		target = sh->ops.target;
1669 	else
1670 		/* we should only have one valid target */
1671 		BUG();
1672 	BUG_ON(target < 0);
1673 	pr_debug("%s: stripe %llu block: %d\n",
1674 		__func__, (unsigned long long)sh->sector, target);
1675 
1676 	tgt = &sh->dev[target];
1677 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1678 	dest = tgt->page;
1679 	dest_off = tgt->offset;
1680 
1681 	atomic_inc(&sh->count);
1682 
1683 	if (target == qd_idx) {
1684 		count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL);
1685 		blocks[count] = NULL; /* regenerating p is not necessary */
1686 		BUG_ON(blocks[count+1] != dest); /* q should already be set */
1687 		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1688 				  ops_complete_compute, sh,
1689 				  to_addr_conv(sh, percpu, 0));
1690 		tx = async_gen_syndrome(blocks, offs, count+2,
1691 				RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
1692 	} else {
1693 		/* Compute any data- or p-drive using XOR */
1694 		count = 0;
1695 		for (i = disks; i-- ; ) {
1696 			if (i == target || i == qd_idx)
1697 				continue;
1698 			offs[count] = sh->dev[i].offset;
1699 			blocks[count++] = sh->dev[i].page;
1700 		}
1701 
1702 		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
1703 				  NULL, ops_complete_compute, sh,
1704 				  to_addr_conv(sh, percpu, 0));
1705 		tx = async_xor_offs(dest, dest_off, blocks, offs, count,
1706 				RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
1707 	}
1708 
1709 	return tx;
1710 }
1711 
1712 static struct dma_async_tx_descriptor *
1713 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
1714 {
1715 	int i, count, disks = sh->disks;
1716 	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
1717 	int d0_idx = raid6_d0(sh);
1718 	int faila = -1, failb = -1;
1719 	int target = sh->ops.target;
1720 	int target2 = sh->ops.target2;
1721 	struct r5dev *tgt = &sh->dev[target];
1722 	struct r5dev *tgt2 = &sh->dev[target2];
1723 	struct dma_async_tx_descriptor *tx;
1724 	struct page **blocks = to_addr_page(percpu, 0);
1725 	unsigned int *offs = to_addr_offs(sh, percpu);
1726 	struct async_submit_ctl submit;
1727 
1728 	BUG_ON(sh->batch_head);
1729 	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
1730 		 __func__, (unsigned long long)sh->sector, target, target2);
1731 	BUG_ON(target < 0 || target2 < 0);
1732 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1733 	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
1734 
1735 	/* we need to open-code set_syndrome_sources to handle the
1736 	 * slot number conversion for 'faila' and 'failb'
1737 	 */
1738 	for (i = 0; i < disks ; i++) {
1739 		offs[i] = 0;
1740 		blocks[i] = NULL;
1741 	}
1742 	count = 0;
1743 	i = d0_idx;
1744 	do {
1745 		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1746 
1747 		offs[slot] = sh->dev[i].offset;
1748 		blocks[slot] = sh->dev[i].page;
1749 
1750 		if (i == target)
1751 			faila = slot;
1752 		if (i == target2)
1753 			failb = slot;
1754 		i = raid6_next_disk(i, disks);
1755 	} while (i != d0_idx);
1756 
1757 	BUG_ON(faila == failb);
1758 	if (failb < faila)
1759 		swap(faila, failb);
1760 	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
1761 		 __func__, (unsigned long long)sh->sector, faila, failb);
1762 
1763 	atomic_inc(&sh->count);
1764 
1765 	if (failb == syndrome_disks+1) {
1766 		/* Q disk is one of the missing disks */
1767 		if (faila == syndrome_disks) {
1768 			/* Missing P+Q, just recompute */
1769 			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1770 					  ops_complete_compute, sh,
1771 					  to_addr_conv(sh, percpu, 0));
1772 			return async_gen_syndrome(blocks, offs, syndrome_disks+2,
1773 						  RAID5_STRIPE_SIZE(sh->raid_conf),
1774 						  &submit);
1775 		} else {
1776 			struct page *dest;
1777 			unsigned int dest_off;
1778 			int data_target;
1779 			int qd_idx = sh->qd_idx;
1780 
1781 			/* Missing D+Q: recompute D from P, then recompute Q */
1782 			if (target == qd_idx)
1783 				data_target = target2;
1784 			else
1785 				data_target = target;
1786 
1787 			count = 0;
1788 			for (i = disks; i-- ; ) {
1789 				if (i == data_target || i == qd_idx)
1790 					continue;
1791 				offs[count] = sh->dev[i].offset;
1792 				blocks[count++] = sh->dev[i].page;
1793 			}
1794 			dest = sh->dev[data_target].page;
1795 			dest_off = sh->dev[data_target].offset;
1796 			init_async_submit(&submit,
1797 					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
1798 					  NULL, NULL, NULL,
1799 					  to_addr_conv(sh, percpu, 0));
1800 			tx = async_xor_offs(dest, dest_off, blocks, offs, count,
1801 				       RAID5_STRIPE_SIZE(sh->raid_conf),
1802 				       &submit);
1803 
1804 			count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL);
1805 			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
1806 					  ops_complete_compute, sh,
1807 					  to_addr_conv(sh, percpu, 0));
1808 			return async_gen_syndrome(blocks, offs, count+2,
1809 						  RAID5_STRIPE_SIZE(sh->raid_conf),
1810 						  &submit);
1811 		}
1812 	} else {
1813 		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1814 				  ops_complete_compute, sh,
1815 				  to_addr_conv(sh, percpu, 0));
1816 		if (failb == syndrome_disks) {
1817 			/* We're missing D+P. */
1818 			return async_raid6_datap_recov(syndrome_disks+2,
1819 						RAID5_STRIPE_SIZE(sh->raid_conf),
1820 						faila,
1821 						blocks, offs, &submit);
1822 		} else {
1823 			/* We're missing D+D. */
1824 			return async_raid6_2data_recov(syndrome_disks+2,
1825 						RAID5_STRIPE_SIZE(sh->raid_conf),
1826 						faila, failb,
1827 						blocks, offs, &submit);
1828 		}
1829 	}
1830 }
1831 
1832 static void ops_complete_prexor(void *stripe_head_ref)
1833 {
1834 	struct stripe_head *sh = stripe_head_ref;
1835 
1836 	pr_debug("%s: stripe %llu\n", __func__,
1837 		(unsigned long long)sh->sector);
1838 
1839 	if (r5c_is_writeback(sh->raid_conf->log))
1840 		/*
1841 		 * raid5-cache write back uses orig_page during prexor.
1842 		 * After prexor, it is time to free orig_page
1843 		 */
1844 		r5c_release_extra_page(sh);
1845 }
1846 
1847 static struct dma_async_tx_descriptor *
1848 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
1849 		struct dma_async_tx_descriptor *tx)
1850 {
1851 	int disks = sh->disks;
1852 	struct page **xor_srcs = to_addr_page(percpu, 0);
1853 	unsigned int *off_srcs = to_addr_offs(sh, percpu);
1854 	int count = 0, pd_idx = sh->pd_idx, i;
1855 	struct async_submit_ctl submit;
1856 
1857 	/* existing parity data subtracted */
1858 	unsigned int off_dest = off_srcs[count] = sh->dev[pd_idx].offset;
1859 	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1860 
1861 	BUG_ON(sh->batch_head);
1862 	pr_debug("%s: stripe %llu\n", __func__,
1863 		(unsigned long long)sh->sector);
1864 
1865 	for (i = disks; i--; ) {
1866 		struct r5dev *dev = &sh->dev[i];
1867 		/* Only process blocks that are known to be uptodate */
1868 		if (test_bit(R5_InJournal, &dev->flags)) {
1869 			/*
1870 			 * For this case, PAGE_SIZE must be equal to 4KB and
1871 			 * the page offset is zero.
1872 			 */
1873 			off_srcs[count] = dev->offset;
1874 			xor_srcs[count++] = dev->orig_page;
1875 		} else if (test_bit(R5_Wantdrain, &dev->flags)) {
1876 			off_srcs[count] = dev->offset;
1877 			xor_srcs[count++] = dev->page;
1878 		}
1879 	}
1880 
1881 	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
1882 			  ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
1883 	tx = async_xor_offs(xor_dest, off_dest, xor_srcs, off_srcs, count,
1884 			RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
1885 
1886 	return tx;
1887 }
1888 
1889 static struct dma_async_tx_descriptor *
1890 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
1891 		struct dma_async_tx_descriptor *tx)
1892 {
1893 	struct page **blocks = to_addr_page(percpu, 0);
1894 	unsigned int *offs = to_addr_offs(sh, percpu);
1895 	int count;
1896 	struct async_submit_ctl submit;
1897 
1898 	pr_debug("%s: stripe %llu\n", __func__,
1899 		(unsigned long long)sh->sector);
1900 
1901 	count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_WANT_DRAIN);
1902 
1903 	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
1904 			  ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
1905 	tx = async_gen_syndrome(blocks, offs, count+2,
1906 			RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
1907 
1908 	return tx;
1909 }
1910 
1911 static struct dma_async_tx_descriptor *
1912 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1913 {
1914 	struct r5conf *conf = sh->raid_conf;
1915 	int disks = sh->disks;
1916 	int i;
1917 	struct stripe_head *head_sh = sh;
1918 
1919 	pr_debug("%s: stripe %llu\n", __func__,
1920 		(unsigned long long)sh->sector);
1921 
1922 	for (i = disks; i--; ) {
1923 		struct r5dev *dev;
1924 		struct bio *chosen;
1925 
1926 		sh = head_sh;
1927 		if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) {
1928 			struct bio *wbi;
1929 
1930 again:
1931 			dev = &sh->dev[i];
1932 			/*
1933 			 * clear R5_InJournal, so when rewriting a page in
1934 			 * journal, it is not skipped by r5l_log_stripe()
1935 			 */
1936 			clear_bit(R5_InJournal, &dev->flags);
1937 			spin_lock_irq(&sh->stripe_lock);
1938 			chosen = dev->towrite;
1939 			dev->towrite = NULL;
1940 			sh->overwrite_disks = 0;
1941 			BUG_ON(dev->written);
1942 			wbi = dev->written = chosen;
1943 			spin_unlock_irq(&sh->stripe_lock);
1944 			WARN_ON(dev->page != dev->orig_page);
1945 
1946 			while (wbi && wbi->bi_iter.bi_sector <
1947 				dev->sector + RAID5_STRIPE_SECTORS(conf)) {
1948 				if (wbi->bi_opf & REQ_FUA)
1949 					set_bit(R5_WantFUA, &dev->flags);
1950 				if (wbi->bi_opf & REQ_SYNC)
1951 					set_bit(R5_SyncIO, &dev->flags);
1952 				if (bio_op(wbi) == REQ_OP_DISCARD)
1953 					set_bit(R5_Discard, &dev->flags);
1954 				else {
1955 					tx = async_copy_data(1, wbi, &dev->page,
1956 							     dev->offset,
1957 							     dev->sector, tx, sh,
1958 							     r5c_is_writeback(conf->log));
1959 					if (dev->page != dev->orig_page &&
1960 					    !r5c_is_writeback(conf->log)) {
1961 						set_bit(R5_SkipCopy, &dev->flags);
1962 						clear_bit(R5_UPTODATE, &dev->flags);
1963 						clear_bit(R5_OVERWRITE, &dev->flags);
1964 					}
1965 				}
1966 				wbi = r5_next_bio(conf, wbi, dev->sector);
1967 			}
1968 
1969 			if (head_sh->batch_head) {
1970 				sh = list_first_entry(&sh->batch_list,
1971 						      struct stripe_head,
1972 						      batch_list);
1973 				if (sh == head_sh)
1974 					continue;
1975 				goto again;
1976 			}
1977 		}
1978 	}
1979 
1980 	return tx;
1981 }
1982 
1983 static void ops_complete_reconstruct(void *stripe_head_ref)
1984 {
1985 	struct stripe_head *sh = stripe_head_ref;
1986 	int disks = sh->disks;
1987 	int pd_idx = sh->pd_idx;
1988 	int qd_idx = sh->qd_idx;
1989 	int i;
1990 	bool fua = false, sync = false, discard = false;
1991 
1992 	pr_debug("%s: stripe %llu\n", __func__,
1993 		(unsigned long long)sh->sector);
1994 
1995 	for (i = disks; i--; ) {
1996 		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
1997 		sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
1998 		discard |= test_bit(R5_Discard, &sh->dev[i].flags);
1999 	}
2000 
2001 	for (i = disks; i--; ) {
2002 		struct r5dev *dev = &sh->dev[i];
2003 
2004 		if (dev->written || i == pd_idx || i == qd_idx) {
2005 			if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) {
2006 				set_bit(R5_UPTODATE, &dev->flags);
2007 				if (test_bit(STRIPE_EXPAND_READY, &sh->state))
2008 					set_bit(R5_Expanded, &dev->flags);
2009 			}
2010 			if (fua)
2011 				set_bit(R5_WantFUA, &dev->flags);
2012 			if (sync)
2013 				set_bit(R5_SyncIO, &dev->flags);
2014 		}
2015 	}
2016 
2017 	if (sh->reconstruct_state == reconstruct_state_drain_run)
2018 		sh->reconstruct_state = reconstruct_state_drain_result;
2019 	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
2020 		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
2021 	else {
2022 		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
2023 		sh->reconstruct_state = reconstruct_state_result;
2024 	}
2025 
2026 	set_bit(STRIPE_HANDLE, &sh->state);
2027 	raid5_release_stripe(sh);
2028 }
2029 
2030 static void
2031 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
2032 		     struct dma_async_tx_descriptor *tx)
2033 {
2034 	int disks = sh->disks;
2035 	struct page **xor_srcs;
2036 	unsigned int *off_srcs;
2037 	struct async_submit_ctl submit;
2038 	int count, pd_idx = sh->pd_idx, i;
2039 	struct page *xor_dest;
2040 	unsigned int off_dest;
2041 	int prexor = 0;
2042 	unsigned long flags;
2043 	int j = 0;
2044 	struct stripe_head *head_sh = sh;
2045 	int last_stripe;
2046 
2047 	pr_debug("%s: stripe %llu\n", __func__,
2048 		(unsigned long long)sh->sector);
2049 
2050 	for (i = 0; i < sh->disks; i++) {
2051 		if (pd_idx == i)
2052 			continue;
2053 		if (!test_bit(R5_Discard, &sh->dev[i].flags))
2054 			break;
2055 	}
2056 	if (i >= sh->disks) {
2057 		atomic_inc(&sh->count);
2058 		set_bit(R5_Discard, &sh->dev[pd_idx].flags);
2059 		ops_complete_reconstruct(sh);
2060 		return;
2061 	}
2062 again:
2063 	count = 0;
2064 	xor_srcs = to_addr_page(percpu, j);
2065 	off_srcs = to_addr_offs(sh, percpu);
2066 	/* check if prexor is active, which means we only process blocks
2067 	 * that are part of a read-modify-write (written)
2068 	 */
2069 	if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
2070 		prexor = 1;
2071 		off_dest = off_srcs[count] = sh->dev[pd_idx].offset;
2072 		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
2073 		for (i = disks; i--; ) {
2074 			struct r5dev *dev = &sh->dev[i];
2075 			if (head_sh->dev[i].written ||
2076 			    test_bit(R5_InJournal, &head_sh->dev[i].flags)) {
2077 				off_srcs[count] = dev->offset;
2078 				xor_srcs[count++] = dev->page;
2079 			}
2080 		}
2081 	} else {
2082 		xor_dest = sh->dev[pd_idx].page;
2083 		off_dest = sh->dev[pd_idx].offset;
2084 		for (i = disks; i--; ) {
2085 			struct r5dev *dev = &sh->dev[i];
2086 			if (i != pd_idx) {
2087 				off_srcs[count] = dev->offset;
2088 				xor_srcs[count++] = dev->page;
2089 			}
2090 		}
2091 	}
2092 
2093 	/* 1/ if we prexor'd then the dest is reused as a source
2094 	 * 2/ if we did not prexor then we are redoing the parity;
2095 	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST respectively
2096 	 * for the synchronous xor case
2097 	 */
2098 	last_stripe = !head_sh->batch_head ||
2099 		list_first_entry(&sh->batch_list,
2100 				 struct stripe_head, batch_list) == head_sh;
2101 	if (last_stripe) {
2102 		flags = ASYNC_TX_ACK |
2103 			(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
2104 
2105 		atomic_inc(&head_sh->count);
2106 		init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh,
2107 				  to_addr_conv(sh, percpu, j));
2108 	} else {
2109 		flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST;
2110 		init_async_submit(&submit, flags, tx, NULL, NULL,
2111 				  to_addr_conv(sh, percpu, j));
2112 	}
2113 
2114 	if (unlikely(count == 1))
2115 		tx = async_memcpy(xor_dest, xor_srcs[0], off_dest, off_srcs[0],
2116 				RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
2117 	else
2118 		tx = async_xor_offs(xor_dest, off_dest, xor_srcs, off_srcs, count,
2119 				RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
2120 	if (!last_stripe) {
2121 		j++;
2122 		sh = list_first_entry(&sh->batch_list, struct stripe_head,
2123 				      batch_list);
2124 		goto again;
2125 	}
2126 }
2127 
2128 static void
2129 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
2130 		     struct dma_async_tx_descriptor *tx)
2131 {
2132 	struct async_submit_ctl submit;
2133 	struct page **blocks;
2134 	unsigned int *offs;
2135 	int count, i, j = 0;
2136 	struct stripe_head *head_sh = sh;
2137 	int last_stripe;
2138 	int synflags;
2139 	unsigned long txflags;
2140 
2141 	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
2142 
2143 	for (i = 0; i < sh->disks; i++) {
2144 		if (sh->pd_idx == i || sh->qd_idx == i)
2145 			continue;
2146 		if (!test_bit(R5_Discard, &sh->dev[i].flags))
2147 			break;
2148 	}
2149 	if (i >= sh->disks) {
2150 		atomic_inc(&sh->count);
2151 		set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
2152 		set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
2153 		ops_complete_reconstruct(sh);
2154 		return;
2155 	}
2156 
2157 again:
2158 	blocks = to_addr_page(percpu, j);
2159 	offs = to_addr_offs(sh, percpu);
2160 
2161 	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
2162 		synflags = SYNDROME_SRC_WRITTEN;
2163 		txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST;
2164 	} else {
2165 		synflags = SYNDROME_SRC_ALL;
2166 		txflags = ASYNC_TX_ACK;
2167 	}
2168 
2169 	count = set_syndrome_sources(blocks, offs, sh, synflags);
2170 	last_stripe = !head_sh->batch_head ||
2171 		list_first_entry(&sh->batch_list,
2172 				 struct stripe_head, batch_list) == head_sh;
2173 
2174 	if (last_stripe) {
2175 		atomic_inc(&head_sh->count);
2176 		init_async_submit(&submit, txflags, tx, ops_complete_reconstruct,
2177 				  head_sh, to_addr_conv(sh, percpu, j));
2178 	} else
2179 		init_async_submit(&submit, 0, tx, NULL, NULL,
2180 				  to_addr_conv(sh, percpu, j));
2181 	tx = async_gen_syndrome(blocks, offs, count+2,
2182 			RAID5_STRIPE_SIZE(sh->raid_conf),  &submit);
2183 	if (!last_stripe) {
2184 		j++;
2185 		sh = list_first_entry(&sh->batch_list, struct stripe_head,
2186 				      batch_list);
2187 		goto again;
2188 	}
2189 }
2190 
2191 static void ops_complete_check(void *stripe_head_ref)
2192 {
2193 	struct stripe_head *sh = stripe_head_ref;
2194 
2195 	pr_debug("%s: stripe %llu\n", __func__,
2196 		(unsigned long long)sh->sector);
2197 
2198 	sh->check_state = check_state_check_result;
2199 	set_bit(STRIPE_HANDLE, &sh->state);
2200 	raid5_release_stripe(sh);
2201 }
2202 
2203 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
2204 {
2205 	int disks = sh->disks;
2206 	int pd_idx = sh->pd_idx;
2207 	int qd_idx = sh->qd_idx;
2208 	struct page *xor_dest;
2209 	unsigned int off_dest;
2210 	struct page **xor_srcs = to_addr_page(percpu, 0);
2211 	unsigned int *off_srcs = to_addr_offs(sh, percpu);
2212 	struct dma_async_tx_descriptor *tx;
2213 	struct async_submit_ctl submit;
2214 	int count;
2215 	int i;
2216 
2217 	pr_debug("%s: stripe %llu\n", __func__,
2218 		(unsigned long long)sh->sector);
2219 
2220 	BUG_ON(sh->batch_head);
2221 	count = 0;
2222 	xor_dest = sh->dev[pd_idx].page;
2223 	off_dest = sh->dev[pd_idx].offset;
2224 	off_srcs[count] = off_dest;
2225 	xor_srcs[count++] = xor_dest;
2226 	for (i = disks; i--; ) {
2227 		if (i == pd_idx || i == qd_idx)
2228 			continue;
2229 		off_srcs[count] = sh->dev[i].offset;
2230 		xor_srcs[count++] = sh->dev[i].page;
2231 	}
2232 
2233 	init_async_submit(&submit, 0, NULL, NULL, NULL,
2234 			  to_addr_conv(sh, percpu, 0));
2235 	tx = async_xor_val_offs(xor_dest, off_dest, xor_srcs, off_srcs, count,
2236 			   RAID5_STRIPE_SIZE(sh->raid_conf),
2237 			   &sh->ops.zero_sum_result, &submit);
2238 
2239 	atomic_inc(&sh->count);
2240 	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
2241 	tx = async_trigger_callback(&submit);
2242 }
2243 
2244 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
2245 {
2246 	struct page **srcs = to_addr_page(percpu, 0);
2247 	unsigned int *offs = to_addr_offs(sh, percpu);
2248 	struct async_submit_ctl submit;
2249 	int count;
2250 
2251 	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
2252 		(unsigned long long)sh->sector, checkp);
2253 
2254 	BUG_ON(sh->batch_head);
2255 	count = set_syndrome_sources(srcs, offs, sh, SYNDROME_SRC_ALL);
2256 	if (!checkp)
2257 		srcs[count] = NULL;
2258 
2259 	atomic_inc(&sh->count);
2260 	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
2261 			  sh, to_addr_conv(sh, percpu, 0));
2262 	async_syndrome_val(srcs, offs, count+2,
2263 			   RAID5_STRIPE_SIZE(sh->raid_conf),
2264 			   &sh->ops.zero_sum_result, percpu->spare_page, 0, &submit);
2265 }
2266 
2267 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
2268 {
2269 	int overlap_clear = 0, i, disks = sh->disks;
2270 	struct dma_async_tx_descriptor *tx = NULL;
2271 	struct r5conf *conf = sh->raid_conf;
2272 	int level = conf->level;
2273 	struct raid5_percpu *percpu;
2274 
2275 	local_lock(&conf->percpu->lock);
2276 	percpu = this_cpu_ptr(conf->percpu);
2277 	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
2278 		ops_run_biofill(sh);
2279 		overlap_clear++;
2280 	}
2281 
2282 	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
2283 		if (level < 6)
2284 			tx = ops_run_compute5(sh, percpu);
2285 		else {
2286 			if (sh->ops.target2 < 0 || sh->ops.target < 0)
2287 				tx = ops_run_compute6_1(sh, percpu);
2288 			else
2289 				tx = ops_run_compute6_2(sh, percpu);
2290 		}
2291 		/* terminate the chain if reconstruct is not set to be run */
2292 		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
2293 			async_tx_ack(tx);
2294 	}
2295 
2296 	if (test_bit(STRIPE_OP_PREXOR, &ops_request)) {
2297 		if (level < 6)
2298 			tx = ops_run_prexor5(sh, percpu, tx);
2299 		else
2300 			tx = ops_run_prexor6(sh, percpu, tx);
2301 	}
2302 
2303 	if (test_bit(STRIPE_OP_PARTIAL_PARITY, &ops_request))
2304 		tx = ops_run_partial_parity(sh, percpu, tx);
2305 
2306 	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
2307 		tx = ops_run_biodrain(sh, tx);
2308 		overlap_clear++;
2309 	}
2310 
2311 	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
2312 		if (level < 6)
2313 			ops_run_reconstruct5(sh, percpu, tx);
2314 		else
2315 			ops_run_reconstruct6(sh, percpu, tx);
2316 	}
2317 
2318 	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
2319 		if (sh->check_state == check_state_run)
2320 			ops_run_check_p(sh, percpu);
2321 		else if (sh->check_state == check_state_run_q)
2322 			ops_run_check_pq(sh, percpu, 0);
2323 		else if (sh->check_state == check_state_run_pq)
2324 			ops_run_check_pq(sh, percpu, 1);
2325 		else
2326 			BUG();
2327 	}
2328 
2329 	if (overlap_clear && !sh->batch_head) {
2330 		for (i = disks; i--; ) {
2331 			struct r5dev *dev = &sh->dev[i];
2332 			if (test_and_clear_bit(R5_Overlap, &dev->flags))
2333 				wake_up_bit(&dev->flags, R5_Overlap);
2334 		}
2335 	}
2336 	local_unlock(&conf->percpu->lock);
2337 }
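
/*
 * Summary of the dispatch above (descriptive note): the ops_request bits are
 * serviced in a fixed order -- biofill, compute, prexor, partial parity,
 * biodrain, reconstruct, check -- and the compute, prexor, partial-parity,
 * biodrain and reconstruct operations are chained to one another through
 * 'tx', so each stripe_head is submitted to the async_tx layer as a single
 * dependency chain.
 */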
2338 
2339 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
2340 {
2341 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
2342 	kfree(sh->pages);
2343 #endif
2344 	if (sh->ppl_page)
2345 		__free_page(sh->ppl_page);
2346 	kmem_cache_free(sc, sh);
2347 }
2348 
2349 static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
2350 	int disks, struct r5conf *conf)
2351 {
2352 	struct stripe_head *sh;
2353 
2354 	sh = kmem_cache_zalloc(sc, gfp);
2355 	if (sh) {
2356 		spin_lock_init(&sh->stripe_lock);
2357 		spin_lock_init(&sh->batch_lock);
2358 		INIT_LIST_HEAD(&sh->batch_list);
2359 		INIT_LIST_HEAD(&sh->lru);
2360 		INIT_LIST_HEAD(&sh->r5c);
2361 		INIT_LIST_HEAD(&sh->log_list);
2362 		atomic_set(&sh->count, 1);
2363 		sh->raid_conf = conf;
2364 		sh->log_start = MaxSector;
2365 
2366 		if (raid5_has_ppl(conf)) {
2367 			sh->ppl_page = alloc_page(gfp);
2368 			if (!sh->ppl_page) {
2369 				free_stripe(sc, sh);
2370 				return NULL;
2371 			}
2372 		}
2373 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
2374 		if (init_stripe_shared_pages(sh, conf, disks)) {
2375 			free_stripe(sc, sh);
2376 			return NULL;
2377 		}
2378 #endif
2379 	}
2380 	return sh;
2381 }

2382 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
2383 {
2384 	struct stripe_head *sh;
2385 
2386 	sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf);
2387 	if (!sh)
2388 		return 0;
2389 
2390 	if (grow_buffers(sh, gfp)) {
2391 		shrink_buffers(sh);
2392 		free_stripe(conf->slab_cache, sh);
2393 		return 0;
2394 	}
2395 	sh->hash_lock_index =
2396 		conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
2397 	/* we just created an active stripe so... */
2398 	atomic_inc(&conf->active_stripes);
2399 
2400 	raid5_release_stripe(sh);
2401 	WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1);
2402 	return 1;
2403 }
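
/*
 * Illustration (not from the original source): hash_lock_index is assigned
 * from conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS before the counter is
 * bumped, so successive grow_one_stripe() calls spread new stripes over the
 * hash locks round-robin; e.g. if NR_STRIPE_HASH_LOCKS is 8, the ninth
 * stripe grown lands back on lock 0.
 */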
2404 
2405 static int grow_stripes(struct r5conf *conf, int num)
2406 {
2407 	struct kmem_cache *sc;
2408 	size_t namelen = sizeof(conf->cache_name[0]);
2409 	int devs = max(conf->raid_disks, conf->previous_raid_disks);
2410 
2411 	if (mddev_is_dm(conf->mddev))
2412 		snprintf(conf->cache_name[0], namelen,
2413 			"raid%d-%p", conf->level, conf->mddev);
2414 	else
2415 		snprintf(conf->cache_name[0], namelen,
2416 			"raid%d-%s", conf->level, mdname(conf->mddev));
2417 	snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
2418 
2419 	conf->active_name = 0;
2420 	sc = kmem_cache_create(conf->cache_name[conf->active_name],
2421 			       struct_size_t(struct stripe_head, dev, devs),
2422 			       0, 0, NULL);
2423 	if (!sc)
2424 		return 1;
2425 	conf->slab_cache = sc;
2426 	conf->pool_size = devs;
2427 	while (num--)
2428 		if (!grow_one_stripe(conf, GFP_KERNEL))
2429 			return 1;
2430 
2431 	return 0;
2432 }
2433 
2434 /**
2435  * scribble_alloc - allocate percpu scribble buffer for required size
2436  *		    of the scribble region
2437  * @percpu: from for_each_present_cpu() of the caller
2438  * @num: total number of disks in the array
2439  * @cnt: scribble objs count for required size of the scribble region
2440  *
2441  * The scribble buffer size must be enough to contain:
2442  * 1/ a struct page pointer for each device in the array +2
2443  * 2/ room to convert each entry in (1) to its corresponding dma
2444  *    (dma_map_page()) or page (page_address()) address.
2445  *
2446  * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
2447  * calculate over all devices (not just the data blocks), using zeros in place
2448  * of the P and Q blocks.
2449  */
2450 static int scribble_alloc(struct raid5_percpu *percpu,
2451 			  int num, int cnt)
2452 {
2453 	size_t obj_size =
2454 		sizeof(struct page *) * (num + 2) +
2455 		sizeof(addr_conv_t) * (num + 2) +
2456 		sizeof(unsigned int) * (num + 2);
2457 	void *scribble;
2458 
2459 	/*
2460 	 * If we are in the raid array suspend context, we are in a memalloc
2461 	 * noio context as well, so there is no risk of recursive memory
2462 	 * reclaim I/O even with the GFP_KERNEL flag.
2463 	 */
2464 	scribble = kvmalloc_array(cnt, obj_size, GFP_KERNEL);
2465 	if (!scribble)
2466 		return -ENOMEM;
2467 
2468 	kvfree(percpu->scribble);
2469 
2470 	percpu->scribble = scribble;
2471 	percpu->scribble_obj_size = obj_size;
2472 	return 0;
2473 }
2474 
2475 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
2476 {
2477 	unsigned long cpu;
2478 	int err = 0;
2479 
2480 	/* Never shrink. */
2481 	if (conf->scribble_disks >= new_disks &&
2482 	    conf->scribble_sectors >= new_sectors)
2483 		return 0;
2484 
2485 	raid5_quiesce(conf->mddev, true);
2486 	cpus_read_lock();
2487 
2488 	for_each_present_cpu(cpu) {
2489 		struct raid5_percpu *percpu;
2490 
2491 		percpu = per_cpu_ptr(conf->percpu, cpu);
2492 		err = scribble_alloc(percpu, new_disks,
2493 				     new_sectors / RAID5_STRIPE_SECTORS(conf));
2494 		if (err)
2495 			break;
2496 	}
2497 
2498 	cpus_read_unlock();
2499 	raid5_quiesce(conf->mddev, false);
2500 
2501 	if (!err) {
2502 		conf->scribble_disks = new_disks;
2503 		conf->scribble_sectors = new_sectors;
2504 	}
2505 	return err;
2506 }
2507 
2508 static int resize_stripes(struct r5conf *conf, int newsize)
2509 {
2510 	/* Make all the stripes able to hold 'newsize' devices.
2511 	 * New slots in each stripe get 'page' set to a new page.
2512 	 *
2513 	 * This happens in stages:
2514 	 * 1/ create a new kmem_cache and allocate the required number of
2515 	 *    stripe_heads.
2516 	 * 2/ gather all the old stripe_heads and transfer the pages across
2517 	 *    to the new stripe_heads.  This will have the side effect of
2518 	 *    freezing the array as once all stripe_heads have been collected,
2519 	 *    no IO will be possible.  Old stripe heads are freed once their
2520 	 *    pages have been transferred over, and the old kmem_cache is
2521 	 *    freed when all stripes are done.
2522 	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
2523 	 *    we simply return a failure status - no need to clean anything up.
2524 	 * 4/ allocate new pages for the new slots in the new stripe_heads.
2525 	 *    If this fails, we don't bother trying to shrink the
2526 	 *    stripe_heads down again, we just leave them as they are.
2527 	 *    As each stripe_head is processed the new one is released into
2528 	 *    active service.
2529 	 *
2530 	 * Once step2 is started, we cannot afford to wait for a write,
2531 	 * so we use GFP_NOIO allocations.
2532 	 */
2533 	struct stripe_head *osh, *nsh;
2534 	LIST_HEAD(newstripes);
2535 	struct disk_info *ndisks;
2536 	int err = 0;
2537 	struct kmem_cache *sc;
2538 	int i;
2539 	int hash, cnt;
2540 
2541 	md_allow_write(conf->mddev);
2542 
2543 	/* Step 1 */
2544 	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
2545 			       struct_size_t(struct stripe_head, dev, newsize),
2546 			       0, 0, NULL);
2547 	if (!sc)
2548 		return -ENOMEM;
2549 
2550 	/* Need to ensure auto-resizing doesn't interfere */
2551 	mutex_lock(&conf->cache_size_mutex);
2552 
2553 	for (i = conf->max_nr_stripes; i; i--) {
2554 		nsh = alloc_stripe(sc, GFP_KERNEL, newsize, conf);
2555 		if (!nsh)
2556 			break;
2557 
2558 		list_add(&nsh->lru, &newstripes);
2559 	}
2560 	if (i) {
2561 		/* didn't get enough, give up */
2562 		while (!list_empty(&newstripes)) {
2563 			nsh = list_entry(newstripes.next, struct stripe_head, lru);
2564 			list_del(&nsh->lru);
2565 			free_stripe(sc, nsh);
2566 		}
2567 		kmem_cache_destroy(sc);
2568 		mutex_unlock(&conf->cache_size_mutex);
2569 		return -ENOMEM;
2570 	}
2571 	/* Step 2 - Must use GFP_NOIO now.
2572 	 * OK, we have enough stripes, start collecting inactive
2573 	 * stripes and copying them over
2574 	 */
2575 	hash = 0;
2576 	cnt = 0;
2577 	list_for_each_entry(nsh, &newstripes, lru) {
2578 		lock_device_hash_lock(conf, hash);
2579 		wait_event_cmd(conf->wait_for_stripe,
2580 				    !list_empty(conf->inactive_list + hash),
2581 				    unlock_device_hash_lock(conf, hash),
2582 				    lock_device_hash_lock(conf, hash));
2583 		osh = get_free_stripe(conf, hash);
2584 		unlock_device_hash_lock(conf, hash);
2585 
2586 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
2587 		for (i = 0; i < osh->nr_pages; i++) {
2588 			nsh->pages[i] = osh->pages[i];
2589 			osh->pages[i] = NULL;
2590 		}
2591 #endif
2592 		for (i = 0; i < conf->pool_size; i++) {
2593 			nsh->dev[i].page = osh->dev[i].page;
2594 			nsh->dev[i].orig_page = osh->dev[i].page;
2595 			nsh->dev[i].offset = osh->dev[i].offset;
2596 		}
2597 		nsh->hash_lock_index = hash;
2598 		free_stripe(conf->slab_cache, osh);
2599 		cnt++;
2600 		if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
2601 		    !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
2602 			hash++;
2603 			cnt = 0;
2604 		}
2605 	}
2606 	kmem_cache_destroy(conf->slab_cache);
2607 
2608 	/* Step 3.
2609 	 * At this point, we are holding all the stripes so the array
2610 	 * is completely stalled, so now is a good time to resize
2611 	 * conf->disks and the scribble region
2612 	 */
2613 	ndisks = kzalloc_objs(struct disk_info, newsize, GFP_NOIO);
2614 	if (ndisks) {
2615 		for (i = 0; i < conf->pool_size; i++)
2616 			ndisks[i] = conf->disks[i];
2617 
2618 		for (i = conf->pool_size; i < newsize; i++) {
2619 			ndisks[i].extra_page = alloc_page(GFP_NOIO);
2620 			if (!ndisks[i].extra_page)
2621 				err = -ENOMEM;
2622 		}
2623 
2624 		if (err) {
2625 			for (i = conf->pool_size; i < newsize; i++)
2626 				if (ndisks[i].extra_page)
2627 					put_page(ndisks[i].extra_page);
2628 			kfree(ndisks);
2629 		} else {
2630 			kfree(conf->disks);
2631 			conf->disks = ndisks;
2632 		}
2633 	} else
2634 		err = -ENOMEM;
2635 
2636 	conf->slab_cache = sc;
2637 	conf->active_name = 1-conf->active_name;
2638 
2639 	/* Step 4, return new stripes to service */
2640 	while (!list_empty(&newstripes)) {
2641 		nsh = list_entry(newstripes.next, struct stripe_head, lru);
2642 		list_del_init(&nsh->lru);
2643 
2644 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
2645 		for (i = 0; i < nsh->nr_pages; i++) {
2646 			if (nsh->pages[i])
2647 				continue;
2648 			nsh->pages[i] = alloc_page(GFP_NOIO);
2649 			if (!nsh->pages[i])
2650 				err = -ENOMEM;
2651 		}
2652 
2653 		for (i = conf->raid_disks; i < newsize; i++) {
2654 			if (nsh->dev[i].page)
2655 				continue;
2656 			nsh->dev[i].page = raid5_get_dev_page(nsh, i);
2657 			nsh->dev[i].orig_page = nsh->dev[i].page;
2658 			nsh->dev[i].offset = raid5_get_page_offset(nsh, i);
2659 		}
2660 #else
2661 		for (i = conf->raid_disks; i < newsize; i++)
2662 			if (nsh->dev[i].page == NULL) {
2663 				struct page *p = alloc_page(GFP_NOIO);
2664 				nsh->dev[i].page = p;
2665 				nsh->dev[i].orig_page = p;
2666 				nsh->dev[i].offset = 0;
2667 				if (!p)
2668 					err = -ENOMEM;
2669 			}
2670 #endif
2671 		raid5_release_stripe(nsh);
2672 	}
2673 	/* critical section passed, GFP_NOIO no longer needed */
2674 
2675 	if (!err)
2676 		conf->pool_size = newsize;
2677 	mutex_unlock(&conf->cache_size_mutex);
2678 
2679 	return err;
2680 }
2681 
2682 static int drop_one_stripe(struct r5conf *conf)
2683 {
2684 	struct stripe_head *sh;
2685 	int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
2686 
2687 	spin_lock_irq(conf->hash_locks + hash);
2688 	sh = get_free_stripe(conf, hash);
2689 	spin_unlock_irq(conf->hash_locks + hash);
2690 	if (!sh)
2691 		return 0;
2692 	BUG_ON(atomic_read(&sh->count));
2693 	shrink_buffers(sh);
2694 	free_stripe(conf->slab_cache, sh);
2695 	atomic_dec(&conf->active_stripes);
2696 	WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1);
2697 	return 1;
2698 }
2699 
2700 static void shrink_stripes(struct r5conf *conf)
2701 {
2702 	while (conf->max_nr_stripes &&
2703 	       drop_one_stripe(conf))
2704 		;
2705 
2706 	kmem_cache_destroy(conf->slab_cache);
2707 	conf->slab_cache = NULL;
2708 }
2709 
2710 static void raid5_end_read_request(struct bio * bi)
2711 {
2712 	struct stripe_head *sh = bi->bi_private;
2713 	struct r5conf *conf = sh->raid_conf;
2714 	int disks = sh->disks, i;
2715 	struct md_rdev *rdev = NULL;
2716 	sector_t s;
2717 
2718 	for (i = 0; i < disks; i++)
2719 		if (bi == &sh->dev[i].req)
2720 			break;
2721 
2722 	pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
2723 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
2724 		bi->bi_status);
2725 	if (i == disks) {
2726 		BUG();
2727 		return;
2728 	}
2729 	if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
2730 		/* If replacement finished while this request was outstanding,
2731 		 * 'replacement' might be NULL already.
2732 		 * In that case it moved down to 'rdev'.
2733 		 * rdev is not removed until all requests are finished.
2734 		 */
2735 		rdev = conf->disks[i].replacement;
2736 	if (!rdev)
2737 		rdev = conf->disks[i].rdev;
2738 
2739 	if (use_new_offset(conf, sh))
2740 		s = sh->sector + rdev->new_data_offset;
2741 	else
2742 		s = sh->sector + rdev->data_offset;
2743 	if (!bi->bi_status) {
2744 		set_bit(R5_UPTODATE, &sh->dev[i].flags);
2745 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2746 			/* Note that this cannot happen on a
2747 			 * replacement device.  We just fail those on
2748 			 * any error
2749 			 */
2750 			pr_info_ratelimited(
2751 				"md/raid:%s: read error corrected (%lu sectors at %llu on %pg)\n",
2752 				mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf),
2753 				(unsigned long long)s,
2754 				rdev->bdev);
2755 			atomic_add(RAID5_STRIPE_SECTORS(conf), &rdev->corrected_errors);
2756 			clear_bit(R5_ReadError, &sh->dev[i].flags);
2757 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
2758 		} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2759 			clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2760 
2761 		if (test_bit(R5_InJournal, &sh->dev[i].flags))
2762 			/*
2763 			 * end read for a page in journal, this
2764 			 * must be preparing for prexor in rmw
2765 			 */
2766 			set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
2767 
2768 		if (atomic_read(&rdev->read_errors))
2769 			atomic_set(&rdev->read_errors, 0);
2770 	} else {
2771 		int retry = 0;
2772 		int set_bad = 0;
2773 
2774 		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
2775 		if (!(bi->bi_status == BLK_STS_PROTECTION))
2776 			atomic_inc(&rdev->read_errors);
2777 		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
2778 			pr_warn_ratelimited(
2779 				"md/raid:%s: read error on replacement device (sector %llu on %pg).\n",
2780 				mdname(conf->mddev),
2781 				(unsigned long long)s,
2782 				rdev->bdev);
2783 		else if (conf->mddev->degraded >= conf->max_degraded) {
2784 			set_bad = 1;
2785 			pr_warn_ratelimited(
2786 				"md/raid:%s: read error not correctable (sector %llu on %pg).\n",
2787 				mdname(conf->mddev),
2788 				(unsigned long long)s,
2789 				rdev->bdev);
2790 		} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
2791 			/* Oh, no!!! */
2792 			set_bad = 1;
2793 			pr_warn_ratelimited(
2794 				"md/raid:%s: read error NOT corrected!! (sector %llu on %pg).\n",
2795 				mdname(conf->mddev),
2796 				(unsigned long long)s,
2797 				rdev->bdev);
2798 		} else if (atomic_read(&rdev->read_errors)
2799 			 > conf->max_nr_stripes) {
2800 			if (!test_bit(Faulty, &rdev->flags)) {
2801 				pr_warn("md/raid:%s: %d read_errors > %d stripes\n",
2802 				    mdname(conf->mddev),
2803 				    atomic_read(&rdev->read_errors),
2804 				    conf->max_nr_stripes);
2805 				pr_warn("md/raid:%s: Too many read errors, failing device %pg.\n",
2806 				    mdname(conf->mddev), rdev->bdev);
2807 			}
2808 		} else
2809 			retry = 1;
2810 		if (set_bad && test_bit(In_sync, &rdev->flags)
2811 		    && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2812 			retry = 1;
2813 		if (retry) {
2814 			if (sh->qd_idx >= 0 && sh->pd_idx == i)
2815 				set_bit(R5_ReadError, &sh->dev[i].flags);
2816 			else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
2817 				set_bit(R5_ReadError, &sh->dev[i].flags);
2818 				clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2819 			} else
2820 				set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2821 		} else {
2822 			clear_bit(R5_ReadError, &sh->dev[i].flags);
2823 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
2824 			if (!(set_bad && test_bit(In_sync, &rdev->flags)))
2825 				rdev_set_badblocks(rdev, sh->sector,
2826 						   RAID5_STRIPE_SECTORS(conf), 0);
2827 		}
2828 	}
2829 	rdev_dec_pending(rdev, conf->mddev);
2830 	bio_uninit(bi);
2831 	clear_bit(R5_LOCKED, &sh->dev[i].flags);
2832 	set_bit(STRIPE_HANDLE, &sh->state);
2833 	raid5_release_stripe(sh);
2834 }
2835 
2836 static void raid5_end_write_request(struct bio *bi)
2837 {
2838 	struct stripe_head *sh = bi->bi_private;
2839 	struct r5conf *conf = sh->raid_conf;
2840 	int disks = sh->disks, i;
2841 	struct md_rdev *rdev;
2842 	int replacement = 0;
2843 
2844 	for (i = 0 ; i < disks; i++) {
2845 		if (bi == &sh->dev[i].req) {
2846 			rdev = conf->disks[i].rdev;
2847 			break;
2848 		}
2849 		if (bi == &sh->dev[i].rreq) {
2850 			rdev = conf->disks[i].replacement;
2851 			if (rdev)
2852 				replacement = 1;
2853 			else
2854 				/* rdev was removed and 'replacement'
2855 				 * replaced it.  rdev is not removed
2856 				 * until all requests are finished.
2857 				 */
2858 				rdev = conf->disks[i].rdev;
2859 			break;
2860 		}
2861 	}
2862 	pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
2863 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
2864 		bi->bi_status);
2865 	if (i == disks) {
2866 		BUG();
2867 		return;
2868 	}
2869 
2870 	if (replacement) {
2871 		if (bi->bi_status)
2872 			md_error(conf->mddev, rdev);
2873 		else if (rdev_has_badblock(rdev, sh->sector,
2874 					   RAID5_STRIPE_SECTORS(conf)))
2875 			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
2876 	} else {
2877 		if (bi->bi_status) {
2878 			set_bit(WriteErrorSeen, &rdev->flags);
2879 			set_bit(R5_WriteError, &sh->dev[i].flags);
2880 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
2881 				set_bit(MD_RECOVERY_NEEDED,
2882 					&rdev->mddev->recovery);
2883 		} else if (rdev_has_badblock(rdev, sh->sector,
2884 					     RAID5_STRIPE_SECTORS(conf))) {
2885 			set_bit(R5_MadeGood, &sh->dev[i].flags);
2886 			if (test_bit(R5_ReadError, &sh->dev[i].flags))
2887 				/* That was a successful write so make
2888 				 * sure it looks like we already did
2889 				 * a re-write.
2890 				 */
2891 				set_bit(R5_ReWrite, &sh->dev[i].flags);
2892 		}
2893 	}
2894 	rdev_dec_pending(rdev, conf->mddev);
2895 
2896 	if (sh->batch_head && bi->bi_status && !replacement)
2897 		set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
2898 
2899 	bio_uninit(bi);
2900 	if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
2901 		clear_bit(R5_LOCKED, &sh->dev[i].flags);
2902 	set_bit(STRIPE_HANDLE, &sh->state);
2903 
2904 	if (sh->batch_head && sh != sh->batch_head)
2905 		raid5_release_stripe(sh->batch_head);
2906 	raid5_release_stripe(sh);
2907 }
2908 
2909 static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2910 {
2911 	struct r5conf *conf = mddev->private;
2912 	unsigned long flags;
2913 	pr_debug("raid456: error called\n");
2914 
2915 	pr_crit("md/raid:%s: Disk failure on %pg, disabling device.\n",
2916 		mdname(mddev), rdev->bdev);
2917 
2918 	spin_lock_irqsave(&conf->device_lock, flags);
2919 	set_bit(Faulty, &rdev->flags);
2920 	clear_bit(In_sync, &rdev->flags);
2921 	mddev->degraded = raid5_calc_degraded(conf);
2922 
2923 	if (has_failed(conf)) {
2924 		set_bit(MD_BROKEN, &conf->mddev->flags);
2925 
2926 		pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
2927 			mdname(mddev), mddev->degraded, conf->raid_disks);
2928 	} else {
2929 		pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
2930 			mdname(mddev), conf->raid_disks - mddev->degraded);
2931 	}
2932 
2933 	spin_unlock_irqrestore(&conf->device_lock, flags);
2934 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2935 
2936 	set_bit(Blocked, &rdev->flags);
2937 	set_mask_bits(&mddev->sb_flags, 0,
2938 		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
2939 	r5c_update_on_rdev_error(mddev, rdev);
2940 }
2941 
2942 /*
2943  * Input: a 'big' sector number,
2944  * Output: index of the data and parity disk, and the sector # in them.
2945  */
2946 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
2947 			      int previous, int *dd_idx,
2948 			      struct stripe_head *sh)
2949 {
2950 	sector_t stripe, stripe2;
2951 	sector_t chunk_number;
2952 	unsigned int chunk_offset;
2953 	int pd_idx, qd_idx;
2954 	int ddf_layout = 0;
2955 	sector_t new_sector;
2956 	int algorithm = previous ? conf->prev_algo
2957 				 : conf->algorithm;
2958 	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
2959 					 : conf->chunk_sectors;
2960 	int raid_disks = previous ? conf->previous_raid_disks
2961 				  : conf->raid_disks;
2962 	int data_disks = raid_disks - conf->max_degraded;
2963 
2964 	/* First compute the information on this sector */
2965 
2966 	/*
2967 	 * Compute the chunk number and the sector offset inside the chunk
2968 	 */
2969 	chunk_offset = sector_div(r_sector, sectors_per_chunk);
2970 	chunk_number = r_sector;
2971 
2972 	/*
2973 	 * Compute the stripe number
2974 	 */
2975 	stripe = chunk_number;
2976 	*dd_idx = sector_div(stripe, data_disks);
2977 	stripe2 = stripe;
2978 	/*
2979 	 * Select the parity disk based on the user selected algorithm.
2980 	 */
2981 	pd_idx = qd_idx = -1;
2982 	switch(conf->level) {
2983 	case 4:
2984 		pd_idx = data_disks;
2985 		break;
2986 	case 5:
2987 		switch (algorithm) {
2988 		case ALGORITHM_LEFT_ASYMMETRIC:
2989 			pd_idx = data_disks - sector_div(stripe2, raid_disks);
2990 			if (*dd_idx >= pd_idx)
2991 				(*dd_idx)++;
2992 			break;
2993 		case ALGORITHM_RIGHT_ASYMMETRIC:
2994 			pd_idx = sector_div(stripe2, raid_disks);
2995 			if (*dd_idx >= pd_idx)
2996 				(*dd_idx)++;
2997 			break;
2998 		case ALGORITHM_LEFT_SYMMETRIC:
2999 			pd_idx = data_disks - sector_div(stripe2, raid_disks);
3000 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
3001 			break;
3002 		case ALGORITHM_RIGHT_SYMMETRIC:
3003 			pd_idx = sector_div(stripe2, raid_disks);
3004 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
3005 			break;
3006 		case ALGORITHM_PARITY_0:
3007 			pd_idx = 0;
3008 			(*dd_idx)++;
3009 			break;
3010 		case ALGORITHM_PARITY_N:
3011 			pd_idx = data_disks;
3012 			break;
3013 		default:
3014 			BUG();
3015 		}
3016 		break;
3017 	case 6:
3018 
3019 		switch (algorithm) {
3020 		case ALGORITHM_LEFT_ASYMMETRIC:
3021 			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
3022 			qd_idx = pd_idx + 1;
3023 			if (pd_idx == raid_disks-1) {
3024 				(*dd_idx)++;	/* Q D D D P */
3025 				qd_idx = 0;
3026 			} else if (*dd_idx >= pd_idx)
3027 				(*dd_idx) += 2; /* D D P Q D */
3028 			break;
3029 		case ALGORITHM_RIGHT_ASYMMETRIC:
3030 			pd_idx = sector_div(stripe2, raid_disks);
3031 			qd_idx = pd_idx + 1;
3032 			if (pd_idx == raid_disks-1) {
3033 				(*dd_idx)++;	/* Q D D D P */
3034 				qd_idx = 0;
3035 			} else if (*dd_idx >= pd_idx)
3036 				(*dd_idx) += 2; /* D D P Q D */
3037 			break;
3038 		case ALGORITHM_LEFT_SYMMETRIC:
3039 			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
3040 			qd_idx = (pd_idx + 1) % raid_disks;
3041 			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
3042 			break;
3043 		case ALGORITHM_RIGHT_SYMMETRIC:
3044 			pd_idx = sector_div(stripe2, raid_disks);
3045 			qd_idx = (pd_idx + 1) % raid_disks;
3046 			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
3047 			break;
3048 
3049 		case ALGORITHM_PARITY_0:
3050 			pd_idx = 0;
3051 			qd_idx = 1;
3052 			(*dd_idx) += 2;
3053 			break;
3054 		case ALGORITHM_PARITY_N:
3055 			pd_idx = data_disks;
3056 			qd_idx = data_disks + 1;
3057 			break;
3058 
3059 		case ALGORITHM_ROTATING_ZERO_RESTART:
3060 			/* Exactly the same as RIGHT_ASYMMETRIC, but the order
3061 			 * of blocks for computing Q is different.
3062 			 */
3063 			pd_idx = sector_div(stripe2, raid_disks);
3064 			qd_idx = pd_idx + 1;
3065 			if (pd_idx == raid_disks-1) {
3066 				(*dd_idx)++;	/* Q D D D P */
3067 				qd_idx = 0;
3068 			} else if (*dd_idx >= pd_idx)
3069 				(*dd_idx) += 2; /* D D P Q D */
3070 			ddf_layout = 1;
3071 			break;
3072 
3073 		case ALGORITHM_ROTATING_N_RESTART:
3074 			/* Same as left_asymmetric, but the first stripe is
3075 			 * D D D P Q  rather than
3076 			 * Q D D D P
3077 			 */
3078 			stripe2 += 1;
3079 			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
3080 			qd_idx = pd_idx + 1;
3081 			if (pd_idx == raid_disks-1) {
3082 				(*dd_idx)++;	/* Q D D D P */
3083 				qd_idx = 0;
3084 			} else if (*dd_idx >= pd_idx)
3085 				(*dd_idx) += 2; /* D D P Q D */
3086 			ddf_layout = 1;
3087 			break;
3088 
3089 		case ALGORITHM_ROTATING_N_CONTINUE:
3090 			/* Same as left_symmetric but Q is before P */
3091 			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
3092 			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
3093 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
3094 			ddf_layout = 1;
3095 			break;
3096 
3097 		case ALGORITHM_LEFT_ASYMMETRIC_6:
3098 			/* RAID5 left_asymmetric, with Q on last device */
3099 			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
3100 			if (*dd_idx >= pd_idx)
3101 				(*dd_idx)++;
3102 			qd_idx = raid_disks - 1;
3103 			break;
3104 
3105 		case ALGORITHM_RIGHT_ASYMMETRIC_6:
3106 			pd_idx = sector_div(stripe2, raid_disks-1);
3107 			if (*dd_idx >= pd_idx)
3108 				(*dd_idx)++;
3109 			qd_idx = raid_disks - 1;
3110 			break;
3111 
3112 		case ALGORITHM_LEFT_SYMMETRIC_6:
3113 			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
3114 			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
3115 			qd_idx = raid_disks - 1;
3116 			break;
3117 
3118 		case ALGORITHM_RIGHT_SYMMETRIC_6:
3119 			pd_idx = sector_div(stripe2, raid_disks-1);
3120 			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
3121 			qd_idx = raid_disks - 1;
3122 			break;
3123 
3124 		case ALGORITHM_PARITY_0_6:
3125 			pd_idx = 0;
3126 			(*dd_idx)++;
3127 			qd_idx = raid_disks - 1;
3128 			break;
3129 
3130 		default:
3131 			BUG();
3132 		}
3133 		break;
3134 	}
3135 
3136 	if (sh) {
3137 		sh->pd_idx = pd_idx;
3138 		sh->qd_idx = qd_idx;
3139 		sh->ddf_layout = ddf_layout;
3140 	}
3141 	/*
3142 	 * Finally, compute the new sector number
3143 	 */
3144 	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
3145 	return new_sector;
3146 }
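
/*
 * Worked example (illustrative only, not from the original source): take a
 * 4-disk RAID5 using ALGORITHM_LEFT_SYMMETRIC with 64KiB chunks, so
 * sectors_per_chunk = 128 and data_disks = 3.  For r_sector = 1000:
 *
 *	chunk_offset = 1000 % 128 = 104,   chunk_number = 1000 / 128 = 7
 *	dd_idx       = 7 % 3 = 1,          stripe       = 7 / 3 = 2
 *	pd_idx       = 3 - (2 % 4) = 1
 *	dd_idx       = (1 + 1 + 1) % 4 = 3
 *	new_sector   = 2 * 128 + 104 = 360
 *
 * i.e. logical sector 1000 maps to device 3 at device sector 360, with this
 * stripe's parity on device 1.
 */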
3147 
3148 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
3149 {
3150 	struct r5conf *conf = sh->raid_conf;
3151 	int raid_disks = sh->disks;
3152 	int data_disks = raid_disks - conf->max_degraded;
3153 	sector_t new_sector = sh->sector, check;
3154 	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
3155 					 : conf->chunk_sectors;
3156 	int algorithm = previous ? conf->prev_algo
3157 				 : conf->algorithm;
3158 	sector_t stripe;
3159 	int chunk_offset;
3160 	sector_t chunk_number;
3161 	int dummy1, dd_idx = i;
3162 	sector_t r_sector;
3163 	struct stripe_head sh2;
3164 
3165 	chunk_offset = sector_div(new_sector, sectors_per_chunk);
3166 	stripe = new_sector;
3167 
3168 	if (i == sh->pd_idx)
3169 		return 0;
3170 	switch(conf->level) {
3171 	case 4: break;
3172 	case 5:
3173 		switch (algorithm) {
3174 		case ALGORITHM_LEFT_ASYMMETRIC:
3175 		case ALGORITHM_RIGHT_ASYMMETRIC:
3176 			if (i > sh->pd_idx)
3177 				i--;
3178 			break;
3179 		case ALGORITHM_LEFT_SYMMETRIC:
3180 		case ALGORITHM_RIGHT_SYMMETRIC:
3181 			if (i < sh->pd_idx)
3182 				i += raid_disks;
3183 			i -= (sh->pd_idx + 1);
3184 			break;
3185 		case ALGORITHM_PARITY_0:
3186 			i -= 1;
3187 			break;
3188 		case ALGORITHM_PARITY_N:
3189 			break;
3190 		default:
3191 			BUG();
3192 		}
3193 		break;
3194 	case 6:
3195 		if (i == sh->qd_idx)
3196 			return 0; /* It is the Q disk */
3197 		switch (algorithm) {
3198 		case ALGORITHM_LEFT_ASYMMETRIC:
3199 		case ALGORITHM_RIGHT_ASYMMETRIC:
3200 		case ALGORITHM_ROTATING_ZERO_RESTART:
3201 		case ALGORITHM_ROTATING_N_RESTART:
3202 			if (sh->pd_idx == raid_disks-1)
3203 				i--;	/* Q D D D P */
3204 			else if (i > sh->pd_idx)
3205 				i -= 2; /* D D P Q D */
3206 			break;
3207 		case ALGORITHM_LEFT_SYMMETRIC:
3208 		case ALGORITHM_RIGHT_SYMMETRIC:
3209 			if (sh->pd_idx == raid_disks-1)
3210 				i--; /* Q D D D P */
3211 			else {
3212 				/* D D P Q D */
3213 				if (i < sh->pd_idx)
3214 					i += raid_disks;
3215 				i -= (sh->pd_idx + 2);
3216 			}
3217 			break;
3218 		case ALGORITHM_PARITY_0:
3219 			i -= 2;
3220 			break;
3221 		case ALGORITHM_PARITY_N:
3222 			break;
3223 		case ALGORITHM_ROTATING_N_CONTINUE:
3224 			/* Like left_symmetric, but P is before Q */
3225 			if (sh->pd_idx == 0)
3226 				i--;	/* P D D D Q */
3227 			else {
3228 				/* D D Q P D */
3229 				if (i < sh->pd_idx)
3230 					i += raid_disks;
3231 				i -= (sh->pd_idx + 1);
3232 			}
3233 			break;
3234 		case ALGORITHM_LEFT_ASYMMETRIC_6:
3235 		case ALGORITHM_RIGHT_ASYMMETRIC_6:
3236 			if (i > sh->pd_idx)
3237 				i--;
3238 			break;
3239 		case ALGORITHM_LEFT_SYMMETRIC_6:
3240 		case ALGORITHM_RIGHT_SYMMETRIC_6:
3241 			if (i < sh->pd_idx)
3242 				i += data_disks + 1;
3243 			i -= (sh->pd_idx + 1);
3244 			break;
3245 		case ALGORITHM_PARITY_0_6:
3246 			i -= 1;
3247 			break;
3248 		default:
3249 			BUG();
3250 		}
3251 		break;
3252 	}
3253 
3254 	chunk_number = stripe * data_disks + i;
3255 	r_sector = chunk_number * sectors_per_chunk + chunk_offset;
3256 
3257 	check = raid5_compute_sector(conf, r_sector,
3258 				     previous, &dummy1, &sh2);
3259 	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
3260 		|| sh2.qd_idx != sh->qd_idx) {
3261 		pr_warn("md/raid:%s: compute_blocknr: map not correct\n",
3262 			mdname(conf->mddev));
3263 		return 0;
3264 	}
3265 	return r_sector;
3266 }
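
/*
 * Continuing the illustrative example above: for the stripe at sector 360
 * in the 4-disk left-symmetric layout, raid5_compute_blocknr(sh, 3, 0)
 * reverses the mapping -- i = 3 - (pd_idx + 1) = 1,
 * chunk_number = 2 * 3 + 1 = 7, r_sector = 7 * 128 + 104 = 1000 -- and the
 * raid5_compute_sector() call above cross-checks that the forward mapping
 * lands back on the same stripe.
 */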
3267 
3268 /*
3269  * There are cases where we want handle_stripe_dirtying() and
3270  * schedule_reconstruction() to delay towrite to some dev of a stripe.
3271  *
3272  * This function checks whether we want to delay the towrite. Specifically,
3273  * we delay the towrite when:
3274  *
3275  *   1. degraded stripe has a non-overwrite to the missing dev, AND this
3276  *      stripe has data in journal (for other devices).
3277  *
3278  *      In this case, when reading data for the non-overwrite dev, it is
3279  *      necessary to handle complex rmw of write back cache (prexor with
3280  *      orig_page, and xor with page). To keep read path simple, we would
3281  *      like to flush data in journal to RAID disks first, so complex rmw
3282 	 *      is handled in the write path (handle_stripe_dirtying).
3283  *
3284  *   2. when journal space is critical (R5C_LOG_CRITICAL=1)
3285  *
3286  *      It is important to be able to flush all stripes in raid5-cache.
3287  *      Therefore, we need reserve some space on the journal device for
3288  *      these flushes. If flush operation includes pending writes to the
3289 	 *      stripe, we need to reserve (conf->raid_disks + 1) pages per stripe
3290  *      for the flush out. If we exclude these pending writes from flush
3291  *      operation, we only need (conf->max_degraded + 1) pages per stripe.
3292  *      Therefore, excluding pending writes in these cases enables more
3293  *      efficient use of the journal device.
3294  *
3295  *      Note: To make sure the stripe makes progress, we only delay
3296  *      towrite for stripes with data already in journal (injournal > 0).
3297  *      When LOG_CRITICAL, stripes with injournal == 0 will be sent to
3298  *      no_space_stripes list.
3299  *
3300  *   3. during journal failure
3301 	 *      When the journal fails, we try to flush all cached data to raid disks
3302  *      based on data in stripe cache. The array is read-only to upper
3303  *      layers, so we would skip all pending writes.
3304  *
3305  */
3306 static inline bool delay_towrite(struct r5conf *conf,
3307 				 struct r5dev *dev,
3308 				 struct stripe_head_state *s)
3309 {
3310 	/* case 1 above */
3311 	if (!test_bit(R5_OVERWRITE, &dev->flags) &&
3312 	    !test_bit(R5_Insync, &dev->flags) && s->injournal)
3313 		return true;
3314 	/* case 2 above */
3315 	if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
3316 	    s->injournal > 0)
3317 		return true;
3318 	/* case 3 above */
3319 	if (s->log_failed && s->injournal)
3320 		return true;
3321 	return false;
3322 }
3323 
3324 static void
3325 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
3326 			 int rcw, int expand)
3327 {
3328 	int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks;
3329 	struct r5conf *conf = sh->raid_conf;
3330 	int level = conf->level;
3331 
3332 	if (rcw) {
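		/*
		 * Reconstruct-write path: new data is drained into the stripe
		 * cache and parity is recomputed from all of the data blocks.
		 */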
3333 		/*
3334 		 * In some cases, handle_stripe_dirtying initially decides to
3335 		 * run rmw and allocates an extra page for prexor. However, rcw
3336 		 * turns out to be cheaper later on. We need to free the extra page now,
3337 		 * because we won't be able to do that in ops_complete_prexor().
3338 		 */
3339 		r5c_release_extra_page(sh);
3340 
3341 		for (i = disks; i--; ) {
3342 			struct r5dev *dev = &sh->dev[i];
3343 
3344 			if (dev->towrite && !delay_towrite(conf, dev, s)) {
3345 				set_bit(R5_LOCKED, &dev->flags);
3346 				set_bit(R5_Wantdrain, &dev->flags);
3347 				if (!expand)
3348 					clear_bit(R5_UPTODATE, &dev->flags);
3349 				s->locked++;
3350 			} else if (test_bit(R5_InJournal, &dev->flags)) {
3351 				set_bit(R5_LOCKED, &dev->flags);
3352 				s->locked++;
3353 			}
3354 		}
3355 		/* if we are not expanding, this is a proper write request, and
3356 		 * there will be bios with new data to be drained into the
3357 		 * stripe cache
3358 		 */
3359 		if (!expand) {
3360 			if (!s->locked)
3361 				/* False alarm, nothing to do */
3362 				return;
3363 			sh->reconstruct_state = reconstruct_state_drain_run;
3364 			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
3365 		} else
3366 			sh->reconstruct_state = reconstruct_state_run;
3367 
3368 		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
3369 
3370 		if (s->locked + conf->max_degraded == disks)
3371 			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
3372 				atomic_inc(&conf->pending_full_writes);
3373 	} else {
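		/*
		 * Read-modify-write path: the old contents of the blocks being
		 * rewritten are XOR'd out of the parity (prexor) before the
		 * new data is drained in and the parity updated.
		 */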
3374 		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
3375 			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
3376 		BUG_ON(level == 6 &&
3377 			(!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) ||
3378 			   test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags))));
3379 
3380 		for (i = disks; i--; ) {
3381 			struct r5dev *dev = &sh->dev[i];
3382 			if (i == pd_idx || i == qd_idx)
3383 				continue;
3384 
3385 			if (dev->towrite &&
3386 			    (test_bit(R5_UPTODATE, &dev->flags) ||
3387 			     test_bit(R5_Wantcompute, &dev->flags))) {
3388 				set_bit(R5_Wantdrain, &dev->flags);
3389 				set_bit(R5_LOCKED, &dev->flags);
3390 				clear_bit(R5_UPTODATE, &dev->flags);
3391 				s->locked++;
3392 			} else if (test_bit(R5_InJournal, &dev->flags)) {
3393 				set_bit(R5_LOCKED, &dev->flags);
3394 				s->locked++;
3395 			}
3396 		}
3397 		if (!s->locked)
3398 			/* False alarm - nothing to do */
3399 			return;
3400 		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
3401 		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
3402 		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
3403 		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
3404 	}
3405 
3406 	/* keep the parity disk(s) locked while asynchronous operations
3407 	 * are in flight
3408 	 */
3409 	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
3410 	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
3411 	s->locked++;
3412 
3413 	if (level == 6) {
3414 		int qd_idx = sh->qd_idx;
3415 		struct r5dev *dev = &sh->dev[qd_idx];
3416 
3417 		set_bit(R5_LOCKED, &dev->flags);
3418 		clear_bit(R5_UPTODATE, &dev->flags);
3419 		s->locked++;
3420 	}
3421 
3422 	if (raid5_has_ppl(sh->raid_conf) && sh->ppl_page &&
3423 	    test_bit(STRIPE_OP_BIODRAIN, &s->ops_request) &&
3424 	    !test_bit(STRIPE_FULL_WRITE, &sh->state) &&
3425 	    test_bit(R5_Insync, &sh->dev[pd_idx].flags))
3426 		set_bit(STRIPE_OP_PARTIAL_PARITY, &s->ops_request);
3427 
3428 	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
3429 		__func__, (unsigned long long)sh->sector,
3430 		s->locked, s->ops_request);
3431 }
3432 
3433 static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi,
3434 				int dd_idx, int forwrite)
3435 {
3436 	struct r5conf *conf = sh->raid_conf;
3437 	struct bio **bip;
3438 
3439 	pr_debug("checking bi b#%llu to stripe s#%llu\n",
3440 		 bi->bi_iter.bi_sector, sh->sector);
3441 
3442 	/* Don't allow new IO to be added to stripes in the batch list */
3443 	if (sh->batch_head)
3444 		return true;
3445 
3446 	if (forwrite)
3447 		bip = &sh->dev[dd_idx].towrite;
3448 	else
3449 		bip = &sh->dev[dd_idx].toread;
3450 
3451 	while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
3452 		if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
3453 			return true;
3454 		bip = &(*bip)->bi_next;
3455 	}
3456 
3457 	if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
3458 		return true;
3459 
3460 	if (forwrite && raid5_has_ppl(conf)) {
3461 		/*
3462 		 * With PPL, only writes to consecutive data chunks within a
3463 		 * stripe are allowed because for a single stripe_head we can
3464 		 * only have one PPL entry at a time, which describes one data
3465 		 * range. Not really an overlap, but R5_Overlap can be
3466 		 * used to handle this.
3467 		 */
3468 		sector_t sector;
3469 		sector_t first = 0;
3470 		sector_t last = 0;
3471 		int count = 0;
3472 		int i;
3473 
3474 		for (i = 0; i < sh->disks; i++) {
3475 			if (i != sh->pd_idx &&
3476 			    (i == dd_idx || sh->dev[i].towrite)) {
3477 				sector = sh->dev[i].sector;
3478 				if (count == 0 || sector < first)
3479 					first = sector;
3480 				if (sector > last)
3481 					last = sector;
3482 				count++;
3483 			}
3484 		}
3485 
3486 		if (first + conf->chunk_sectors * (count - 1) != last)
3487 			return true;
3488 	}
3489 
3490 	return false;
3491 }
3492 
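/* __add_stripe_bio - queue a bio on the toread/towrite list of one stripe
 * device.  The caller holds sh->stripe_lock and has already checked for
 * overlaps with stripe_bio_overlaps().  This accounts the bio, sets
 * R5_OVERWRITE once the queued writes cover the whole stripe page, and
 * arranges bitmap batching for the first write to the stripe.
 */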
3493 static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
3494 			     int dd_idx, int forwrite, int previous)
3495 {
3496 	struct r5conf *conf = sh->raid_conf;
3497 	struct bio **bip;
3498 	int firstwrite = 0;
3499 
3500 	if (forwrite) {
3501 		bip = &sh->dev[dd_idx].towrite;
3502 		if (!*bip)
3503 			firstwrite = 1;
3504 	} else {
3505 		bip = &sh->dev[dd_idx].toread;
3506 	}
3507 
3508 	while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector)
3509 		bip = &(*bip)->bi_next;
3510 
3511 	if (!forwrite || previous)
3512 		clear_bit(STRIPE_BATCH_READY, &sh->state);
3513 
3514 	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
3515 	if (*bip)
3516 		bi->bi_next = *bip;
3517 	*bip = bi;
3518 	bio_inc_remaining(bi);
3519 	md_write_inc(conf->mddev, bi);
3520 
3521 	if (forwrite) {
3522 		/* check if page is covered */
3523 		sector_t sector = sh->dev[dd_idx].sector;
3524 		for (bi=sh->dev[dd_idx].towrite;
3525 		     sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) &&
3526 			     bi && bi->bi_iter.bi_sector <= sector;
3527 		     bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) {
3528 			if (bio_end_sector(bi) >= sector)
3529 				sector = bio_end_sector(bi);
3530 		}
3531 		if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf))
3532 			if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags))
3533 				sh->overwrite_disks++;
3534 	}
3535 
3536 	pr_debug("added bi b#%llu to stripe s#%llu, disk %d, logical %llu\n",
3537 		 (*bip)->bi_iter.bi_sector, sh->sector, dd_idx,
3538 		 sh->dev[dd_idx].sector);
3539 
3540 	if (conf->mddev->bitmap && firstwrite && !sh->batch_head) {
3541 		sh->bm_seq = conf->seq_flush+1;
3542 		set_bit(STRIPE_BIT_DELAY, &sh->state);
3543 	}
3544 }
3545 
3546 /*
3547  * Each stripe/dev can have one or more bios attached.
3548  * toread/towrite point to the first in a chain.
3549  * The bi_next chain must be in order.
3550  */
3551 static bool add_stripe_bio(struct stripe_head *sh, struct bio *bi,
3552 			   int dd_idx, int forwrite, int previous)
3553 {
3554 	spin_lock_irq(&sh->stripe_lock);
3555 
3556 	if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
3557 		set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
3558 		spin_unlock_irq(&sh->stripe_lock);
3559 		return false;
3560 	}
3561 
3562 	__add_stripe_bio(sh, bi, dd_idx, forwrite, previous);
3563 	spin_unlock_irq(&sh->stripe_lock);
3564 	return true;
3565 }
3566 
3567 static void end_reshape(struct r5conf *conf);
3568 
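/* stripe_set_idx - fill in the parity indexes of a stripe_head for the
 * given stripe number, using the previous or current geometry as requested,
 * by letting raid5_compute_sector() do the layout calculation.
 */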
3569 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
3570 			    struct stripe_head *sh)
3571 {
3572 	int sectors_per_chunk =
3573 		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
3574 	int dd_idx;
3575 	int chunk_offset = sector_div(stripe, sectors_per_chunk);
3576 	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
3577 
3578 	raid5_compute_sector(conf,
3579 			     stripe * (disks - conf->max_degraded)
3580 			     *sectors_per_chunk + chunk_offset,
3581 			     previous,
3582 			     &dd_idx, sh);
3583 }
3584 
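/* handle_failed_stripe - too many devices have failed for this stripe to be
 * handled, so fail all pending I/O on it: complete every toread, towrite
 * and written bio with an error, record bad blocks for read errors, and
 * clear R5_LOCKED so the stripe can be retired.
 */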
3585 static void
3586 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3587 		     struct stripe_head_state *s, int disks)
3588 {
3589 	int i;
3590 	BUG_ON(sh->batch_head);
3591 	for (i = disks; i--; ) {
3592 		struct bio *bi;
3593 
3594 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
3595 			struct md_rdev *rdev = conf->disks[i].rdev;
3596 
3597 			if (rdev && test_bit(In_sync, &rdev->flags) &&
3598 			    !test_bit(Faulty, &rdev->flags))
3599 				atomic_inc(&rdev->nr_pending);
3600 			else
3601 				rdev = NULL;
3602 			if (rdev) {
3603 				rdev_set_badblocks(rdev,
3604 						   sh->sector,
3605 						   RAID5_STRIPE_SECTORS(conf),
3606 						   0);
3607 				rdev_dec_pending(rdev, conf->mddev);
3608 			}
3609 		}
3610 		spin_lock_irq(&sh->stripe_lock);
3611 		/* fail all writes first */
3612 		bi = sh->dev[i].towrite;
3613 		sh->dev[i].towrite = NULL;
3614 		sh->overwrite_disks = 0;
3615 		spin_unlock_irq(&sh->stripe_lock);
3616 
3617 		log_stripe_write_finished(sh);
3618 
3619 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3620 			wake_up_bit(&sh->dev[i].flags, R5_Overlap);
3621 
3622 		while (bi && bi->bi_iter.bi_sector <
3623 			sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
3624 			struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector);
3625 
3626 			md_write_end(conf->mddev);
3627 			bio_io_error(bi);
3628 			bi = nextbi;
3629 		}
3630 		/* and fail all 'written' */
3631 		bi = sh->dev[i].written;
3632 		sh->dev[i].written = NULL;
3633 		if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) {
3634 			WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
3635 			sh->dev[i].page = sh->dev[i].orig_page;
3636 		}
3637 
3638 		while (bi && bi->bi_iter.bi_sector <
3639 		       sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
3640 			struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
3641 
3642 			md_write_end(conf->mddev);
3643 			bio_io_error(bi);
3644 			bi = bi2;
3645 		}
3646 
3647 		/* fail any reads if this device is non-operational and
3648 		 * the data has not reached the cache yet.
3649 		 */
3650 		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
3651 		    s->failed > conf->max_degraded &&
3652 		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
3653 		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
3654 			spin_lock_irq(&sh->stripe_lock);
3655 			bi = sh->dev[i].toread;
3656 			sh->dev[i].toread = NULL;
3657 			spin_unlock_irq(&sh->stripe_lock);
3658 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3659 				wake_up_bit(&sh->dev[i].flags, R5_Overlap);
3660 			if (bi)
3661 				s->to_read--;
3662 			while (bi && bi->bi_iter.bi_sector <
3663 			       sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
3664 				struct bio *nextbi =
3665 					r5_next_bio(conf, bi, sh->dev[i].sector);
3666 
3667 				bio_io_error(bi);
3668 				bi = nextbi;
3669 			}
3670 		}
3671 		/* If we were in the middle of a write the parity block might
3672 		 * still be locked - so just clear all R5_LOCKED flags
3673 		 */
3674 		clear_bit(R5_LOCKED, &sh->dev[i].flags);
3675 	}
3676 	s->to_write = 0;
3677 	s->written = 0;
3678 
3679 	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
3680 		if (atomic_dec_and_test(&conf->pending_full_writes))
3681 			md_wakeup_thread(conf->mddev->thread);
3682 }
3683 
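/* handle_failed_sync - a sync/recovery request cannot be completed for this
 * stripe.  For recovery, record bad blocks on every device that is not yet
 * in sync (flagging a sync error if that fails), then tell md the sectors
 * are done so resync can continue past them.
 */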
3684 static void
3685 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
3686 		   struct stripe_head_state *s)
3687 {
3688 	int abort = 0;
3689 	int i;
3690 
3691 	BUG_ON(sh->batch_head);
3692 	clear_bit(STRIPE_SYNCING, &sh->state);
3693 	if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
3694 		wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap);
3695 	s->syncing = 0;
3696 	s->replacing = 0;
3697 	/* There is nothing more to do for sync/check/repair.
3698 	 * Don't even need to abort as that is handled elsewhere
3699 	 * if needed, and not always wanted e.g. if there is a known
3700 	 * bad block here.
3701 	 * For recover/replace we need to record a bad block on all
3702 	 * non-sync devices, or abort the recovery
3703 	 */
3704 	if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
3705 		/* During recovery devices cannot be removed, so
3706 		 * locking and refcounting of rdevs is not needed
3707 		 */
3708 		for (i = 0; i < conf->raid_disks; i++) {
3709 			struct md_rdev *rdev = conf->disks[i].rdev;
3710 
3711 			if (rdev
3712 			    && !test_bit(Faulty, &rdev->flags)
3713 			    && !test_bit(In_sync, &rdev->flags)
3714 			    && !rdev_set_badblocks(rdev, sh->sector,
3715 						   RAID5_STRIPE_SECTORS(conf), 0))
3716 				abort = 1;
3717 			rdev = conf->disks[i].replacement;
3718 
3719 			if (rdev
3720 			    && !test_bit(Faulty, &rdev->flags)
3721 			    && !test_bit(In_sync, &rdev->flags)
3722 			    && !rdev_set_badblocks(rdev, sh->sector,
3723 						   RAID5_STRIPE_SECTORS(conf), 0))
3724 				abort = 1;
3725 		}
3726 	}
3727 	md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf));
3728 
3729 	if (abort)
3730 		md_sync_error(conf->mddev);
3731 }
3732 
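/* want_replace - does this slot have a working replacement device that
 * still needs the data at sh->sector written to it?  True when the
 * replacement is present, not Faulty, not yet In_sync, and either its
 * recovery or the array resync has not passed this stripe.
 */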
3733 static int want_replace(struct stripe_head *sh, int disk_idx)
3734 {
3735 	struct md_rdev *rdev;
3736 	int rv = 0;
3737 
3738 	rdev = sh->raid_conf->disks[disk_idx].replacement;
3739 	if (rdev
3740 	    && !test_bit(Faulty, &rdev->flags)
3741 	    && !test_bit(In_sync, &rdev->flags)
3742 	    && (rdev->recovery_offset <= sh->sector
3743 		|| rdev->mddev->resync_offset <= sh->sector))
3744 		rv = 1;
3745 	return rv;
3746 }
3747 
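/* need_this_block - decide whether the block on the given device must be
 * read or computed before the stripe can make progress.  This covers
 * pending reads, reads needed for sync/expand/replace, and the extra reads
 * required so that a read-modify-write or reconstruct-write can generate
 * correct parity when devices have failed or parity cannot yet be trusted.
 */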
3748 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3749 			   int disk_idx, int disks)
3750 {
3751 	struct r5dev *dev = &sh->dev[disk_idx];
3752 	struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
3753 				  &sh->dev[s->failed_num[1]] };
3754 	struct mddev *mddev = sh->raid_conf->mddev;
3755 	bool force_rcw = false;
3756 	int i;
3757 
3758 	if (sh->raid_conf->rmw_level == PARITY_DISABLE_RMW ||
3759 	    (mddev->bitmap_ops && mddev->bitmap_ops->blocks_synced &&
3760 	     !mddev->bitmap_ops->blocks_synced(mddev, sh->sector)))
3761 		force_rcw = true;
3762 
3763 	if (test_bit(R5_LOCKED, &dev->flags) ||
3764 	    test_bit(R5_UPTODATE, &dev->flags))
3765 		/* No point reading this as we already have it or have
3766 		 * decided to get it.
3767 		 */
3768 		return 0;
3769 
3770 	if (dev->toread ||
3771 	    (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)))
3772 		/* We need this block to directly satisfy a request */
3773 		return 1;
3774 
3775 	if (s->syncing || s->expanding ||
3776 	    (s->replacing && want_replace(sh, disk_idx)))
3777 		/* When syncing or expanding, we read everything.
3778 		 * When replacing, we need the replaced block.
3779 		 */
3780 		return 1;
3781 
3782 	if ((s->failed >= 1 && fdev[0]->toread) ||
3783 	    (s->failed >= 2 && fdev[1]->toread))
3784 		/* If we want to read from a failed device, then
3785 		 * we need to actually read every other device.
3786 		 */
3787 		return 1;
3788 
3789 	/* Sometimes neither read-modify-write nor reconstruct-write
3790 	 * cycles can work.  In those cases we read every block we
3791 	 * can.  Then the parity-update is certain to have enough to
3792 	 * work with.
3793 	 * This can only be a problem when we need to write something,
3794 	 * and some device has failed.  If either of those tests
3795 	 * fails, we need look no further.
3796 	 */
3797 	if (!s->failed || !s->to_write)
3798 		return 0;
3799 
3800 	if (test_bit(R5_Insync, &dev->flags) &&
3801 	    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3802 		/* Pre-reads are not permitted until after a short delay
3803 		 * to gather multiple requests.  However, if this
3804 		 * device is not Insync, the block can only be computed
3805 		 * and there is no need to delay that.
3806 		 */
3807 		return 0;
3808 
3809 	for (i = 0; i < s->failed && i < 2; i++) {
3810 		if (fdev[i]->towrite &&
3811 		    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
3812 		    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
3813 			/* If we have a partial write to a failed
3814 			 * device, then we will need to reconstruct
3815 			 * the content of that device, so all other
3816 			 * devices must be read.
3817 			 */
3818 			return 1;
3819 
3820 		if (s->failed >= 2 &&
3821 		    (fdev[i]->towrite ||
3822 		     s->failed_num[i] == sh->pd_idx ||
3823 		     s->failed_num[i] == sh->qd_idx) &&
3824 		    !test_bit(R5_UPTODATE, &fdev[i]->flags))
3825 			/* In max degraded raid6, if the failed disk is P or Q,
3826 			 * or we want to read the failed disk, we need to do
3827 			 * reconstruct-write.
3828 			 */
3829 			force_rcw = true;
3830 	}
3831 
3832 	/* If we are forced to do a reconstruct-write, because parity
3833 	 * cannot be trusted and we are currently recovering it, we
3834 	 * need to be extra careful.
3835 	 * If one of the devices that we would need to read, because
3836 	 * it is not being overwritten (and maybe not written at all)
3837 	 * is missing/faulty, then we need to read everything we can.
3838 	 */
3839 	if (!force_rcw &&
3840 	    sh->sector < sh->raid_conf->mddev->resync_offset)
3841 		/* reconstruct-write isn't being forced */
3842 		return 0;
3843 	for (i = 0; i < s->failed && i < 2; i++) {
3844 		if (s->failed_num[i] != sh->pd_idx &&
3845 		    s->failed_num[i] != sh->qd_idx &&
3846 		    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
3847 		    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
3848 			return 1;
3849 	}
3850 
3851 	return 0;
3852 }
3853 
3854 /* fetch_block - checks the given member device to see if its data needs
3855  * to be read or computed to satisfy a request.
3856  *
3857  * Returns 1 when no more member devices need to be checked, otherwise returns
3858  * 0 to tell the loop in handle_stripe_fill to continue
3859  */
3860 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
3861 		       int disk_idx, int disks)
3862 {
3863 	struct r5dev *dev = &sh->dev[disk_idx];
3864 
3865 	/* is the data in this block needed, and can we get it? */
3866 	if (need_this_block(sh, s, disk_idx, disks)) {
3867 		/* we would like to get this block, possibly by computing it,
3868 		 * otherwise read it if the backing disk is insync
3869 		 */
3870 		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
3871 		BUG_ON(test_bit(R5_Wantread, &dev->flags));
3872 		BUG_ON(sh->batch_head);
3873 
3874 		/*
3875 		 * In the raid6 case if the only non-uptodate disk is P
3876 		 * then we already trusted P to compute the other failed
3877 		 * drives. It is safe to compute rather than re-read P.
3878 		 * In other cases we only compute blocks from failed
3879 		 * devices, otherwise check/repair might fail to detect
3880 		 * a real inconsistency.
3881 		 */
3882 
3883 		if ((s->uptodate == disks - 1) &&
3884 		    ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) ||
3885 		    (s->failed && (disk_idx == s->failed_num[0] ||
3886 				   disk_idx == s->failed_num[1])))) {
3887 			/* the disk has failed, and we're requested to fetch it;
3888 			 * so compute it
3889 			 */
3890 			pr_debug("Computing stripe %llu block %d\n",
3891 			       (unsigned long long)sh->sector, disk_idx);
3892 			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3893 			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3894 			set_bit(R5_Wantcompute, &dev->flags);
3895 			sh->ops.target = disk_idx;
3896 			sh->ops.target2 = -1; /* no 2nd target */
3897 			s->req_compute = 1;
3898 			/* Careful: from this point on 'uptodate' is in the eye
3899 			 * of raid_run_ops which services 'compute' operations
3900 			 * before writes. R5_Wantcompute flags a block that will
3901 			 * be R5_UPTODATE by the time it is needed for a
3902 			 * subsequent operation.
3903 			 */
3904 			s->uptodate++;
3905 			return 1;
3906 		} else if (s->uptodate == disks-2 && s->failed >= 2) {
3907 			/* Computing 2-failure is *very* expensive; only
3908 			 * do it if failed >= 2
3909 			 */
3910 			int other;
3911 			for (other = disks; other--; ) {
3912 				if (other == disk_idx)
3913 					continue;
3914 				if (!test_bit(R5_UPTODATE,
3915 				      &sh->dev[other].flags))
3916 					break;
3917 			}
3918 			BUG_ON(other < 0);
3919 			if (test_bit(R5_LOCKED, &sh->dev[other].flags))
3920 				return 0;
3921 			pr_debug("Computing stripe %llu blocks %d,%d\n",
3922 			       (unsigned long long)sh->sector,
3923 			       disk_idx, other);
3924 			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3925 			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3926 			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
3927 			set_bit(R5_Wantcompute, &sh->dev[other].flags);
3928 			sh->ops.target = disk_idx;
3929 			sh->ops.target2 = other;
3930 			s->uptodate += 2;
3931 			s->req_compute = 1;
3932 			return 1;
3933 		} else if (test_bit(R5_Insync, &dev->flags)) {
3934 			set_bit(R5_LOCKED, &dev->flags);
3935 			set_bit(R5_Wantread, &dev->flags);
3936 			s->locked++;
3937 			pr_debug("Reading block %d (sync=%d)\n",
3938 				disk_idx, s->syncing);
3939 		}
3940 	}
3941 
3942 	return 0;
3943 }
3944 
3945 /*
3946  * handle_stripe_fill - read or compute data to satisfy pending requests.
3947  */
3948 static void handle_stripe_fill(struct stripe_head *sh,
3949 			       struct stripe_head_state *s,
3950 			       int disks)
3951 {
3952 	int i;
3953 
3954 	/* look for blocks to read/compute, skip this if a compute
3955 	 * is already in flight, or if the stripe contents are in the
3956 	 * midst of changing due to a write
3957 	 */
3958 	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
3959 	    !sh->reconstruct_state) {
3960 
3961 		/*
3962 		 * For a degraded stripe with data in the journal, do not handle
3963 		 * read requests yet; instead, flush the stripe to the raid
3964 		 * disks first. This avoids handling complex rmw of the write
3965 		 * back cache (prexor with orig_page, and then xor with
3966 		 * page) in the read path.
3967 		 */
3968 		if (s->to_read && s->injournal && s->failed) {
3969 			if (test_bit(STRIPE_R5C_CACHING, &sh->state))
3970 				r5c_make_stripe_write_out(sh);
3971 			goto out;
3972 		}
3973 
3974 		for (i = disks; i--; )
3975 			if (fetch_block(sh, s, i, disks))
3976 				break;
3977 	}
3978 out:
3979 	set_bit(STRIPE_HANDLE, &sh->state);
3980 }
3981 
3982 static void break_stripe_batch_list(struct stripe_head *head_sh,
3983 				    unsigned long handle_flags);
3984 /* handle_stripe_clean_event
3985  * any written block on an uptodate or failed drive can be returned.
3986  * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
3987  * never LOCKED, so we don't need to test 'failed' directly.
3988  */
3989 static void handle_stripe_clean_event(struct r5conf *conf,
3990 	struct stripe_head *sh, int disks)
3991 {
3992 	int i;
3993 	struct r5dev *dev;
3994 	int discard_pending = 0;
3995 	struct stripe_head *head_sh = sh;
3996 	bool do_endio = false;
3997 
3998 	for (i = disks; i--; )
3999 		if (sh->dev[i].written) {
4000 			dev = &sh->dev[i];
4001 			if (!test_bit(R5_LOCKED, &dev->flags) &&
4002 			    (test_bit(R5_UPTODATE, &dev->flags) ||
4003 			     test_bit(R5_Discard, &dev->flags) ||
4004 			     test_bit(R5_SkipCopy, &dev->flags))) {
4005 				/* We can return any write requests */
4006 				struct bio *wbi, *wbi2;
4007 				pr_debug("Return write for disc %d\n", i);
4008 				if (test_and_clear_bit(R5_Discard, &dev->flags))
4009 					clear_bit(R5_UPTODATE, &dev->flags);
4010 				if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
4011 					WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
4012 				}
4013 				do_endio = true;
4014 
4015 returnbi:
4016 				dev->page = dev->orig_page;
4017 				wbi = dev->written;
4018 				dev->written = NULL;
4019 				while (wbi && wbi->bi_iter.bi_sector <
4020 					dev->sector + RAID5_STRIPE_SECTORS(conf)) {
4021 					wbi2 = r5_next_bio(conf, wbi, dev->sector);
4022 					md_write_end(conf->mddev);
4023 					bio_endio(wbi);
4024 					wbi = wbi2;
4025 				}
4026 
4027 				if (head_sh->batch_head) {
4028 					sh = list_first_entry(&sh->batch_list,
4029 							      struct stripe_head,
4030 							      batch_list);
4031 					if (sh != head_sh) {
4032 						dev = &sh->dev[i];
4033 						goto returnbi;
4034 					}
4035 				}
4036 				sh = head_sh;
4037 				dev = &sh->dev[i];
4038 			} else if (test_bit(R5_Discard, &dev->flags))
4039 				discard_pending = 1;
4040 		}
4041 
4042 	log_stripe_write_finished(sh);
4043 
4044 	if (!discard_pending &&
4045 	    test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
4046 		int hash;
4047 		clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
4048 		clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
4049 		if (sh->qd_idx >= 0) {
4050 			clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
4051 			clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
4052 		}
4053 		/* now that discard is done we can proceed with any sync */
4054 		clear_bit(STRIPE_DISCARD, &sh->state);
4055 		/*
4056 		 * SCSI discard will change some bio fields and the stripe has
4057 		 * no updated data, so remove it from hash list and the stripe
4058 		 * will be reinitialized
4059 		 */
4060 unhash:
4061 		hash = sh->hash_lock_index;
4062 		spin_lock_irq(conf->hash_locks + hash);
4063 		remove_hash(sh);
4064 		spin_unlock_irq(conf->hash_locks + hash);
4065 		if (head_sh->batch_head) {
4066 			sh = list_first_entry(&sh->batch_list,
4067 					      struct stripe_head, batch_list);
4068 			if (sh != head_sh)
4069 				goto unhash;
4070 		}
4071 		sh = head_sh;
4072 
4073 		if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
4074 			set_bit(STRIPE_HANDLE, &sh->state);
4075 
4076 	}
4077 
4078 	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
4079 		if (atomic_dec_and_test(&conf->pending_full_writes))
4080 			md_wakeup_thread(conf->mddev->thread);
4081 
4082 	if (head_sh->batch_head && do_endio)
4083 		break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
4084 }
4085 
4086 /*
4087  * For RMW in write back cache, we need an extra page in prexor to store the
4088  * old data. This page is stored in dev->orig_page.
4089  *
4090  * This function checks whether we have data for prexor. The exact logic
4091  * is:
4092  *       R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE)
4093  */
4094 static inline bool uptodate_for_rmw(struct r5dev *dev)
4095 {
4096 	return (test_bit(R5_UPTODATE, &dev->flags)) &&
4097 		(!test_bit(R5_InJournal, &dev->flags) ||
4098 		 test_bit(R5_OrigPageUPTDODATE, &dev->flags));
4099 }
4100 
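/* handle_stripe_dirtying - choose between read-modify-write and
 * reconstruct-write for the pending writes on this stripe.  Count how many
 * blocks each approach would need to read, force reconstruct-write when the
 * parity cannot be trusted (resync pending or lazy recovery incomplete),
 * schedule the required reads, and start the reconstruction once nothing is
 * locked and enough data is available.
 */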
4101 static int handle_stripe_dirtying(struct r5conf *conf,
4102 				  struct stripe_head *sh,
4103 				  struct stripe_head_state *s,
4104 				  int disks)
4105 {
4106 	int rmw = 0, rcw = 0, i;
4107 	struct mddev *mddev = conf->mddev;
4108 	sector_t resync_offset = mddev->resync_offset;
4109 
4110 	/* Check whether resync is now happening or should start.
4111 	 * If yes, then the array is dirty (after unclean shutdown or
4112 	 * initial creation), so parity in some stripes might be inconsistent.
4113 	 * In this case, we need to always do reconstruct-write, to ensure
4114 	 * that in case of drive failure or read-error correction, we
4115 	 * generate correct data from the parity.
4116 	 */
4117 	if (conf->rmw_level == PARITY_DISABLE_RMW ||
4118 	    (resync_offset < MaxSector && sh->sector >= resync_offset &&
4119 	     s->failed == 0)) {
4120 		/* Calculate the real rcw later - for now make it
4121 		 * look like rcw is cheaper
4122 		 */
4123 		rcw = 1; rmw = 2;
4124 		pr_debug("force RCW rmw_level=%u, resync_offset=%llu sh->sector=%llu\n",
4125 			 conf->rmw_level, (unsigned long long)resync_offset,
4126 			 (unsigned long long)sh->sector);
4127 	} else if (mddev->bitmap_ops && mddev->bitmap_ops->blocks_synced &&
4128 		   !mddev->bitmap_ops->blocks_synced(mddev, sh->sector)) {
4129 		/* The initial recover is not done, must read everything */
4130 		rcw = 1; rmw = 2;
4131 		pr_debug("force RCW by lazy recovery, sh->sector=%llu\n",
4132 			 sh->sector);
4133 	} else for (i = disks; i--; ) {
4134 		/* would I have to read this buffer for read_modify_write */
4135 		struct r5dev *dev = &sh->dev[i];
4136 		if (((dev->towrite && !delay_towrite(conf, dev, s)) ||
4137 		     i == sh->pd_idx || i == sh->qd_idx ||
4138 		     test_bit(R5_InJournal, &dev->flags)) &&
4139 		    !test_bit(R5_LOCKED, &dev->flags) &&
4140 		    !(uptodate_for_rmw(dev) ||
4141 		      test_bit(R5_Wantcompute, &dev->flags))) {
4142 			if (test_bit(R5_Insync, &dev->flags))
4143 				rmw++;
4144 			else
4145 				rmw += 2*disks;  /* cannot read it */
4146 		}
4147 		/* Would I have to read this buffer for reconstruct_write */
4148 		if (!test_bit(R5_OVERWRITE, &dev->flags) &&
4149 		    i != sh->pd_idx && i != sh->qd_idx &&
4150 		    !test_bit(R5_LOCKED, &dev->flags) &&
4151 		    !(test_bit(R5_UPTODATE, &dev->flags) ||
4152 		      test_bit(R5_Wantcompute, &dev->flags))) {
4153 			if (test_bit(R5_Insync, &dev->flags))
4154 				rcw++;
4155 			else
4156 				rcw += 2*disks;
4157 		}
4158 	}
4159 
4160 	pr_debug("for sector %llu state 0x%lx, rmw=%d rcw=%d\n",
4161 		 (unsigned long long)sh->sector, sh->state, rmw, rcw);
4162 	set_bit(STRIPE_HANDLE, &sh->state);
4163 	if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) {
4164 		/* prefer read-modify-write, but need to get some data */
4165 		mddev_add_trace_msg(mddev, "raid5 rmw %llu %d",
4166 				sh->sector, rmw);
4167 
4168 		for (i = disks; i--; ) {
4169 			struct r5dev *dev = &sh->dev[i];
4170 			if (test_bit(R5_InJournal, &dev->flags) &&
4171 			    dev->page == dev->orig_page &&
4172 			    !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) {
4173 				/* alloc page for prexor */
4174 				struct page *p = alloc_page(GFP_NOIO);
4175 
4176 				if (p) {
4177 					dev->orig_page = p;
4178 					continue;
4179 				}
4180 
4181 				/*
4182 				 * alloc_page() failed, try to use
4183 				 * disk_info->extra_page
4184 				 */
4185 				if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE,
4186 						      &conf->cache_state)) {
4187 					r5c_use_extra_page(sh);
4188 					break;
4189 				}
4190 
4191 				/* extra_page in use, add to delayed_list */
4192 				set_bit(STRIPE_DELAYED, &sh->state);
4193 				s->waiting_extra_page = 1;
4194 				return -EAGAIN;
4195 			}
4196 		}
4197 
4198 		for (i = disks; i--; ) {
4199 			struct r5dev *dev = &sh->dev[i];
4200 			if (((dev->towrite && !delay_towrite(conf, dev, s)) ||
4201 			     i == sh->pd_idx || i == sh->qd_idx ||
4202 			     test_bit(R5_InJournal, &dev->flags)) &&
4203 			    !test_bit(R5_LOCKED, &dev->flags) &&
4204 			    !(uptodate_for_rmw(dev) ||
4205 			      test_bit(R5_Wantcompute, &dev->flags)) &&
4206 			    test_bit(R5_Insync, &dev->flags)) {
4207 				if (test_bit(STRIPE_PREREAD_ACTIVE,
4208 					     &sh->state)) {
4209 					pr_debug("Read_old block %d for r-m-w\n",
4210 						 i);
4211 					set_bit(R5_LOCKED, &dev->flags);
4212 					set_bit(R5_Wantread, &dev->flags);
4213 					s->locked++;
4214 				} else
4215 					set_bit(STRIPE_DELAYED, &sh->state);
4216 			}
4217 		}
4218 	}
4219 	if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) {
4220 		/* want reconstruct write, but need to get some data */
4221 		int qread = 0;
4222 		rcw = 0;
4223 		for (i = disks; i--; ) {
4224 			struct r5dev *dev = &sh->dev[i];
4225 			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
4226 			    i != sh->pd_idx && i != sh->qd_idx &&
4227 			    !test_bit(R5_LOCKED, &dev->flags) &&
4228 			    !(test_bit(R5_UPTODATE, &dev->flags) ||
4229 			      test_bit(R5_Wantcompute, &dev->flags))) {
4230 				rcw++;
4231 				if (test_bit(R5_Insync, &dev->flags) &&
4232 				    test_bit(STRIPE_PREREAD_ACTIVE,
4233 					     &sh->state)) {
4234 					pr_debug("Read_old block "
4235 						"%d for Reconstruct\n", i);
4236 					set_bit(R5_LOCKED, &dev->flags);
4237 					set_bit(R5_Wantread, &dev->flags);
4238 					s->locked++;
4239 					qread++;
4240 				} else
4241 					set_bit(STRIPE_DELAYED, &sh->state);
4242 			}
4243 		}
4244 		if (rcw && !mddev_is_dm(mddev))
4245 			blk_add_trace_msg(mddev->gendisk->queue,
4246 				"raid5 rcw %llu %d %d %d",
4247 				(unsigned long long)sh->sector, rcw, qread,
4248 				test_bit(STRIPE_DELAYED, &sh->state));
4249 	}
4250 
4251 	if (rcw > disks && rmw > disks &&
4252 	    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4253 		set_bit(STRIPE_DELAYED, &sh->state);
4254 
4255 	/* now if nothing is locked, and if we have enough data,
4256 	 * we can start a write request
4257 	 */
4258 	/* since handle_stripe can be called at any time we need to handle the
4259 	 * case where a compute block operation has been submitted and then a
4260 	 * subsequent call wants to start a write request.  raid_run_ops only
4261 	 * handles the case where compute block and reconstruct are requested
4262 	 * simultaneously.  If this is not the case then new writes need to be
4263 	 * held off until the compute completes.
4264 	 */
4265 	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
4266 	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
4267 	     !test_bit(STRIPE_BIT_DELAY, &sh->state)))
4268 		schedule_reconstruction(sh, s, rcw == 0, 0);
4269 	return 0;
4270 }
4271 
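/* handle_parity_checks5 - state machine driving 'check'/'repair' of the
 * parity block on RAID4/5.  Run an xor check of P, count any mismatch, and
 * either leave it alone (MD_RECOVERY_CHECK) or recompute and write back the
 * parity.  With a failed device, the recomputed block for that device is
 * written back instead.
 */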
4272 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
4273 				struct stripe_head_state *s, int disks)
4274 {
4275 	struct r5dev *dev = NULL;
4276 
4277 	BUG_ON(sh->batch_head);
4278 	set_bit(STRIPE_HANDLE, &sh->state);
4279 
4280 	switch (sh->check_state) {
4281 	case check_state_idle:
4282 		/* start a new check operation if there are no failures */
4283 		if (s->failed == 0) {
4284 			BUG_ON(s->uptodate != disks);
4285 			sh->check_state = check_state_run;
4286 			set_bit(STRIPE_OP_CHECK, &s->ops_request);
4287 			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
4288 			s->uptodate--;
4289 			break;
4290 		}
4291 		dev = &sh->dev[s->failed_num[0]];
4292 		fallthrough;
4293 	case check_state_compute_result:
4294 		sh->check_state = check_state_idle;
4295 		if (!dev)
4296 			dev = &sh->dev[sh->pd_idx];
4297 
4298 		/* check that a write has not made the stripe insync */
4299 		if (test_bit(STRIPE_INSYNC, &sh->state))
4300 			break;
4301 
4302 		/* either failed parity check, or recovery is happening */
4303 		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
4304 		BUG_ON(s->uptodate != disks);
4305 
4306 		set_bit(R5_LOCKED, &dev->flags);
4307 		s->locked++;
4308 		set_bit(R5_Wantwrite, &dev->flags);
4309 
4310 		set_bit(STRIPE_INSYNC, &sh->state);
4311 		break;
4312 	case check_state_run:
4313 		break; /* we will be called again upon completion */
4314 	case check_state_check_result:
4315 		sh->check_state = check_state_idle;
4316 
4317 		/* if a failure occurred during the check operation, leave
4318 		 * STRIPE_INSYNC not set and let the stripe be handled again
4319 		 */
4320 		if (s->failed)
4321 			break;
4322 
4323 		/* handle a successful check operation, if parity is correct
4324 		 * we are done.  Otherwise update the mismatch count and repair
4325 		 * parity if !MD_RECOVERY_CHECK
4326 		 */
4327 		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
4328 			/* parity is correct (on disc,
4329 			 * not in buffer any more)
4330 			 */
4331 			set_bit(STRIPE_INSYNC, &sh->state);
4332 		else {
4333 			atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches);
4334 			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
4335 				/* don't try to repair!! */
4336 				set_bit(STRIPE_INSYNC, &sh->state);
4337 				pr_warn_ratelimited("%s: mismatch sector in range "
4338 						    "%llu-%llu\n", mdname(conf->mddev),
4339 						    (unsigned long long) sh->sector,
4340 						    (unsigned long long) sh->sector +
4341 						    RAID5_STRIPE_SECTORS(conf));
4342 			} else {
4343 				sh->check_state = check_state_compute_run;
4344 				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
4345 				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
4346 				set_bit(R5_Wantcompute,
4347 					&sh->dev[sh->pd_idx].flags);
4348 				sh->ops.target = sh->pd_idx;
4349 				sh->ops.target2 = -1;
4350 				s->uptodate++;
4351 			}
4352 		}
4353 		break;
4354 	case check_state_compute_run:
4355 		break;
4356 	default:
4357 		pr_err("%s: unknown check_state: %d sector: %llu\n",
4358 		       __func__, sh->check_state,
4359 		       (unsigned long long) sh->sector);
4360 		BUG();
4361 	}
4362 }
4363 
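/* handle_parity_checks6 - state machine driving 'check'/'repair' of P and Q
 * on RAID6.  Check whichever of P and Q can still be verified given the
 * failed devices, write out any blocks that were recomputed, and on a
 * mismatch either just count it (MD_RECOVERY_CHECK) or recompute the bad
 * parity block(s).
 */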
4364 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
4365 				  struct stripe_head_state *s,
4366 				  int disks)
4367 {
4368 	int pd_idx = sh->pd_idx;
4369 	int qd_idx = sh->qd_idx;
4370 	struct r5dev *dev;
4371 
4372 	BUG_ON(sh->batch_head);
4373 	set_bit(STRIPE_HANDLE, &sh->state);
4374 
4375 	BUG_ON(s->failed > 2);
4376 
4377 	/* Want to check and possibly repair P and Q.
4378 	 * However there could be one 'failed' device, in which
4379 	 * case we can only check one of them, possibly using the
4380 	 * other to generate missing data
4381 	 */
4382 
4383 	switch (sh->check_state) {
4384 	case check_state_idle:
4385 		/* start a new check operation if there are < 2 failures */
4386 		if (s->failed == s->q_failed) {
4387 			/* The only possible failed device holds Q, so it
4388 			 * makes sense to check P (If anything else were failed,
4389 			 * we would have used P to recreate it).
4390 			 */
4391 			sh->check_state = check_state_run;
4392 		}
4393 		if (!s->q_failed && s->failed < 2) {
4394 			/* Q is not failed, and we didn't use it to generate
4395 			 * anything, so it makes sense to check it
4396 			 */
4397 			if (sh->check_state == check_state_run)
4398 				sh->check_state = check_state_run_pq;
4399 			else
4400 				sh->check_state = check_state_run_q;
4401 		}
4402 
4403 		/* discard potentially stale zero_sum_result */
4404 		sh->ops.zero_sum_result = 0;
4405 
4406 		if (sh->check_state == check_state_run) {
4407 			/* async_xor_zero_sum destroys the contents of P */
4408 			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
4409 			s->uptodate--;
4410 		}
4411 		if (sh->check_state >= check_state_run &&
4412 		    sh->check_state <= check_state_run_pq) {
4413 			/* async_syndrome_zero_sum preserves P and Q, so
4414 			 * no need to mark them !uptodate here
4415 			 */
4416 			set_bit(STRIPE_OP_CHECK, &s->ops_request);
4417 			break;
4418 		}
4419 
4420 		/* we have 2-disk failure */
4421 		BUG_ON(s->failed != 2);
4422 		fallthrough;
4423 	case check_state_compute_result:
4424 		sh->check_state = check_state_idle;
4425 
4426 		/* check that a write has not made the stripe insync */
4427 		if (test_bit(STRIPE_INSYNC, &sh->state))
4428 			break;
4429 
4430 		/* now write out any block on a failed drive,
4431 		 * or P or Q if they were recomputed
4432 		 */
4433 		dev = NULL;
4434 		if (s->failed == 2) {
4435 			dev = &sh->dev[s->failed_num[1]];
4436 			s->locked++;
4437 			set_bit(R5_LOCKED, &dev->flags);
4438 			set_bit(R5_Wantwrite, &dev->flags);
4439 		}
4440 		if (s->failed >= 1) {
4441 			dev = &sh->dev[s->failed_num[0]];
4442 			s->locked++;
4443 			set_bit(R5_LOCKED, &dev->flags);
4444 			set_bit(R5_Wantwrite, &dev->flags);
4445 		}
4446 		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
4447 			dev = &sh->dev[pd_idx];
4448 			s->locked++;
4449 			set_bit(R5_LOCKED, &dev->flags);
4450 			set_bit(R5_Wantwrite, &dev->flags);
4451 		}
4452 		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
4453 			dev = &sh->dev[qd_idx];
4454 			s->locked++;
4455 			set_bit(R5_LOCKED, &dev->flags);
4456 			set_bit(R5_Wantwrite, &dev->flags);
4457 		}
4458 		if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags),
4459 			      "%s: disk%td not up to date\n",
4460 			      mdname(conf->mddev),
4461 			      dev - (struct r5dev *) &sh->dev)) {
4462 			clear_bit(R5_LOCKED, &dev->flags);
4463 			clear_bit(R5_Wantwrite, &dev->flags);
4464 			s->locked--;
4465 		}
4466 
4467 		set_bit(STRIPE_INSYNC, &sh->state);
4468 		break;
4469 	case check_state_run:
4470 	case check_state_run_q:
4471 	case check_state_run_pq:
4472 		break; /* we will be called again upon completion */
4473 	case check_state_check_result:
4474 		sh->check_state = check_state_idle;
4475 
4476 		/* handle a successful check operation, if parity is correct
4477 		 * we are done.  Otherwise update the mismatch count and repair
4478 		 * parity if !MD_RECOVERY_CHECK
4479 		 */
4480 		if (sh->ops.zero_sum_result == 0) {
4481 			/* both parities are correct */
4482 			if (!s->failed)
4483 				set_bit(STRIPE_INSYNC, &sh->state);
4484 			else {
4485 				/* in contrast to the raid5 case we can validate
4486 				 * parity, but still have a failure to write
4487 				 * back
4488 				 */
4489 				sh->check_state = check_state_compute_result;
4490 				/* Returning at this point means that we may go
4491 				 * off and bring p and/or q uptodate again so
4492 				 * we make sure to check zero_sum_result again
4493 				 * to verify if p or q need writeback
4494 				 */
4495 			}
4496 		} else {
4497 			atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches);
4498 			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
4499 				/* don't try to repair!! */
4500 				set_bit(STRIPE_INSYNC, &sh->state);
4501 				pr_warn_ratelimited("%s: mismatch sector in range "
4502 						    "%llu-%llu\n", mdname(conf->mddev),
4503 						    (unsigned long long) sh->sector,
4504 						    (unsigned long long) sh->sector +
4505 						    RAID5_STRIPE_SECTORS(conf));
4506 			} else {
4507 				int *target = &sh->ops.target;
4508 
4509 				sh->ops.target = -1;
4510 				sh->ops.target2 = -1;
4511 				sh->check_state = check_state_compute_run;
4512 				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
4513 				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
4514 				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
4515 					set_bit(R5_Wantcompute,
4516 						&sh->dev[pd_idx].flags);
4517 					*target = pd_idx;
4518 					target = &sh->ops.target2;
4519 					s->uptodate++;
4520 				}
4521 				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
4522 					set_bit(R5_Wantcompute,
4523 						&sh->dev[qd_idx].flags);
4524 					*target = qd_idx;
4525 					s->uptodate++;
4526 				}
4527 			}
4528 		}
4529 		break;
4530 	case check_state_compute_run:
4531 		break;
4532 	default:
4533 		pr_warn("%s: unknown check_state: %d sector: %llu\n",
4534 			__func__, sh->check_state,
4535 			(unsigned long long) sh->sector);
4536 		BUG();
4537 	}
4538 }
4539 
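/* handle_stripe_expansion - this stripe is a reshape source whose blocks
 * have all been read; copy each data block into the stripe it belongs to
 * under the new geometry and mark destination stripes ready once every one
 * of their blocks has been expanded.
 */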
4540 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
4541 {
4542 	int i;
4543 
4544 	/* We have read all the blocks in this stripe and now we need to
4545 	 * copy some of them into a target stripe for expand.
4546 	 */
4547 	struct dma_async_tx_descriptor *tx = NULL;
4548 	BUG_ON(sh->batch_head);
4549 	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
4550 	for (i = 0; i < sh->disks; i++)
4551 		if (i != sh->pd_idx && i != sh->qd_idx) {
4552 			int dd_idx, j;
4553 			struct stripe_head *sh2;
4554 			struct async_submit_ctl submit;
4555 
4556 			sector_t bn = raid5_compute_blocknr(sh, i, 1);
4557 			sector_t s = raid5_compute_sector(conf, bn, 0,
4558 							  &dd_idx, NULL);
4559 			sh2 = raid5_get_active_stripe(conf, NULL, s,
4560 				R5_GAS_NOBLOCK | R5_GAS_NOQUIESCE);
4561 			if (sh2 == NULL)
4562 				/* so far only the early blocks of this stripe
4563 				 * have been requested.  When later blocks
4564 				 * get requested, we will try again
4565 				 */
4566 				continue;
4567 			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
4568 			   test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
4569 				/* must have already done this block */
4570 				raid5_release_stripe(sh2);
4571 				continue;
4572 			}
4573 
4574 			/* place all the copies on one channel */
4575 			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
4576 			tx = async_memcpy(sh2->dev[dd_idx].page,
4577 					  sh->dev[i].page, sh2->dev[dd_idx].offset,
4578 					  sh->dev[i].offset, RAID5_STRIPE_SIZE(conf),
4579 					  &submit);
4580 
4581 			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
4582 			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
4583 			for (j = 0; j < conf->raid_disks; j++)
4584 				if (j != sh2->pd_idx &&
4585 				    j != sh2->qd_idx &&
4586 				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
4587 					break;
4588 			if (j == conf->raid_disks) {
4589 				set_bit(STRIPE_EXPAND_READY, &sh2->state);
4590 				set_bit(STRIPE_HANDLE, &sh2->state);
4591 			}
4592 			raid5_release_stripe(sh2);
4593 
4594 		}
4595 	/* done submitting copies, wait for them to complete */
4596 	async_tx_quiesce(&tx);
4597 }
4598 
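/* analyse_stripe - scan every device of the stripe and fill in the
 * stripe_head_state with the counts (locked, uptodate, to_read, to_write,
 * failed, ...) and per-device flags that the rest of handle_stripe() bases
 * its decisions on.
 */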
4599 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
4600 {
4601 	struct r5conf *conf = sh->raid_conf;
4602 	int disks = sh->disks;
4603 	struct r5dev *dev;
4604 	int i;
4605 	int do_recovery = 0;
4606 
4607 	memset(s, 0, sizeof(*s));
4608 
4609 	s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head;
4610 	s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head;
4611 	s->failed_num[0] = -1;
4612 	s->failed_num[1] = -1;
4613 	s->log_failed = r5l_log_disk_error(conf);
4614 
4615 	/* Now to look around and see what can be done */
4616 	for (i=disks; i--; ) {
4617 		struct md_rdev *rdev;
4618 		int is_bad = 0;
4619 
4620 		dev = &sh->dev[i];
4621 
4622 		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
4623 			 i, dev->flags,
4624 			 dev->toread, dev->towrite, dev->written);
4625 		/* maybe we can reply to a read
4626 		 *
4627 		 * new wantfill requests are only permitted while
4628 		 * ops_complete_biofill is guaranteed to be inactive
4629 		 */
4630 		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
4631 		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
4632 			set_bit(R5_Wantfill, &dev->flags);
4633 
4634 		/* now count some things */
4635 		if (test_bit(R5_LOCKED, &dev->flags))
4636 			s->locked++;
4637 		if (test_bit(R5_UPTODATE, &dev->flags))
4638 			s->uptodate++;
4639 		if (test_bit(R5_Wantcompute, &dev->flags)) {
4640 			s->compute++;
4641 			BUG_ON(s->compute > 2);
4642 		}
4643 
4644 		if (test_bit(R5_Wantfill, &dev->flags))
4645 			s->to_fill++;
4646 		else if (dev->toread)
4647 			s->to_read++;
4648 		if (dev->towrite) {
4649 			s->to_write++;
4650 			if (!test_bit(R5_OVERWRITE, &dev->flags))
4651 				s->non_overwrite++;
4652 		}
4653 		if (dev->written)
4654 			s->written++;
4655 		/* Prefer to use the replacement for reads, but only
4656 		 * if it is recovered enough and has no bad blocks.
4657 		 */
4658 		rdev = conf->disks[i].replacement;
4659 		if (rdev && !test_bit(Faulty, &rdev->flags) &&
4660 		    rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) &&
4661 		    !rdev_has_badblock(rdev, sh->sector,
4662 				       RAID5_STRIPE_SECTORS(conf)))
4663 			set_bit(R5_ReadRepl, &dev->flags);
4664 		else {
4665 			if (rdev && !test_bit(Faulty, &rdev->flags))
4666 				set_bit(R5_NeedReplace, &dev->flags);
4667 			else
4668 				clear_bit(R5_NeedReplace, &dev->flags);
4669 			rdev = conf->disks[i].rdev;
4670 			clear_bit(R5_ReadRepl, &dev->flags);
4671 		}
4672 		if (rdev && test_bit(Faulty, &rdev->flags))
4673 			rdev = NULL;
4674 		if (rdev) {
4675 			is_bad = rdev_has_badblock(rdev, sh->sector,
4676 						   RAID5_STRIPE_SECTORS(conf));
4677 			if (s->blocked_rdev == NULL) {
4678 				if (is_bad < 0)
4679 					set_bit(BlockedBadBlocks, &rdev->flags);
4680 				if (rdev_blocked(rdev)) {
4681 					s->blocked_rdev = rdev;
4682 					atomic_inc(&rdev->nr_pending);
4683 				}
4684 			}
4685 		}
4686 		clear_bit(R5_Insync, &dev->flags);
4687 		if (!rdev)
4688 			/* Not in-sync */;
4689 		else if (is_bad) {
4690 			/* also not in-sync */
4691 			if (!test_bit(WriteErrorSeen, &rdev->flags) &&
4692 			    test_bit(R5_UPTODATE, &dev->flags)) {
4693 				/* treat as in-sync, but with a read error
4694 				 * which we can now try to correct
4695 				 */
4696 				set_bit(R5_Insync, &dev->flags);
4697 				set_bit(R5_ReadError, &dev->flags);
4698 			}
4699 		} else if (test_bit(In_sync, &rdev->flags))
4700 			set_bit(R5_Insync, &dev->flags);
4701 		else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <=
4702 			 rdev->recovery_offset) {
4703 			/*
4704 			 * in sync if:
4705 			 *  - normal IO, or
4706 			 *  - resync IO that is not lazy recovery
4707 			 *
4708 			 * For lazy recovery, we have to mark the rdev without
4709 			 * In_sync as failed, to build initial xor data.
4710 			 */
4711 			if (!test_bit(STRIPE_SYNCING, &sh->state) ||
4712 			    !test_bit(MD_RECOVERY_LAZY_RECOVER,
4713 				      &conf->mddev->recovery))
4714 				set_bit(R5_Insync, &dev->flags);
4715 		} else if (test_bit(R5_UPTODATE, &dev->flags) &&
4716 			 test_bit(R5_Expanded, &dev->flags))
4717 			/* If we've reshaped into here, we assume it is Insync.
4718 			 * We will shortly update recovery_offset to make
4719 			 * it official.
4720 			 */
4721 			set_bit(R5_Insync, &dev->flags);
4722 
4723 		if (test_bit(R5_WriteError, &dev->flags)) {
4724 			/* This flag does not apply to '.replacement'
4725 			 * only to .rdev, so make sure to check that. */
4726 			struct md_rdev *rdev2 = conf->disks[i].rdev;
4727 
4728 			if (rdev2 == rdev)
4729 				clear_bit(R5_Insync, &dev->flags);
4730 			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4731 				s->handle_bad_blocks = 1;
4732 				atomic_inc(&rdev2->nr_pending);
4733 			} else
4734 				clear_bit(R5_WriteError, &dev->flags);
4735 		}
4736 		if (test_bit(R5_MadeGood, &dev->flags)) {
4737 			/* This flag does not apply to '.replacement'
4738 			 * only to .rdev, so make sure to check that. */
4739 			struct md_rdev *rdev2 = conf->disks[i].rdev;
4740 
4741 			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4742 				s->handle_bad_blocks = 1;
4743 				atomic_inc(&rdev2->nr_pending);
4744 			} else
4745 				clear_bit(R5_MadeGood, &dev->flags);
4746 		}
4747 		if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
4748 			struct md_rdev *rdev2 = conf->disks[i].replacement;
4749 
4750 			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4751 				s->handle_bad_blocks = 1;
4752 				atomic_inc(&rdev2->nr_pending);
4753 			} else
4754 				clear_bit(R5_MadeGoodRepl, &dev->flags);
4755 		}
4756 		if (!test_bit(R5_Insync, &dev->flags)) {
4757 			/* The ReadError flag will just be confusing now */
4758 			clear_bit(R5_ReadError, &dev->flags);
4759 			clear_bit(R5_ReWrite, &dev->flags);
4760 		}
4761 		if (test_bit(R5_ReadError, &dev->flags))
4762 			clear_bit(R5_Insync, &dev->flags);
4763 		if (!test_bit(R5_Insync, &dev->flags)) {
4764 			if (s->failed < 2)
4765 				s->failed_num[s->failed] = i;
4766 			s->failed++;
4767 			if (rdev && !test_bit(Faulty, &rdev->flags))
4768 				do_recovery = 1;
4769 			else if (!rdev) {
4770 				rdev = conf->disks[i].replacement;
4771 				if (rdev && !test_bit(Faulty, &rdev->flags))
4772 					do_recovery = 1;
4773 			}
4774 		}
4775 
4776 		if (test_bit(R5_InJournal, &dev->flags))
4777 			s->injournal++;
4778 		if (test_bit(R5_InJournal, &dev->flags) && dev->written)
4779 			s->just_cached++;
4780 	}
4781 	if (test_bit(STRIPE_SYNCING, &sh->state)) {
4782 		/* If there is a failed device being replaced,
4783 		 *     we must be recovering.
4784 		 * else if we are after resync_offset, we must be syncing
4785 		 * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
4786 		 * else we can only be replacing
4787 		 * sync and recovery both need to read all devices, and so
4788 		 * use the same flag.
4789 		 */
4790 		if (do_recovery ||
4791 		    sh->sector >= conf->mddev->resync_offset ||
4792 		    test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
4793 			s->syncing = 1;
4794 		else
4795 			s->replacing = 1;
4796 	}
4797 }
4798 
4799 /*
4800  * Return '1' if this is a member of a batch, or '0' if it is a lone stripe or
4801  * a head which can now be handled.
4802  */
4803 static int clear_batch_ready(struct stripe_head *sh)
4804 {
4805 	struct stripe_head *tmp;
4806 	if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
4807 		return (sh->batch_head && sh->batch_head != sh);
4808 	spin_lock(&sh->stripe_lock);
4809 	if (!sh->batch_head) {
4810 		spin_unlock(&sh->stripe_lock);
4811 		return 0;
4812 	}
4813 
4814 	/*
4815 	 * this stripe could be added to a batch list before we check
4816 	 * BATCH_READY, so skip it
4817 	 */
4818 	if (sh->batch_head != sh) {
4819 		spin_unlock(&sh->stripe_lock);
4820 		return 1;
4821 	}
4822 	spin_lock(&sh->batch_lock);
4823 	list_for_each_entry(tmp, &sh->batch_list, batch_list)
4824 		clear_bit(STRIPE_BATCH_READY, &tmp->state);
4825 	spin_unlock(&sh->batch_lock);
4826 	spin_unlock(&sh->stripe_lock);
4827 
4828 	/*
4829 	 * BATCH_READY is cleared, no new stripes can be added.
4830 	 * batch_list can be accessed without a lock
4831 	 */
4832 	return 0;
4833 }
4834 
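/* break_stripe_batch_list - dissolve a write batch headed by head_sh.
 * Each member is detached from the batch, inherits the relevant state and
 * flags from the head, has any overlap waiters woken, and is queued for
 * handling if its state matches handle_flags (or unconditionally when
 * handle_flags is 0).
 */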
4835 static void break_stripe_batch_list(struct stripe_head *head_sh,
4836 				    unsigned long handle_flags)
4837 {
4838 	struct stripe_head *sh, *next;
4839 	int i;
4840 
4841 	list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
4842 
4843 		list_del_init(&sh->batch_list);
4844 
4845 		WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
4846 					  (1 << STRIPE_SYNCING) |
4847 					  (1 << STRIPE_REPLACED) |
4848 					  (1 << STRIPE_DELAYED) |
4849 					  (1 << STRIPE_BIT_DELAY) |
4850 					  (1 << STRIPE_FULL_WRITE) |
4851 					  (1 << STRIPE_BIOFILL_RUN) |
4852 					  (1 << STRIPE_COMPUTE_RUN)  |
4853 					  (1 << STRIPE_DISCARD) |
4854 					  (1 << STRIPE_BATCH_READY) |
4855 					  (1 << STRIPE_BATCH_ERR)),
4856 			"stripe state: %lx\n", sh->state);
4857 		WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
4858 					      (1 << STRIPE_REPLACED)),
4859 			"head stripe state: %lx\n", head_sh->state);
4860 
4861 		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
4862 					    (1 << STRIPE_PREREAD_ACTIVE) |
4863 					    (1 << STRIPE_ON_UNPLUG_LIST)),
4864 			      head_sh->state & (1 << STRIPE_INSYNC));
4865 
4866 		sh->check_state = head_sh->check_state;
4867 		sh->reconstruct_state = head_sh->reconstruct_state;
4868 		spin_lock_irq(&sh->stripe_lock);
4869 		sh->batch_head = NULL;
4870 		spin_unlock_irq(&sh->stripe_lock);
4871 		for (i = 0; i < sh->disks; i++) {
4872 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
4873 				wake_up_bit(&sh->dev[i].flags, R5_Overlap);
4874 			sh->dev[i].flags = head_sh->dev[i].flags &
4875 				(~((1 << R5_WriteError) | (1 << R5_Overlap)));
4876 		}
4877 		if (handle_flags == 0 ||
4878 		    sh->state & handle_flags)
4879 			set_bit(STRIPE_HANDLE, &sh->state);
4880 		raid5_release_stripe(sh);
4881 	}
4882 	spin_lock_irq(&head_sh->stripe_lock);
4883 	head_sh->batch_head = NULL;
4884 	spin_unlock_irq(&head_sh->stripe_lock);
4885 	for (i = 0; i < head_sh->disks; i++)
4886 		if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
4887 			wake_up_bit(&head_sh->dev[i].flags, R5_Overlap);
4888 	if (head_sh->state & handle_flags)
4889 		set_bit(STRIPE_HANDLE, &head_sh->state);
4890 }
4891 
4892 /*
4893  * handle_stripe - do things to a stripe.
4894  *
4895  * We lock the stripe by setting STRIPE_ACTIVE and then examine the
4896  * state of various bits to see what needs to be done.
4897  * Possible results:
4898  *    return some read requests which now have data
4899  *    return some write requests which are safely on storage
4900  *    schedule a read on some buffers
4901  *    schedule a write of some buffers
4902  *    return confirmation of parity correctness
4903  */
4904 static void handle_stripe(struct stripe_head *sh)
4905 {
4906 	struct stripe_head_state s;
4907 	struct r5conf *conf = sh->raid_conf;
4908 	int i;
4909 	int prexor;
4910 	int disks = sh->disks;
4911 	struct r5dev *pdev, *qdev;
4912 
4913 	clear_bit(STRIPE_HANDLE, &sh->state);
4914 
4915 	/*
4916 	 * handle_stripe should not continue to handle a batched stripe; only
4917 	 * the head of a batch list or a lone stripe can continue. Otherwise
4918 	 * break_stripe_batch_list could warn that STRIPE_ACTIVE is set for
4919 	 * the batched stripe.
4920 	 */
4921 	if (clear_batch_ready(sh))
4922 		return;
4923 
4924 	if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
4925 		/* already being handled, ensure it gets handled
4926 		 * again when current action finishes */
4927 		set_bit(STRIPE_HANDLE, &sh->state);
4928 		return;
4929 	}
4930 
4931 	if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
4932 		break_stripe_batch_list(sh, 0);
4933 
4934 	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
4935 		spin_lock(&sh->stripe_lock);
4936 		/*
4937 		 * Cannot process 'sync' concurrently with 'discard'.
4938 		 * Flush data in r5cache before 'sync'.
4939 		 */
4940 		if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
4941 		    !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) &&
4942 		    !test_bit(STRIPE_DISCARD, &sh->state) &&
4943 		    test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
4944 			set_bit(STRIPE_SYNCING, &sh->state);
4945 			clear_bit(STRIPE_INSYNC, &sh->state);
4946 			clear_bit(STRIPE_REPLACED, &sh->state);
4947 		}
4948 		spin_unlock(&sh->stripe_lock);
4949 	}
4950 	clear_bit(STRIPE_DELAYED, &sh->state);
4951 
4952 	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
4953 		"pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
4954 	       (unsigned long long)sh->sector, sh->state,
4955 	       atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
4956 	       sh->check_state, sh->reconstruct_state);
4957 
4958 	analyse_stripe(sh, &s);
4959 
4960 	if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
4961 		goto finish;
4962 
4963 	if (s.handle_bad_blocks ||
4964 	    (md_is_rdwr(conf->mddev) &&
4965 	     test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags))) {
4966 		set_bit(STRIPE_HANDLE, &sh->state);
4967 		goto finish;
4968 	}
4969 
4970 	if (unlikely(s.blocked_rdev)) {
4971 		if (s.syncing || s.expanding || s.expanded ||
4972 		    s.replacing || s.to_write || s.written) {
4973 			set_bit(STRIPE_HANDLE, &sh->state);
4974 			goto finish;
4975 		}
4976 		/* There is nothing for the blocked_rdev to block */
4977 		rdev_dec_pending(s.blocked_rdev, conf->mddev);
4978 		s.blocked_rdev = NULL;
4979 	}
4980 
4981 	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
4982 		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
4983 		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
4984 	}
4985 
4986 	pr_debug("locked=%d uptodate=%d to_read=%d"
4987 	       " to_write=%d failed=%d failed_num=%d,%d\n",
4988 	       s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
4989 	       s.failed_num[0], s.failed_num[1]);
4990 	/*
4991 	 * check if the array has lost more than max_degraded devices and,
4992 	 * if so, some requests might need to be failed.
4993 	 *
4994 	 * When the journal device has failed (log_failed), we will only process
4995 	 * the stripe if there is data that needs to be written to the raid disks
4996 	 */
4997 	if (s.failed > conf->max_degraded ||
4998 	    (s.log_failed && s.injournal == 0)) {
4999 		sh->check_state = 0;
5000 		sh->reconstruct_state = 0;
5001 		break_stripe_batch_list(sh, 0);
5002 		if (s.to_read+s.to_write+s.written)
5003 			handle_failed_stripe(conf, sh, &s, disks);
5004 		if (s.syncing + s.replacing)
5005 			handle_failed_sync(conf, sh, &s);
5006 	}
5007 
5008 	/* Now we check to see if any write operations have recently
5009 	 * completed
5010 	 */
5011 	prexor = 0;
5012 	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
5013 		prexor = 1;
5014 	if (sh->reconstruct_state == reconstruct_state_drain_result ||
5015 	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
5016 		sh->reconstruct_state = reconstruct_state_idle;
5017 
5018 		/* All the 'written' buffers and the parity block are ready to
5019 		 * be written back to disk
5020 		 */
5021 		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
5022 		       !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
5023 		BUG_ON(sh->qd_idx >= 0 &&
5024 		       !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
5025 		       !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
5026 		for (i = disks; i--; ) {
5027 			struct r5dev *dev = &sh->dev[i];
5028 			if (test_bit(R5_LOCKED, &dev->flags) &&
5029 				(i == sh->pd_idx || i == sh->qd_idx ||
5030 				 dev->written || test_bit(R5_InJournal,
5031 							  &dev->flags))) {
5032 				pr_debug("Writing block %d\n", i);
5033 				set_bit(R5_Wantwrite, &dev->flags);
5034 				if (prexor)
5035 					continue;
5036 				if (s.failed > 1)
5037 					continue;
5038 				if (!test_bit(R5_Insync, &dev->flags) ||
5039 				    ((i == sh->pd_idx || i == sh->qd_idx)  &&
5040 				     s.failed == 0))
5041 					set_bit(STRIPE_INSYNC, &sh->state);
5042 			}
5043 		}
5044 		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5045 			s.dec_preread_active = 1;
5046 	}
5047 
5048 	/*
5049 	 * might be able to return some write requests if the parity blocks
5050 	 * are safe, or on a failed drive
5051 	 */
5052 	pdev = &sh->dev[sh->pd_idx];
5053 	s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
5054 		|| (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
5055 	qdev = &sh->dev[sh->qd_idx];
5056 	s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
5057 		|| (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
5058 		|| conf->level < 6;
5059 
5060 	if (s.written &&
5061 	    (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
5062 			     && !test_bit(R5_LOCKED, &pdev->flags)
5063 			     && (test_bit(R5_UPTODATE, &pdev->flags) ||
5064 				 test_bit(R5_Discard, &pdev->flags))))) &&
5065 	    (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
5066 			     && !test_bit(R5_LOCKED, &qdev->flags)
5067 			     && (test_bit(R5_UPTODATE, &qdev->flags) ||
5068 				 test_bit(R5_Discard, &qdev->flags))))))
5069 		handle_stripe_clean_event(conf, sh, disks);
5070 
5071 	if (s.just_cached)
5072 		r5c_handle_cached_data_endio(conf, sh, disks);
5073 	log_stripe_write_finished(sh);
5074 
5075 	/* Now we might consider reading some blocks, either to check/generate
5076 	 * parity, to satisfy requests,
5077 	 * or to load a block that is being partially written.
5078 	 */
5079 	if (s.to_read || s.non_overwrite
5080 	    || (s.to_write && s.failed)
5081 	    || (s.syncing && (s.uptodate + s.compute < disks))
5082 	    || s.replacing
5083 	    || s.expanding)
5084 		handle_stripe_fill(sh, &s, disks);
5085 
5086 	/*
5087 	 * When the stripe finishes a full journal write cycle (write to journal
5088 	 * and raid disks), this is the clean-up procedure so it is ready for
5089 	 * the next operation.
5090 	 */
5091 	r5c_finish_stripe_write_out(conf, sh, &s);
5092 
5093 	/*
5094 	 * Now consider new write requests, cache write-back, and whatever
5095 	 * else, if anything, should be read.  We do not handle new writes when:
5096 	 * 1/ A 'write' operation (copy+xor) is already in flight.
5097 	 * 2/ A 'check' operation is in flight, as it may clobber the parity
5098 	 *    block.
5099 	 * 3/ A r5c cache log write is in flight.
5100 	 */
5101 
5102 	if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) {
5103 		if (!r5c_is_writeback(conf->log)) {
5104 			if (s.to_write)
5105 				handle_stripe_dirtying(conf, sh, &s, disks);
5106 		} else { /* write back cache */
5107 			int ret = 0;
5108 
5109 			/* First, try handle writes in caching phase */
5110 			if (s.to_write)
5111 				ret = r5c_try_caching_write(conf, sh, &s,
5112 							    disks);
5113 			/*
5114 			 * If caching phase failed: ret == -EAGAIN
5115 			 *    OR
5116 			 * stripe under reclaim: !caching && injournal
5117 			 *
5118 			 * fall back to handle_stripe_dirtying()
5119 			 */
5120 			if (ret == -EAGAIN ||
5121 			    /* stripe under reclaim: !caching && injournal */
5122 			    (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
5123 			     s.injournal > 0)) {
5124 				ret = handle_stripe_dirtying(conf, sh, &s,
5125 							     disks);
5126 				if (ret == -EAGAIN)
5127 					goto finish;
5128 			}
5129 		}
5130 	}
5131 
5132 	/* Maybe we need to check and possibly fix the parity for this stripe.
5133 	 * Any reads will already have been scheduled, so we just see if enough
5134 	 * data is available.  The parity check is held off while parity
5135 	 * dependent operations are in flight.
5136 	 */
5137 	if (sh->check_state ||
5138 	    (s.syncing && s.locked == 0 &&
5139 	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
5140 	     !test_bit(STRIPE_INSYNC, &sh->state))) {
5141 		if (conf->level == 6)
5142 			handle_parity_checks6(conf, sh, &s, disks);
5143 		else
5144 			handle_parity_checks5(conf, sh, &s, disks);
5145 	}
5146 
5147 	if ((s.replacing || s.syncing) && s.locked == 0
5148 	    && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
5149 	    && !test_bit(STRIPE_REPLACED, &sh->state)) {
5150 		/* Write out to replacement devices where possible */
5151 		for (i = 0; i < conf->raid_disks; i++)
5152 			if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
5153 				WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
5154 				set_bit(R5_WantReplace, &sh->dev[i].flags);
5155 				set_bit(R5_LOCKED, &sh->dev[i].flags);
5156 				s.locked++;
5157 			}
5158 		if (s.replacing)
5159 			set_bit(STRIPE_INSYNC, &sh->state);
5160 		set_bit(STRIPE_REPLACED, &sh->state);
5161 	}
5162 	if ((s.syncing || s.replacing) && s.locked == 0 &&
5163 	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
5164 	    test_bit(STRIPE_INSYNC, &sh->state)) {
5165 		md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf));
5166 		clear_bit(STRIPE_SYNCING, &sh->state);
5167 		if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
5168 			wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap);
5169 	}
5170 
5171 	/* If the failed drives only have a ReadError, then we might need
5172 	 * to progress the repair/check process.
5173 	 */
5174 	if (s.failed <= conf->max_degraded && !conf->mddev->ro)
5175 		for (i = 0; i < s.failed; i++) {
5176 			struct r5dev *dev = &sh->dev[s.failed_num[i]];
5177 			if (test_bit(R5_ReadError, &dev->flags)
5178 			    && !test_bit(R5_LOCKED, &dev->flags)
5179 			    && test_bit(R5_UPTODATE, &dev->flags)
5180 				) {
5181 				if (!test_bit(R5_ReWrite, &dev->flags)) {
5182 					set_bit(R5_Wantwrite, &dev->flags);
5183 					set_bit(R5_ReWrite, &dev->flags);
5184 				} else
5185 					/* let's read it back */
5186 					set_bit(R5_Wantread, &dev->flags);
5187 				set_bit(R5_LOCKED, &dev->flags);
5188 				s.locked++;
5189 			}
5190 		}
5191 
5192 	/* Finish reconstruct operations initiated by the expansion process */
5193 	if (sh->reconstruct_state == reconstruct_state_result) {
5194 		struct stripe_head *sh_src
5195 			= raid5_get_active_stripe(conf, NULL, sh->sector,
5196 					R5_GAS_PREVIOUS | R5_GAS_NOBLOCK |
5197 					R5_GAS_NOQUIESCE);
5198 		if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
5199 			/* sh cannot be written until sh_src has been read,
5200 			 * so arrange for sh to be delayed a little.
5201 			 */
5202 			set_bit(STRIPE_DELAYED, &sh->state);
5203 			set_bit(STRIPE_HANDLE, &sh->state);
5204 			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
5205 					      &sh_src->state))
5206 				atomic_inc(&conf->preread_active_stripes);
5207 			raid5_release_stripe(sh_src);
5208 			goto finish;
5209 		}
5210 		if (sh_src)
5211 			raid5_release_stripe(sh_src);
5212 
5213 		sh->reconstruct_state = reconstruct_state_idle;
5214 		clear_bit(STRIPE_EXPANDING, &sh->state);
5215 		for (i = conf->raid_disks; i--; ) {
5216 			set_bit(R5_Wantwrite, &sh->dev[i].flags);
5217 			set_bit(R5_LOCKED, &sh->dev[i].flags);
5218 			s.locked++;
5219 		}
5220 	}
5221 
5222 	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
5223 	    !sh->reconstruct_state) {
5224 		/* Need to write out all blocks after computing parity */
5225 		sh->disks = conf->raid_disks;
5226 		stripe_set_idx(sh->sector, conf, 0, sh);
5227 		schedule_reconstruction(sh, &s, 1, 1);
5228 	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
5229 		clear_bit(STRIPE_EXPAND_READY, &sh->state);
5230 		atomic_dec(&conf->reshape_stripes);
5231 		wake_up(&conf->wait_for_reshape);
5232 		md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf));
5233 	}
5234 
5235 	if (s.expanding && s.locked == 0 &&
5236 	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
5237 		handle_stripe_expansion(conf, sh);
5238 
5239 finish:
5240 	/* wait for this device to become unblocked */
5241 	if (unlikely(s.blocked_rdev)) {
5242 		if (conf->mddev->external)
5243 			md_wait_for_blocked_rdev(s.blocked_rdev,
5244 						 conf->mddev);
5245 		else
5246 			/* Internal metadata will immediately
5247 			 * be written by raid5d, so we don't
5248 			 * need to wait here.
5249 			 */
5250 			rdev_dec_pending(s.blocked_rdev,
5251 					 conf->mddev);
5252 	}
5253 
5254 	if (s.handle_bad_blocks)
5255 		for (i = disks; i--; ) {
5256 			struct md_rdev *rdev;
5257 			struct r5dev *dev = &sh->dev[i];
5258 			if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
5259 				/* We own a safe reference to the rdev */
5260 				rdev = conf->disks[i].rdev;
5261 				rdev_set_badblocks(rdev, sh->sector,
5262 						   RAID5_STRIPE_SECTORS(conf), 0);
5263 				rdev_dec_pending(rdev, conf->mddev);
5264 			}
5265 			if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
5266 				rdev = conf->disks[i].rdev;
5267 				rdev_clear_badblocks(rdev, sh->sector,
5268 						     RAID5_STRIPE_SECTORS(conf), 0);
5269 				rdev_dec_pending(rdev, conf->mddev);
5270 			}
5271 			if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
5272 				rdev = conf->disks[i].replacement;
5273 				if (!rdev)
5274 					/* rdev has been moved down */
5275 					rdev = conf->disks[i].rdev;
5276 				rdev_clear_badblocks(rdev, sh->sector,
5277 						     RAID5_STRIPE_SECTORS(conf), 0);
5278 				rdev_dec_pending(rdev, conf->mddev);
5279 			}
5280 		}
5281 
5282 	if (s.ops_request)
5283 		raid_run_ops(sh, s.ops_request);
5284 
5285 	ops_run_io(sh, &s);
5286 
5287 	if (s.dec_preread_active) {
5288 		/* We delay this until after ops_run_io so that if make_request
5289 		 * is waiting on a flush, it won't continue until the writes
5290 		 * have actually been submitted.
5291 		 */
5292 		atomic_dec(&conf->preread_active_stripes);
5293 		if (atomic_read(&conf->preread_active_stripes) <
5294 		    IO_THRESHOLD)
5295 			md_wakeup_thread(conf->mddev->thread);
5296 	}
5297 
5298 	clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
5299 }
5300 
5301 static void raid5_activate_delayed(struct r5conf *conf)
5302 	__must_hold(&conf->device_lock)
5303 {
5304 	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
5305 		while (!list_empty(&conf->delayed_list)) {
5306 			struct list_head *l = conf->delayed_list.next;
5307 			struct stripe_head *sh;
5308 			sh = list_entry(l, struct stripe_head, lru);
5309 			list_del_init(l);
5310 			clear_bit(STRIPE_DELAYED, &sh->state);
5311 			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5312 				atomic_inc(&conf->preread_active_stripes);
5313 			list_add_tail(&sh->lru, &conf->hold_list);
5314 			raid5_wakeup_stripe_thread(sh);
5315 		}
5316 	}
5317 }
5318 
5319 static void activate_bit_delay(struct r5conf *conf,
5320 		struct list_head *temp_inactive_list)
5321 	__must_hold(&conf->device_lock)
5322 {
5323 	struct list_head head;
5324 	list_add(&head, &conf->bitmap_list);
5325 	list_del_init(&conf->bitmap_list);
5326 	while (!list_empty(&head)) {
5327 		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
5328 		int hash;
5329 		list_del_init(&sh->lru);
5330 		atomic_inc(&sh->count);
5331 		hash = sh->hash_lock_index;
5332 		__release_stripe(conf, sh, &temp_inactive_list[hash]);
5333 	}
5334 }
5335 
5336 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
5337 {
5338 	struct r5conf *conf = mddev->private;
5339 	sector_t sector = bio->bi_iter.bi_sector;
5340 	unsigned int chunk_sectors;
5341 	unsigned int bio_sectors = bio_sectors(bio);
5342 
5343 	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
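	/*
	 * Illustrative example (hypothetical numbers): with chunk_sectors = 128,
	 * an 8-sector bio at sector 250 has in-chunk offset 250 & 127 = 122,
	 * and 122 + 8 = 130 > 128, so the check below fails (the bio crosses a
	 * chunk boundary); the same bio at sector 240 gives 112 + 8 = 120 <= 128
	 * and fits within one chunk.
	 */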
5344 	return  chunk_sectors >=
5345 		((sector & (chunk_sectors - 1)) + bio_sectors);
5346 }
5347 
5348 /*
5349  *  Add a bio to the retry LIFO (in O(1) ... we are in interrupt context);
5350  *  it is later sampled by raid5d.
5351  */
5352 static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
5353 {
5354 	unsigned long flags;
5355 
5356 	spin_lock_irqsave(&conf->device_lock, flags);
5357 
5358 	bi->bi_next = conf->retry_read_aligned_list;
5359 	conf->retry_read_aligned_list = bi;
5360 
5361 	spin_unlock_irqrestore(&conf->device_lock, flags);
5362 	md_wakeup_thread(conf->mddev->thread);
5363 }
5364 
5365 static struct bio *remove_bio_from_retry(struct r5conf *conf,
5366 					 unsigned int *offset)
5367 {
5368 	struct bio *bi;
5369 
5370 	bi = conf->retry_read_aligned;
5371 	if (bi) {
5372 		*offset = conf->retry_read_offset;
5373 		conf->retry_read_aligned = NULL;
5374 		return bi;
5375 	}
5376 	bi = conf->retry_read_aligned_list;
5377 	if (bi) {
5378 		conf->retry_read_aligned_list = bi->bi_next;
5379 		bi->bi_next = NULL;
5380 		*offset = 0;
5381 	}
5382 
5383 	return bi;
5384 }
5385 
5386 /*
5387  *  The "raid5_align_endio" should check if the read succeeded and if it
5388  *  did, call bio_endio on the original bio (having bio_put the new bio
5389  *  first).
5390  *  If the read failed, queue the original bio for retry via raid5d.
5391  */
5392 static void raid5_align_endio(struct bio *bi)
5393 {
5394 	struct bio *raid_bi = bi->bi_private;
5395 	struct md_rdev *rdev = (void *)raid_bi->bi_next;
5396 	struct mddev *mddev = rdev->mddev;
5397 	struct r5conf *conf = mddev->private;
5398 	blk_status_t error = bi->bi_status;
5399 
5400 	bio_put(bi);
5401 	raid_bi->bi_next = NULL;
5402 	rdev_dec_pending(rdev, conf->mddev);
5403 
5404 	if (!error) {
5405 		bio_endio(raid_bi);
5406 		if (atomic_dec_and_test(&conf->active_aligned_reads))
5407 			wake_up(&conf->wait_for_quiescent);
5408 		return;
5409 	}
5410 
5411 	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
5412 
5413 	add_bio_to_retry(raid_bi, conf);
5414 }
5415 
5416 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
5417 {
5418 	struct r5conf *conf = mddev->private;
5419 	struct bio *align_bio;
5420 	struct md_rdev *rdev;
5421 	sector_t sector, end_sector;
5422 	int dd_idx;
5423 	bool did_inc;
5424 
5425 	if (!in_chunk_boundary(mddev, raid_bio)) {
5426 		pr_debug("%s: non aligned\n", __func__);
5427 		return 0;
5428 	}
5429 
5430 	sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
5431 				      &dd_idx, NULL);
5432 	end_sector = sector + bio_sectors(raid_bio);
5433 
5434 	if (r5c_big_stripe_cached(conf, sector))
5435 		return 0;
5436 
5437 	rdev = conf->disks[dd_idx].replacement;
5438 	if (!rdev || test_bit(Faulty, &rdev->flags) ||
5439 	    rdev->recovery_offset < end_sector) {
5440 		rdev = conf->disks[dd_idx].rdev;
5441 		if (!rdev)
5442 			return 0;
5443 		if (test_bit(Faulty, &rdev->flags) ||
5444 		    !(test_bit(In_sync, &rdev->flags) ||
5445 		      rdev->recovery_offset >= end_sector))
5446 			return 0;
5447 	}
5448 
5449 	atomic_inc(&rdev->nr_pending);
5450 
5451 	if (rdev_has_badblock(rdev, sector, bio_sectors(raid_bio))) {
5452 		rdev_dec_pending(rdev, mddev);
5453 		return 0;
5454 	}
5455 
5456 	md_account_bio(mddev, &raid_bio);
5457 	raid_bio->bi_next = (void *)rdev;
5458 
5459 	align_bio = bio_alloc_clone(rdev->bdev, raid_bio, GFP_NOIO,
5460 				    &mddev->bio_set);
5461 	align_bio->bi_end_io = raid5_align_endio;
5462 	align_bio->bi_private = raid_bio;
5463 	align_bio->bi_iter.bi_sector = sector;
5464 
5465 	/* No reshape active, so we can trust rdev->data_offset */
5466 	align_bio->bi_iter.bi_sector += rdev->data_offset;
5467 
5468 	did_inc = false;
5469 	if (conf->quiesce == 0) {
5470 		atomic_inc(&conf->active_aligned_reads);
5471 		did_inc = true;
5472 	}
5473 	/* need a memory barrier to detect the race with raid5_quiesce() */
5474 	if (!did_inc || smp_load_acquire(&conf->quiesce) != 0) {
5475 		/* quiesce is in progress, so we need to undo io activation and wait
5476 		 * for it to finish
5477 		 */
5478 		if (did_inc && atomic_dec_and_test(&conf->active_aligned_reads))
5479 			wake_up(&conf->wait_for_quiescent);
5480 		spin_lock_irq(&conf->device_lock);
5481 		wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
5482 				    conf->device_lock);
5483 		atomic_inc(&conf->active_aligned_reads);
5484 		spin_unlock_irq(&conf->device_lock);
5485 	}
5486 
5487 	mddev_trace_remap(mddev, align_bio, raid_bio->bi_iter.bi_sector);
5488 	submit_bio_noacct(align_bio);
5489 	return 1;
5490 }
5491 
5492 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
5493 {
5494 	sector_t sector = raid_bio->bi_iter.bi_sector;
5495 	unsigned chunk_sects = mddev->chunk_sectors;
5496 	unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
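	/*
	 * Illustrative example (hypothetical numbers): with chunk_sects = 128
	 * and a read starting at sector 300, 300 & 127 = 44, so at most
	 * 128 - 44 = 84 sectors remain in this chunk; a larger bio is split at
	 * that boundary so each part stays within a single chunk.
	 */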
5497 
5498 	if (sectors < bio_sectors(raid_bio)) {
5499 		struct r5conf *conf = mddev->private;
5500 
5501 		raid_bio = bio_submit_split_bioset(raid_bio, sectors,
5502 						   &conf->bio_split);
5503 		if (!raid_bio)
5504 			return NULL;
5505 	}
5506 
5507 	if (!raid5_read_one_chunk(mddev, raid_bio))
5508 		return raid_bio;
5509 
5510 	return NULL;
5511 }
5512 
5513 /* __get_priority_stripe - get the next stripe to process
5514  *
5515  * Full stripe writes are allowed to pass preread active stripes up until
5516  * the bypass_threshold is exceeded.  In general the bypass_count
5517  * increments when the handle_list is handled before the hold_list; however,
5518  * it will not be incremented if STRIPE_IO_STARTED is sampled as set,
5519  * signifying a stripe with in-flight i/o.  The bypass_count will be reset when the
5520  * head of the hold_list has changed, i.e. the head was promoted to the
5521  * handle_list.
5522  */
5523 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
5524 	__must_hold(&conf->device_lock)
5525 {
5526 	struct stripe_head *sh, *tmp;
5527 	struct list_head *handle_list = NULL;
5528 	struct r5worker_group *wg;
5529 	bool second_try = !r5c_is_writeback(conf->log) &&
5530 		!r5l_log_disk_error(conf);
5531 	bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) ||
5532 		r5l_log_disk_error(conf);
5533 
5534 again:
5535 	wg = NULL;
5536 	sh = NULL;
5537 	if (conf->worker_cnt_per_group == 0) {
5538 		handle_list = try_loprio ? &conf->loprio_list :
5539 					&conf->handle_list;
5540 	} else if (group != ANY_GROUP) {
5541 		handle_list = try_loprio ? &conf->worker_groups[group].loprio_list :
5542 				&conf->worker_groups[group].handle_list;
5543 		wg = &conf->worker_groups[group];
5544 	} else {
5545 		int i;
5546 		for (i = 0; i < conf->group_cnt; i++) {
5547 			handle_list = try_loprio ? &conf->worker_groups[i].loprio_list :
5548 				&conf->worker_groups[i].handle_list;
5549 			wg = &conf->worker_groups[i];
5550 			if (!list_empty(handle_list))
5551 				break;
5552 		}
5553 	}
5554 
5555 	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
5556 		  __func__,
5557 		  list_empty(handle_list) ? "empty" : "busy",
5558 		  list_empty(&conf->hold_list) ? "empty" : "busy",
5559 		  atomic_read(&conf->pending_full_writes), conf->bypass_count);
5560 
5561 	if (!list_empty(handle_list)) {
5562 		sh = list_entry(handle_list->next, typeof(*sh), lru);
5563 
5564 		if (list_empty(&conf->hold_list))
5565 			conf->bypass_count = 0;
5566 		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
5567 			if (conf->hold_list.next == conf->last_hold)
5568 				conf->bypass_count++;
5569 			else {
5570 				conf->last_hold = conf->hold_list.next;
5571 				conf->bypass_count -= conf->bypass_threshold;
5572 				if (conf->bypass_count < 0)
5573 					conf->bypass_count = 0;
5574 			}
5575 		}
5576 	} else if (!list_empty(&conf->hold_list) &&
5577 		   ((conf->bypass_threshold &&
5578 		     conf->bypass_count > conf->bypass_threshold) ||
5579 		    atomic_read(&conf->pending_full_writes) == 0)) {
5580 
5581 		list_for_each_entry(tmp, &conf->hold_list,  lru) {
5582 			if (conf->worker_cnt_per_group == 0 ||
5583 			    group == ANY_GROUP ||
5584 			    !cpu_online(tmp->cpu) ||
5585 			    cpu_to_group(tmp->cpu) == group) {
5586 				sh = tmp;
5587 				break;
5588 			}
5589 		}
5590 
5591 		if (sh) {
5592 			conf->bypass_count -= conf->bypass_threshold;
5593 			if (conf->bypass_count < 0)
5594 				conf->bypass_count = 0;
5595 		}
5596 		wg = NULL;
5597 	}
5598 
5599 	if (!sh) {
5600 		if (second_try)
5601 			return NULL;
5602 		second_try = true;
5603 		try_loprio = !try_loprio;
5604 		goto again;
5605 	}
5606 
5607 	if (wg) {
5608 		wg->stripes_cnt--;
5609 		sh->group = NULL;
5610 	}
5611 	list_del_init(&sh->lru);
5612 	BUG_ON(atomic_inc_return(&sh->count) != 1);
5613 	return sh;
5614 }
5615 
5616 struct raid5_plug_cb {
5617 	struct blk_plug_cb	cb;
5618 	struct list_head	list;
5619 	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
5620 };
5621 
5622 static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
5623 {
5624 	struct raid5_plug_cb *cb = container_of(
5625 		blk_cb, struct raid5_plug_cb, cb);
5626 	struct stripe_head *sh;
5627 	struct mddev *mddev = cb->cb.data;
5628 	struct r5conf *conf = mddev->private;
5629 	int cnt = 0;
5630 	int hash;
5631 
5632 	if (cb->list.next && !list_empty(&cb->list)) {
5633 		spin_lock_irq(&conf->device_lock);
5634 		while (!list_empty(&cb->list)) {
5635 			sh = list_first_entry(&cb->list, struct stripe_head, lru);
5636 			list_del_init(&sh->lru);
5637 			/*
5638 			 * avoid a race where release_stripe_plug() sees
5639 			 * STRIPE_ON_UNPLUG_LIST clear but the stripe
5640 			 * is still in our list
5641 			 */
5642 			smp_mb__before_atomic();
5643 			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
5644 			/*
5645 			 * STRIPE_ON_RELEASE_LIST could be set here. In that
5646 			 * case, the count is always > 1 here
5647 			 */
5648 			hash = sh->hash_lock_index;
5649 			__release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
5650 			cnt++;
5651 		}
5652 		spin_unlock_irq(&conf->device_lock);
5653 	}
5654 	release_inactive_stripe_list(conf, cb->temp_inactive_list,
5655 				     NR_STRIPE_HASH_LOCKS);
5656 	if (!mddev_is_dm(mddev))
5657 		trace_block_unplug(mddev->gendisk->queue, cnt, !from_schedule);
5658 	kfree(cb);
5659 }
5660 
5661 static void release_stripe_plug(struct mddev *mddev,
5662 				struct stripe_head *sh)
5663 {
5664 	struct blk_plug_cb *blk_cb = blk_check_plugged(
5665 		raid5_unplug, mddev,
5666 		sizeof(struct raid5_plug_cb));
5667 	struct raid5_plug_cb *cb;
5668 
5669 	if (!blk_cb) {
5670 		raid5_release_stripe(sh);
5671 		return;
5672 	}
5673 
5674 	cb = container_of(blk_cb, struct raid5_plug_cb, cb);
5675 
5676 	if (cb->list.next == NULL) {
5677 		int i;
5678 		INIT_LIST_HEAD(&cb->list);
5679 		for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5680 			INIT_LIST_HEAD(cb->temp_inactive_list + i);
5681 	}
5682 
5683 	if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
5684 		list_add_tail(&sh->lru, &cb->list);
5685 	else
5686 		raid5_release_stripe(sh);
5687 }
5688 
5689 static void make_discard_request(struct mddev *mddev, struct bio *bi)
5690 {
5691 	struct r5conf *conf = mddev->private;
5692 	sector_t logical_sector, last_sector;
5693 	struct stripe_head *sh;
5694 	int stripe_sectors;
5695 
5696 	/* We need to handle this when io_uring supports discard/trim */
5697 	if (WARN_ON_ONCE(bi->bi_opf & REQ_NOWAIT))
5698 		return;
5699 
5700 	if (mddev->reshape_position != MaxSector)
5701 		/* Skip discard while reshape is happening */
5702 		return;
5703 
5704 	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
5705 	last_sector = bio_end_sector(bi);
5706 
5707 	bi->bi_next = NULL;
5708 
5709 	stripe_sectors = conf->chunk_sectors *
5710 		(conf->raid_disks - conf->max_degraded);
5711 	logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
5712 					       stripe_sectors);
5713 	sector_div(last_sector, stripe_sectors);
5714 
5715 	logical_sector *= conf->chunk_sectors;
5716 	last_sector *= conf->chunk_sectors;
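	/*
	 * Illustrative example (hypothetical geometry): with 4 raid disks,
	 * max_degraded = 1 and chunk_sectors = 8, a full stripe holds
	 * stripe_sectors = 8 * 3 = 24 data sectors.  A discard of array
	 * sectors [30, 100) rounds up to full stripe 2 (DIV_ROUND_UP(30, 24))
	 * and down to full stripe 4 (100 / 24), so only the fully covered
	 * stripes 2 and 3 are discarded; after the multiplication by
	 * chunk_sectors the loop below walks per-device sectors from 16 up
	 * to, but not including, 32.
	 */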
5717 
5718 	for (; logical_sector < last_sector;
5719 	     logical_sector += RAID5_STRIPE_SECTORS(conf)) {
5720 		DEFINE_WAIT(w);
5721 		int d;
5722 	again:
5723 		sh = raid5_get_active_stripe(conf, NULL, logical_sector, 0);
5724 		set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
5725 		if (test_bit(STRIPE_SYNCING, &sh->state)) {
5726 			raid5_release_stripe(sh);
5727 			wait_on_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap,
5728 				    TASK_UNINTERRUPTIBLE);
5729 			goto again;
5730 		}
5731 		clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
5732 		spin_lock_irq(&sh->stripe_lock);
5733 		for (d = 0; d < conf->raid_disks; d++) {
5734 			if (d == sh->pd_idx || d == sh->qd_idx)
5735 				continue;
5736 			if (sh->dev[d].towrite || sh->dev[d].toread) {
5737 				set_bit(R5_Overlap, &sh->dev[d].flags);
5738 				spin_unlock_irq(&sh->stripe_lock);
5739 				raid5_release_stripe(sh);
5740 				wait_on_bit(&sh->dev[d].flags, R5_Overlap,
5741 					    TASK_UNINTERRUPTIBLE);
5742 				goto again;
5743 			}
5744 		}
5745 		set_bit(STRIPE_DISCARD, &sh->state);
5746 		sh->overwrite_disks = 0;
5747 		for (d = 0; d < conf->raid_disks; d++) {
5748 			if (d == sh->pd_idx || d == sh->qd_idx)
5749 				continue;
5750 			sh->dev[d].towrite = bi;
5751 			set_bit(R5_OVERWRITE, &sh->dev[d].flags);
5752 			bio_inc_remaining(bi);
5753 			md_write_inc(mddev, bi);
5754 			sh->overwrite_disks++;
5755 		}
5756 		spin_unlock_irq(&sh->stripe_lock);
5757 		if (conf->mddev->bitmap) {
5758 			sh->bm_seq = conf->seq_flush + 1;
5759 			set_bit(STRIPE_BIT_DELAY, &sh->state);
5760 		}
5761 
5762 		set_bit(STRIPE_HANDLE, &sh->state);
5763 		clear_bit(STRIPE_DELAYED, &sh->state);
5764 		if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5765 			atomic_inc(&conf->preread_active_stripes);
5766 		release_stripe_plug(mddev, sh);
5767 	}
5768 
5769 	bio_endio(bi);
5770 }
5771 
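/*
 * For example (hypothetical values): with a forward reshape
 * (reshape_backwards == false) that has progressed to sector 1000, a sector
 * at 1500 is still "ahead of" the reshape and laid out in the old geometry,
 * while sector 500 has already been relocated to the new one.
 */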
5772 static bool ahead_of_reshape(struct mddev *mddev, sector_t sector,
5773 			     sector_t reshape_sector)
5774 {
5775 	return mddev->reshape_backwards ? sector < reshape_sector :
5776 					  sector >= reshape_sector;
5777 }
5778 
5779 static bool range_ahead_of_reshape(struct mddev *mddev, sector_t min,
5780 				   sector_t max, sector_t reshape_sector)
5781 {
5782 	return mddev->reshape_backwards ? max < reshape_sector :
5783 					  min >= reshape_sector;
5784 }
5785 
5786 static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf,
5787 				    struct stripe_head *sh)
5788 {
5789 	sector_t max_sector = 0, min_sector = MaxSector;
5790 	bool ret = false;
5791 	int dd_idx;
5792 
5793 	for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
5794 		if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
5795 			continue;
5796 
5797 		min_sector = min(min_sector, sh->dev[dd_idx].sector);
5798 		max_sector = max(max_sector, sh->dev[dd_idx].sector);
5799 	}
5800 
5801 	spin_lock_irq(&conf->device_lock);
5802 
5803 	if (!range_ahead_of_reshape(mddev, min_sector, max_sector,
5804 				     conf->reshape_progress))
5805 		/* mismatch, need to try again */
5806 		ret = true;
5807 
5808 	spin_unlock_irq(&conf->device_lock);
5809 
5810 	return ret;
5811 }
5812 
5813 static int add_all_stripe_bios(struct r5conf *conf,
5814 		struct stripe_request_ctx *ctx, struct stripe_head *sh,
5815 		struct bio *bi, int forwrite, int previous)
5816 {
5817 	int dd_idx;
5818 
5819 	spin_lock_irq(&sh->stripe_lock);
5820 
5821 	for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
5822 		struct r5dev *dev = &sh->dev[dd_idx];
5823 
5824 		if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
5825 			continue;
5826 
5827 		if (dev->sector < ctx->first_sector ||
5828 		    dev->sector >= ctx->last_sector)
5829 			continue;
5830 
5831 		if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
5832 			set_bit(R5_Overlap, &dev->flags);
5833 			spin_unlock_irq(&sh->stripe_lock);
5834 			raid5_release_stripe(sh);
5835 			/* release batch_last before wait to avoid risk of deadlock */
5836 			if (ctx->batch_last) {
5837 				raid5_release_stripe(ctx->batch_last);
5838 				ctx->batch_last = NULL;
5839 			}
5840 			md_wakeup_thread(conf->mddev->thread);
5841 			wait_on_bit(&dev->flags, R5_Overlap, TASK_UNINTERRUPTIBLE);
5842 			return 0;
5843 		}
5844 	}
5845 
5846 	for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
5847 		struct r5dev *dev = &sh->dev[dd_idx];
5848 
5849 		if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
5850 			continue;
5851 
5852 		if (dev->sector < ctx->first_sector ||
5853 		    dev->sector >= ctx->last_sector)
5854 			continue;
5855 
5856 		__add_stripe_bio(sh, bi, dd_idx, forwrite, previous);
5857 		clear_bit((dev->sector - ctx->first_sector) >>
5858 			  RAID5_STRIPE_SHIFT(conf), ctx->sectors_to_do);
5859 	}
5860 
5861 	spin_unlock_irq(&sh->stripe_lock);
5862 	return 1;
5863 }
5864 
5865 enum reshape_loc {
5866 	LOC_NO_RESHAPE,
5867 	LOC_AHEAD_OF_RESHAPE,
5868 	LOC_INSIDE_RESHAPE,
5869 	LOC_BEHIND_RESHAPE,
5870 };
5871 
5872 static enum reshape_loc get_reshape_loc(struct mddev *mddev,
5873 		struct r5conf *conf, sector_t logical_sector)
5874 {
5875 	sector_t reshape_progress, reshape_safe;
5876 
5877 	if (likely(conf->reshape_progress == MaxSector))
5878 		return LOC_NO_RESHAPE;
5879 	/*
5880 	 * Spinlock is needed as reshape_progress may be
5881 	 * 64bit on a 32bit platform, and so it might be
5882 	 * possible to see a half-updated value.
5883 	 * Of course reshape_progress could change after
5884 	 * the lock is dropped, so once we get a reference
5885 	 * to the stripe that we think is the right one,
5886 	 * we will have to check again.
5887 	 */
5888 	spin_lock_irq(&conf->device_lock);
5889 	reshape_progress = conf->reshape_progress;
5890 	reshape_safe = conf->reshape_safe;
5891 	spin_unlock_irq(&conf->device_lock);
5892 	if (reshape_progress == MaxSector)
5893 		return LOC_NO_RESHAPE;
5894 	if (ahead_of_reshape(mddev, logical_sector, reshape_progress))
5895 		return LOC_AHEAD_OF_RESHAPE;
5896 	if (ahead_of_reshape(mddev, logical_sector, reshape_safe))
5897 		return LOC_INSIDE_RESHAPE;
5898 	return LOC_BEHIND_RESHAPE;
5899 }
5900 
5901 static void raid5_bitmap_sector(struct mddev *mddev, sector_t *offset,
5902 				unsigned long *sectors)
5903 {
5904 	struct r5conf *conf = mddev->private;
5905 	sector_t start = *offset;
5906 	sector_t end = start + *sectors;
5907 	sector_t prev_start = start;
5908 	sector_t prev_end = end;
5909 	int sectors_per_chunk;
5910 	enum reshape_loc loc;
5911 	int dd_idx;
5912 
5913 	sectors_per_chunk = conf->chunk_sectors *
5914 		(conf->raid_disks - conf->max_degraded);
5915 	start = round_down(start, sectors_per_chunk);
5916 	end = round_up(end, sectors_per_chunk);
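	/*
	 * E.g. (hypothetical geometry): with chunk_sectors = 8 and 3 data
	 * disks, sectors_per_chunk = 24; a range [30, 40) is widened to
	 * [24, 48) so that whole full stripes are covered before the
	 * per-device conversion below.
	 */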
5917 
5918 	start = raid5_compute_sector(conf, start, 0, &dd_idx, NULL);
5919 	end = raid5_compute_sector(conf, end, 0, &dd_idx, NULL);
5920 
5921 	/*
5922 	 * For LOC_INSIDE_RESHAPE, this IO will wait for reshape to make
5923 	 * progress, hence it's the same as LOC_BEHIND_RESHAPE.
5924 	 */
5925 	loc = get_reshape_loc(mddev, conf, prev_start);
5926 	if (likely(loc != LOC_AHEAD_OF_RESHAPE)) {
5927 		*offset = start;
5928 		*sectors = end - start;
5929 		return;
5930 	}
5931 
5932 	sectors_per_chunk = conf->prev_chunk_sectors *
5933 		(conf->previous_raid_disks - conf->max_degraded);
5934 	prev_start = round_down(prev_start, sectors_per_chunk);
5935 	prev_end = round_down(prev_end, sectors_per_chunk);
5936 
5937 	prev_start = raid5_compute_sector(conf, prev_start, 1, &dd_idx, NULL);
5938 	prev_end = raid5_compute_sector(conf, prev_end, 1, &dd_idx, NULL);
5939 
5940 	/*
5941 	 * For LOC_AHEAD_OF_RESHAPE, reshape can make progress before this IO
5942 	 * is handled in make_stripe_request(); we can't know that here, hence
5943 	 * we set bits for both the old and the new layout.
5944 	 */
5945 	*offset = min(start, prev_start);
5946 	*sectors = max(end, prev_end) - *offset;
5947 }
5948 
5949 static enum stripe_result make_stripe_request(struct mddev *mddev,
5950 		struct r5conf *conf, struct stripe_request_ctx *ctx,
5951 		sector_t logical_sector, struct bio *bi)
5952 {
5953 	const int rw = bio_data_dir(bi);
5954 	enum stripe_result ret;
5955 	struct stripe_head *sh;
5956 	enum reshape_loc loc;
5957 	sector_t new_sector;
5958 	int previous = 0, flags = 0;
5959 	int seq, dd_idx;
5960 
5961 	seq = read_seqcount_begin(&conf->gen_lock);
5962 	loc = get_reshape_loc(mddev, conf, logical_sector);
5963 	if (loc == LOC_INSIDE_RESHAPE) {
5964 		ret = STRIPE_SCHEDULE_AND_RETRY;
5965 		goto out;
5966 	}
5967 	if (loc == LOC_AHEAD_OF_RESHAPE)
5968 		previous = 1;
5969 
5970 	new_sector = raid5_compute_sector(conf, logical_sector, previous,
5971 					  &dd_idx, NULL);
5972 	pr_debug("raid456: %s, sector %llu logical %llu\n", __func__,
5973 		 new_sector, logical_sector);
5974 
5975 	if (previous)
5976 		flags |= R5_GAS_PREVIOUS;
5977 	if (bi->bi_opf & REQ_RAHEAD)
5978 		flags |= R5_GAS_NOBLOCK;
5979 	sh = raid5_get_active_stripe(conf, ctx, new_sector, flags);
5980 	if (unlikely(!sh)) {
5981 		/* cannot get a stripe, just give up */
5982 		bi->bi_status = BLK_STS_IOERR;
5983 		return STRIPE_FAIL;
5984 	}
5985 
5986 	if (unlikely(previous) &&
5987 	    stripe_ahead_of_reshape(mddev, conf, sh)) {
5988 		/*
5989 		 * Expansion moved on while waiting for a stripe.
5990 		 * Expansion could still move past after this
5991 		 * test, but as we are holding a reference to
5992 		 * 'sh', we know that if that happens,
5993 		 *  STRIPE_EXPANDING will get set and the expansion
5994 		 * won't proceed until we finish with the stripe.
5995 		 */
5996 		ret = STRIPE_SCHEDULE_AND_RETRY;
5997 		goto out_release;
5998 	}
5999 
6000 	if (read_seqcount_retry(&conf->gen_lock, seq)) {
6001 		/* Might have got the wrong stripe_head by accident */
6002 		ret = STRIPE_RETRY;
6003 		goto out_release;
6004 	}
6005 
6006 	if (test_bit(STRIPE_EXPANDING, &sh->state)) {
6007 		md_wakeup_thread(mddev->thread);
6008 		ret = STRIPE_SCHEDULE_AND_RETRY;
6009 		goto out_release;
6010 	}
6011 
6012 	if (!add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) {
6013 		ret = STRIPE_RETRY;
6014 		goto out;
6015 	}
6016 
6017 	if (stripe_can_batch(sh)) {
6018 		stripe_add_to_batch_list(conf, sh, ctx->batch_last);
6019 		if (ctx->batch_last)
6020 			raid5_release_stripe(ctx->batch_last);
6021 		atomic_inc(&sh->count);
6022 		ctx->batch_last = sh;
6023 	}
6024 
6025 	if (ctx->do_flush) {
6026 		set_bit(STRIPE_R5C_PREFLUSH, &sh->state);
6027 		/* we only need flush for one stripe */
6028 		ctx->do_flush = false;
6029 	}
6030 
6031 	set_bit(STRIPE_HANDLE, &sh->state);
6032 	clear_bit(STRIPE_DELAYED, &sh->state);
6033 	if ((!sh->batch_head || sh == sh->batch_head) &&
6034 	    (bi->bi_opf & REQ_SYNC) &&
6035 	    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
6036 		atomic_inc(&conf->preread_active_stripes);
6037 
6038 	release_stripe_plug(mddev, sh);
6039 	return STRIPE_SUCCESS;
6040 
6041 out_release:
6042 	raid5_release_stripe(sh);
6043 out:
6044 	if (ret == STRIPE_SCHEDULE_AND_RETRY && reshape_interrupted(mddev)) {
6045 		bi->bi_status = BLK_STS_RESOURCE;
6046 		ret = STRIPE_WAIT_RESHAPE;
6047 		pr_err_ratelimited("dm-raid456: io across reshape position while reshape can't make progress");
6048 	}
6049 	return ret;
6050 }
6051 
6052 /*
6053  * If the bio covers multiple data disks, find the sector within the bio
6054  * that has the lowest chunk offset in the first chunk.
6055  */
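/*
 * Illustrative example (hypothetical numbers): with sectors_per_chunk = 64
 * and a 16-sector bio whose first sector sits at chunk offset 60, only
 * 64 - 60 = 4 sectors remain in the first chunk, so the bio spills onto the
 * next data disk and the sector returned is r_sector + 4, which has chunk
 * offset 0 (assuming the bio really does reach a following data disk in the
 * same stripe).
 */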
6056 static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
6057 					      struct bio *bi)
6058 {
6059 	int sectors_per_chunk = conf->chunk_sectors;
6060 	int raid_disks = conf->raid_disks;
6061 	int dd_idx;
6062 	struct stripe_head sh;
6063 	unsigned int chunk_offset;
6064 	sector_t r_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
6065 	sector_t sector;
6066 
6067 	/* We pass in fake stripe_head to get back parity disk numbers */
6068 	sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh);
6069 	chunk_offset = sector_div(sector, sectors_per_chunk);
6070 	if (sectors_per_chunk - chunk_offset >= bio_sectors(bi))
6071 		return r_sector;
6072 	/*
6073 	 * Bio crosses to the next data disk. Check whether it's in the same
6074 	 * chunk.
6075 	 */
6076 	dd_idx++;
6077 	while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx)
6078 		dd_idx++;
6079 	if (dd_idx >= raid_disks)
6080 		return r_sector;
6081 	return r_sector + sectors_per_chunk - chunk_offset;
6082 }
6083 
6084 static bool raid5_make_request(struct mddev *mddev, struct bio *bi)
6085 {
6086 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
6087 	struct r5conf *conf = mddev->private;
6088 	const int rw = bio_data_dir(bi);
6089 	struct stripe_request_ctx *ctx;
6090 	sector_t logical_sector;
6091 	enum stripe_result res;
6092 	int s, stripe_cnt;
6093 	bool on_wq;
6094 
6095 	if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
6096 		int ret = log_handle_flush_request(conf, bi);
6097 
6098 		if (ret == 0)
6099 			return true;
6100 		if (ret == -ENODEV) {
6101 			if (md_flush_request(mddev, bi))
6102 				return true;
6103 		}
6104 		/* ret == -EAGAIN, fallback */
6105 	}
6106 
6107 	md_write_start(mddev, bi);
6108 	/*
6109 	 * If the array is degraded, better not do a chunk aligned read, because
6110 	 * later we might have to read it again in order to reconstruct
6111 	 * data on failed drives.
6112 	 */
6113 	if (rw == READ && mddev->degraded == 0 &&
6114 	    mddev->reshape_position == MaxSector) {
6115 		bi = chunk_aligned_read(mddev, bi);
6116 		if (!bi)
6117 			return true;
6118 	}
6119 
6120 	if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
6121 		make_discard_request(mddev, bi);
6122 		md_write_end(mddev);
6123 		return true;
6124 	}
6125 
6126 	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
6127 	bi->bi_next = NULL;
6128 
6129 	ctx = mempool_alloc(conf->ctx_pool, GFP_NOIO);
6130 	memset(ctx, 0, conf->ctx_size);
6131 	ctx->first_sector = logical_sector;
6132 	ctx->last_sector = bio_end_sector(bi);
6133 	/*
6134 	 * If r5l_handle_flush_request() didn't clear REQ_PREFLUSH,
6135 	 * we need to flush the journal device.
6136 	 */
6137 	if (unlikely(bi->bi_opf & REQ_PREFLUSH))
6138 		ctx->do_flush = true;
6139 
6140 	stripe_cnt = DIV_ROUND_UP_SECTOR_T(ctx->last_sector - logical_sector,
6141 					   RAID5_STRIPE_SECTORS(conf));
6142 	bitmap_set(ctx->sectors_to_do, 0, stripe_cnt);
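	/*
	 * E.g. (hypothetical): a 256-sector bio with RAID5_STRIPE_SECTORS = 8
	 * spans 32 stripes, so bits 0..31 are set here and cleared one by one
	 * in add_all_stripe_bios() as each stripe accepts its part of the bio.
	 */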
6143 
6144 	pr_debug("raid456: %s, logical %llu to %llu\n", __func__,
6145 		 bi->bi_iter.bi_sector, ctx->last_sector);
6146 
6147 	/* Bail out if conflicts with reshape and REQ_NOWAIT is set */
6148 	if ((bi->bi_opf & REQ_NOWAIT) &&
6149 	    get_reshape_loc(mddev, conf, logical_sector) == LOC_INSIDE_RESHAPE) {
6150 		bio_wouldblock_error(bi);
6151 		if (rw == WRITE)
6152 			md_write_end(mddev);
6153 		mempool_free(ctx, conf->ctx_pool);
6154 		return true;
6155 	}
6156 	md_account_bio(mddev, &bi);
6157 
6158 	/*
6159 	 * Let's start with the stripe with the lowest chunk offset in the first
6160 	 * chunk. That has the best chances of creating IOs adjacent to
6161 	 * previous IOs in case of sequential IO and thus creates the most
6162 	 * sequential IO pattern. We don't bother with the optimization when
6163 	 * reshaping as the performance benefit is not worth the complexity.
6164 	 */
6165 	if (likely(conf->reshape_progress == MaxSector)) {
6166 		logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
6167 		on_wq = false;
6168 	} else {
6169 		add_wait_queue(&conf->wait_for_reshape, &wait);
6170 		on_wq = true;
6171 	}
6172 	s = (logical_sector - ctx->first_sector) >> RAID5_STRIPE_SHIFT(conf);
6173 
6174 	while (1) {
6175 		res = make_stripe_request(mddev, conf, ctx, logical_sector,
6176 					  bi);
6177 		if (res == STRIPE_FAIL || res == STRIPE_WAIT_RESHAPE)
6178 			break;
6179 
6180 		if (res == STRIPE_RETRY)
6181 			continue;
6182 
6183 		if (res == STRIPE_SCHEDULE_AND_RETRY) {
6184 			WARN_ON_ONCE(!on_wq);
6185 			/*
6186 			 * Must release the reference to batch_last before
6187 			 * scheduling and waiting for work to be done,
6188 			 * otherwise the batch_last stripe head could prevent
6189 			 * raid5_activate_delayed() from making progress
6190 			 * and thus deadlocking.
6191 			 */
6192 			if (ctx->batch_last) {
6193 				raid5_release_stripe(ctx->batch_last);
6194 				ctx->batch_last = NULL;
6195 			}
6196 
6197 			wait_woken(&wait, TASK_UNINTERRUPTIBLE,
6198 				   MAX_SCHEDULE_TIMEOUT);
6199 			continue;
6200 		}
6201 
6202 		s = find_next_bit_wrap(ctx->sectors_to_do, stripe_cnt, s);
6203 		if (s == stripe_cnt)
6204 			break;
6205 
6206 		logical_sector = ctx->first_sector +
6207 			(s << RAID5_STRIPE_SHIFT(conf));
6208 	}
6209 	if (unlikely(on_wq))
6210 		remove_wait_queue(&conf->wait_for_reshape, &wait);
6211 
6212 	if (ctx->batch_last)
6213 		raid5_release_stripe(ctx->batch_last);
6214 
6215 	if (rw == WRITE)
6216 		md_write_end(mddev);
6217 
6218 	mempool_free(ctx, conf->ctx_pool);
6219 	if (res == STRIPE_WAIT_RESHAPE) {
6220 		md_free_cloned_bio(bi);
6221 		return false;
6222 	}
6223 
6224 	bio_endio(bi);
6225 	return true;
6226 }
6227 
6228 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
6229 
6230 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
6231 {
6232 	/* Reshaping is quite different from recovery/resync, so it is
6233 	 * handled quite separately ... here.
6234 	 *
6235 	 * On each call to sync_request, we gather one chunk worth of
6236 	 * destination stripes and flag them as expanding.
6237 	 * Then we find all the source stripes and request reads.
6238 	 * As the reads complete, handle_stripe will copy the data
6239 	 * into the destination stripe and release that stripe.
6240 	 */
6241 	struct r5conf *conf = mddev->private;
6242 	struct stripe_head *sh;
6243 	struct md_rdev *rdev;
6244 	sector_t first_sector, last_sector;
6245 	int raid_disks = conf->previous_raid_disks;
6246 	int data_disks = raid_disks - conf->max_degraded;
6247 	int new_data_disks = conf->raid_disks - conf->max_degraded;
6248 	int i;
6249 	int dd_idx;
6250 	sector_t writepos, readpos, safepos;
6251 	sector_t stripe_addr;
6252 	int reshape_sectors;
6253 	struct list_head stripes;
6254 	sector_t retn;
6255 
6256 	if (sector_nr == 0) {
6257 		/* If restarting in the middle, skip the initial sectors */
6258 		if (mddev->reshape_backwards &&
6259 		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
6260 			sector_nr = raid5_size(mddev, 0, 0)
6261 				- conf->reshape_progress;
6262 		} else if (mddev->reshape_backwards &&
6263 			   conf->reshape_progress == MaxSector) {
6264 			/* shouldn't happen, but just in case, finish up. */
6265 			sector_nr = MaxSector;
6266 		} else if (!mddev->reshape_backwards &&
6267 			   conf->reshape_progress > 0)
6268 			sector_nr = conf->reshape_progress;
6269 		sector_div(sector_nr, new_data_disks);
6270 		if (sector_nr) {
6271 			mddev->curr_resync_completed = sector_nr;
6272 			sysfs_notify_dirent_safe(mddev->sysfs_completed);
6273 			*skipped = 1;
6274 			retn = sector_nr;
6275 			goto finish;
6276 		}
6277 	}
6278 
6279 	/* We need to process a full chunk at a time.
6280 	 * If old and new chunk sizes differ, we need to process the
6281 	 * larger of the two.
6282 	 */
6283 
6284 	reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors);
6285 
6286 	/* We update the metadata at least every 10 seconds, or when
6287 	 * the data about to be copied would over-write the source of
6288 	 * the data at the front of the range, i.e. when the point one
6289 	 * new stripe beyond reshape_progress, mapped with the new layout,
6290 	 * falls after where reshape_safe maps to with the old layout.
6291 	 */
6292 	writepos = conf->reshape_progress;
6293 	sector_div(writepos, new_data_disks);
6294 	readpos = conf->reshape_progress;
6295 	sector_div(readpos, data_disks);
6296 	safepos = conf->reshape_safe;
6297 	sector_div(safepos, data_disks);
6298 	if (mddev->reshape_backwards) {
6299 		if (WARN_ON(writepos < reshape_sectors))
6300 			return MaxSector;
6301 
6302 		writepos -= reshape_sectors;
6303 		readpos += reshape_sectors;
6304 		safepos += reshape_sectors;
6305 	} else {
6306 		writepos += reshape_sectors;
6307 		/* readpos and safepos are worst-case calculations.
6308 		 * A negative number is overly pessimistic, and causes
6309 		 * obvious problems for unsigned storage.  So clip to 0.
6310 		 */
6311 		readpos -= min_t(sector_t, reshape_sectors, readpos);
6312 		safepos -= min_t(sector_t, reshape_sectors, safepos);
6313 	}
6314 
6315 	/* Having calculated the 'writepos' possibly use it
6316 	 * to set 'stripe_addr' which is where we will write to.
6317 	 */
6318 	if (mddev->reshape_backwards) {
6319 		if (WARN_ON(conf->reshape_progress == 0))
6320 			return MaxSector;
6321 
6322 		stripe_addr = writepos;
6323 		if (WARN_ON((mddev->dev_sectors &
6324 		    ~((sector_t)reshape_sectors - 1)) -
6325 		    reshape_sectors - stripe_addr != sector_nr))
6326 			return MaxSector;
6327 	} else {
6328 		if (WARN_ON(writepos != sector_nr + reshape_sectors))
6329 			return MaxSector;
6330 
6331 		stripe_addr = sector_nr;
6332 	}
6333 
6334 	/* 'writepos' is the most advanced device address we might write.
6335 	 * 'readpos' is the least advanced device address we might read.
6336 	 * 'safepos' is the least address recorded in the metadata as having
6337 	 *     been reshaped.
6338 	 * If there is a min_offset_diff, these are adjusted either by
6339 	 * increasing the safepos/readpos if diff is negative, or
6340 	 * increasing writepos if diff is positive.
6341 	 * If 'readpos' is then behind 'writepos', there is no way that we can
6342 	 * ensure safety in the face of a crash - that must be done by userspace
6343 	 * making a backup of the data.  So in that case there is no particular
6344 	 * rush to update metadata.
6345 	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
6346 	 * update the metadata to advance 'safepos' to match 'readpos' so that
6347 	 * we can be safe in the event of a crash.
6348 	 * So we insist on updating metadata if safepos is behind writepos and
6349 	 * readpos is beyond writepos.
6350 	 * In any case, update the metadata every 10 seconds.
6351 	 * Maybe that number should be configurable, but I'm not sure it is
6352 	 * worth it.... maybe it could be a multiple of safemode_delay???
6353 	 */
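	/*
	 * Worked example (hypothetical, forward reshape from 4 to 5 data
	 * disks, reshape_sectors = 64, min_offset_diff = 0): with
	 * reshape_progress = 6400 and reshape_safe = 5120 we get
	 * writepos = 6400/5 + 64 = 1344, readpos = 6400/4 - 64 = 1536 and
	 * safepos = 5120/4 - 64 = 1216; safepos < writepos and
	 * readpos > writepos, so the metadata must be updated before any
	 * further stripes are scheduled.
	 */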
6354 	if (conf->min_offset_diff < 0) {
6355 		safepos += -conf->min_offset_diff;
6356 		readpos += -conf->min_offset_diff;
6357 	} else
6358 		writepos += conf->min_offset_diff;
6359 
6360 	if ((mddev->reshape_backwards
6361 	     ? (safepos > writepos && readpos < writepos)
6362 	     : (safepos < writepos && readpos > writepos)) ||
6363 	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
6364 		/* Cannot proceed until we've updated the superblock... */
6365 		wait_event(conf->wait_for_reshape,
6366 			   atomic_read(&conf->reshape_stripes)==0
6367 			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
6368 		if (atomic_read(&conf->reshape_stripes) != 0)
6369 			return 0;
6370 		mddev->reshape_position = conf->reshape_progress;
6371 		mddev->curr_resync_completed = sector_nr;
6372 		if (!mddev->reshape_backwards)
6373 			/* Can update recovery_offset */
6374 			rdev_for_each(rdev, mddev)
6375 				if (rdev->raid_disk >= 0 &&
6376 				    !test_bit(Journal, &rdev->flags) &&
6377 				    !test_bit(In_sync, &rdev->flags) &&
6378 				    rdev->recovery_offset < sector_nr)
6379 					rdev->recovery_offset = sector_nr;
6380 
6381 		conf->reshape_checkpoint = jiffies;
6382 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6383 		md_wakeup_thread(mddev->thread);
6384 		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
6385 			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
6386 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6387 			return 0;
6388 		spin_lock_irq(&conf->device_lock);
6389 		conf->reshape_safe = mddev->reshape_position;
6390 		spin_unlock_irq(&conf->device_lock);
6391 		wake_up(&conf->wait_for_reshape);
6392 		sysfs_notify_dirent_safe(mddev->sysfs_completed);
6393 	}
6394 
6395 	INIT_LIST_HEAD(&stripes);
6396 	for (i = 0; i < reshape_sectors; i += RAID5_STRIPE_SECTORS(conf)) {
6397 		int j;
6398 		int skipped_disk = 0;
6399 		sh = raid5_get_active_stripe(conf, NULL, stripe_addr+i,
6400 					     R5_GAS_NOQUIESCE);
6401 		set_bit(STRIPE_EXPANDING, &sh->state);
6402 		atomic_inc(&conf->reshape_stripes);
6403 		/* If any of this stripe is beyond the end of the old
6404 		 * array, then we need to zero those blocks
6405 		 */
6406 		for (j=sh->disks; j--;) {
6407 			sector_t s;
6408 			if (j == sh->pd_idx)
6409 				continue;
6410 			if (conf->level == 6 &&
6411 			    j == sh->qd_idx)
6412 				continue;
6413 			s = raid5_compute_blocknr(sh, j, 0);
6414 			if (s < raid5_size(mddev, 0, 0)) {
6415 				skipped_disk = 1;
6416 				continue;
6417 			}
6418 			memset(page_address(sh->dev[j].page), 0, RAID5_STRIPE_SIZE(conf));
6419 			set_bit(R5_Expanded, &sh->dev[j].flags);
6420 			set_bit(R5_UPTODATE, &sh->dev[j].flags);
6421 		}
6422 		if (!skipped_disk) {
6423 			set_bit(STRIPE_EXPAND_READY, &sh->state);
6424 			set_bit(STRIPE_HANDLE, &sh->state);
6425 		}
6426 		list_add(&sh->lru, &stripes);
6427 	}
6428 	spin_lock_irq(&conf->device_lock);
6429 	if (mddev->reshape_backwards)
6430 		conf->reshape_progress -= reshape_sectors * new_data_disks;
6431 	else
6432 		conf->reshape_progress += reshape_sectors * new_data_disks;
6433 	spin_unlock_irq(&conf->device_lock);
6434 	/* OK, those stripes are ready. We can start scheduling
6435 	 * reads on the source stripes.
6436 	 * The source stripes are determined by mapping the first and last
6437 	 * block on the destination stripes.
6438 	 */
6439 	first_sector =
6440 		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
6441 				     1, &dd_idx, NULL);
6442 	last_sector =
6443 		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
6444 					    * new_data_disks - 1),
6445 				     1, &dd_idx, NULL);
6446 	if (last_sector >= mddev->dev_sectors)
6447 		last_sector = mddev->dev_sectors - 1;
6448 	while (first_sector <= last_sector) {
6449 		sh = raid5_get_active_stripe(conf, NULL, first_sector,
6450 				R5_GAS_PREVIOUS | R5_GAS_NOQUIESCE);
6451 		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
6452 		set_bit(STRIPE_HANDLE, &sh->state);
6453 		raid5_release_stripe(sh);
6454 		first_sector += RAID5_STRIPE_SECTORS(conf);
6455 	}
6456 	/* Now that the sources are clearly marked, we can release
6457 	 * the destination stripes
6458 	 */
6459 	while (!list_empty(&stripes)) {
6460 		sh = list_entry(stripes.next, struct stripe_head, lru);
6461 		list_del_init(&sh->lru);
6462 		raid5_release_stripe(sh);
6463 	}
6464 	/* If this takes us to the resync_max point where we have to pause,
6465 	 * then we need to write out the superblock.
6466 	 */
6467 	sector_nr += reshape_sectors;
6468 	retn = reshape_sectors;
6469 finish:
6470 	if (mddev->curr_resync_completed > mddev->resync_max ||
6471 	    (sector_nr - mddev->curr_resync_completed) * 2
6472 	    >= mddev->resync_max - mddev->curr_resync_completed) {
6473 		/* Cannot proceed until we've updated the superblock... */
6474 		wait_event(conf->wait_for_reshape,
6475 			   atomic_read(&conf->reshape_stripes) == 0
6476 			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
6477 		if (atomic_read(&conf->reshape_stripes) != 0)
6478 			goto ret;
6479 		mddev->reshape_position = conf->reshape_progress;
6480 		mddev->curr_resync_completed = sector_nr;
6481 		if (!mddev->reshape_backwards)
6482 			/* Can update recovery_offset */
6483 			rdev_for_each(rdev, mddev)
6484 				if (rdev->raid_disk >= 0 &&
6485 				    !test_bit(Journal, &rdev->flags) &&
6486 				    !test_bit(In_sync, &rdev->flags) &&
6487 				    rdev->recovery_offset < sector_nr)
6488 					rdev->recovery_offset = sector_nr;
6489 		conf->reshape_checkpoint = jiffies;
6490 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6491 		md_wakeup_thread(mddev->thread);
6492 		wait_event(mddev->sb_wait,
6493 			   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)
6494 			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
6495 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6496 			goto ret;
6497 		spin_lock_irq(&conf->device_lock);
6498 		conf->reshape_safe = mddev->reshape_position;
6499 		spin_unlock_irq(&conf->device_lock);
6500 		wake_up(&conf->wait_for_reshape);
6501 		sysfs_notify_dirent_safe(mddev->sysfs_completed);
6502 	}
6503 ret:
6504 	return retn;
6505 }
6506 
6507 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
6508 					  sector_t max_sector, int *skipped)
6509 {
6510 	struct r5conf *conf = mddev->private;
6511 	struct stripe_head *sh;
6512 	sector_t sync_blocks;
6513 	bool still_degraded = false;
6514 	int i;
6515 
6516 	if (sector_nr >= max_sector) {
6517 		/* just being told to finish up .. nothing much to do */
6518 
6519 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
6520 			end_reshape(conf);
6521 			return 0;
6522 		}
6523 
6524 		if (mddev->curr_resync < max_sector) /* aborted */
6525 			md_bitmap_end_sync(mddev, mddev->curr_resync,
6526 					   &sync_blocks);
6527 		else /* completed sync */
6528 			conf->fullsync = 0;
6529 		if (md_bitmap_enabled(mddev, false))
6530 			mddev->bitmap_ops->close_sync(mddev);
6531 
6532 		return 0;
6533 	}
6534 
6535 	/* Allow raid5_quiesce to complete */
6536 	wait_event(conf->wait_for_reshape, conf->quiesce != 2);
6537 
6538 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6539 		return reshape_request(mddev, sector_nr, skipped);
6540 
6541 	/* No need to check resync_max as we never do more than one
6542 	 * stripe, and as resync_max will always be on a chunk boundary,
6543 	 * if the check in md_do_sync didn't fire, there is no chance
6544 	 * of overstepping resync_max here
6545 	 */
6546 
6547 	/* If there are too many failed drives and we are trying
6548 	 * to resync, then assert that we are finished, because there is
6549 	 * nothing we can do.
6550 	 */
6551 	if (mddev->degraded >= conf->max_degraded &&
6552 	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6553 		sector_t rv = mddev->dev_sectors - sector_nr;
6554 		*skipped = 1;
6555 		return rv;
6556 	}
6557 	if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
6558 	    !conf->fullsync &&
6559 	    !md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
6560 	    sync_blocks >= RAID5_STRIPE_SECTORS(conf)) {
6561 		/* we can skip this block, and probably more */
6562 		do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf));
6563 		*skipped = 1;
6564 		/* keep things rounded to whole stripes */
6565 		return sync_blocks * RAID5_STRIPE_SECTORS(conf);
6566 	}
6567 
6568 	if (md_bitmap_enabled(mddev, false))
6569 		mddev->bitmap_ops->cond_end_sync(mddev, sector_nr, false);
6570 
6571 	sh = raid5_get_active_stripe(conf, NULL, sector_nr,
6572 				     R5_GAS_NOBLOCK);
6573 	if (sh == NULL) {
6574 		sh = raid5_get_active_stripe(conf, NULL, sector_nr, 0);
6575 		/* make sure we don't swamp the stripe cache if someone else
6576 		 * is trying to get access
6577 		 */
6578 		schedule_timeout_uninterruptible(1);
6579 	}
6580 	/* Need to check if the array will still be degraded after recovery/resync.
6581 	 * Note that in case of > 1 drive failures it's possible we're rebuilding
6582 	 * one drive while leaving another faulty drive in the array.
6583 	 */
6584 	for (i = 0; i < conf->raid_disks; i++) {
6585 		struct md_rdev *rdev = conf->disks[i].rdev;
6586 
6587 		if (rdev == NULL || test_bit(Faulty, &rdev->flags))
6588 			still_degraded = true;
6589 	}
6590 
6591 	md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, still_degraded);
6592 	set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
6593 	set_bit(STRIPE_HANDLE, &sh->state);
6594 
6595 	raid5_release_stripe(sh);
6596 
6597 	return RAID5_STRIPE_SECTORS(conf);
6598 }
6599 
6600 static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
6601 			       unsigned int offset)
6602 {
6603 	/* We may not be able to submit a whole bio at once as there
6604 	 * may not be enough stripe_heads available.
6605 	 * We cannot pre-allocate enough stripe_heads as we may need
6606 	 * more than exist in the cache (if we allow ever larger chunks).
6607 	 * So we do one stripe head at a time and record how many have been
6608 	 * done via the retry offset (conf->retry_read_offset).
6609 	 *
6610 	 * We *know* that this entire raid_bio is in one chunk, so
6611 	 * there is only one 'dd_idx' and we only need one call to raid5_compute_sector.
6612 	 */
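	/*
	 * For example, if a previous attempt handled the first 3 stripes
	 * before running out of stripe_heads, conf->retry_read_offset is 3
	 * and the loop below skips those stripes (scnt < offset) on retry.
	 */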
6613 	struct stripe_head *sh;
6614 	int dd_idx;
6615 	sector_t sector, logical_sector, last_sector;
6616 	int scnt = 0;
6617 	int handled = 0;
6618 
6619 	logical_sector = raid_bio->bi_iter.bi_sector &
6620 		~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
6621 	sector = raid5_compute_sector(conf, logical_sector,
6622 				      0, &dd_idx, NULL);
6623 	last_sector = bio_end_sector(raid_bio);
6624 
6625 	for (; logical_sector < last_sector;
6626 	     logical_sector += RAID5_STRIPE_SECTORS(conf),
6627 		     sector += RAID5_STRIPE_SECTORS(conf),
6628 		     scnt++) {
6629 
6630 		if (scnt < offset)
6631 			/* already done this stripe */
6632 			continue;
6633 
6634 		sh = raid5_get_active_stripe(conf, NULL, sector,
6635 				R5_GAS_NOBLOCK | R5_GAS_NOQUIESCE);
6636 		if (!sh) {
6637 			/* failed to get a stripe - must wait */
6638 			conf->retry_read_aligned = raid_bio;
6639 			conf->retry_read_offset = scnt;
6640 			return handled;
6641 		}
6642 
6643 		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
6644 			int hash;
6645 
6646 			spin_lock_irq(&conf->device_lock);
6647 			hash = sh->hash_lock_index;
6648 			__release_stripe(conf, sh,
6649 					 &conf->temp_inactive_list[hash]);
6650 			spin_unlock_irq(&conf->device_lock);
6651 			conf->retry_read_aligned = raid_bio;
6652 			conf->retry_read_offset = scnt;
6653 			return handled;
6654 		}
6655 
6656 		set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
6657 		handle_stripe(sh);
6658 		raid5_release_stripe(sh);
6659 		handled++;
6660 	}
6661 
6662 	bio_endio(raid_bio);
6663 
6664 	if (atomic_dec_and_test(&conf->active_aligned_reads))
6665 		wake_up(&conf->wait_for_quiescent);
6666 	return handled;
6667 }
6668 
6669 static int handle_active_stripes(struct r5conf *conf, int group,
6670 				 struct r5worker *worker,
6671 				 struct list_head *temp_inactive_list)
6672 		__must_hold(&conf->device_lock)
6673 {
6674 	struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
6675 	int i, batch_size = 0, hash;
6676 	bool release_inactive = false;
6677 
6678 	while (batch_size < MAX_STRIPE_BATCH &&
6679 			(sh = __get_priority_stripe(conf, group)) != NULL)
6680 		batch[batch_size++] = sh;
6681 
6682 	if (batch_size == 0) {
6683 		for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
6684 			if (!list_empty(temp_inactive_list + i))
6685 				break;
6686 		if (i == NR_STRIPE_HASH_LOCKS) {
6687 			spin_unlock_irq(&conf->device_lock);
6688 			log_flush_stripe_to_raid(conf);
6689 			spin_lock_irq(&conf->device_lock);
6690 			return batch_size;
6691 		}
6692 		release_inactive = true;
6693 	}
6694 	spin_unlock_irq(&conf->device_lock);
6695 
6696 	release_inactive_stripe_list(conf, temp_inactive_list,
6697 				     NR_STRIPE_HASH_LOCKS);
6698 
6699 	r5l_flush_stripe_to_raid(conf->log);
6700 	if (release_inactive) {
6701 		spin_lock_irq(&conf->device_lock);
6702 		return 0;
6703 	}
6704 
6705 	for (i = 0; i < batch_size; i++)
6706 		handle_stripe(batch[i]);
6707 	log_write_stripe_run(conf);
6708 
6709 	cond_resched();
6710 
6711 	spin_lock_irq(&conf->device_lock);
6712 	for (i = 0; i < batch_size; i++) {
6713 		hash = batch[i]->hash_lock_index;
6714 		__release_stripe(conf, batch[i], &temp_inactive_list[hash]);
6715 	}
6716 	return batch_size;
6717 }
6718 
6719 static void raid5_do_work(struct work_struct *work)
6720 {
6721 	struct r5worker *worker = container_of(work, struct r5worker, work);
6722 	struct r5worker_group *group = worker->group;
6723 	struct r5conf *conf = group->conf;
6724 	struct mddev *mddev = conf->mddev;
6725 	int group_id = group - conf->worker_groups;
6726 	int handled;
6727 	struct blk_plug plug;
6728 
6729 	pr_debug("+++ raid5worker active\n");
6730 
6731 	blk_start_plug(&plug);
6732 	handled = 0;
6733 	spin_lock_irq(&conf->device_lock);
6734 	while (1) {
6735 		int batch_size, released;
6736 
6737 		released = release_stripe_list(conf, worker->temp_inactive_list);
6738 
6739 		batch_size = handle_active_stripes(conf, group_id, worker,
6740 						   worker->temp_inactive_list);
6741 		worker->working = false;
6742 		if (!batch_size && !released)
6743 			break;
6744 		handled += batch_size;
6745 		wait_event_lock_irq(mddev->sb_wait,
6746 			!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
6747 			conf->device_lock);
6748 	}
6749 	pr_debug("%d stripes handled\n", handled);
6750 
6751 	spin_unlock_irq(&conf->device_lock);
6752 
6753 	flush_deferred_bios(conf);
6754 
6755 	r5l_flush_stripe_to_raid(conf->log);
6756 
6757 	async_tx_issue_pending_all();
6758 	blk_finish_plug(&plug);
6759 
6760 	pr_debug("--- raid5worker inactive\n");
6761 }
6762 
6763 /*
6764  * This is our raid5 kernel thread.
6765  *
6766  * We scan the hash table for stripes which can be handled now.
6767  * During the scan, completed stripes are saved for us by the interrupt
6768  * handler, so that they will not have to wait for our next wakeup.
6769  */
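/*
 * Each pass of the loop below: fold in stripes released from other
 * contexts, flush pending bitmap updates and re-activate delayed
 * stripes, retry any aligned reads that previously failed to get a
 * stripe_head, then handle a batch of active stripes.  The loop exits
 * once a pass handles and releases nothing (or a superblock update is
 * pending on a read-write array).
 */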
6770 static void raid5d(struct md_thread *thread)
6771 {
6772 	struct mddev *mddev = thread->mddev;
6773 	struct r5conf *conf = mddev->private;
6774 	int handled;
6775 	struct blk_plug plug;
6776 
6777 	pr_debug("+++ raid5d active\n");
6778 
6779 	md_check_recovery(mddev);
6780 
6781 	blk_start_plug(&plug);
6782 	handled = 0;
6783 	spin_lock_irq(&conf->device_lock);
6784 	while (1) {
6785 		struct bio *bio;
6786 		int batch_size, released;
6787 		unsigned int offset;
6788 
6789 		if (md_is_rdwr(mddev) &&
6790 		    test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
6791 			break;
6792 
6793 		released = release_stripe_list(conf, conf->temp_inactive_list);
6794 		if (released)
6795 			clear_bit(R5_DID_ALLOC, &conf->cache_state);
6796 
6797 		if (!list_empty(&conf->bitmap_list)) {
6799 			/* Now is a good time to flush some bitmap updates */
6800 			conf->seq_flush++;
6801 			spin_unlock_irq(&conf->device_lock);
6802 			if (md_bitmap_enabled(mddev, true))
6803 				mddev->bitmap_ops->unplug(mddev, true);
6804 			spin_lock_irq(&conf->device_lock);
6805 			conf->seq_write = conf->seq_flush;
6806 			activate_bit_delay(conf, conf->temp_inactive_list);
6807 		}
6808 		raid5_activate_delayed(conf);
6809 
6810 		while ((bio = remove_bio_from_retry(conf, &offset))) {
6811 			int ok;
6812 			spin_unlock_irq(&conf->device_lock);
6813 			ok = retry_aligned_read(conf, bio, offset);
6814 			spin_lock_irq(&conf->device_lock);
6815 			if (!ok)
6816 				break;
6817 			handled++;
6818 		}
6819 
6820 		batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
6821 						   conf->temp_inactive_list);
6822 		if (!batch_size && !released)
6823 			break;
6824 		handled += batch_size;
6825 
6826 		if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) {
6827 			spin_unlock_irq(&conf->device_lock);
6828 			md_check_recovery(mddev);
6829 			spin_lock_irq(&conf->device_lock);
6830 		}
6831 	}
6832 	pr_debug("%d stripes handled\n", handled);
6833 
6834 	spin_unlock_irq(&conf->device_lock);
6835 	if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
6836 	    mutex_trylock(&conf->cache_size_mutex)) {
6837 		grow_one_stripe(conf, __GFP_NOWARN);
6838 		/* Set flag even if allocation failed.  This helps
6839 		 * slow down allocation requests when memory is short.
6840 		 */
6841 		set_bit(R5_DID_ALLOC, &conf->cache_state);
6842 		mutex_unlock(&conf->cache_size_mutex);
6843 	}
6844 
6845 	flush_deferred_bios(conf);
6846 
6847 	r5l_flush_stripe_to_raid(conf->log);
6848 
6849 	async_tx_issue_pending_all();
6850 	blk_finish_plug(&plug);
6851 
6852 	pr_debug("--- raid5d inactive\n");
6853 }
6854 
6855 static ssize_t
6856 raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
6857 {
6858 	struct r5conf *conf;
6859 	int ret = 0;
6860 	spin_lock(&mddev->lock);
6861 	conf = mddev->private;
6862 	if (conf)
6863 		ret = sprintf(page, "%d\n", conf->min_nr_stripes);
6864 	spin_unlock(&mddev->lock);
6865 	return ret;
6866 }
6867 
6868 int
6869 raid5_set_cache_size(struct mddev *mddev, int size)
6870 {
6871 	int result = 0;
6872 	struct r5conf *conf = mddev->private;
6873 
6874 	if (size <= 16 || size > 32768)
6875 		return -EINVAL;
6876 
6877 	WRITE_ONCE(conf->min_nr_stripes, size);
6878 	mutex_lock(&conf->cache_size_mutex);
6879 	while (size < conf->max_nr_stripes &&
6880 	       drop_one_stripe(conf))
6881 		;
6882 	mutex_unlock(&conf->cache_size_mutex);
6883 
6884 	md_allow_write(mddev);
6885 
6886 	mutex_lock(&conf->cache_size_mutex);
6887 	while (size > conf->max_nr_stripes)
6888 		if (!grow_one_stripe(conf, GFP_KERNEL)) {
6889 			WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes);
6890 			result = -ENOMEM;
6891 			break;
6892 		}
6893 	mutex_unlock(&conf->cache_size_mutex);
6894 
6895 	return result;
6896 }
6897 EXPORT_SYMBOL(raid5_set_cache_size);
6898 
6899 static ssize_t
6900 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
6901 {
6902 	struct r5conf *conf;
6903 	unsigned long new;
6904 	int err;
6905 
6906 	if (len >= PAGE_SIZE)
6907 		return -EINVAL;
6908 	if (kstrtoul(page, 10, &new))
6909 		return -EINVAL;
6910 	err = mddev_lock(mddev);
6911 	if (err)
6912 		return err;
6913 	conf = mddev->private;
6914 	if (!conf)
6915 		err = -ENODEV;
6916 	else
6917 		err = raid5_set_cache_size(mddev, new);
6918 	mddev_unlock(mddev);
6919 
6920 	return err ?: len;
6921 }
6922 
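/*
 * Visible through sysfs as the array's stripe_cache_size attribute,
 * e.g. /sys/block/md0/md/stripe_cache_size (example path).
 */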
6923 static struct md_sysfs_entry
6924 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
6925 				raid5_show_stripe_cache_size,
6926 				raid5_store_stripe_cache_size);
6927 
6928 static ssize_t
6929 raid5_show_rmw_level(struct mddev  *mddev, char *page)
6930 {
6931 	struct r5conf *conf = mddev->private;
6932 	if (conf)
6933 		return sprintf(page, "%d\n", conf->rmw_level);
6934 	else
6935 		return 0;
6936 }
6937 
6938 static ssize_t
6939 raid5_store_rmw_level(struct mddev  *mddev, const char *page, size_t len)
6940 {
6941 	struct r5conf *conf = mddev->private;
6942 	unsigned long new;
6943 
6944 	if (!conf)
6945 		return -ENODEV;
6946 
6947 	if (len >= PAGE_SIZE)
6948 		return -EINVAL;
6949 
6950 	if (kstrtoul(page, 10, &new))
6951 		return -EINVAL;
6952 
6953 	if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
6954 		return -EINVAL;
6955 
6956 	if (new != PARITY_DISABLE_RMW &&
6957 	    new != PARITY_ENABLE_RMW &&
6958 	    new != PARITY_PREFER_RMW)
6959 		return -EINVAL;
6960 
6961 	conf->rmw_level = new;
6962 	return len;
6963 }
6964 
6965 static struct md_sysfs_entry
6966 raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
6967 			 raid5_show_rmw_level,
6968 			 raid5_store_rmw_level);
6969 
6970 static ssize_t
6971 raid5_show_stripe_size(struct mddev  *mddev, char *page)
6972 {
6973 	struct r5conf *conf;
6974 	int ret = 0;
6975 
6976 	spin_lock(&mddev->lock);
6977 	conf = mddev->private;
6978 	if (conf)
6979 		ret = sprintf(page, "%lu\n", RAID5_STRIPE_SIZE(conf));
6980 	spin_unlock(&mddev->lock);
6981 	return ret;
6982 }
6983 
6984 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
6985 static ssize_t
6986 raid5_store_stripe_size(struct mddev  *mddev, const char *page, size_t len)
6987 {
6988 	struct r5conf *conf;
6989 	unsigned long new;
6990 	int err;
6991 	int size;
6992 
6993 	if (len >= PAGE_SIZE)
6994 		return -EINVAL;
6995 	if (kstrtoul(page, 10, &new))
6996 		return -EINVAL;
6997 
6998 	/*
6999 	 * The value must not be bigger than PAGE_SIZE.  It must be a
7000 	 * multiple of DEFAULT_STRIPE_SIZE and the value must be a power
7001 	 * of two.
7002 	 */
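	/*
	 * For example, with DEFAULT_STRIPE_SIZE of 4K on a 64K-page system
	 * the accepted values are 4096, 8192, 16384, 32768 and 65536.
	 */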
7003 	if (new % DEFAULT_STRIPE_SIZE != 0 ||
7004 			new > PAGE_SIZE || new == 0 ||
7005 			new != roundup_pow_of_two(new))
7006 		return -EINVAL;
7007 
7008 	err = mddev_suspend_and_lock(mddev);
7009 	if (err)
7010 		return err;
7011 
7012 	conf = mddev->private;
7013 	if (!conf) {
7014 		err = -ENODEV;
7015 		goto out_unlock;
7016 	}
7017 
7018 	if (new == conf->stripe_size)
7019 		goto out_unlock;
7020 
7021 	pr_debug("md/raid: change stripe_size from %lu to %lu\n",
7022 			conf->stripe_size, new);
7023 
7024 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7025 	    mddev->reshape_position != MaxSector || mddev->sysfs_active) {
7026 		err = -EBUSY;
7027 		goto out_unlock;
7028 	}
7029 
7030 	mutex_lock(&conf->cache_size_mutex);
7031 	size = conf->max_nr_stripes;
7032 
7033 	shrink_stripes(conf);
7034 
7035 	conf->stripe_size = new;
7036 	conf->stripe_shift = ilog2(new) - 9;
7037 	conf->stripe_sectors = new >> 9;
7038 	if (grow_stripes(conf, size)) {
7039 		pr_warn("md/raid:%s: couldn't allocate buffers\n",
7040 				mdname(mddev));
7041 		err = -ENOMEM;
7042 	}
7043 	mutex_unlock(&conf->cache_size_mutex);
7044 
7045 out_unlock:
7046 	mddev_unlock_and_resume(mddev);
7047 	return err ?: len;
7048 }
7049 
7050 static struct md_sysfs_entry
7051 raid5_stripe_size = __ATTR(stripe_size, 0644,
7052 			 raid5_show_stripe_size,
7053 			 raid5_store_stripe_size);
7054 #else
7055 static struct md_sysfs_entry
7056 raid5_stripe_size = __ATTR(stripe_size, 0444,
7057 			 raid5_show_stripe_size,
7058 			 NULL);
7059 #endif
7060 
7061 static ssize_t
7062 raid5_show_preread_threshold(struct mddev *mddev, char *page)
7063 {
7064 	struct r5conf *conf;
7065 	int ret = 0;
7066 	spin_lock(&mddev->lock);
7067 	conf = mddev->private;
7068 	if (conf)
7069 		ret = sprintf(page, "%d\n", conf->bypass_threshold);
7070 	spin_unlock(&mddev->lock);
7071 	return ret;
7072 }
7073 
7074 static ssize_t
7075 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
7076 {
7077 	struct r5conf *conf;
7078 	unsigned long new;
7079 	int err;
7080 
7081 	if (len >= PAGE_SIZE)
7082 		return -EINVAL;
7083 	if (kstrtoul(page, 10, &new))
7084 		return -EINVAL;
7085 
7086 	err = mddev_lock(mddev);
7087 	if (err)
7088 		return err;
7089 	conf = mddev->private;
7090 	if (!conf)
7091 		err = -ENODEV;
7092 	else if (new > conf->min_nr_stripes)
7093 		err = -EINVAL;
7094 	else
7095 		conf->bypass_threshold = new;
7096 	mddev_unlock(mddev);
7097 	return err ?: len;
7098 }
7099 
7100 static struct md_sysfs_entry
7101 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
7102 					S_IRUGO | S_IWUSR,
7103 					raid5_show_preread_threshold,
7104 					raid5_store_preread_threshold);
7105 
7106 static ssize_t
7107 raid5_show_skip_copy(struct mddev *mddev, char *page)
7108 {
7109 	struct r5conf *conf;
7110 	int ret = 0;
7111 	spin_lock(&mddev->lock);
7112 	conf = mddev->private;
7113 	if (conf)
7114 		ret = sprintf(page, "%d\n", conf->skip_copy);
7115 	spin_unlock(&mddev->lock);
7116 	return ret;
7117 }
7118 
7119 static ssize_t
7120 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
7121 {
7122 	struct r5conf *conf;
7123 	unsigned long new;
7124 	int err;
7125 
7126 	if (len >= PAGE_SIZE)
7127 		return -EINVAL;
7128 	if (kstrtoul(page, 10, &new))
7129 		return -EINVAL;
7130 	new = !!new;
7131 
7132 	err = mddev_suspend_and_lock(mddev);
7133 	if (err)
7134 		return err;
7135 	conf = mddev->private;
7136 	if (!conf)
7137 		err = -ENODEV;
7138 	else if (new != conf->skip_copy) {
7139 		struct request_queue *q = mddev->gendisk->queue;
7140 		struct queue_limits lim = queue_limits_start_update(q);
7141 
7142 		conf->skip_copy = new;
7143 		if (new)
7144 			lim.features |= BLK_FEAT_STABLE_WRITES;
7145 		else
7146 			lim.features &= ~BLK_FEAT_STABLE_WRITES;
7147 		err = queue_limits_commit_update(q, &lim);
7148 	}
7149 	mddev_unlock_and_resume(mddev);
7150 	return err ?: len;
7151 }
7152 
7153 static struct md_sysfs_entry
7154 raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
7155 					raid5_show_skip_copy,
7156 					raid5_store_skip_copy);
7157 
7158 static ssize_t
7159 stripe_cache_active_show(struct mddev *mddev, char *page)
7160 {
7161 	struct r5conf *conf = mddev->private;
7162 	if (conf)
7163 		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
7164 	else
7165 		return 0;
7166 }
7167 
7168 static struct md_sysfs_entry
7169 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
7170 
7171 static ssize_t
7172 raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
7173 {
7174 	struct r5conf *conf;
7175 	int ret = 0;
7176 	spin_lock(&mddev->lock);
7177 	conf = mddev->private;
7178 	if (conf)
7179 		ret = sprintf(page, "%d\n", conf->worker_cnt_per_group);
7180 	spin_unlock(&mddev->lock);
7181 	return ret;
7182 }
7183 
7184 static int alloc_thread_groups(struct r5conf *conf, int cnt,
7185 			       int *group_cnt,
7186 			       struct r5worker_group **worker_groups);
7187 static ssize_t
7188 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
7189 {
7190 	struct r5conf *conf;
7191 	unsigned int new;
7192 	int err;
7193 	struct r5worker_group *new_groups, *old_groups;
7194 	int group_cnt;
7195 
7196 	if (len >= PAGE_SIZE)
7197 		return -EINVAL;
7198 	if (kstrtouint(page, 10, &new))
7199 		return -EINVAL;
7200 	/* 8192 should be big enough */
7201 	if (new > 8192)
7202 		return -EINVAL;
7203 
7204 	err = mddev_suspend_and_lock(mddev);
7205 	if (err)
7206 		return err;
7207 	conf = mddev->private;
7208 	if (!conf) {
7209 		mddev_unlock_and_resume(mddev);
7210 		return -ENODEV;
7211 	}
7212 	raid5_quiesce(mddev, true);
7213 
7214 	if (new != conf->worker_cnt_per_group) {
7215 		old_groups = conf->worker_groups;
7216 		if (old_groups)
7217 			flush_workqueue(raid5_wq);
7218 
7219 		err = alloc_thread_groups(conf, new, &group_cnt, &new_groups);
7220 		if (!err) {
7221 			spin_lock_irq(&conf->device_lock);
7222 			conf->group_cnt = group_cnt;
7223 			conf->worker_cnt_per_group = new;
7224 			conf->worker_groups = new_groups;
7225 			spin_unlock_irq(&conf->device_lock);
7226 
7227 			if (old_groups)
7228 				kfree(old_groups[0].workers);
7229 			kfree(old_groups);
7230 		}
7231 	}
7232 
7233 	raid5_quiesce(mddev, false);
7234 	mddev_unlock_and_resume(mddev);
7235 
7236 	return err ?: len;
7237 }
7238 
7239 static struct md_sysfs_entry
7240 raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
7241 				raid5_show_group_thread_cnt,
7242 				raid5_store_group_thread_cnt);
7243 
7244 static struct attribute *raid5_attrs[] =  {
7245 	&raid5_stripecache_size.attr,
7246 	&raid5_stripecache_active.attr,
7247 	&raid5_preread_bypass_threshold.attr,
7248 	&raid5_group_thread_cnt.attr,
7249 	&raid5_skip_copy.attr,
7250 	&raid5_rmw_level.attr,
7251 	&raid5_stripe_size.attr,
7252 	&r5c_journal_mode.attr,
7253 	&ppl_write_hint.attr,
7254 	NULL,
7255 };
7256 static const struct attribute_group raid5_attrs_group = {
7257 	.name = NULL,
7258 	.attrs = raid5_attrs,
7259 };
7260 
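/*
 * Allocate one worker group per (possible) NUMA node, each with 'cnt'
 * workers running raid5_do_work(); cnt == 0 disables the worker threads
 * entirely.
 */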
7261 static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt,
7262 			       struct r5worker_group **worker_groups)
7263 {
7264 	int i, j, k;
7265 	ssize_t size;
7266 	struct r5worker *workers;
7267 
7268 	if (cnt == 0) {
7269 		*group_cnt = 0;
7270 		*worker_groups = NULL;
7271 		return 0;
7272 	}
7273 	*group_cnt = num_possible_nodes();
7274 	size = sizeof(struct r5worker) * cnt;
7275 	workers = kcalloc(size, *group_cnt, GFP_NOIO);
7276 	*worker_groups = kzalloc_objs(struct r5worker_group, *group_cnt,
7277 				      GFP_NOIO);
7278 	if (!*worker_groups || !workers) {
7279 		kfree(workers);
7280 		kfree(*worker_groups);
7281 		return -ENOMEM;
7282 	}
7283 
7284 	for (i = 0; i < *group_cnt; i++) {
7285 		struct r5worker_group *group;
7286 
7287 		group = &(*worker_groups)[i];
7288 		INIT_LIST_HEAD(&group->handle_list);
7289 		INIT_LIST_HEAD(&group->loprio_list);
7290 		group->conf = conf;
7291 		group->workers = workers + i * cnt;
7292 
7293 		for (j = 0; j < cnt; j++) {
7294 			struct r5worker *worker = group->workers + j;
7295 			worker->group = group;
7296 			INIT_WORK(&worker->work, raid5_do_work);
7297 
7298 			for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
7299 				INIT_LIST_HEAD(worker->temp_inactive_list + k);
7300 		}
7301 	}
7302 
7303 	return 0;
7304 }
7305 
7306 static void free_thread_groups(struct r5conf *conf)
7307 {
7308 	if (conf->worker_groups)
7309 		kfree(conf->worker_groups[0].workers);
7310 	kfree(conf->worker_groups);
7311 	conf->worker_groups = NULL;
7312 }
7313 
7314 static sector_t
7315 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
7316 {
7317 	struct r5conf *conf = mddev->private;
7318 
7319 	if (!sectors)
7320 		sectors = mddev->dev_sectors;
7321 	if (!raid_disks)
7322 		/* size is defined by the smallest of previous and new size */
7323 		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
7324 
7325 	sectors &= ~((sector_t)conf->chunk_sectors - 1);
7326 	sectors &= ~((sector_t)conf->prev_chunk_sectors - 1);
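	/*
	 * Rounded down to a multiple of both the old and new chunk size;
	 * e.g. a 6-device RAID-6 (max_degraded == 2) with 1T usable per
	 * device then yields 4T of array capacity.
	 */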
7327 	return sectors * (raid_disks - conf->max_degraded);
7328 }
7329 
7330 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
7331 {
7332 	safe_put_page(percpu->spare_page);
7333 	percpu->spare_page = NULL;
7334 	kvfree(percpu->scribble);
7335 	percpu->scribble = NULL;
7336 }
7337 
7338 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
7339 {
7340 	if (conf->level == 6 && !percpu->spare_page) {
7341 		percpu->spare_page = alloc_page(GFP_KERNEL);
7342 		if (!percpu->spare_page)
7343 			return -ENOMEM;
7344 	}
7345 
7346 	if (scribble_alloc(percpu,
7347 			   max(conf->raid_disks,
7348 			       conf->previous_raid_disks),
7349 			   max(conf->chunk_sectors,
7350 			       conf->prev_chunk_sectors)
7351 			   / RAID5_STRIPE_SECTORS(conf))) {
7352 		free_scratch_buffer(conf, percpu);
7353 		return -ENOMEM;
7354 	}
7355 
7356 	local_lock_init(&percpu->lock);
7357 	return 0;
7358 }
7359 
7360 static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node)
7361 {
7362 	struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
7363 
7364 	free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
7365 	return 0;
7366 }
7367 
7368 static void raid5_free_percpu(struct r5conf *conf)
7369 {
7370 	if (!conf->percpu)
7371 		return;
7372 
7373 	cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
7374 	free_percpu(conf->percpu);
7375 }
7376 
7377 static void free_conf(struct r5conf *conf)
7378 {
7379 	int i;
7380 
7381 	log_exit(conf);
7382 
7383 	shrinker_free(conf->shrinker);
7384 	free_thread_groups(conf);
7385 	shrink_stripes(conf);
7386 	raid5_free_percpu(conf);
7387 	for (i = 0; i < conf->pool_size; i++)
7388 		if (conf->disks[i].extra_page)
7389 			put_page(conf->disks[i].extra_page);
7390 	kfree(conf->disks);
7391 	bioset_exit(&conf->bio_split);
7392 	kfree(conf->stripe_hashtbl);
7393 	kfree(conf->pending_data);
7394 
7395 	mempool_destroy(conf->ctx_pool);
7396 
7397 	kfree(conf);
7398 }
7399 
7400 static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
7401 {
7402 	struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
7403 	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
7404 
7405 	if (alloc_scratch_buffer(conf, percpu)) {
7406 		pr_warn("%s: failed memory allocation for cpu%u\n",
7407 			__func__, cpu);
7408 		return -ENOMEM;
7409 	}
7410 	return 0;
7411 }
7412 
7413 static int raid5_alloc_percpu(struct r5conf *conf)
7414 {
7415 	int err = 0;
7416 
7417 	conf->percpu = alloc_percpu(struct raid5_percpu);
7418 	if (!conf->percpu)
7419 		return -ENOMEM;
7420 
7421 	err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
7422 	if (!err) {
7423 		conf->scribble_disks = max(conf->raid_disks,
7424 			conf->previous_raid_disks);
7425 		conf->scribble_sectors = max(conf->chunk_sectors,
7426 			conf->prev_chunk_sectors);
7427 	}
7428 	return err;
7429 }
7430 
7431 static unsigned long raid5_cache_scan(struct shrinker *shrink,
7432 				      struct shrink_control *sc)
7433 {
7434 	struct r5conf *conf = shrink->private_data;
7435 	unsigned long ret = SHRINK_STOP;
7436 
7437 	if (mutex_trylock(&conf->cache_size_mutex)) {
7438 		ret = 0;
7439 		while (ret < sc->nr_to_scan &&
7440 		       conf->max_nr_stripes > conf->min_nr_stripes) {
7441 			if (drop_one_stripe(conf) == 0) {
7442 				ret = SHRINK_STOP;
7443 				break;
7444 			}
7445 			ret++;
7446 		}
7447 		mutex_unlock(&conf->cache_size_mutex);
7448 	}
7449 	return ret;
7450 }
7451 
7452 static unsigned long raid5_cache_count(struct shrinker *shrink,
7453 				       struct shrink_control *sc)
7454 {
7455 	struct r5conf *conf = shrink->private_data;
7456 	int max_stripes = READ_ONCE(conf->max_nr_stripes);
7457 	int min_stripes = READ_ONCE(conf->min_nr_stripes);
7458 
7459 	if (max_stripes < min_stripes)
7460 		/* unlikely, but not impossible */
7461 		return 0;
7462 	return max_stripes - min_stripes;
7463 }
7464 
7465 static struct r5conf *setup_conf(struct mddev *mddev)
7466 {
7467 	struct r5conf *conf;
7468 	int raid_disk, memory, max_disks;
7469 	struct md_rdev *rdev;
7470 	struct disk_info *disk;
7471 	char pers_name[6];
7472 	int i;
7473 	int group_cnt;
7474 	struct r5worker_group *new_group;
7475 	int ret = -ENOMEM;
7476 
7477 	if (mddev->new_level != 5
7478 	    && mddev->new_level != 4
7479 	    && mddev->new_level != 6) {
7480 		pr_warn("md/raid:%s: raid level not set to 4/5/6 (%d)\n",
7481 			mdname(mddev), mddev->new_level);
7482 		return ERR_PTR(-EIO);
7483 	}
7484 	if ((mddev->new_level == 5
7485 	     && !algorithm_valid_raid5(mddev->new_layout)) ||
7486 	    (mddev->new_level == 6
7487 	     && !algorithm_valid_raid6(mddev->new_layout))) {
7488 		pr_warn("md/raid:%s: layout %d not supported\n",
7489 			mdname(mddev), mddev->new_layout);
7490 		return ERR_PTR(-EIO);
7491 	}
7492 	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
7493 		pr_warn("md/raid:%s: not enough configured devices (%d, minimum 4)\n",
7494 			mdname(mddev), mddev->raid_disks);
7495 		return ERR_PTR(-EINVAL);
7496 	}
7497 
7498 	if (!mddev->new_chunk_sectors ||
7499 	    (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
7500 	    !is_power_of_2(mddev->new_chunk_sectors)) {
7501 		pr_warn("md/raid:%s: invalid chunk size %d\n",
7502 			mdname(mddev), mddev->new_chunk_sectors << 9);
7503 		return ERR_PTR(-EINVAL);
7504 	}
7505 
7506 	conf = kzalloc_obj(struct r5conf);
7507 	if (conf == NULL)
7508 		goto abort;
7509 
7510 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
7511 	conf->stripe_size = DEFAULT_STRIPE_SIZE;
7512 	conf->stripe_shift = ilog2(DEFAULT_STRIPE_SIZE) - 9;
7513 	conf->stripe_sectors = DEFAULT_STRIPE_SIZE >> 9;
7514 #endif
7515 	INIT_LIST_HEAD(&conf->free_list);
7516 	INIT_LIST_HEAD(&conf->pending_list);
7517 	conf->pending_data = kzalloc_objs(struct r5pending_data, PENDING_IO_MAX);
7518 	if (!conf->pending_data)
7519 		goto abort;
7520 	for (i = 0; i < PENDING_IO_MAX; i++)
7521 		list_add(&conf->pending_data[i].sibling, &conf->free_list);
7522 	/* Don't enable multi-threading by default */
7523 	if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) {
7524 		conf->group_cnt = group_cnt;
7525 		conf->worker_cnt_per_group = 0;
7526 		conf->worker_groups = new_group;
7527 	} else
7528 		goto abort;
7529 	spin_lock_init(&conf->device_lock);
7530 	seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock);
7531 	mutex_init(&conf->cache_size_mutex);
7532 
7533 	init_waitqueue_head(&conf->wait_for_quiescent);
7534 	init_waitqueue_head(&conf->wait_for_stripe);
7535 	init_waitqueue_head(&conf->wait_for_reshape);
7536 	INIT_LIST_HEAD(&conf->handle_list);
7537 	INIT_LIST_HEAD(&conf->loprio_list);
7538 	INIT_LIST_HEAD(&conf->hold_list);
7539 	INIT_LIST_HEAD(&conf->delayed_list);
7540 	INIT_LIST_HEAD(&conf->bitmap_list);
7541 	init_llist_head(&conf->released_stripes);
7542 	atomic_set(&conf->active_stripes, 0);
7543 	atomic_set(&conf->preread_active_stripes, 0);
7544 	atomic_set(&conf->active_aligned_reads, 0);
7545 	spin_lock_init(&conf->pending_bios_lock);
7546 	conf->batch_bio_dispatch = true;
7547 	rdev_for_each(rdev, mddev) {
7548 		if (test_bit(Journal, &rdev->flags))
7549 			continue;
7550 		if (!bdev_rot(rdev->bdev)) {
7551 			conf->batch_bio_dispatch = false;
7552 			break;
7553 		}
7554 	}
7555 
7556 	conf->bypass_threshold = BYPASS_THRESHOLD;
7557 	conf->raid_disks = mddev->raid_disks;
7558 	if (mddev->reshape_position == MaxSector)
7559 		conf->previous_raid_disks = mddev->raid_disks;
7560 	else
7561 		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
7562 	max_disks = max(conf->raid_disks, conf->previous_raid_disks);
7563 
7564 	conf->disks = kzalloc_objs(struct disk_info, max_disks);
7565 
7566 	if (!conf->disks)
7567 		goto abort;
7568 
7569 	for (i = 0; i < max_disks; i++) {
7570 		conf->disks[i].extra_page = alloc_page(GFP_KERNEL);
7571 		if (!conf->disks[i].extra_page)
7572 			goto abort;
7573 	}
7574 
7575 	ret = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
7576 	if (ret)
7577 		goto abort;
7578 	conf->mddev = mddev;
7579 
7580 	ret = -ENOMEM;
7581 	conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL);
7582 	if (!conf->stripe_hashtbl)
7583 		goto abort;
7584 
7585 	/* We init hash_locks[0] separately so that it can be used
7586 	 * as the reference lock in the spin_lock_nest_lock() call
7587 	 * in lock_all_device_hash_locks_irq in order to convince
7588 	 * lockdep that we know what we are doing.
7589 	 */
7590 	spin_lock_init(conf->hash_locks);
7591 	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
7592 		spin_lock_init(conf->hash_locks + i);
7593 
7594 	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
7595 		INIT_LIST_HEAD(conf->inactive_list + i);
7596 
7597 	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
7598 		INIT_LIST_HEAD(conf->temp_inactive_list + i);
7599 
7600 	atomic_set(&conf->r5c_cached_full_stripes, 0);
7601 	INIT_LIST_HEAD(&conf->r5c_full_stripe_list);
7602 	atomic_set(&conf->r5c_cached_partial_stripes, 0);
7603 	INIT_LIST_HEAD(&conf->r5c_partial_stripe_list);
7604 	atomic_set(&conf->r5c_flushing_full_stripes, 0);
7605 	atomic_set(&conf->r5c_flushing_partial_stripes, 0);
7606 
7607 	conf->level = mddev->new_level;
7608 	conf->chunk_sectors = mddev->new_chunk_sectors;
7609 	ret = raid5_alloc_percpu(conf);
7610 	if (ret)
7611 		goto abort;
7612 
7613 	pr_debug("raid456: run(%s) called.\n", mdname(mddev));
7614 
7615 	ret = -EIO;
7616 	rdev_for_each(rdev, mddev) {
7617 		raid_disk = rdev->raid_disk;
7618 		if (raid_disk >= max_disks
7619 		    || raid_disk < 0 || test_bit(Journal, &rdev->flags))
7620 			continue;
7621 		disk = conf->disks + raid_disk;
7622 
7623 		if (test_bit(Replacement, &rdev->flags)) {
7624 			if (disk->replacement)
7625 				goto abort;
7626 			disk->replacement = rdev;
7627 		} else {
7628 			if (disk->rdev)
7629 				goto abort;
7630 			disk->rdev = rdev;
7631 		}
7632 
7633 		if (test_bit(In_sync, &rdev->flags)) {
7634 			pr_info("md/raid:%s: device %pg operational as raid disk %d\n",
7635 				mdname(mddev), rdev->bdev, raid_disk);
7636 		} else if (rdev->saved_raid_disk != raid_disk)
7637 			/* Cannot rely on bitmap to complete recovery */
7638 			conf->fullsync = 1;
7639 	}
7640 
7641 	conf->level = mddev->new_level;
7642 	if (conf->level == 6) {
7643 		conf->max_degraded = 2;
7644 		if (raid6_call.xor_syndrome)
7645 			conf->rmw_level = PARITY_ENABLE_RMW;
7646 		else
7647 			conf->rmw_level = PARITY_DISABLE_RMW;
7648 	} else {
7649 		conf->max_degraded = 1;
7650 		conf->rmw_level = PARITY_ENABLE_RMW;
7651 	}
7652 	conf->algorithm = mddev->new_layout;
7653 	conf->reshape_progress = mddev->reshape_position;
7654 	if (conf->reshape_progress != MaxSector) {
7655 		conf->prev_chunk_sectors = mddev->chunk_sectors;
7656 		conf->prev_algo = mddev->layout;
7657 	} else {
7658 		conf->prev_chunk_sectors = conf->chunk_sectors;
7659 		conf->prev_algo = conf->algorithm;
7660 	}
7661 
7662 	conf->min_nr_stripes = NR_STRIPES;
7663 	if (mddev->reshape_position != MaxSector) {
7664 		int stripes = max_t(int,
7665 			((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4,
7666 			((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4);
7667 		conf->min_nr_stripes = max(NR_STRIPES, stripes);
7668 		if (conf->min_nr_stripes != NR_STRIPES)
7669 			pr_info("md/raid:%s: force stripe size %d for reshape\n",
7670 				mdname(mddev), conf->min_nr_stripes);
7671 	}
7672 	memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
7673 		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
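	/*
	 * Rough example: with 8 devices and 4K pages, the default 256
	 * stripes pin about 8MB of data pages plus the stripe_head and
	 * bio structures.
	 */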
7674 	atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
7675 	if (grow_stripes(conf, conf->min_nr_stripes)) {
7676 		pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n",
7677 			mdname(mddev), memory);
7678 		ret = -ENOMEM;
7679 		goto abort;
7680 	} else
7681 		pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory);
7682 	/*
7683 	 * Losing a stripe head costs more than the time to refill it;
7684 	 * it reduces the queue depth and so can hurt throughput.
7685 	 * So set it rather large, scaled by the number of devices.
7686 	 */
7687 	conf->shrinker = shrinker_alloc(0, "md-raid5:%s", mdname(mddev));
7688 	if (!conf->shrinker) {
7689 		ret = -ENOMEM;
7690 		pr_warn("md/raid:%s: couldn't allocate shrinker.\n",
7691 			mdname(mddev));
7692 		goto abort;
7693 	}
7694 
7695 	conf->shrinker->seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
7696 	conf->shrinker->scan_objects = raid5_cache_scan;
7697 	conf->shrinker->count_objects = raid5_cache_count;
7698 	conf->shrinker->batch = 128;
7699 	conf->shrinker->private_data = conf;
7700 
7701 	shrinker_register(conf->shrinker);
7702 
7703 	sprintf(pers_name, "raid%d", mddev->new_level);
7704 	rcu_assign_pointer(conf->thread,
7705 			   md_register_thread(raid5d, mddev, pers_name));
7706 	if (!conf->thread) {
7707 		pr_warn("md/raid:%s: couldn't allocate thread.\n",
7708 			mdname(mddev));
7709 		ret = -ENOMEM;
7710 		goto abort;
7711 	}
7712 
7713 	return conf;
7714 
7715  abort:
7716 	if (conf)
7717 		free_conf(conf);
7718 	return ERR_PTR(ret);
7719 }
7720 
7721 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
7722 {
7723 	switch (algo) {
7724 	case ALGORITHM_PARITY_0:
7725 		if (raid_disk < max_degraded)
7726 			return 1;
7727 		break;
7728 	case ALGORITHM_PARITY_N:
7729 		if (raid_disk >= raid_disks - max_degraded)
7730 			return 1;
7731 		break;
7732 	case ALGORITHM_PARITY_0_6:
7733 		if (raid_disk == 0 ||
7734 		    raid_disk == raid_disks - 1)
7735 			return 1;
7736 		break;
7737 	case ALGORITHM_LEFT_ASYMMETRIC_6:
7738 	case ALGORITHM_RIGHT_ASYMMETRIC_6:
7739 	case ALGORITHM_LEFT_SYMMETRIC_6:
7740 	case ALGORITHM_RIGHT_SYMMETRIC_6:
7741 		if (raid_disk == raid_disks - 1)
7742 			return 1;
7743 	}
7744 	return 0;
7745 }
7746 
7747 static int raid5_create_ctx_pool(struct r5conf *conf)
7748 {
7749 	struct stripe_request_ctx *ctx;
7750 	int size;
7751 
7752 	if (mddev_is_dm(conf->mddev))
7753 		size = BITS_TO_LONGS(RAID5_MAX_REQ_STRIPES);
7754 	else
7755 		size = BITS_TO_LONGS(
7756 			queue_max_hw_sectors(conf->mddev->gendisk->queue) >>
7757 			RAID5_STRIPE_SHIFT(conf));
7758 
7759 	conf->ctx_size = struct_size(ctx, sectors_to_do, size);
7760 	conf->ctx_pool = mempool_create_kmalloc_pool(NR_RAID_BIOS,
7761 						     conf->ctx_size);
7762 
7763 	return conf->ctx_pool ? 0 : -ENOMEM;
7764 }
7765 
7766 static int raid5_set_limits(struct mddev *mddev)
7767 {
7768 	struct r5conf *conf = mddev->private;
7769 	struct queue_limits lim;
7770 	int data_disks, stripe;
7771 	struct md_rdev *rdev;
7772 
7773 	/*
7774 	 * The read-ahead size must cover two whole stripes, which is
7775 	 * 2 * (number of data disks) * (chunk size).
7776 	 */
7777 	data_disks = conf->previous_raid_disks - conf->max_degraded;
7778 
7779 	/*
7780 	 * We can only discard a whole stripe. It doesn't make sense to
7781 	 * discard data disk but write parity disk
7782 	 * discard the data disks but still write the parity disk.
7783 	stripe = roundup_pow_of_two(data_disks * (mddev->chunk_sectors << 9));
7784 
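	/*
	 * e.g. 4 data disks with 512K chunks give a 2M stripe; 3 data disks
	 * with 512K chunks give 1.5M, rounded up here to a 2M granularity.
	 */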
7785 	md_init_stacking_limits(&lim);
7786 	lim.logical_block_size = mddev->logical_block_size;
7787 	lim.io_min = mddev->chunk_sectors << 9;
7788 	lim.io_opt = lim.io_min * (conf->raid_disks - conf->max_degraded);
7789 	lim.chunk_sectors = lim.io_opt >> 9;
7790 	lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;
7791 	lim.discard_granularity = stripe;
7792 	lim.max_write_zeroes_sectors = 0;
7793 	lim.max_hw_wzeroes_unmap_sectors = 0;
7794 	mddev_stack_rdev_limits(mddev, &lim, 0);
7795 	rdev_for_each(rdev, mddev)
7796 		queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset,
7797 				mddev->gendisk->disk_name);
7798 
7799 	/*
7800 	 * Zeroing is required for discard, otherwise data could be lost.
7801 	 *
7802 	 * Consider a scenario: discard a stripe (the stripe could be
7803 	 * inconsistent if discard_zeroes_data is 0); write one disk of the
7804 	 * stripe (the stripe could be inconsistent again depending on which
7805 	 * disks are used to calculate parity); the disk is broken; the stripe
7806 	 * data of this disk is lost.
7807 	 *
7808 	 * We only allow DISCARD if the sysadmin has confirmed that only safe
7809 	 * devices are in use by setting a module parameter.  A better idea
7810 	 * might be to turn DISCARD into WRITE_ZEROES requests, as that is
7811 	 * required to be safe.
7812 	 */
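	/*
	 * The opt-in is the devices_handle_discard_safely module parameter,
	 * e.g. "raid456.devices_handle_discard_safely=Y" on the kernel
	 * command line (example invocation).
	 */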
7813 	if (!devices_handle_discard_safely ||
7814 	    lim.max_discard_sectors < (stripe >> 9) ||
7815 	    lim.discard_granularity < stripe)
7816 		lim.max_hw_discard_sectors = 0;
7817 
7818 	/*
7819 	 * Requests require having a bitmap for each stripe.
7820 	 * Limit the max sectors based on this.
7821 	 */
7822 	lim.max_hw_sectors = RAID5_MAX_REQ_STRIPES << RAID5_STRIPE_SHIFT(conf);
7823 	if ((lim.max_hw_sectors << 9) < lim.io_opt)
7824 		lim.max_hw_sectors = lim.io_opt >> 9;
7825 
7826 	/* No restrictions on the number of segments in the request */
7827 	lim.max_segments = USHRT_MAX;
7828 
7829 	return queue_limits_set(mddev->gendisk->queue, &lim);
7830 }
7831 
7832 static int raid5_run(struct mddev *mddev)
7833 {
7834 	struct r5conf *conf;
7835 	int dirty_parity_disks = 0;
7836 	struct md_rdev *rdev;
7837 	struct md_rdev *journal_dev = NULL;
7838 	sector_t reshape_offset = 0;
7839 	int i;
7840 	long long min_offset_diff = 0;
7841 	int first = 1;
7842 	int ret = -EIO;
7843 
7844 	if (mddev->resync_offset != MaxSector)
7845 		pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
7846 			  mdname(mddev));
7847 
7848 	rdev_for_each(rdev, mddev) {
7849 		long long diff;
7850 
7851 		if (test_bit(Journal, &rdev->flags)) {
7852 			journal_dev = rdev;
7853 			continue;
7854 		}
7855 		if (rdev->raid_disk < 0)
7856 			continue;
7857 		diff = (rdev->new_data_offset - rdev->data_offset);
7858 		if (first) {
7859 			min_offset_diff = diff;
7860 			first = 0;
7861 		} else if (mddev->reshape_backwards &&
7862 			 diff < min_offset_diff)
7863 			min_offset_diff = diff;
7864 		else if (!mddev->reshape_backwards &&
7865 			 diff > min_offset_diff)
7866 			min_offset_diff = diff;
7867 	}
7868 
7869 	if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) &&
7870 	    (mddev->bitmap_info.offset || mddev->bitmap_info.file)) {
7871 		pr_notice("md/raid:%s: array cannot have both journal and bitmap\n",
7872 			  mdname(mddev));
7873 		return -EINVAL;
7874 	}
7875 
7876 	if (mddev->reshape_position != MaxSector) {
7877 		/* Check that we can continue the reshape.
7878 		 * Difficulties arise if the stripe we would write to
7879 		 * next is at or after the stripe we would read from next.
7880 		 * For a reshape that changes the number of devices, this
7881 		 * is only possible for a very short time, and mdadm makes
7882 		 * sure that time appears to have passed before assembling
7883 		 * the array.  So we fail if that time hasn't passed.
7884 		 * For a reshape that keeps the number of devices the same
7885 		 * mdadm must be monitoring the reshape and keeping the
7886 		 * critical areas read-only and backed up.  It will start
7887 		 * the array in read-only mode, so we check for that.
7888 		 */
7889 		sector_t here_new, here_old;
7890 		int old_disks;
7891 		int max_degraded = (mddev->level == 6 ? 2 : 1);
7892 		int chunk_sectors;
7893 		int new_data_disks;
7894 
7895 		if (journal_dev) {
7896 			pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n",
7897 				mdname(mddev));
7898 			return -EINVAL;
7899 		}
7900 
7901 		if (mddev->new_level != mddev->level) {
7902 			pr_warn("md/raid:%s: unsupported reshape required - aborting.\n",
7903 				mdname(mddev));
7904 			return -EINVAL;
7905 		}
7906 		old_disks = mddev->raid_disks - mddev->delta_disks;
7907 		/* reshape_position must be on a new-stripe boundary, and one
7908 		 * further up in new geometry must map after here in old
7909 		 * geometry.
7910 		 * If the chunk sizes are different, then as we perform reshape
7911 		 * in units of the largest of the two, reshape_position needs to
7912 		 * be a multiple of the largest chunk size times new data disks.
7913 		 */
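		/*
		 * For instance, growing a RAID-5 from 4 to 6 devices with
		 * 512K (1024-sector) chunks leaves 5 data disks, so
		 * reshape_position must be a multiple of 5120 sectors.
		 */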
7914 		here_new = mddev->reshape_position;
7915 		chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
7916 		new_data_disks = mddev->raid_disks - max_degraded;
7917 		if (sector_div(here_new, chunk_sectors * new_data_disks)) {
7918 			pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
7919 				mdname(mddev));
7920 			return -EINVAL;
7921 		}
7922 		reshape_offset = here_new * chunk_sectors;
7923 		/* here_new is the stripe we will write to */
7924 		here_old = mddev->reshape_position;
7925 		sector_div(here_old, chunk_sectors * (old_disks-max_degraded));
7926 		/* here_old is the first stripe that we might need to read
7927 		 * from */
7928 		if (mddev->delta_disks == 0) {
7929 			/* We cannot be sure it is safe to start an in-place
7930 			 * reshape.  It is only safe if user-space is monitoring
7931 			 * and taking constant backups.
7932 			 * mdadm always starts a situation like this in
7933 			 * readonly mode so it can take control before
7934 			 * allowing any writes.  So just check for that.
7935 			 */
7936 			if (abs(min_offset_diff) >= mddev->chunk_sectors &&
7937 			    abs(min_offset_diff) >= mddev->new_chunk_sectors)
7938 				/* not really in-place - so OK */;
7939 			else if (mddev->ro == 0) {
7940 				pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n",
7941 					mdname(mddev));
7942 				return -EINVAL;
7943 			}
7944 		} else if (mddev->reshape_backwards
7945 		    ? (here_new * chunk_sectors + min_offset_diff <=
7946 		       here_old * chunk_sectors)
7947 		    : (here_new * chunk_sectors >=
7948 		       here_old * chunk_sectors + (-min_offset_diff))) {
7949 			/* Reading from the same stripe as writing to - bad */
7950 			pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n",
7951 				mdname(mddev));
7952 			return -EINVAL;
7953 		}
7954 		pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
7955 		/* OK, we should be able to continue; */
7956 	} else {
7957 		BUG_ON(mddev->level != mddev->new_level);
7958 		BUG_ON(mddev->layout != mddev->new_layout);
7959 		BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
7960 		BUG_ON(mddev->delta_disks != 0);
7961 	}
7962 
7963 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags) &&
7964 	    test_bit(MD_HAS_PPL, &mddev->flags)) {
7965 		pr_warn("md/raid:%s: using journal device and PPL not allowed - disabling PPL\n",
7966 			mdname(mddev));
7967 		clear_bit(MD_HAS_PPL, &mddev->flags);
7968 		clear_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags);
7969 	}
7970 
7971 	if (mddev->private == NULL)
7972 		conf = setup_conf(mddev);
7973 	else
7974 		conf = mddev->private;
7975 
7976 	if (IS_ERR(conf))
7977 		return PTR_ERR(conf);
7978 
7979 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
7980 		if (!journal_dev) {
7981 			pr_warn("md/raid:%s: journal disk is missing, force array readonly\n",
7982 				mdname(mddev));
7983 			mddev->ro = 1;
7984 			set_disk_ro(mddev->gendisk, 1);
7985 		} else if (mddev->resync_offset == MaxSector)
7986 			set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
7987 	}
7988 
7989 	conf->min_offset_diff = min_offset_diff;
7990 	rcu_assign_pointer(mddev->thread, conf->thread);
7991 	rcu_assign_pointer(conf->thread, NULL);
7992 	mddev->private = conf;
7993 
7994 	for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
7995 	     i++) {
7996 		rdev = conf->disks[i].rdev;
7997 		if (!rdev)
7998 			continue;
7999 		if (conf->disks[i].replacement &&
8000 		    conf->reshape_progress != MaxSector) {
8001 			/* replacements and reshape simply do not mix. */
8002 			pr_warn("md: cannot handle concurrent replacement and reshape.\n");
8003 			goto abort;
8004 		}
8005 		if (test_bit(In_sync, &rdev->flags))
8006 			continue;
8007 		/* This disk is not fully in-sync.  However if it
8008 		 * just stored parity (beyond the recovery_offset),
8009 		 * then we don't need to be concerned about the
8010 		 * array being dirty.
8011 		 * When reshape goes 'backwards', we never have
8012 		 * partially completed devices, so we only need
8013 		 * to worry about reshape going forwards.
8014 		 */
8015 		/* Hack because v0.91 doesn't store recovery_offset properly. */
8016 		if (mddev->major_version == 0 &&
8017 		    mddev->minor_version > 90)
8018 			rdev->recovery_offset = reshape_offset;
8019 
8020 		if (rdev->recovery_offset < reshape_offset) {
8021 			/* We need to check old and new layout */
8022 			if (!only_parity(rdev->raid_disk,
8023 					 conf->algorithm,
8024 					 conf->raid_disks,
8025 					 conf->max_degraded))
8026 				continue;
8027 		}
8028 		if (!only_parity(rdev->raid_disk,
8029 				 conf->prev_algo,
8030 				 conf->previous_raid_disks,
8031 				 conf->max_degraded))
8032 			continue;
8033 		dirty_parity_disks++;
8034 	}
8035 
8036 	/*
8037 	 * 0 for a fully functional array, 1 or 2 for a degraded array.
8038 	 */
8039 	mddev->degraded = raid5_calc_degraded(conf);
8040 
8041 	if (has_failed(conf)) {
8042 		pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
8043 			mdname(mddev), mddev->degraded, conf->raid_disks);
8044 		goto abort;
8045 	}
8046 
8047 	/* device size must be a multiple of chunk size */
8048 	mddev->dev_sectors &= ~((sector_t)mddev->chunk_sectors - 1);
8049 	mddev->resync_max_sectors = mddev->dev_sectors;
8050 
8051 	if (mddev->degraded > dirty_parity_disks &&
8052 	    mddev->resync_offset != MaxSector) {
8053 		if (test_bit(MD_HAS_PPL, &mddev->flags))
8054 			pr_crit("md/raid:%s: starting dirty degraded array with PPL.\n",
8055 				mdname(mddev));
8056 		else if (mddev->ok_start_degraded)
8057 			pr_crit("md/raid:%s: starting dirty degraded array - data corruption possible.\n",
8058 				mdname(mddev));
8059 		else {
8060 			pr_crit("md/raid:%s: cannot start dirty degraded array.\n",
8061 				mdname(mddev));
8062 			goto abort;
8063 		}
8064 	}
8065 
8066 	pr_info("md/raid:%s: raid level %d active with %d out of %d devices, algorithm %d\n",
8067 		mdname(mddev), conf->level,
8068 		mddev->raid_disks-mddev->degraded, mddev->raid_disks,
8069 		mddev->new_layout);
8070 
8071 	print_raid5_conf(conf);
8072 
8073 	if (conf->reshape_progress != MaxSector) {
8074 		conf->reshape_safe = conf->reshape_progress;
8075 		atomic_set(&conf->reshape_stripes, 0);
8076 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8077 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8078 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8079 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8080 	}
8081 
8082 	/* Ok, everything is just fine now */
8083 	if (mddev->to_remove == &raid5_attrs_group)
8084 		mddev->to_remove = NULL;
8085 	else if (mddev->kobj.sd &&
8086 	    sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
8087 		pr_warn("raid5: failed to create sysfs attributes for %s\n",
8088 			mdname(mddev));
8089 	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
8090 
8091 	if (!mddev_is_dm(mddev)) {
8092 		ret = raid5_set_limits(mddev);
8093 		if (ret)
8094 			goto abort;
8095 	}
8096 
8097 	ret = raid5_create_ctx_pool(conf);
8098 	if (ret)
8099 		goto abort;
8100 
8101 	ret = log_init(conf, journal_dev, raid5_has_ppl(conf));
8102 	if (ret)
8103 		goto abort;
8104 
8105 	return 0;
8106 abort:
8107 	md_unregister_thread(mddev, &mddev->thread);
8108 	print_raid5_conf(conf);
8109 	free_conf(conf);
8110 	mddev->private = NULL;
8111 	pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
8112 	return ret;
8113 }
8114 
8115 static void raid5_free(struct mddev *mddev, void *priv)
8116 {
8117 	struct r5conf *conf = priv;
8118 
8119 	free_conf(conf);
8120 	mddev->to_remove = &raid5_attrs_group;
8121 }
8122 
8123 static void raid5_status(struct seq_file *seq, struct mddev *mddev)
8124 {
8125 	struct r5conf *conf = mddev->private;
8126 	int i;
8127 
8128 	lockdep_assert_held(&mddev->lock);
8129 
8130 	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
8131 		conf->chunk_sectors / 2, mddev->layout);
8132 	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
8133 	for (i = 0; i < conf->raid_disks; i++) {
8134 		struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
8135 
8136 		seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
8137 	}
8138 	seq_printf (seq, "]");
8139 }
8140 
8141 static void print_raid5_conf(struct r5conf *conf)
8142 {
8143 	struct md_rdev *rdev;
8144 	int i;
8145 
8146 	pr_debug("RAID conf printout:\n");
8147 	if (!conf) {
8148 		pr_debug("(conf==NULL)\n");
8149 		return;
8150 	}
8151 	pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level,
8152 	       conf->raid_disks,
8153 	       conf->raid_disks - conf->mddev->degraded);
8154 
8155 	for (i = 0; i < conf->raid_disks; i++) {
8156 		rdev = conf->disks[i].rdev;
8157 		if (rdev)
8158 			pr_debug(" disk %d, o:%d, dev:%pg\n",
8159 			       i, !test_bit(Faulty, &rdev->flags),
8160 			       rdev->bdev);
8161 	}
8162 }
8163 
8164 static int raid5_spare_active(struct mddev *mddev)
8165 {
8166 	int i;
8167 	struct r5conf *conf = mddev->private;
8168 	struct md_rdev *rdev, *replacement;
8169 	int count = 0;
8170 	unsigned long flags;
8171 
8172 	for (i = 0; i < conf->raid_disks; i++) {
8173 		rdev = conf->disks[i].rdev;
8174 		replacement = conf->disks[i].replacement;
8175 		if (replacement
8176 		    && replacement->recovery_offset == MaxSector
8177 		    && !test_bit(Faulty, &replacement->flags)
8178 		    && !test_and_set_bit(In_sync, &replacement->flags)) {
8179 			/* Replacement has just become active. */
8180 			if (!rdev
8181 			    || !test_and_clear_bit(In_sync, &rdev->flags))
8182 				count++;
8183 			if (rdev) {
8184 				/* The replaced device is not technically faulty,
8185 				 * but we need to be sure it gets removed
8186 				 * and never re-added.
8187 				 */
8188 				set_bit(Faulty, &rdev->flags);
8189 				sysfs_notify_dirent_safe(
8190 					rdev->sysfs_state);
8191 			}
8192 			sysfs_notify_dirent_safe(replacement->sysfs_state);
8193 		} else if (rdev
8194 		    && rdev->recovery_offset == MaxSector
8195 		    && !test_bit(Faulty, &rdev->flags)
8196 		    && !test_and_set_bit(In_sync, &rdev->flags)) {
8197 			count++;
8198 			sysfs_notify_dirent_safe(rdev->sysfs_state);
8199 		}
8200 	}
8201 	spin_lock_irqsave(&conf->device_lock, flags);
8202 	mddev->degraded = raid5_calc_degraded(conf);
8203 	spin_unlock_irqrestore(&conf->device_lock, flags);
8204 	print_raid5_conf(conf);
8205 	return count;
8206 }
8207 
8208 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
8209 {
8210 	struct r5conf *conf = mddev->private;
8211 	int err = 0;
8212 	int number = rdev->raid_disk;
8213 	struct md_rdev **rdevp;
8214 	struct disk_info *p;
8215 	struct md_rdev *tmp;
8216 
8217 	print_raid5_conf(conf);
8218 	if (test_bit(Journal, &rdev->flags) && conf->log) {
8219 		/*
8220 		 * We can't wait for pending writes here, as this is called from
8221 		 * raid5d and waiting would deadlock.
8222 		 * neilb: there is no locking against new writes here,
8223 		 * so this cannot be safe.
8224 		 */
8225 		if (atomic_read(&conf->active_stripes) ||
8226 		    atomic_read(&conf->r5c_cached_full_stripes) ||
8227 		    atomic_read(&conf->r5c_cached_partial_stripes)) {
8228 			return -EBUSY;
8229 		}
8230 		log_exit(conf);
8231 		return 0;
8232 	}
8233 	if (unlikely(number >= conf->pool_size))
8234 		return 0;
8235 	p = conf->disks + number;
8236 	if (rdev == p->rdev)
8237 		rdevp = &p->rdev;
8238 	else if (rdev == p->replacement)
8239 		rdevp = &p->replacement;
8240 	else
8241 		return 0;
8242 
8243 	if (number >= conf->raid_disks &&
8244 	    conf->reshape_progress == MaxSector)
8245 		clear_bit(In_sync, &rdev->flags);
8246 
8247 	if (test_bit(In_sync, &rdev->flags) ||
8248 	    atomic_read(&rdev->nr_pending)) {
8249 		err = -EBUSY;
8250 		goto abort;
8251 	}
8252 	/* Only remove non-faulty devices if recovery
8253 	 * isn't possible.
8254 	 */
8255 	if (!test_bit(Faulty, &rdev->flags) &&
8256 	    !has_failed(conf) &&
8257 	    (!p->replacement || p->replacement == rdev) &&
8258 	    number < conf->raid_disks) {
8259 		err = -EBUSY;
8260 		goto abort;
8261 	}
8262 	WRITE_ONCE(*rdevp, NULL);
8263 	if (!err) {
8264 		err = log_modify(conf, rdev, false);
8265 		if (err)
8266 			goto abort;
8267 	}
8268 
8269 	tmp = p->replacement;
8270 	if (tmp) {
8271 		/* We must have just cleared 'rdev' */
8272 		WRITE_ONCE(p->rdev, tmp);
8273 		clear_bit(Replacement, &tmp->flags);
8274 		WRITE_ONCE(p->replacement, NULL);
8275 
8276 		if (!err)
8277 			err = log_modify(conf, tmp, true);
8278 	}
8279 
8280 	clear_bit(WantReplacement, &rdev->flags);
8281 abort:
8282 
8283 	print_raid5_conf(conf);
8284 	return err;
8285 }
8286 
8287 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
8288 {
8289 	struct r5conf *conf = mddev->private;
8290 	int ret, err = -EEXIST;
8291 	int disk;
8292 	struct disk_info *p;
8293 	struct md_rdev *tmp;
8294 	int first = 0;
8295 	int last = conf->raid_disks - 1;
8296 
8297 	if (test_bit(Journal, &rdev->flags)) {
8298 		if (conf->log)
8299 			return -EBUSY;
8300 
8301 		rdev->raid_disk = 0;
8302 		/*
8303 		 * The array is in readonly mode if the journal is missing, so no
8304 		 * write requests are running; we should be safe.
8305 		 */
8306 		ret = log_init(conf, rdev, false);
8307 		if (ret)
8308 			return ret;
8309 
8310 		ret = r5l_start(conf->log);
8311 		if (ret)
8312 			return ret;
8313 
8314 		return 0;
8315 	}
8316 
8317 	if (rdev->saved_raid_disk < 0 && has_failed(conf))
8318 		/* no point adding a device */
8319 		return -EINVAL;
8320 
8321 	if (rdev->raid_disk >= 0)
8322 		first = last = rdev->raid_disk;
8323 
8324 	/*
8325 	 * find the disk ... but prefer rdev->saved_raid_disk
8326 	 * if possible.
8327 	 */
8328 	if (rdev->saved_raid_disk >= first &&
8329 	    rdev->saved_raid_disk <= last &&
8330 	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
8331 		first = rdev->saved_raid_disk;
8332 
8333 	for (disk = first; disk <= last; disk++) {
8334 		p = conf->disks + disk;
8335 		if (p->rdev == NULL) {
8336 			clear_bit(In_sync, &rdev->flags);
8337 			rdev->raid_disk = disk;
8338 			if (rdev->saved_raid_disk != disk)
8339 				conf->fullsync = 1;
8340 			WRITE_ONCE(p->rdev, rdev);
8341 
8342 			err = log_modify(conf, rdev, true);
8343 
8344 			goto out;
8345 		}
8346 	}
8347 	for (disk = first; disk <= last; disk++) {
8348 		p = conf->disks + disk;
8349 		tmp = p->rdev;
8350 		if (test_bit(WantReplacement, &tmp->flags) &&
8351 		    mddev->reshape_position == MaxSector &&
8352 		    p->replacement == NULL) {
8353 			clear_bit(In_sync, &rdev->flags);
8354 			set_bit(Replacement, &rdev->flags);
8355 			rdev->raid_disk = disk;
8356 			err = 0;
8357 			conf->fullsync = 1;
8358 			WRITE_ONCE(p->replacement, rdev);
8359 			break;
8360 		}
8361 	}
8362 out:
8363 	print_raid5_conf(conf);
8364 	return err;
8365 }
8366 
8367 static int raid5_resize(struct mddev *mddev, sector_t sectors)
8368 {
8369 	/* no resync is happening, and there is enough space
8370 	 * on all devices, so we can resize.
8371 	 * We need to make sure resync covers any new space.
8372 	 * If the array is shrinking we should possibly wait until
8373 	 * any io in the removed space completes, but it hardly seems
8374 	 * worth it.
8375 	 */
8376 	sector_t newsize;
8377 	struct r5conf *conf = mddev->private;
8378 
8379 	if (raid5_has_log(conf) || raid5_has_ppl(conf))
8380 		return -EINVAL;
8381 	sectors &= ~((sector_t)conf->chunk_sectors - 1);
8382 	newsize = raid5_size(mddev, sectors, mddev->raid_disks);
8383 	if (mddev->external_size &&
8384 	    mddev->array_sectors > newsize)
8385 		return -EINVAL;
8386 
8387 	if (md_bitmap_enabled(mddev, false)) {
8388 		int ret = mddev->bitmap_ops->resize(mddev, sectors, 0);
8389 
8390 		if (ret)
8391 			return ret;
8392 	}
8393 
8394 	md_set_array_sectors(mddev, newsize);
8395 	if (sectors > mddev->dev_sectors &&
8396 	    mddev->resync_offset > mddev->dev_sectors) {
8397 		mddev->resync_offset = mddev->dev_sectors;
8398 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8399 	}
8400 	mddev->dev_sectors = sectors;
8401 	mddev->resync_max_sectors = sectors;
8402 	return 0;
8403 }
8404 
8405 static int check_stripe_cache(struct mddev *mddev)
8406 {
8407 	/* Can only proceed if there are plenty of stripe_heads.
8408 	 * We need a minimum of one full stripe, and for sensible progress
8409 	 * it is best to have about 4 times that.
8410 	 * If we require 4 times, then the default 256 4K stripe_heads will
8411 	 * allow for chunk sizes up to 256K, which is probably OK.
8412 	 * If the chunk size is greater, user-space should request more
8413 	 * stripe_heads first.
8414 	 */
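	/*
	 * e.g. a reshape with a 1M chunk and 4K stripes needs at least
	 * 4 * 256 = 1024 stripe_heads, so stripe_cache_size must be raised
	 * first if the cache is smaller than that.
	 */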
8415 	struct r5conf *conf = mddev->private;
8416 	if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4
8417 	    > conf->min_nr_stripes ||
8418 	    ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4
8419 	    > conf->min_nr_stripes) {
8420 		pr_warn("md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
8421 			mdname(mddev),
8422 			((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
8423 			 / RAID5_STRIPE_SIZE(conf))*4);
8424 		return 0;
8425 	}
8426 	return 1;
8427 }
8428 
8429 static int check_reshape(struct mddev *mddev)
8430 {
8431 	struct r5conf *conf = mddev->private;
8432 
8433 	if (raid5_has_log(conf) || raid5_has_ppl(conf))
8434 		return -EINVAL;
8435 	if (mddev->delta_disks == 0 &&
8436 	    mddev->new_layout == mddev->layout &&
8437 	    mddev->new_chunk_sectors == mddev->chunk_sectors)
8438 		return 0; /* nothing to do */
8439 	if (has_failed(conf))
8440 		return -EINVAL;
8441 	if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) {
8442 		/* We might be able to shrink, but the devices must
8443 		 * be made bigger first.
8444 		 * For raid6, 4 is the minimum size.
8445 		 * Otherwise 2 is the minimum
8446 		 * Otherwise 2 is the minimum.
8447 		int min = 2;
8448 		if (mddev->level == 6)
8449 			min = 4;
8450 		if (mddev->raid_disks + mddev->delta_disks < min)
8451 			return -EINVAL;
8452 	}
8453 
8454 	if (!check_stripe_cache(mddev))
8455 		return -ENOSPC;
8456 
8457 	if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
8458 	    mddev->delta_disks > 0)
8459 		if (resize_chunks(conf,
8460 				  conf->previous_raid_disks
8461 				  + max(0, mddev->delta_disks),
8462 				  max(mddev->new_chunk_sectors,
8463 				      mddev->chunk_sectors)
8464 			    ) < 0)
8465 			return -ENOMEM;
8466 
8467 	if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size)
8468 		return 0; /* never bother to shrink */
8469 	return resize_stripes(conf, (conf->previous_raid_disks
8470 				     + mddev->delta_disks));
8471 }
8472 
8473 static int raid5_start_reshape(struct mddev *mddev)
8474 {
8475 	struct r5conf *conf = mddev->private;
8476 	struct md_rdev *rdev;
8477 	int spares = 0;
8478 	int i;
8479 	unsigned long flags;
8480 
8481 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
8482 		return -EBUSY;
8483 
8484 	if (!check_stripe_cache(mddev))
8485 		return -ENOSPC;
8486 
8487 	if (has_failed(conf))
8488 		return -EINVAL;
8489 
8490 	/* raid5 can't handle concurrent reshape and recovery */
8491 	if (mddev->resync_offset < MaxSector)
8492 		return -EBUSY;
8493 	for (i = 0; i < conf->raid_disks; i++)
8494 		if (conf->disks[i].replacement)
8495 			return -EBUSY;
8496 
8497 	rdev_for_each(rdev, mddev) {
8498 		if (!test_bit(In_sync, &rdev->flags)
8499 		    && !test_bit(Faulty, &rdev->flags))
8500 			spares++;
8501 	}
8502 
8503 	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
8504 		/* Not enough devices even to make a degraded array
8505 		 * of that size
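		 * (e.g. adding two disks to a clean raid5 needs at least one
		 * usable spare; the reshape may start with up to max_degraded
		 * of the new slots still unpopulated)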
8506 		 */
8507 		return -EINVAL;
8508 
8509 	/* Refuse to reduce size of the array.  Any reductions in
8510 	 * array size must be through explicit setting of array_size
8511 	 * attribute.
8512 	 */
8513 	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
8514 	    < mddev->array_sectors) {
8515 		pr_warn("md/raid:%s: array size must be reduced before number of disks\n",
8516 			mdname(mddev));
8517 		return -EINVAL;
8518 	}
8519 
8520 	atomic_set(&conf->reshape_stripes, 0);
8521 	spin_lock_irq(&conf->device_lock);
8522 	write_seqcount_begin(&conf->gen_lock);
8523 	conf->previous_raid_disks = conf->raid_disks;
8524 	conf->raid_disks += mddev->delta_disks;
8525 	conf->prev_chunk_sectors = conf->chunk_sectors;
8526 	conf->chunk_sectors = mddev->new_chunk_sectors;
8527 	conf->prev_algo = conf->algorithm;
8528 	conf->algorithm = mddev->new_layout;
8529 	conf->generation++;
8530 	/* Code that selects data_offset needs to see the generation update
8531 	 * if reshape_progress has been set - so a memory barrier is needed.
8532 	 */
8533 	smp_mb();
8534 	if (mddev->reshape_backwards)
8535 		conf->reshape_progress = raid5_size(mddev, 0, 0);
8536 	else
8537 		conf->reshape_progress = 0;
8538 	conf->reshape_safe = conf->reshape_progress;
8539 	write_seqcount_end(&conf->gen_lock);
8540 	spin_unlock_irq(&conf->device_lock);
8541 
8542 	/* Now make sure any requests that proceeded on the assumption
8543 	 * the reshape wasn't running - like Discard or Read - have
8544 	 * completed.
8545 	 */
8546 	raid5_quiesce(mddev, true);
8547 	raid5_quiesce(mddev, false);
8548 
8549 	/* Add some new drives, as many as will fit.
8550 	 * We know there are enough to make the newly sized array work.
8551 	 * Don't add devices if we are reducing the number of
8552 	 * devices in the array.  This is because it is not possible
8553 	 * to correctly record the "partially reconstructed" state of
8554 	 * such devices during the reshape and confusion could result.
8555 	 */
8556 	if (mddev->delta_disks >= 0) {
8557 		rdev_for_each(rdev, mddev)
8558 			if (rdev->raid_disk < 0 &&
8559 			    !test_bit(Faulty, &rdev->flags)) {
8560 				if (raid5_add_disk(mddev, rdev) == 0) {
8561 					if (rdev->raid_disk
8562 					    >= conf->previous_raid_disks)
8563 						set_bit(In_sync, &rdev->flags);
8564 					else
8565 						rdev->recovery_offset = 0;
8566 
8567 					/* Failure here is OK */
8568 					sysfs_link_rdev(mddev, rdev);
8569 				}
8570 			} else if (rdev->raid_disk >= conf->previous_raid_disks
8571 				   && !test_bit(Faulty, &rdev->flags)) {
8572 				/* This is a spare that was manually added */
8573 				set_bit(In_sync, &rdev->flags);
8574 			}
8575 
8576 		/* When a reshape changes the number of devices,
8577 		 * ->degraded is measured against the larger of the
8578 		 * pre and post number of devices.
8579 		 */
8580 		spin_lock_irqsave(&conf->device_lock, flags);
8581 		mddev->degraded = raid5_calc_degraded(conf);
8582 		spin_unlock_irqrestore(&conf->device_lock, flags);
8583 	}
8584 	mddev->raid_disks = conf->raid_disks;
8585 	mddev->reshape_position = conf->reshape_progress;
8586 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8587 
8588 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8589 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8590 	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8591 	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8592 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8593 	conf->reshape_checkpoint = jiffies;
8594 	md_new_event();
8595 	return 0;
8596 }
8597 
8598 /* This is called from the reshape thread and should make any
8599  * changes needed in 'conf'
8600  */
8601 static void end_reshape(struct r5conf *conf)
8602 {
8603 
8604 	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
8605 		struct md_rdev *rdev;
8606 
8607 		spin_lock_irq(&conf->device_lock);
8608 		conf->previous_raid_disks = conf->raid_disks;
8609 		md_finish_reshape(conf->mddev);
8610 		smp_wmb();
8611 		conf->reshape_progress = MaxSector;
8612 		conf->mddev->reshape_position = MaxSector;
8613 		rdev_for_each(rdev, conf->mddev)
8614 			if (rdev->raid_disk >= 0 &&
8615 			    !test_bit(Journal, &rdev->flags) &&
8616 			    !test_bit(In_sync, &rdev->flags))
8617 				rdev->recovery_offset = MaxSector;
8618 		spin_unlock_irq(&conf->device_lock);
8619 		wake_up(&conf->wait_for_reshape);
8620 
8621 		mddev_update_io_opt(conf->mddev,
8622 			conf->raid_disks - conf->max_degraded);
8623 	}
8624 }
8625 
8626 /* This is called from the raid5d thread with mddev_lock held.
8627  * It makes config changes to the device.
8628  */
8629 static void raid5_finish_reshape(struct mddev *mddev)
8630 {
8631 	struct r5conf *conf = mddev->private;
8632 	struct md_rdev *rdev;
8633 
8634 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8635 
8636 		if (mddev->delta_disks <= 0) {
8637 			int d;
8638 			spin_lock_irq(&conf->device_lock);
8639 			mddev->degraded = raid5_calc_degraded(conf);
8640 			spin_unlock_irq(&conf->device_lock);
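			/*
			 * Disks beyond the new array size are no longer part
			 * of the set; clearing In_sync lets them be removed
			 * as ordinary spares.
			 */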
8641 			for (d = conf->raid_disks ;
8642 			     d < conf->raid_disks - mddev->delta_disks;
8643 			     d++) {
8644 				rdev = conf->disks[d].rdev;
8645 				if (rdev)
8646 					clear_bit(In_sync, &rdev->flags);
8647 				rdev = conf->disks[d].replacement;
8648 				if (rdev)
8649 					clear_bit(In_sync, &rdev->flags);
8650 			}
8651 		}
8652 		mddev->layout = conf->algorithm;
8653 		mddev->chunk_sectors = conf->chunk_sectors;
8654 		mddev->reshape_position = MaxSector;
8655 		mddev->delta_disks = 0;
8656 		mddev->reshape_backwards = 0;
8657 	}
8658 }
8659 
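/*
 * Drain or re-enable stripe activity.  With quiesce set, all active stripes
 * and aligned reads are waited for and conf->quiesce ends up at 1; the
 * transient value 2 is only visible while the drain is in progress and tells
 * resync/reshape to pause.  With quiesce clear, writes are allowed again.
 */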
8660 static void raid5_quiesce(struct mddev *mddev, int quiesce)
8661 {
8662 	struct r5conf *conf = mddev->private;
8663 
8664 	if (quiesce) {
8665 		/* stop all writes */
8666 		lock_all_device_hash_locks_irq(conf);
8667 		/* '2' tells resync/reshape to pause so that all
8668 		 * active stripes can drain
8669 		 */
8670 		r5c_flush_cache(conf, INT_MAX);
8671 		/* A memory barrier is needed to make sure read_one_chunk() sees
8672 		 * that quiesce has started and reverts to the slow (locked) path.
8673 		 */
8674 		smp_store_release(&conf->quiesce, 2);
8675 		wait_event_cmd(conf->wait_for_quiescent,
8676 				    atomic_read(&conf->active_stripes) == 0 &&
8677 				    atomic_read(&conf->active_aligned_reads) == 0,
8678 				    unlock_all_device_hash_locks_irq(conf),
8679 				    lock_all_device_hash_locks_irq(conf));
8680 		conf->quiesce = 1;
8681 		unlock_all_device_hash_locks_irq(conf);
8682 		/* allow reshape to continue */
8683 		wake_up(&conf->wait_for_reshape);
8684 	} else {
8685 		/* re-enable writes */
8686 		lock_all_device_hash_locks_irq(conf);
8687 		conf->quiesce = 0;
8688 		wake_up(&conf->wait_for_quiescent);
8689 		wake_up(&conf->wait_for_reshape);
8690 		unlock_all_device_hash_locks_irq(conf);
8691 	}
8692 	log_quiesce(conf, quiesce);
8693 }
8694 
8695 static void *raid45_takeover_raid0(struct mddev *mddev, int level)
8696 {
8697 	struct r0conf *raid0_conf = mddev->private;
8698 	sector_t sectors;
8699 
8700 	/* for raid0 takeover only one zone is supported */
8701 	if (raid0_conf->nr_strip_zones > 1) {
8702 		pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n",
8703 			mdname(mddev));
8704 		return ERR_PTR(-EINVAL);
8705 	}
8706 
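	/*
	 * zone_end is the total number of sectors in the (single) zone across
	 * all members, so dividing by the member count gives the per-device
	 * size to use for the new array.
	 */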
8707 	sectors = raid0_conf->strip_zone[0].zone_end;
8708 	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
8709 	mddev->dev_sectors = sectors;
8710 	mddev->new_level = level;
8711 	mddev->new_layout = ALGORITHM_PARITY_N;
8712 	mddev->new_chunk_sectors = mddev->chunk_sectors;
8713 	mddev->raid_disks += 1;
8714 	mddev->delta_disks = 1;
8715 	/* make sure it will not be marked as dirty */
8716 	mddev->resync_offset = MaxSector;
8717 
8718 	return setup_conf(mddev);
8719 }
8720 
8721 static void *raid5_takeover_raid1(struct mddev *mddev)
8722 {
8723 	int chunksect;
8724 	void *ret;
8725 
8726 	if (mddev->raid_disks != 2 ||
8727 	    mddev->degraded > 1)
8728 		return ERR_PTR(-EINVAL);
8729 
8730 	/* Should check if there are write-behind devices? */
8731 
8732 	chunksect = 64*2; /* 64K by default */
8733 
8734 	/* The array must be an exact multiple of chunksize */
8735 	while (chunksect && (mddev->array_sectors & (chunksect-1)))
8736 		chunksect >>= 1;
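	/*
	 * e.g. a 200000-sector raid1 ends up with chunksect = 64 sectors
	 * (32K): the largest power of two, not above the 64K default, that
	 * divides the array size.
	 */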
8737 
8738 	if ((chunksect<<9) < RAID5_STRIPE_SIZE((struct r5conf *)mddev->private))
8739 		/* array size does not allow a suitable chunk size */
8740 		return ERR_PTR(-EINVAL);
8741 
8742 	mddev->new_level = 5;
8743 	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
8744 	mddev->new_chunk_sectors = chunksect;
8745 
8746 	ret = setup_conf(mddev);
8747 	if (!IS_ERR(ret))
8748 		mddev_clear_unsupported_flags(mddev,
8749 			UNSUPPORTED_MDDEV_FLAGS);
8750 	return ret;
8751 }
8752 
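/*
 * The *_6 layouts are raid6 layouts that keep the Q block on the last
 * device, so a raid5 array with the same data/parity rotation is obtained
 * by dropping that last device and translating the layout name.
 */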
8753 static void *raid5_takeover_raid6(struct mddev *mddev)
8754 {
8755 	int new_layout;
8756 
8757 	switch (mddev->layout) {
8758 	case ALGORITHM_LEFT_ASYMMETRIC_6:
8759 		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
8760 		break;
8761 	case ALGORITHM_RIGHT_ASYMMETRIC_6:
8762 		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
8763 		break;
8764 	case ALGORITHM_LEFT_SYMMETRIC_6:
8765 		new_layout = ALGORITHM_LEFT_SYMMETRIC;
8766 		break;
8767 	case ALGORITHM_RIGHT_SYMMETRIC_6:
8768 		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
8769 		break;
8770 	case ALGORITHM_PARITY_0_6:
8771 		new_layout = ALGORITHM_PARITY_0;
8772 		break;
8773 	case ALGORITHM_PARITY_N:
8774 		new_layout = ALGORITHM_PARITY_N;
8775 		break;
8776 	default:
8777 		return ERR_PTR(-EINVAL);
8778 	}
8779 	mddev->new_level = 5;
8780 	mddev->new_layout = new_layout;
8781 	mddev->delta_disks = -1;
8782 	mddev->raid_disks -= 1;
8783 	return setup_conf(mddev);
8784 }
8785 
8786 static int raid5_check_reshape(struct mddev *mddev)
8787 {
8788 	/* For a 2-drive array, the layout and chunk size can be changed
8789 	 * immediately as no restriping is needed.
8790 	 * For larger arrays we record the new value - after validation -
8791 	 * to be used by a reshape pass.
8792 	 */
8793 	struct r5conf *conf = mddev->private;
8794 	int new_chunk = mddev->new_chunk_sectors;
8795 
8796 	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
8797 		return -EINVAL;
8798 	if (new_chunk > 0) {
8799 		if (!is_power_of_2(new_chunk))
8800 			return -EINVAL;
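		/* PAGE_SIZE >> 9 is the number of sectors per page, so with
		 * 4K pages the smallest accepted chunk is 8 sectors (4K).
		 */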
8801 		if (new_chunk < (PAGE_SIZE>>9))
8802 			return -EINVAL;
8803 		if (mddev->array_sectors & (new_chunk-1))
8804 			/* not factor of array size */
8805 			return -EINVAL;
8806 	}
8807 
8808 	/* They look valid */
8809 
8810 	if (mddev->raid_disks == 2) {
8811 		/* can make the change immediately */
8812 		if (mddev->new_layout >= 0) {
8813 			conf->algorithm = mddev->new_layout;
8814 			mddev->layout = mddev->new_layout;
8815 		}
8816 		if (new_chunk > 0) {
8817 			conf->chunk_sectors = new_chunk;
8818 			mddev->chunk_sectors = new_chunk;
8819 		}
8820 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8821 		md_wakeup_thread(mddev->thread);
8822 	}
8823 	return check_reshape(mddev);
8824 }
8825 
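/*
 * Same validation as the raid5 variant, but raid6 has no 2-drive fast path:
 * any accepted layout/chunk change is applied by a full reshape pass.
 */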
8826 static int raid6_check_reshape(struct mddev *mddev)
8827 {
8828 	int new_chunk = mddev->new_chunk_sectors;
8829 
8830 	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
8831 		return -EINVAL;
8832 	if (new_chunk > 0) {
8833 		if (!is_power_of_2(new_chunk))
8834 			return -EINVAL;
8835 		if (new_chunk < (PAGE_SIZE >> 9))
8836 			return -EINVAL;
8837 		if (mddev->array_sectors & (new_chunk-1))
8838 			/* not factor of array size */
8839 			return -EINVAL;
8840 	}
8841 
8842 	/* They look valid */
8843 	return check_reshape(mddev);
8844 }
8845 
8846 static void *raid5_takeover(struct mddev *mddev)
8847 {
8848 	/* raid5 can take over:
8849 	 *  raid0 - if there is only one strip zone - make it a raid4 layout
8850 	 *  raid1 - if there are two drives.  We need to know the chunk size
8851 	 *  raid4 - trivial - just use a raid4 layout.
8852 	 *  raid6 - Providing it is a *_6 layout
8853 	 */
8854 	if (mddev->level == 0)
8855 		return raid45_takeover_raid0(mddev, 5);
8856 	if (mddev->level == 1)
8857 		return raid5_takeover_raid1(mddev);
8858 	if (mddev->level == 4) {
8859 		mddev->new_layout = ALGORITHM_PARITY_N;
8860 		mddev->new_level = 5;
8861 		return setup_conf(mddev);
8862 	}
8863 	if (mddev->level == 6)
8864 		return raid5_takeover_raid6(mddev);
8865 
8866 	return ERR_PTR(-EINVAL);
8867 }
8868 
8869 static void *raid4_takeover(struct mddev *mddev)
8870 {
8871 	/* raid4 can take over:
8872 	 *  raid0 - if there is only one strip zone
8873 	 *  raid5 - if layout is right
8874 	 */
8875 	if (mddev->level == 0)
8876 		return raid45_takeover_raid0(mddev, 4);
8877 	if (mddev->level == 5 &&
8878 	    mddev->layout == ALGORITHM_PARITY_N) {
8879 		mddev->new_layout = 0;
8880 		mddev->new_level = 4;
8881 		return setup_conf(mddev);
8882 	}
8883 	return ERR_PTR(-EINVAL);
8884 }
8885 
8886 static struct md_personality raid5_personality;
8887 
8888 static void *raid6_takeover(struct mddev *mddev)
8889 {
8890 	/* Currently can only take over a raid5.  We map the
8891 	 * personality to an equivalent raid6 personality
8892 	 * with the Q block at the end.
8893 	 */
8894 	int new_layout;
8895 
8896 	if (mddev->pers != &raid5_personality)
8897 		return ERR_PTR(-EINVAL);
8898 	if (mddev->degraded > 1)
8899 		return ERR_PTR(-EINVAL);
8900 	if (mddev->raid_disks > 253)
8901 		return ERR_PTR(-EINVAL);
8902 	if (mddev->raid_disks < 3)
8903 		return ERR_PTR(-EINVAL);
8904 
8905 	switch (mddev->layout) {
8906 	case ALGORITHM_LEFT_ASYMMETRIC:
8907 		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
8908 		break;
8909 	case ALGORITHM_RIGHT_ASYMMETRIC:
8910 		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
8911 		break;
8912 	case ALGORITHM_LEFT_SYMMETRIC:
8913 		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
8914 		break;
8915 	case ALGORITHM_RIGHT_SYMMETRIC:
8916 		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
8917 		break;
8918 	case ALGORITHM_PARITY_0:
8919 		new_layout = ALGORITHM_PARITY_0_6;
8920 		break;
8921 	case ALGORITHM_PARITY_N:
8922 		new_layout = ALGORITHM_PARITY_N;
8923 		break;
8924 	default:
8925 		return ERR_PTR(-EINVAL);
8926 	}
8927 	mddev->new_level = 6;
8928 	mddev->new_layout = new_layout;
8929 	mddev->delta_disks = 1;
8930 	mddev->raid_disks += 1;
8931 	return setup_conf(mddev);
8932 }
8933 
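/*
 * Back end for md's "consistency_policy" attribute: "ppl" enables the
 * partial parity log on a raid5 array, "resync" drops PPL (or a failed
 * journal) and falls back to resync-based consistency.
 */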
8934 static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf)
8935 {
8936 	struct r5conf *conf;
8937 	int err;
8938 
8939 	err = mddev_suspend_and_lock(mddev);
8940 	if (err)
8941 		return err;
8942 	conf = mddev->private;
8943 	if (!conf) {
8944 		mddev_unlock_and_resume(mddev);
8945 		return -ENODEV;
8946 	}
8947 
8948 	if (strncmp(buf, "ppl", 3) == 0) {
8949 		/* ppl only works with RAID 5 */
8950 		if (!raid5_has_ppl(conf) && conf->level == 5) {
8951 			err = log_init(conf, NULL, true);
8952 			if (!err) {
8953 				err = resize_stripes(conf, conf->pool_size);
8954 				if (err)
8955 					log_exit(conf);
8956 			}
8957 		} else
8958 			err = -EINVAL;
8959 	} else if (strncmp(buf, "resync", 6) == 0) {
8960 		if (raid5_has_ppl(conf)) {
8961 			log_exit(conf);
8962 			err = resize_stripes(conf, conf->pool_size);
8963 		} else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) &&
8964 			   r5l_log_disk_error(conf)) {
8965 			bool journal_dev_exists = false;
8966 			struct md_rdev *rdev;
8967 
8968 			rdev_for_each(rdev, mddev)
8969 				if (test_bit(Journal, &rdev->flags)) {
8970 					journal_dev_exists = true;
8971 					break;
8972 				}
8973 
8974 			if (!journal_dev_exists)
8975 				clear_bit(MD_HAS_JOURNAL, &mddev->flags);
8976 			else  /* need to remove the journal device first */
8977 				err = -EBUSY;
8978 		} else
8979 			err = -EINVAL;
8980 	} else {
8981 		err = -EINVAL;
8982 	}
8983 
8984 	if (!err)
8985 		md_update_sb(mddev, 1);
8986 
8987 	mddev_unlock_and_resume(mddev);
8988 
8989 	return err;
8990 }
8991 
8992 static int raid5_start(struct mddev *mddev)
8993 {
8994 	struct r5conf *conf = mddev->private;
8995 
8996 	return r5l_start(conf->log);
8997 }
8998 
8999 /*
9000  * This is only used for dm-raid456: the caller has already frozen the
9001  * sync_thread, so if a reshape is still in progress, IO that is waiting for
9002  * the reshape can never complete now; wake up and handle that IO here.
9003  */
9004 static void raid5_prepare_suspend(struct mddev *mddev)
9005 {
9006 	struct r5conf *conf = mddev->private;
9007 
9008 	wake_up(&conf->wait_for_reshape);
9009 }
9010 
9011 static struct md_personality raid6_personality =
9012 {
9013 	.head = {
9014 		.type	= MD_PERSONALITY,
9015 		.id	= ID_RAID6,
9016 		.name	= "raid6",
9017 		.owner	= THIS_MODULE,
9018 	},
9019 
9020 	.make_request	= raid5_make_request,
9021 	.run		= raid5_run,
9022 	.start		= raid5_start,
9023 	.free		= raid5_free,
9024 	.status		= raid5_status,
9025 	.error_handler	= raid5_error,
9026 	.hot_add_disk	= raid5_add_disk,
9027 	.hot_remove_disk= raid5_remove_disk,
9028 	.spare_active	= raid5_spare_active,
9029 	.sync_request	= raid5_sync_request,
9030 	.resize		= raid5_resize,
9031 	.size		= raid5_size,
9032 	.check_reshape	= raid6_check_reshape,
9033 	.start_reshape  = raid5_start_reshape,
9034 	.finish_reshape = raid5_finish_reshape,
9035 	.quiesce	= raid5_quiesce,
9036 	.takeover	= raid6_takeover,
9037 	.change_consistency_policy = raid5_change_consistency_policy,
9038 	.prepare_suspend = raid5_prepare_suspend,
9039 	.bitmap_sector	= raid5_bitmap_sector,
9040 };
9041 static struct md_personality raid5_personality =
9042 {
9043 	.head = {
9044 		.type	= MD_PERSONALITY,
9045 		.id	= ID_RAID5,
9046 		.name	= "raid5",
9047 		.owner	= THIS_MODULE,
9048 	},
9049 
9050 	.make_request	= raid5_make_request,
9051 	.run		= raid5_run,
9052 	.start		= raid5_start,
9053 	.free		= raid5_free,
9054 	.status		= raid5_status,
9055 	.error_handler	= raid5_error,
9056 	.hot_add_disk	= raid5_add_disk,
9057 	.hot_remove_disk= raid5_remove_disk,
9058 	.spare_active	= raid5_spare_active,
9059 	.sync_request	= raid5_sync_request,
9060 	.resize		= raid5_resize,
9061 	.size		= raid5_size,
9062 	.check_reshape	= raid5_check_reshape,
9063 	.start_reshape  = raid5_start_reshape,
9064 	.finish_reshape = raid5_finish_reshape,
9065 	.quiesce	= raid5_quiesce,
9066 	.takeover	= raid5_takeover,
9067 	.change_consistency_policy = raid5_change_consistency_policy,
9068 	.prepare_suspend = raid5_prepare_suspend,
9069 	.bitmap_sector	= raid5_bitmap_sector,
9070 };
9071 
9072 static struct md_personality raid4_personality =
9073 {
9074 	.head = {
9075 		.type	= MD_PERSONALITY,
9076 		.id	= ID_RAID4,
9077 		.name	= "raid4",
9078 		.owner	= THIS_MODULE,
9079 	},
9080 
9081 	.make_request	= raid5_make_request,
9082 	.run		= raid5_run,
9083 	.start		= raid5_start,
9084 	.free		= raid5_free,
9085 	.status		= raid5_status,
9086 	.error_handler	= raid5_error,
9087 	.hot_add_disk	= raid5_add_disk,
9088 	.hot_remove_disk= raid5_remove_disk,
9089 	.spare_active	= raid5_spare_active,
9090 	.sync_request	= raid5_sync_request,
9091 	.resize		= raid5_resize,
9092 	.size		= raid5_size,
9093 	.check_reshape	= raid5_check_reshape,
9094 	.start_reshape  = raid5_start_reshape,
9095 	.finish_reshape = raid5_finish_reshape,
9096 	.quiesce	= raid5_quiesce,
9097 	.takeover	= raid4_takeover,
9098 	.change_consistency_policy = raid5_change_consistency_policy,
9099 	.prepare_suspend = raid5_prepare_suspend,
9100 	.bitmap_sector	= raid5_bitmap_sector,
9101 };
9102 
9103 static int __init raid5_init(void)
9104 {
9105 	int ret;
9106 
9107 	raid5_wq = alloc_workqueue("raid5wq",
9108 		WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_SYSFS, 0);
9109 	if (!raid5_wq)
9110 		return -ENOMEM;
9111 
9112 	ret = cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE,
9113 				      "md/raid5:prepare",
9114 				      raid456_cpu_up_prepare,
9115 				      raid456_cpu_dead);
9116 	if (ret)
9117 		goto err_destroy_wq;
9118 
9119 	ret = register_md_submodule(&raid6_personality.head);
9120 	if (ret)
9121 		goto err_cpuhp_remove;
9122 
9123 	ret = register_md_submodule(&raid5_personality.head);
9124 	if (ret)
9125 		goto err_unregister_raid6;
9126 
9127 	ret = register_md_submodule(&raid4_personality.head);
9128 	if (ret)
9129 		goto err_unregister_raid5;
9130 
9131 	return 0;
9132 
9133 err_unregister_raid5:
9134 	unregister_md_submodule(&raid5_personality.head);
9135 err_unregister_raid6:
9136 	unregister_md_submodule(&raid6_personality.head);
9137 err_cpuhp_remove:
9138 	cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
9139 err_destroy_wq:
9140 	destroy_workqueue(raid5_wq);
9141 	return ret;
9142 }
9143 
9144 static void __exit raid5_exit(void)
9145 {
9146 	unregister_md_submodule(&raid6_personality.head);
9147 	unregister_md_submodule(&raid5_personality.head);
9148 	unregister_md_submodule(&raid4_personality.head);
9149 	cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
9150 	destroy_workqueue(raid5_wq);
9151 }
9152 
9153 module_init(raid5_init);
9154 module_exit(raid5_exit);
9155 MODULE_LICENSE("GPL");
9156 MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
9157 MODULE_ALIAS("md-personality-4"); /* RAID5 */
9158 MODULE_ALIAS("md-raid5");
9159 MODULE_ALIAS("md-raid4");
9160 MODULE_ALIAS("md-level-5");
9161 MODULE_ALIAS("md-level-4");
9162 MODULE_ALIAS("md-personality-8"); /* RAID6 */
9163 MODULE_ALIAS("md-raid6");
9164 MODULE_ALIAS("md-level-6");
9165 
9166 /* This used to be two separate modules, they were: */
9167 MODULE_ALIAS("raid5");
9168 MODULE_ALIAS("raid6");
9169