// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.


   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
         adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
         or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/

#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

/* pers_list is a list of registered personalities protected by pers_lock. */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static const struct kobj_type md_ktype;

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
struct workqueue_struct *md_bitmap_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);
static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
static void md_wakeup_thread_directly(struct md_thread __rcu *thread);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/* Default safemode delay: 200 msec */
#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
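/*
 * Worked example (illustrative): with HZ == 250 this evaluates to
 * (200 * 250) / 1000 + 1 == 51 jiffies, i.e. ~204 msec; the "+1" keeps
 * the delay from truncating toward zero on coarse HZ values.
 */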
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */
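/*
 * Example (illustrative): to guarantee at least 50 MB/sec of resync
 * bandwidth system-wide, or for one (hypothetical) array md0 only:
 *
 *   echo 50000 > /proc/sys/dev/raid/speed_limit_min
 *   echo 50000 > /sys/block/md0/md/sync_speed_min
 *
 * Writing "system" to the per-array file reverts it to the global value.
 */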

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static void rdev_uninit_serial(struct md_rdev *rdev)
{
	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
		return;

	kvfree(rdev->serial);
	rdev->serial = NULL;
}

static void rdevs_uninit_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		rdev_uninit_serial(rdev);
}

static int rdev_init_serial(struct md_rdev *rdev)
{
	/* serial_nums equals BARRIER_BUCKETS_NR */
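	/*
	 * Worked example (illustrative): with PAGE_SHIFT == 12 and
	 * sizeof(atomic_t) == 4, serial_nums = 1 << (12 - 2) = 1024.
	 */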
	int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
	struct serial_in_rdev *serial = NULL;

	if (test_bit(CollisionCheck, &rdev->flags))
		return 0;

	serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
			  GFP_KERNEL);
	if (!serial)
		return -ENOMEM;

	for (i = 0; i < serial_nums; i++) {
		struct serial_in_rdev *serial_tmp = &serial[i];

		spin_lock_init(&serial_tmp->serial_lock);
		serial_tmp->serial_rb = RB_ROOT_CACHED;
		init_waitqueue_head(&serial_tmp->serial_io_wait);
	}

	rdev->serial = serial;
	set_bit(CollisionCheck, &rdev->flags);

	return 0;
}

static int rdevs_init_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int ret = 0;

	rdev_for_each(rdev, mddev) {
		ret = rdev_init_serial(rdev);
		if (ret)
			break;
	}

	/* Free all resources if the pool does not exist */
	if (ret && !mddev->serial_info_pool)
		rdevs_uninit_serial(mddev);

	return ret;
}

/*
 * An rdev needs serialization enabled if it meets both conditions:
 * 1. it is a multi-queue device flagged with writemostly.
 * 2. the write-behind mode is enabled.
 */
static int rdev_need_serial(struct md_rdev *rdev)
{
	return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
		rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
		test_bit(WriteMostly, &rdev->flags));
}

/*
 * Init resources for rdev(s), then create serial_info_pool if:
 * 1. rdev is the first device which returns true from rdev_need_serial.
 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
 */
void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
			      bool is_suspend)
{
	int ret = 0;

	if (rdev && !rdev_need_serial(rdev) &&
	    !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (!is_suspend)
		mddev_suspend(mddev);

	if (!rdev)
		ret = rdevs_init_serial(mddev);
	else
		ret = rdev_init_serial(rdev);
	if (ret)
		goto abort;

	if (mddev->serial_info_pool == NULL) {
		/*
		 * already in memalloc noio context by
		 * mddev_suspend()
		 */
		mddev->serial_info_pool =
			mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
						sizeof(struct serial_info));
		if (!mddev->serial_info_pool) {
			rdevs_uninit_serial(mddev);
			pr_err("can't alloc memory pool for serialization\n");
		}
	}

abort:
	if (!is_suspend)
		mddev_resume(mddev);
}

/*
 * Free resources from rdev(s), and destroy serial_info_pool under conditions:
 * 1. rdev is the last device flagged with CollisionCheck.
 * 2. when bitmap is destroyed while policy is not enabled.
 * 3. for disable policy, the pool is destroyed only when no rdev needs it.
 */
void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
			       bool is_suspend)
{
	if (rdev && !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (mddev->serial_info_pool) {
		struct md_rdev *temp;
		int num = 0; /* used to track if other rdevs need the pool */

		if (!is_suspend)
			mddev_suspend(mddev);
		rdev_for_each(temp, mddev) {
			if (!rdev) {
				if (!mddev->serialize_policy ||
				    !rdev_need_serial(temp))
					rdev_uninit_serial(temp);
				else
					num++;
			} else if (temp != rdev &&
				   test_bit(CollisionCheck, &temp->flags))
				num++;
		}

		if (rdev)
			rdev_uninit_serial(rdev);

		if (num)
			pr_info("The mempool could be used by other devices\n");
		else {
			mempool_destroy(mddev->serial_info_pool);
			mddev->serial_info_pool = NULL;
		}
		if (!is_suspend)
			mddev_resume(mddev);
	}
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int start_readonly;

/*
 * The original mechanism for creating an md device is to create
 * a device node in /dev and to open it.  This causes races with device-close.
 * The preferred method is to write to the "new_array" module parameter.
 * This can avoid races.
 * Setting create_on_open to false disables the original mechanism
 * so all the races disappear.
 */
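/*
 * Illustrative usage (hypothetical array name; the exact device node
 * depends on udev rules):
 *
 *   echo md_test > /sys/module/md_mod/parameters/new_array
 *
 * This allocates the array without the open()-time races described above.
 */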
static bool create_on_open = true;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
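/*
 * Userspace sketch (illustrative, not the only valid pattern): a monitor
 * such as mdadm can open /proc/mdstat, read it, then block in poll() for
 * POLLPRI to learn that the event count changed:
 *
 *   struct pollfd pfd = { .fd = fd_of_proc_mdstat, .events = POLLPRI };
 *   poll(&pfd, 1, -1);	/- returns once md_new_event() bumps the count -/
 */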
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(void)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static bool is_suspended(struct mddev *mddev, struct bio *bio)
{
	if (is_md_suspended(mddev))
		return true;
	if (bio_data_dir(bio) != WRITE)
		return false;
	if (mddev->suspend_lo >= mddev->suspend_hi)
		return false;
	if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
		return false;
	if (bio_end_sector(bio) < mddev->suspend_lo)
		return false;
	return true;
}

void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
	if (is_suspended(mddev, bio)) {
		DEFINE_WAIT(__wait);
		/* Bail out if REQ_NOWAIT is set for the bio */
		if (bio->bi_opf & REQ_NOWAIT) {
			bio_wouldblock_error(bio);
			return;
		}
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!is_suspended(mddev, bio))
				break;
			schedule();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	if (!percpu_ref_tryget_live(&mddev->active_io))
		goto check_suspended;

	if (!mddev->pers->make_request(mddev, bio)) {
		percpu_ref_put(&mddev->active_io);
		goto check_suspended;
	}

	percpu_ref_put(&mddev->active_io);
}
EXPORT_SYMBOL(md_handle_request);

static void md_submit_bio(struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return;
	}

	if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
		bio_io_error(bio);
		return;
	}

	bio = bio_split_to_limits(bio);
	if (!bio)
		return;

	if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}

	/* bio could be mergeable after passing to underlayer */
	bio->bi_opf &= ~REQ_NOMERGE;

	md_handle_request(mddev, bio);
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	struct md_thread *thread = rcu_dereference_protected(mddev->thread,
			lockdep_is_held(&mddev->reconfig_mutex));

	WARN_ON_ONCE(thread && current == thread->tsk);
	if (mddev->suspended++)
		return;
	wake_up(&mddev->sb_wait);
	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
	percpu_ref_kill(&mddev->active_io);

	if (mddev->pers && mddev->pers->prepare_suspend)
		mddev->pers->prepare_suspend(mddev);

	wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io));
	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

	del_timer_sync(&mddev->safemode_timer);
	/* restrict memory reclaim I/O while the raid array is suspended */
	mddev->noio_flag = memalloc_noio_save();
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (--mddev->suspended)
		return;

	/* entered the memalloc scope from mddev_suspend() */
	memalloc_noio_restore(mddev->noio_flag);

	percpu_ref_resurrect(&mddev->active_io);
	wake_up(&mddev->sb_wait);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	bio_put(bio);

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	mddev->start_flush = ktime_get_boottime();
	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when the request finishes, one after
			 * we re-acquire rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_bioset(rdev->bdev, 0,
					      REQ_OP_WRITE | REQ_PREFLUSH,
					      GFP_NOIO, &mddev->bio_set);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	/*
	 * must reset flush_bio before calling into md_handle_request to avoid a
	 * deadlock, because other bios passed md_handle_request suspend check
	 * could wait for this and below md_handle_request could wait for those
	 * bios because of suspend check
	 */
	spin_lock_irq(&mddev->lock);
	mddev->prev_flush_start = mddev->start_flush;
	mddev->flush_bio = NULL;
	spin_unlock_irq(&mddev->lock);
	wake_up(&mddev->sb_wait);

	if (bio->bi_iter.bi_size == 0) {
		/* an empty barrier - all done */
		bio_endio(bio);
	} else {
		bio->bi_opf &= ~REQ_PREFLUSH;
		md_handle_request(mddev, bio);
	}
}

/*
 * Manages consolidation of flushes and submitting any flushes needed for
 * a bio with REQ_PREFLUSH.  Returns true if the bio is finished or is
 * being finished in another context.  Returns false if the flushing is
 * complete but still needs the I/O portion of the bio to be processed.
 */
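/*
 * Illustrative timeline (hypothetical timestamps): flush A arrives at
 * t=10 and its flush is submitted at t=10.  B (t=11) and C (t=12) wait,
 * since they arrived after that flush started.  When A's flush ends,
 * prev_flush_start = 10; B (11 > 10) becomes the new flush_bio and a
 * fresh flush starts at, say, t=13.  When that one ends, C sees
 * 12 < prev_flush_start = 13: the flush already covers it, so it is
 * coalesced and only C's data portion (if any) is submitted.
 */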
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
	ktime_t req_start = ktime_get_boottime();
	spin_lock_irq(&mddev->lock);
	/* flush requests wait until ongoing flush completes,
	 * hence coalescing all the pending requests.
	 */
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio ||
			    ktime_before(req_start, mddev->prev_flush_start),
			    mddev->lock);
	/* new request after previous flush is completed */
	if (ktime_after(req_start, mddev->prev_flush_start)) {
		WARN_ON(mddev->flush_bio);
		mddev->flush_bio = bio;
		bio = NULL;
	}
	spin_unlock_irq(&mddev->lock);

	if (!bio) {
		INIT_WORK(&mddev->flush_work, submit_flushes);
		queue_work(md_wq, &mddev->flush_work);
	} else {
		/* flush was performed for some other bio while we waited. */
		if (bio->bi_iter.bi_size == 0)
			/* an empty barrier - all done */
			bio_endio(bio);
		else {
			bio->bi_opf &= ~REQ_PREFLUSH;
			return false;
		}
	}
	return true;
}
EXPORT_SYMBOL(md_flush_request);

static inline struct mddev *mddev_get(struct mddev *mddev)
{
	lockdep_assert_held(&all_mddevs_lock);

	if (test_bit(MD_DELETED, &mddev->flags))
		return NULL;
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

void mddev_put(struct mddev *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		set_bit(MD_DELETED, &mddev->flags);

		/*
		 * Call queue_work inside the spinlock so that
		 * flush_workqueue() after mddev_find will succeed in waiting
		 * for the work to be done.
		 */
		queue_work(md_misc_wq, &mddev->del_work);
	}
	spin_unlock(&all_mddevs_lock);
}

static void md_safemode_timeout(struct timer_list *t);
static void md_start_sync(struct work_struct *ws);

static void active_io_release(struct percpu_ref *ref)
{
	struct mddev *mddev = container_of(ref, struct mddev, active_io);

	wake_up(&mddev->sb_wait);
}

static void no_op(struct percpu_ref *r) {}

int mddev_init(struct mddev *mddev)
{

	if (percpu_ref_init(&mddev->active_io, active_io_release,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		return -ENOMEM;

	if (percpu_ref_init(&mddev->writes_pending, no_op,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
		percpu_ref_exit(&mddev->active_io);
		return -ENOMEM;
	}

	/* We want to start with the refcount at zero */
	percpu_ref_put(&mddev->writes_pending);

	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->sync_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	INIT_LIST_HEAD(&mddev->deleting);
	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->sync_seq, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;

	INIT_WORK(&mddev->sync_work, md_start_sync);
	INIT_WORK(&mddev->del_work, mddev_delayed_delete);

	return 0;
}
EXPORT_SYMBOL_GPL(mddev_init);

void mddev_destroy(struct mddev *mddev)
{
	percpu_ref_exit(&mddev->active_io);
	percpu_ref_exit(&mddev->writes_pending);
}
EXPORT_SYMBOL_GPL(mddev_destroy);

static struct mddev *mddev_find_locked(dev_t unit)
{
	struct mddev *mddev;

	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit)
			return mddev;

	return NULL;
}

/* find an unused unit number */
static dev_t mddev_alloc_unit(void)
{
	static int next_minor = 512;
	int start = next_minor;
	bool is_free = 0;
	dev_t dev = 0;

	while (!is_free) {
		dev = MKDEV(MD_MAJOR, next_minor);
		next_minor++;
		if (next_minor > MINORMASK)
			next_minor = 0;
		if (next_minor == start)
			return 0;		/* Oh dear, all in use. */
		is_free = !mddev_find_locked(dev);
	}

	return dev;
}

static struct mddev *mddev_alloc(dev_t unit)
{
	struct mddev *new;
	int error;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1 << MdpMinorShift) - 1);
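	/*
	 * Illustrative: assuming MdpMinorShift == 6 (from md_u.h), a
	 * partitioned-md (mdp) partition minor such as 70 is masked back
	 * to its whole-device minor, 70 & ~63 == 64.
	 */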

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	error = mddev_init(new);
	if (error)
		goto out_free_new;

	spin_lock(&all_mddevs_lock);
	if (unit) {
		error = -EEXIST;
		if (mddev_find_locked(unit))
			goto out_destroy_new;
		new->unit = unit;
		if (MAJOR(unit) == MD_MAJOR)
			new->md_minor = MINOR(unit);
		else
			new->md_minor = MINOR(unit) >> MdpMinorShift;
		new->hold_active = UNTIL_IOCTL;
	} else {
		error = -ENODEV;
		new->unit = mddev_alloc_unit();
		if (!new->unit)
			goto out_destroy_new;
		new->md_minor = MINOR(new->unit);
		new->hold_active = UNTIL_STOP;
	}

	list_add(&new->all_mddevs, &all_mddevs);
	spin_unlock(&all_mddevs_lock);
	return new;

out_destroy_new:
	spin_unlock(&all_mddevs_lock);
	mddev_destroy(new);
out_free_new:
	kfree(new);
	return ERR_PTR(error);
}

static void mddev_free(struct mddev *mddev)
{
	spin_lock(&all_mddevs_lock);
	list_del(&mddev->all_mddevs);
	spin_unlock(&all_mddevs_lock);

	mddev_destroy(mddev);
	kfree(mddev);
}

static const struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct md_rdev *tmp;
	LIST_HEAD(delete);

	if (!list_empty(&mddev->deleting))
		list_splice_init(&mddev->deleting, &delete);

	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So set sysfs_active while the removal is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		const struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				if (mddev->sysfs_completed)
					sysfs_put(mddev->sysfs_completed);
				if (mddev->sysfs_degraded)
					sysfs_put(mddev->sysfs_degraded);
				mddev->sysfs_action = NULL;
				mddev->sysfs_completed = NULL;
				mddev->sysfs_degraded = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
	wake_up(&mddev->sb_wait);

	list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
		list_del_init(&rdev->same_set);
		kobject_del(&rdev->kobj);
		export_rdev(rdev, mddev);
	}
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512-byte sectors */
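/*
 * Worked example (illustrative): MD_NEW_SIZE_SECTORS() rounds the device
 * size down to a 64 KiB boundary and steps back one 64 KiB reservation,
 * so a 1000000-sector device puts the 0.90 superblock at sector
 * (1000000 & ~127) - 128 == 999808.
 */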
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
}

static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page)
		return -ENOMEM;
	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	badblocks_exit(&rdev->badblocks);
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (bio->bi_status) {
		pr_err("md: %s gets error=%d\n", __func__,
		       blk_status_to_errno(bio->bi_status));
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags)
		    && (bio->bi_opf & MD_FAILFAST)) {
			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
			set_bit(LastDev, &rdev->flags);
		}
	} else
		clear_bit(LastDev, &rdev->flags);

	bio_put(bio);

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio;

	if (!page)
		return;

	if (test_bit(Faulty, &rdev->flags))
		return;

	bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
			       1,
			       REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA,
			       GFP_NOIO, &mddev->sync_set);

	atomic_inc(&rdev->nr_pending);

	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
	    test_bit(FailFast, &rdev->flags) &&
	    !test_bit(LastDev, &rdev->flags))
		bio->bi_opf |= MD_FAILFAST;

	atomic_inc(&mddev->pending_writes);
	submit_bio(bio);
}

int md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
		return -EAGAIN;
	return 0;
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, blk_opf_t opf, bool metadata_op)
{
	struct bio bio;
	struct bio_vec bvec;

	if (metadata_op && rdev->meta_bdev)
		bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
	else
		bio_init(&bio, rdev->bdev, &bvec, 1, opf);

	if (metadata_op)
		bio.bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio.bi_iter.bi_sector = sector + rdev->data_offset;
	__bio_add_page(&bio, page, size, 0);

	submit_bio_wait(&bio);

	return !bio.bi_status;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	pr_err("md: disabled device %pg, could not read superblock.\n",
	       rdev->bdev);
	return -EINVAL;
}

static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
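/*
 * Fold example (illustrative): 0x12345678 -> 0x5678 + 0x1234 = 0x68ac
 * after the first line; the second line folds any carry back in, so the
 * result always fits in 16 bits.
 */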

static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(struct md_rdev *rdev,
					  struct md_rdev *refdev,
					  int minor_version);
	int		    (*validate_super)(struct mddev *mddev,
					      struct md_rdev *rdev);
	void		    (*sync_super)(struct mddev *mddev,
					  struct md_rdev *rdev);
	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
						sector_t num_sectors);
	int		    (*allow_new_offset)(struct md_rdev *rdev,
						unsigned long long new_offset);
};

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	pr_warn("%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	mdp_super_t *sb;
	int ret;
	bool spare_disk = true;

	/*
	 * Calculate the position of the superblock (512-byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4KB.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret)
		return ret;

	ret = -EINVAL;

	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		pr_warn("md: invalid raid superblock magic on %pg\n",
			rdev->bdev);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		pr_warn("Bad version number %d.%d on %pg\n",
			sb->major_version, sb->minor_version, rdev->bdev);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->new_data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	/* not spare disk, or LEVEL_MULTIPATH */
	if (sb->level == LEVEL_MULTIPATH ||
		(rdev->desc_nr >= 0 &&
		 rdev->desc_nr < MD_SB_DISKS &&
		 sb->disks[rdev->desc_nr].state &
		 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
		spare_disk = false;

	if (!refdev) {
		if (!spare_disk)
			ret = 1;
		else
			ret = 0;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = page_address(refdev->sb_page);
		if (!md_uuid_equal(refsb, sb)) {
			pr_warn("md: %pg has different UUID to %pg\n",
				rdev->bdev, refdev->bdev);
			goto abort;
		}
		if (!md_sb_equal(refsb, sb)) {
			pr_warn("md: %pg has same UUID but different superblock to %pg\n",
				rdev->bdev, refdev->bdev);
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);

		if (!spare_disk && ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * (not needed for Linear and RAID0 as metadata doesn't
	 * record this size)
	 */
	if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
		rdev->sectors = (sector_t)(2ULL << 32) - 2;

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* bitmap can use 60 K after the 4K superblocks */
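		/*
		 * Worked out (illustrative): default_offset = 4096 >> 9
		 * = 8 sectors (the 4K superblock), default_space = 128 - 8
		 * = 120 sectors, i.e. 60K.
		 */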
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
		mddev->reshape_backwards = 0;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
			if (mddev->delta_disks < 0)
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}
		if (mddev->level == 0)
			mddev->layout = -1;

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
			rdev->saved_raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		if (desc->state & (1<<MD_DISK_FAILFAST))
			set_bit(FailFast, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_super_t *sb;
	struct md_rdev *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
		if (test_bit(FailFast, &rdev2->flags))
			d->state |= (1<<MD_DISK_FAILFAST);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
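	/*
	 * Sanity check (illustrative): 2^33 sectors * 512 bytes = 2^42
	 * bytes = 4 TiB; the clamp below leaves two sectors (1 KiB) of
	 * headroom under that boundary.
	 */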
	if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
		num_sectors = (sector_t)(2ULL << 32) - 2;
	do {
		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	} while (md_super_wait(rdev->mddev) < 0);
	return num_sectors;
}

static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
	/* non-zero offset changes not possible with v0.90 */
	return new_offset == 0;
}

/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (; size >= 4; size -= 4)
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	sector_t sectors;
	int bmask;
	bool spare_disk = true;

	/*
	 * Calculate the position of the superblock in 512-byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
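	/*
	 * Worked example for minor_version 0 (illustrative): on a
	 * 1000000-sector device, 1000000 - 16 = 999984, and rounding down
	 * to an 8-sector (4K) boundary keeps 999984, i.e. 8K from the end.
	 */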
	switch(minor_version) {
	case 0:
		sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4K, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		pr_warn("md: invalid superblock checksum on %pg\n",
			rdev->bdev);
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		pr_warn("md: data_size too small on %pg\n",
			rdev->bdev);
		return -EINVAL;
	}
	if (sb->pad0 ||
	    sb->pad3[0] ||
	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
		/* Some padding is non-zero, might be a new feature */
		return -EINVAL;

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = rdev->data_offset;
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;
	if (minor_version
	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
		 */
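		/*
		 * On-disk encoding (illustrative): each __le64 entry packs
		 * a start sector in the high bits and a 10-bit length in
		 * the low bits, both scaled by bblog_shift; e.g. with
		 * bblog_shift == 0, (100 << 10) | 5 marks sectors 100..104
		 * as bad.  An all-ones entry terminates the list.
		 */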
1722 		s32 offset;
1723 		sector_t bb_sector;
1724 		__le64 *bbp;
1725 		int i;
1726 		int sectors = le16_to_cpu(sb->bblog_size);
1727 		if (sectors > (PAGE_SIZE / 512))
1728 			return -EINVAL;
1729 		offset = le32_to_cpu(sb->bblog_offset);
1730 		if (offset == 0)
1731 			return -EINVAL;
1732 		bb_sector = (long long)offset;
1733 		if (!sync_page_io(rdev, bb_sector, sectors << 9,
1734 				  rdev->bb_page, REQ_OP_READ, true))
1735 			return -EIO;
1736 		bbp = (__le64 *)page_address(rdev->bb_page);
1737 		rdev->badblocks.shift = sb->bblog_shift;
1738 		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1739 			u64 bb = le64_to_cpu(*bbp);
1740 			int count = bb & (0x3ff);
1741 			u64 sector = bb >> 10;
1742 			sector <<= sb->bblog_shift;
1743 			count <<= sb->bblog_shift;
1744 			if (bb + 1 == 0)
1745 				break;
1746 			if (badblocks_set(&rdev->badblocks, sector, count, 1))
1747 				return -EINVAL;
1748 		}
1749 	} else if (sb->bblog_offset != 0)
1750 		rdev->badblocks.shift = 0;
1751 
1752 	if ((le32_to_cpu(sb->feature_map) &
1753 	    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
1754 		rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1755 		rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1756 		rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1757 	}
1758 
1759 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1760 	    sb->level != 0)
1761 		return -EINVAL;
1762 
1763 	/* not spare disk, or LEVEL_MULTIPATH */
1764 	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1765 		(rdev->desc_nr >= 0 &&
1766 		rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1767 		(le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1768 		 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1769 		spare_disk = false;
1770 
1771 	if (!refdev) {
1772 		if (!spare_disk)
1773 			ret = 1;
1774 		else
1775 			ret = 0;
1776 	} else {
1777 		__u64 ev1, ev2;
1778 		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1779 
1780 		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1781 		    sb->level != refsb->level ||
1782 		    sb->layout != refsb->layout ||
1783 		    sb->chunksize != refsb->chunksize) {
1784 			pr_warn("md: %pg has strangely different superblock to %pg\n",
1785 				rdev->bdev,
1786 				refdev->bdev);
1787 			return -EINVAL;
1788 		}
1789 		ev1 = le64_to_cpu(sb->events);
1790 		ev2 = le64_to_cpu(refsb->events);
1791 
1792 		if (!spare_disk && ev1 > ev2)
1793 			ret = 1;
1794 		else
1795 			ret = 0;
1796 	}
1797 	if (minor_version)
1798 		sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
1799 	else
1800 		sectors = rdev->sb_start;
1801 	if (sectors < le64_to_cpu(sb->data_size))
1802 		return -EINVAL;
1803 	rdev->sectors = le64_to_cpu(sb->data_size);
1804 	return ret;
1805 }
1806 
1807 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1808 {
1809 	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1810 	__u64 ev1 = le64_to_cpu(sb->events);
1811 
1812 	rdev->raid_disk = -1;
1813 	clear_bit(Faulty, &rdev->flags);
1814 	clear_bit(In_sync, &rdev->flags);
1815 	clear_bit(Bitmap_sync, &rdev->flags);
1816 	clear_bit(WriteMostly, &rdev->flags);
1817 
1818 	if (mddev->raid_disks == 0) {
1819 		mddev->major_version = 1;
1820 		mddev->patch_version = 0;
1821 		mddev->external = 0;
1822 		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1823 		mddev->ctime = le64_to_cpu(sb->ctime);
1824 		mddev->utime = le64_to_cpu(sb->utime);
1825 		mddev->level = le32_to_cpu(sb->level);
1826 		mddev->clevel[0] = 0;
1827 		mddev->layout = le32_to_cpu(sb->layout);
1828 		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1829 		mddev->dev_sectors = le64_to_cpu(sb->size);
1830 		mddev->events = ev1;
1831 		mddev->bitmap_info.offset = 0;
1832 		mddev->bitmap_info.space = 0;
1833 		/* Default location for bitmap is 1K after superblock
1834 		 * using 3K - total of 4K
1835 		 */
1836 		mddev->bitmap_info.default_offset = 1024 >> 9;
1837 		mddev->bitmap_info.default_space = (4096-1024) >> 9;
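		/* i.e. default_offset = 2 sectors (1K) and
		 * default_space = 6 sectors (3K) */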
1838 		mddev->reshape_backwards = 0;
1839 
1840 		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1841 		memcpy(mddev->uuid, sb->set_uuid, 16);
1842 
1843 		mddev->max_disks =  (4096-256)/2;
1844 
1845 		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1846 		    mddev->bitmap_info.file == NULL) {
1847 			mddev->bitmap_info.offset =
1848 				(__s32)le32_to_cpu(sb->bitmap_offset);
1849 			/* Metadata doesn't record how much space is available.
1850 			 * For 1.0, we assume the bitmap can use up to the superblock
1851 			 * if placed before it, else up to 4K beyond the superblock.
1852 			 * For others, assume no change is possible.
1853 			 */
1854 			if (mddev->minor_version > 0)
1855 				mddev->bitmap_info.space = 0;
1856 			else if (mddev->bitmap_info.offset > 0)
1857 				mddev->bitmap_info.space =
1858 					8 - mddev->bitmap_info.offset;
1859 			else
1860 				mddev->bitmap_info.space =
1861 					-mddev->bitmap_info.offset;
1862 		}
1863 
1864 		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1865 			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1866 			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1867 			mddev->new_level = le32_to_cpu(sb->new_level);
1868 			mddev->new_layout = le32_to_cpu(sb->new_layout);
1869 			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1870 			if (mddev->delta_disks < 0 ||
1871 			    (mddev->delta_disks == 0 &&
1872 			     (le32_to_cpu(sb->feature_map)
1873 			      & MD_FEATURE_RESHAPE_BACKWARDS)))
1874 				mddev->reshape_backwards = 1;
1875 		} else {
1876 			mddev->reshape_position = MaxSector;
1877 			mddev->delta_disks = 0;
1878 			mddev->new_level = mddev->level;
1879 			mddev->new_layout = mddev->layout;
1880 			mddev->new_chunk_sectors = mddev->chunk_sectors;
1881 		}
1882 
1883 		if (mddev->level == 0 &&
1884 		    !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1885 			mddev->layout = -1;
1886 
1887 		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1888 			set_bit(MD_HAS_JOURNAL, &mddev->flags);
1889 
1890 		if (le32_to_cpu(sb->feature_map) &
1891 		    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
1892 			if (le32_to_cpu(sb->feature_map) &
1893 			    (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1894 				return -EINVAL;
1895 			if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1896 			    (le32_to_cpu(sb->feature_map) &
1897 					    MD_FEATURE_MULTIPLE_PPLS))
1898 				return -EINVAL;
1899 			set_bit(MD_HAS_PPL, &mddev->flags);
1900 		}
1901 	} else if (mddev->pers == NULL) {
1902 		/* Insist on a good event counter while assembling, except for
1903 		 * spares (which don't need an event count) */
1904 		++ev1;
1905 		if (rdev->desc_nr >= 0 &&
1906 		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1907 		    (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1908 		     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1909 			if (ev1 < mddev->events)
1910 				return -EINVAL;
1911 	} else if (mddev->bitmap) {
1912 		/* If adding to array with a bitmap, then we can accept an
1913 		 * older device, but not too old.
1914 		 */
1915 		if (ev1 < mddev->bitmap->events_cleared)
1916 			return 0;
1917 		if (ev1 < mddev->events)
1918 			set_bit(Bitmap_sync, &rdev->flags);
1919 	} else {
1920 		if (ev1 < mddev->events)
1921 			/* just a hot-add of a new device, leave raid_disk at -1 */
1922 			return 0;
1923 	}
1924 	if (mddev->level != LEVEL_MULTIPATH) {
1925 		int role;
1926 		if (rdev->desc_nr < 0 ||
1927 		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1928 			role = MD_DISK_ROLE_SPARE;
1929 			rdev->desc_nr = -1;
1930 		} else
1931 			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1932 		switch(role) {
1933 		case MD_DISK_ROLE_SPARE: /* spare */
1934 			break;
1935 		case MD_DISK_ROLE_FAULTY: /* faulty */
1936 			set_bit(Faulty, &rdev->flags);
1937 			break;
1938 		case MD_DISK_ROLE_JOURNAL: /* journal device */
1939 			if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1940 				/* journal device without journal feature */
1941 				pr_warn("md: journal device provided without journal feature, ignoring the device\n");
1942 				return -EINVAL;
1943 			}
1944 			set_bit(Journal, &rdev->flags);
1945 			rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1946 			rdev->raid_disk = 0;
1947 			break;
1948 		default:
1949 			rdev->saved_raid_disk = role;
1950 			if ((le32_to_cpu(sb->feature_map) &
1951 			     MD_FEATURE_RECOVERY_OFFSET)) {
1952 				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1953 				if (!(le32_to_cpu(sb->feature_map) &
1954 				      MD_FEATURE_RECOVERY_BITMAP))
1955 					rdev->saved_raid_disk = -1;
1956 			} else {
1957 				/*
1958 				 * If the array is FROZEN, then the device can't
1959 				 * be in_sync with rest of array.
1960 				 */
1961 				if (!test_bit(MD_RECOVERY_FROZEN,
1962 					      &mddev->recovery))
1963 					set_bit(In_sync, &rdev->flags);
1964 			}
1965 			rdev->raid_disk = role;
1966 			break;
1967 		}
1968 		if (sb->devflags & WriteMostly1)
1969 			set_bit(WriteMostly, &rdev->flags);
1970 		if (sb->devflags & FailFast1)
1971 			set_bit(FailFast, &rdev->flags);
1972 		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1973 			set_bit(Replacement, &rdev->flags);
1974 	} else /* MULTIPATH are always insync */
1975 		set_bit(In_sync, &rdev->flags);
1976 
1977 	return 0;
1978 }
1979 
1980 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1981 {
1982 	struct mdp_superblock_1 *sb;
1983 	struct md_rdev *rdev2;
1984 	int max_dev, i;
1985 	/* make rdev->sb match mddev and rdev data. */
1986 
1987 	sb = page_address(rdev->sb_page);
1988 
1989 	sb->feature_map = 0;
1990 	sb->pad0 = 0;
1991 	sb->recovery_offset = cpu_to_le64(0);
1992 	memset(sb->pad3, 0, sizeof(sb->pad3));
1993 
1994 	sb->utime = cpu_to_le64((__u64)mddev->utime);
1995 	sb->events = cpu_to_le64(mddev->events);
1996 	if (mddev->in_sync)
1997 		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1998 	else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1999 		sb->resync_offset = cpu_to_le64(MaxSector);
2000 	else
2001 		sb->resync_offset = cpu_to_le64(0);
2002 
2003 	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
2004 
2005 	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
2006 	sb->size = cpu_to_le64(mddev->dev_sectors);
2007 	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
2008 	sb->level = cpu_to_le32(mddev->level);
2009 	sb->layout = cpu_to_le32(mddev->layout);
2010 	if (test_bit(FailFast, &rdev->flags))
2011 		sb->devflags |= FailFast1;
2012 	else
2013 		sb->devflags &= ~FailFast1;
2014 
2015 	if (test_bit(WriteMostly, &rdev->flags))
2016 		sb->devflags |= WriteMostly1;
2017 	else
2018 		sb->devflags &= ~WriteMostly1;
2019 	sb->data_offset = cpu_to_le64(rdev->data_offset);
2020 	sb->data_size = cpu_to_le64(rdev->sectors);
2021 
2022 	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2023 		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
2024 		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
2025 	}
2026 
2027 	if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
2028 	    !test_bit(In_sync, &rdev->flags)) {
2029 		sb->feature_map |=
2030 			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2031 		sb->recovery_offset =
2032 			cpu_to_le64(rdev->recovery_offset);
2033 		if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2034 			sb->feature_map |=
2035 				cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
2036 	}
2037 	/* Note: recovery_offset and journal_tail share space  */
2038 	if (test_bit(Journal, &rdev->flags))
2039 		sb->journal_tail = cpu_to_le64(rdev->journal_tail);
2040 	if (test_bit(Replacement, &rdev->flags))
2041 		sb->feature_map |=
2042 			cpu_to_le32(MD_FEATURE_REPLACEMENT);
2043 
2044 	if (mddev->reshape_position != MaxSector) {
2045 		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2046 		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2047 		sb->new_layout = cpu_to_le32(mddev->new_layout);
2048 		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2049 		sb->new_level = cpu_to_le32(mddev->new_level);
2050 		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
2051 		if (mddev->delta_disks == 0 &&
2052 		    mddev->reshape_backwards)
2053 			sb->feature_map
2054 				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
2055 		if (rdev->new_data_offset != rdev->data_offset) {
2056 			sb->feature_map
2057 				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2058 			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2059 							     - rdev->data_offset));
2060 		}
2061 	}
2062 
2063 	if (mddev_is_clustered(mddev))
2064 		sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2065 
2066 	if (rdev->badblocks.count == 0)
2067 		/* Nothing to do for bad blocks */ ;
2068 	else if (sb->bblog_offset == 0)
2069 		/* Cannot record bad blocks on this device */
2070 		md_error(mddev, rdev);
2071 	else {
2072 		struct badblocks *bb = &rdev->badblocks;
2073 		__le64 *bbp = (__le64 *)page_address(rdev->bb_page);
2074 		u64 *p = bb->page;
2075 		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
2076 		if (bb->changed) {
2077 			unsigned seq;
2078 
2079 retry:
2080 			seq = read_seqbegin(&bb->lock);
2081 
2082 			memset(bbp, 0xff, PAGE_SIZE);
2083 
2084 			for (i = 0 ; i < bb->count ; i++) {
2085 				u64 internal_bb = p[i];
2086 				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2087 						| BB_LEN(internal_bb));
2088 				bbp[i] = cpu_to_le64(store_bb);
2089 			}
2090 			bb->changed = 0;
2091 			if (read_seqretry(&bb->lock, seq))
2092 				goto retry;
2093 
2094 			bb->sector = (rdev->sb_start +
2095 				      (int)le32_to_cpu(sb->bblog_offset));
2096 			bb->size = le16_to_cpu(sb->bblog_size);
2097 		}
2098 	}
2099 
2100 	max_dev = 0;
2101 	rdev_for_each(rdev2, mddev)
2102 		if (rdev2->desc_nr+1 > max_dev)
2103 			max_dev = rdev2->desc_nr+1;
2104 
2105 	if (max_dev > le32_to_cpu(sb->max_dev)) {
2106 		int bmask;
2107 		sb->max_dev = cpu_to_le32(max_dev);
2108 		rdev->sb_size = max_dev * 2 + 256;
2109 		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2110 		if (rdev->sb_size & bmask)
2111 			rdev->sb_size = (rdev->sb_size | bmask) + 1;
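			/* this rounds sb_size up to the logical block size,
			 * e.g. (hypothetically) 900 bytes with 512-byte
			 * blocks becomes (900 | 511) + 1 = 1024 */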
2112 	} else
2113 		max_dev = le32_to_cpu(sb->max_dev);
2114 
2115 	for (i=0; i<max_dev;i++)
2116 		sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2117 
2118 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2119 		sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
2120 
2121 	if (test_bit(MD_HAS_PPL, &mddev->flags)) {
2122 		if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2123 			sb->feature_map |=
2124 			    cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2125 		else
2126 			sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
2127 		sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2128 		sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2129 	}
2130 
2131 	rdev_for_each(rdev2, mddev) {
2132 		i = rdev2->desc_nr;
2133 		if (test_bit(Faulty, &rdev2->flags))
2134 			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
2135 		else if (test_bit(In_sync, &rdev2->flags))
2136 			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2137 		else if (test_bit(Journal, &rdev2->flags))
2138 			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
2139 		else if (rdev2->raid_disk >= 0)
2140 			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2141 		else
2142 			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2143 	}
2144 
2145 	sb->sb_csum = calc_sb_1_csum(sb);
2146 }
2147 
2148 static sector_t super_1_choose_bm_space(sector_t dev_size)
2149 {
2150 	sector_t bm_space;
2151 
2152 	/* If the device is bigger than 8GiB, save 64KiB for bitmap
2153 	 * usage; if bigger than 200GiB, save 128KiB.
2154 	 */
2155 	if (dev_size < 64*2)
2156 		bm_space = 0;
2157 	else if (dev_size - 64*2 >= 200*1024*1024*2)
2158 		bm_space = 128*2;
2159 	else if (dev_size - 4*2 > 8*1024*1024*2)
2160 		bm_space = 64*2;
2161 	else
2162 		bm_space = 4*2;
2163 	return bm_space;
2164 }
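/* Worked example (all sizes in 512-byte sectors): a 10GiB device has
 * dev_size = 20971520, which exceeds the 8GiB threshold
 * (8*1024*1024*2 = 16777216) but not the 200GiB one, so 64KiB
 * (bm_space = 64*2 sectors) is reserved for the bitmap.
 */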
2165 
2166 static unsigned long long
2167 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
2168 {
2169 	struct mdp_superblock_1 *sb;
2170 	sector_t max_sectors;
2171 	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
2172 		return 0; /* component must fit device */
2173 	if (rdev->data_offset != rdev->new_data_offset)
2174 		return 0; /* too confusing */
2175 	if (rdev->sb_start < rdev->data_offset) {
2176 		/* minor versions 1 and 2; superblock before data */
2177 		max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
2178 		if (!num_sectors || num_sectors > max_sectors)
2179 			num_sectors = max_sectors;
2180 	} else if (rdev->mddev->bitmap_info.offset) {
2181 		/* minor version 0 with bitmap we can't move */
2182 		return 0;
2183 	} else {
2184 		/* minor version 0; superblock after data */
2185 		sector_t sb_start, bm_space;
2186 		sector_t dev_size = bdev_nr_sectors(rdev->bdev);
2187 
2188 		/* 8K is for superblock */
2189 		sb_start = dev_size - 8*2;
2190 		sb_start &= ~(sector_t)(4*2 - 1);
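		/* e.g. (hypothetical) dev_size = 1000005 sectors:
		 * 1000005 - 16 = 999989, rounded down to an 8-sector
		 * boundary, gives sb_start = 999984 */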
2191 
2192 		bm_space = super_1_choose_bm_space(dev_size);
2193 
2194 		/* Space that can be used to store data must exclude the
2195 		 * superblock, the bitmap space and the bad block space (4K).
2196 		 */
2197 		max_sectors = sb_start - bm_space - 4*2;
2198 
2199 		if (!num_sectors || num_sectors > max_sectors)
2200 			num_sectors = max_sectors;
2201 		rdev->sb_start = sb_start;
2202 	}
2203 	sb = page_address(rdev->sb_page);
2204 	sb->data_size = cpu_to_le64(num_sectors);
2205 	sb->super_offset = cpu_to_le64(rdev->sb_start);
2206 	sb->sb_csum = calc_sb_1_csum(sb);
2207 	do {
2208 		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2209 			       rdev->sb_page);
2210 	} while (md_super_wait(rdev->mddev) < 0);
2211 	return num_sectors;
2212 
2213 }
2214 
2215 static int
2216 super_1_allow_new_offset(struct md_rdev *rdev,
2217 			 unsigned long long new_offset)
2218 {
2219 	/* All necessary checks on new >= old have been done */
2220 	struct bitmap *bitmap;
2221 	if (new_offset >= rdev->data_offset)
2222 		return 1;
2223 
2224 	/* with 1.0 metadata, there is no metadata to tread on
2225 	 * so we can always move back */
2226 	if (rdev->mddev->minor_version == 0)
2227 		return 1;
2228 
2229 	/* otherwise we must be sure not to step on
2230 	 * any metadata, so stay:
2231 	 * 36K beyond start of superblock
2232 	 * beyond end of badblocks
2233 	 * beyond write-intent bitmap
2234 	 */
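	/* e.g. with 1.2 metadata (sb_start == 8), any new_offset below
	 * 8 + (32+4)*2 == 80 sectors would land inside that 36K window
	 * and is refused by the check below */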
2235 	if (rdev->sb_start + (32+4)*2 > new_offset)
2236 		return 0;
2237 	bitmap = rdev->mddev->bitmap;
2238 	if (bitmap && !rdev->mddev->bitmap_info.file &&
2239 	    rdev->sb_start + rdev->mddev->bitmap_info.offset +
2240 	    bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
2241 		return 0;
2242 	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2243 		return 0;
2244 
2245 	return 1;
2246 }
2247 
2248 static struct super_type super_types[] = {
2249 	[0] = {
2250 		.name	= "0.90.0",
2251 		.owner	= THIS_MODULE,
2252 		.load_super	    = super_90_load,
2253 		.validate_super	    = super_90_validate,
2254 		.sync_super	    = super_90_sync,
2255 		.rdev_size_change   = super_90_rdev_size_change,
2256 		.allow_new_offset   = super_90_allow_new_offset,
2257 	},
2258 	[1] = {
2259 		.name	= "md-1",
2260 		.owner	= THIS_MODULE,
2261 		.load_super	    = super_1_load,
2262 		.validate_super	    = super_1_validate,
2263 		.sync_super	    = super_1_sync,
2264 		.rdev_size_change   = super_1_rdev_size_change,
2265 		.allow_new_offset   = super_1_allow_new_offset,
2266 	},
2267 };
2268 
2269 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
2270 {
2271 	if (mddev->sync_super) {
2272 		mddev->sync_super(mddev, rdev);
2273 		return;
2274 	}
2275 
2276 	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2277 
2278 	super_types[mddev->major_version].sync_super(mddev, rdev);
2279 }
2280 
2281 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
2282 {
2283 	struct md_rdev *rdev, *rdev2;
2284 
2285 	rcu_read_lock();
2286 	rdev_for_each_rcu(rdev, mddev1) {
2287 		if (test_bit(Faulty, &rdev->flags) ||
2288 		    test_bit(Journal, &rdev->flags) ||
2289 		    rdev->raid_disk == -1)
2290 			continue;
2291 		rdev_for_each_rcu(rdev2, mddev2) {
2292 			if (test_bit(Faulty, &rdev2->flags) ||
2293 			    test_bit(Journal, &rdev2->flags) ||
2294 			    rdev2->raid_disk == -1)
2295 				continue;
2296 			if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
2297 				rcu_read_unlock();
2298 				return 1;
2299 			}
2300 		}
2301 	}
2302 	rcu_read_unlock();
2303 	return 0;
2304 }
2305 
2306 static LIST_HEAD(pending_raid_disks);
2307 
2308 /*
2309  * Try to register data integrity profile for an mddev
2310  *
2311  * This is called when an array is started and after a disk has been kicked
2312  * from the array. It only succeeds if all working and active component devices
2313  * are integrity capable with matching profiles.
2314  */
2315 int md_integrity_register(struct mddev *mddev)
2316 {
2317 	struct md_rdev *rdev, *reference = NULL;
2318 
2319 	if (list_empty(&mddev->disks))
2320 		return 0; /* nothing to do */
2321 	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2322 		return 0; /* shouldn't register, or already is */
2323 	rdev_for_each(rdev, mddev) {
2324 		/* skip spares and non-functional disks */
2325 		if (test_bit(Faulty, &rdev->flags))
2326 			continue;
2327 		if (rdev->raid_disk < 0)
2328 			continue;
2329 		if (!reference) {
2330 			/* Use the first rdev as the reference */
2331 			reference = rdev;
2332 			continue;
2333 		}
2334 		/* does this rdev's profile match the reference profile? */
2335 		if (blk_integrity_compare(reference->bdev->bd_disk,
2336 				rdev->bdev->bd_disk) < 0)
2337 			return -EINVAL;
2338 	}
2339 	if (!reference || !bdev_get_integrity(reference->bdev))
2340 		return 0;
2341 	/*
2342 	 * All component devices are integrity capable and have matching
2343 	 * profiles, register the common profile for the md device.
2344 	 */
2345 	blk_integrity_register(mddev->gendisk,
2346 			       bdev_get_integrity(reference->bdev));
2347 
2348 	pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2349 	if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) ||
2350 	    (mddev->level != 1 && mddev->level != 10 &&
2351 	     bioset_integrity_create(&mddev->io_clone_set, BIO_POOL_SIZE))) {
2352 		/*
2353 		 * No need to handle the failure of bioset_integrity_create
2354 		 * here: this function is called from md_run() -> pers->run(),
2355 		 * and md_run() calls bioset_exit() -> bioset_integrity_free()
2356 		 * on the failure path.
2357 		 */
2358 		pr_err("md: failed to create integrity pool for %s\n",
2359 		       mdname(mddev));
2360 		return -EINVAL;
2361 	}
2362 	return 0;
2363 }
2364 EXPORT_SYMBOL(md_integrity_register);
2365 
2366 /*
2367  * Attempt to add an rdev, but only if it is consistent with the current
2368  * integrity profile
2369  */
2370 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2371 {
2372 	struct blk_integrity *bi_mddev;
2373 
2374 	if (!mddev->gendisk)
2375 		return 0;
2376 
2377 	bi_mddev = blk_get_integrity(mddev->gendisk);
2378 
2379 	if (!bi_mddev) /* nothing to do */
2380 		return 0;
2381 
2382 	if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2383 		pr_err("%s: incompatible integrity profile for %pg\n",
2384 		       mdname(mddev), rdev->bdev);
2385 		return -ENXIO;
2386 	}
2387 
2388 	return 0;
2389 }
2390 EXPORT_SYMBOL(md_integrity_add_rdev);
2391 
2392 static bool rdev_read_only(struct md_rdev *rdev)
2393 {
2394 	return bdev_read_only(rdev->bdev) ||
2395 		(rdev->meta_bdev && bdev_read_only(rdev->meta_bdev));
2396 }
2397 
2398 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2399 {
2400 	char b[BDEVNAME_SIZE];
2401 	int err;
2402 
2403 	/* prevent duplicates */
2404 	if (find_rdev(mddev, rdev->bdev->bd_dev))
2405 		return -EEXIST;
2406 
2407 	if (rdev_read_only(rdev) && mddev->pers)
2408 		return -EROFS;
2409 
2410 	/* make sure rdev->sectors exceeds mddev->dev_sectors */
2411 	if (!test_bit(Journal, &rdev->flags) &&
2412 	    rdev->sectors &&
2413 	    (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2414 		if (mddev->pers) {
2415 			/* Cannot change size, so fail
2416 			 * If mddev->level <= 0, then we don't care
2417 			 * about aligning sizes (e.g. linear)
2418 			 */
2419 			if (mddev->level > 0)
2420 				return -ENOSPC;
2421 		} else
2422 			mddev->dev_sectors = rdev->sectors;
2423 	}
2424 
2425 	/* Verify rdev->desc_nr is unique.
2426 	 * If it is -1, assign a free number, else
2427 	 * check number is not in use
2428 	 * check that the number is not in use
2429 	rcu_read_lock();
2430 	if (rdev->desc_nr < 0) {
2431 		int choice = 0;
2432 		if (mddev->pers)
2433 			choice = mddev->raid_disks;
2434 		while (md_find_rdev_nr_rcu(mddev, choice))
2435 			choice++;
2436 		rdev->desc_nr = choice;
2437 	} else {
2438 		if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2439 			rcu_read_unlock();
2440 			return -EBUSY;
2441 		}
2442 	}
2443 	rcu_read_unlock();
2444 	if (!test_bit(Journal, &rdev->flags) &&
2445 	    mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2446 		pr_warn("md: %s: array is limited to %d devices\n",
2447 			mdname(mddev), mddev->max_disks);
2448 		return -EBUSY;
2449 	}
2450 	snprintf(b, sizeof(b), "%pg", rdev->bdev);
2451 	strreplace(b, '/', '!');
2452 
2453 	rdev->mddev = mddev;
2454 	pr_debug("md: bind<%s>\n", b);
2455 
2456 	if (mddev->raid_disks)
2457 		mddev_create_serial_pool(mddev, rdev, false);
2458 
2459 	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2460 		goto fail;
2461 
2462 	/* failure here is OK */
2463 	err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
2464 	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2465 	rdev->sysfs_unack_badblocks =
2466 		sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2467 	rdev->sysfs_badblocks =
2468 		sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
2469 
2470 	list_add_rcu(&rdev->same_set, &mddev->disks);
2471 	bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2472 
2473 	/* May as well allow recovery to be retried once */
2474 	mddev->recovery_disabled++;
2475 
2476 	return 0;
2477 
2478  fail:
2479 	pr_warn("md: failed to register dev-%s for %s\n",
2480 		b, mdname(mddev));
2481 	return err;
2482 }
2483 
2484 void md_autodetect_dev(dev_t dev);
2485 
2486 /* just for claiming the bdev */
2487 static struct md_rdev claim_rdev;
2488 
2489 static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
2490 {
2491 	pr_debug("md: export_rdev(%pg)\n", rdev->bdev);
2492 	md_rdev_clear(rdev);
2493 #ifndef MODULE
2494 	if (test_bit(AutoDetected, &rdev->flags))
2495 		md_autodetect_dev(rdev->bdev->bd_dev);
2496 #endif
2497 	blkdev_put(rdev->bdev,
2498 		   test_bit(Holder, &rdev->flags) ? rdev : &claim_rdev);
2499 	rdev->bdev = NULL;
2500 	kobject_put(&rdev->kobj);
2501 }
2502 
2503 static void md_kick_rdev_from_array(struct md_rdev *rdev)
2504 {
2505 	struct mddev *mddev = rdev->mddev;
2506 
2507 	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2508 	list_del_rcu(&rdev->same_set);
2509 	pr_debug("md: unbind<%pg>\n", rdev->bdev);
2510 	mddev_destroy_serial_pool(rdev->mddev, rdev, false);
2511 	rdev->mddev = NULL;
2512 	sysfs_remove_link(&rdev->kobj, "block");
2513 	sysfs_put(rdev->sysfs_state);
2514 	sysfs_put(rdev->sysfs_unack_badblocks);
2515 	sysfs_put(rdev->sysfs_badblocks);
2516 	rdev->sysfs_state = NULL;
2517 	rdev->sysfs_unack_badblocks = NULL;
2518 	rdev->sysfs_badblocks = NULL;
2519 	rdev->badblocks.count = 0;
2520 
2521 	synchronize_rcu();
2522 
2523 	/*
2524 	 * kobject_del() will wait for all in progress writers to be done, where
2525 	 * reconfig_mutex is held, hence it can't be called under
2526 	 * reconfig_mutex and it's delayed to mddev_unlock().
2527 	 */
2528 	list_add(&rdev->same_set, &mddev->deleting);
2529 }
2530 
2531 static void export_array(struct mddev *mddev)
2532 {
2533 	struct md_rdev *rdev;
2534 
2535 	while (!list_empty(&mddev->disks)) {
2536 		rdev = list_first_entry(&mddev->disks, struct md_rdev,
2537 					same_set);
2538 		md_kick_rdev_from_array(rdev);
2539 	}
2540 	mddev->raid_disks = 0;
2541 	mddev->major_version = 0;
2542 }
2543 
2544 static bool set_in_sync(struct mddev *mddev)
2545 {
2546 	lockdep_assert_held(&mddev->lock);
2547 	if (!mddev->in_sync) {
2548 		mddev->sync_checkers++;
2549 		spin_unlock(&mddev->lock);
2550 		percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2551 		spin_lock(&mddev->lock);
2552 		if (!mddev->in_sync &&
2553 		    percpu_ref_is_zero(&mddev->writes_pending)) {
2554 			mddev->in_sync = 1;
2555 			/*
2556 			 * Ensure ->in_sync is visible before we clear
2557 			 * ->sync_checkers.
2558 			 */
2559 			smp_mb();
2560 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2561 			sysfs_notify_dirent_safe(mddev->sysfs_state);
2562 		}
2563 		if (--mddev->sync_checkers == 0)
2564 			percpu_ref_switch_to_percpu(&mddev->writes_pending);
2565 	}
2566 	if (mddev->safemode == 1)
2567 		mddev->safemode = 0;
2568 	return mddev->in_sync;
2569 }
2570 
2571 static void sync_sbs(struct mddev *mddev, int nospares)
2572 {
2573 	/* Update each superblock (in-memory image), but
2574 	 * if we are allowed to, skip spares which already
2575 	 * have the right event counter, or have one earlier
2576 	 * (which would mean they aren't being marked as dirty
2577 	 * with the rest of the array)
2578 	 */
2579 	struct md_rdev *rdev;
2580 	rdev_for_each(rdev, mddev) {
2581 		if (rdev->sb_events == mddev->events ||
2582 		    (nospares &&
2583 		     rdev->raid_disk < 0 &&
2584 		     rdev->sb_events+1 == mddev->events)) {
2585 			/* Don't update this superblock */
2586 			rdev->sb_loaded = 2;
2587 		} else {
2588 			sync_super(mddev, rdev);
2589 			rdev->sb_loaded = 1;
2590 		}
2591 	}
2592 }
2593 
2594 static bool does_sb_need_changing(struct mddev *mddev)
2595 {
2596 	struct md_rdev *rdev = NULL, *iter;
2597 	struct mdp_superblock_1 *sb;
2598 	int role;
2599 
2600 	/* Find a good rdev */
2601 	rdev_for_each(iter, mddev)
2602 		if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
2603 			rdev = iter;
2604 			break;
2605 		}
2606 
2607 	/* No good device found. */
2608 	if (!rdev)
2609 		return false;
2610 
2611 	sb = page_address(rdev->sb_page);
2612 	/* Check if a device has become faulty or a spare has become active */
2613 	rdev_for_each(rdev, mddev) {
2614 		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2615 		/* Device activated? */
2616 		if (role == MD_DISK_ROLE_SPARE && rdev->raid_disk >= 0 &&
2617 		    !test_bit(Faulty, &rdev->flags))
2618 			return true;
2619 		/* Device turned faulty? */
2620 		if (test_bit(Faulty, &rdev->flags) && (role < MD_DISK_ROLE_MAX))
2621 			return true;
2622 	}
2623 
2624 	/* Check if any mddev parameters have changed */
2625 	if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2626 	    (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2627 	    (mddev->layout != le32_to_cpu(sb->layout)) ||
2628 	    (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2629 	    (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2630 		return true;
2631 
2632 	return false;
2633 }
2634 
2635 void md_update_sb(struct mddev *mddev, int force_change)
2636 {
2637 	struct md_rdev *rdev;
2638 	int sync_req;
2639 	int nospares = 0;
2640 	int any_badblocks_changed = 0;
2641 	int ret = -1;
2642 
2643 	if (!md_is_rdwr(mddev)) {
2644 		if (force_change)
2645 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2646 		return;
2647 	}
2648 
2649 repeat:
2650 	if (mddev_is_clustered(mddev)) {
2651 		if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2652 			force_change = 1;
2653 		if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2654 			nospares = 1;
2655 		ret = md_cluster_ops->metadata_update_start(mddev);
2656 		/* Has someone else updated the sb? */
2657 		if (!does_sb_need_changing(mddev)) {
2658 			if (ret == 0)
2659 				md_cluster_ops->metadata_update_cancel(mddev);
2660 			bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2661 							 BIT(MD_SB_CHANGE_DEVS) |
2662 							 BIT(MD_SB_CHANGE_CLEAN));
2663 			return;
2664 		}
2665 	}
2666 
2667 	/*
2668 	 * First make sure individual recovery_offsets are correct
2669 	 * curr_resync_completed can only be used during recovery.
2670 	 * During reshape/resync it might use array-addresses rather
2671 	 * that device addresses.
2672 	 * than device addresses.
2673 	rdev_for_each(rdev, mddev) {
2674 		if (rdev->raid_disk >= 0 &&
2675 		    mddev->delta_disks >= 0 &&
2676 		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2677 		    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2678 		    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2679 		    !test_bit(Journal, &rdev->flags) &&
2680 		    !test_bit(In_sync, &rdev->flags) &&
2681 		    mddev->curr_resync_completed > rdev->recovery_offset)
2682 				rdev->recovery_offset = mddev->curr_resync_completed;
2683 
2684 	}
2685 	if (!mddev->persistent) {
2686 		clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2687 		clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2688 		if (!mddev->external) {
2689 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2690 			rdev_for_each(rdev, mddev) {
2691 				if (rdev->badblocks.changed) {
2692 					rdev->badblocks.changed = 0;
2693 					ack_all_badblocks(&rdev->badblocks);
2694 					md_error(mddev, rdev);
2695 				}
2696 				clear_bit(Blocked, &rdev->flags);
2697 				clear_bit(BlockedBadBlocks, &rdev->flags);
2698 				wake_up(&rdev->blocked_wait);
2699 			}
2700 		}
2701 		wake_up(&mddev->sb_wait);
2702 		return;
2703 	}
2704 
2705 	spin_lock(&mddev->lock);
2706 
2707 	mddev->utime = ktime_get_real_seconds();
2708 
2709 	if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2710 		force_change = 1;
2711 	if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2712 		/* just a clean <-> dirty transition; possibly leave spares alone,
2713 		 * though if 'events' isn't the right even/odd, we will have to
2714 		 * update the spares after all
2715 		 */
2716 		nospares = 1;
2717 	if (force_change)
2718 		nospares = 0;
2719 	if (mddev->degraded)
2720 		/* If the array is degraded, then skipping spares is both
2721 		 * dangerous and fairly pointless.
2722 		 * Dangerous because a device that was removed from the array
2723 		 * might have an event_count that still looks up-to-date,
2724 		 * so it can be re-added without a resync.
2725 		 * Pointless because if there are any spares to skip,
2726 		 * then a recovery will happen and soon that array won't
2727 		 * be degraded any more and the spare can go back to sleep then.
2728 		 */
2729 		nospares = 0;
2730 
2731 	sync_req = mddev->in_sync;
2732 
2733 	/* If this is just a dirty<->clean transition, and the array is clean
2734 	 * and 'events' is odd, we can roll back to the previous clean state */
2735 	if (nospares
2736 	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2737 	    && mddev->can_decrease_events
2738 	    && mddev->events != 1) {
2739 		mddev->events--;
2740 		mddev->can_decrease_events = 0;
2741 	} else {
2742 		/* otherwise we have to go forward and ... */
2743 		mddev->events ++;
2744 		mddev->can_decrease_events = nospares;
2745 	}
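	/* e.g. if going dirty bumped 'events' from 42 to 43, returning to
	 * clean can roll back to 42 here rather than advance to 44, so
	 * spare superblocks still recorded at 42 need not be rewritten */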
2746 
2747 	/*
2748 	 * This 64-bit counter should never wrap.
2749 	 * Either we are in around ~1 trillion A.C., assuming
2750 	 * 1 reboot per second, or we have a bug...
2751 	 */
2752 	WARN_ON(mddev->events == 0);
2753 
2754 	rdev_for_each(rdev, mddev) {
2755 		if (rdev->badblocks.changed)
2756 			any_badblocks_changed++;
2757 		if (test_bit(Faulty, &rdev->flags))
2758 			set_bit(FaultRecorded, &rdev->flags);
2759 	}
2760 
2761 	sync_sbs(mddev, nospares);
2762 	spin_unlock(&mddev->lock);
2763 
2764 	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2765 		 mdname(mddev), mddev->in_sync);
2766 
2767 	if (mddev->queue)
2768 		blk_add_trace_msg(mddev->queue, "md md_update_sb");
2769 rewrite:
2770 	md_bitmap_update_sb(mddev->bitmap);
2771 	rdev_for_each(rdev, mddev) {
2772 		if (rdev->sb_loaded != 1)
2773 			continue; /* no noise on spare devices */
2774 
2775 		if (!test_bit(Faulty, &rdev->flags)) {
2776 			md_super_write(mddev,rdev,
2777 				       rdev->sb_start, rdev->sb_size,
2778 				       rdev->sb_page);
2779 			pr_debug("md: (write) %pg's sb offset: %llu\n",
2780 				 rdev->bdev,
2781 				 (unsigned long long)rdev->sb_start);
2782 			rdev->sb_events = mddev->events;
2783 			if (rdev->badblocks.size) {
2784 				md_super_write(mddev, rdev,
2785 					       rdev->badblocks.sector,
2786 					       rdev->badblocks.size << 9,
2787 					       rdev->bb_page);
2788 				rdev->badblocks.size = 0;
2789 			}
2790 
2791 		} else
2792 			pr_debug("md: %pg (skipping faulty)\n",
2793 				 rdev->bdev);
2794 
2795 		if (mddev->level == LEVEL_MULTIPATH)
2796 			/* only need to write one superblock... */
2797 			break;
2798 	}
2799 	if (md_super_wait(mddev) < 0)
2800 		goto rewrite;
2801 	/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
2802 
2803 	if (mddev_is_clustered(mddev) && ret == 0)
2804 		md_cluster_ops->metadata_update_finish(mddev);
2805 
2806 	if (mddev->in_sync != sync_req ||
2807 	    !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2808 			       BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
2809 		/* have to write it out again */
2810 		goto repeat;
2811 	wake_up(&mddev->sb_wait);
2812 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2813 		sysfs_notify_dirent_safe(mddev->sysfs_completed);
2814 
2815 	rdev_for_each(rdev, mddev) {
2816 		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2817 			clear_bit(Blocked, &rdev->flags);
2818 
2819 		if (any_badblocks_changed)
2820 			ack_all_badblocks(&rdev->badblocks);
2821 		clear_bit(BlockedBadBlocks, &rdev->flags);
2822 		wake_up(&rdev->blocked_wait);
2823 	}
2824 }
2825 EXPORT_SYMBOL(md_update_sb);
2826 
2827 static int add_bound_rdev(struct md_rdev *rdev)
2828 {
2829 	struct mddev *mddev = rdev->mddev;
2830 	int err = 0;
2831 	bool add_journal = test_bit(Journal, &rdev->flags);
2832 
2833 	if (!mddev->pers->hot_remove_disk || add_journal) {
2834 		/* If there is hot_add_disk but no hot_remove_disk,
2835 		 * then added disks are for geometry changes
2836 		 * and should be added immediately.
2837 		 */
2838 		super_types[mddev->major_version].
2839 			validate_super(mddev, rdev);
2840 		if (add_journal)
2841 			mddev_suspend(mddev);
2842 		err = mddev->pers->hot_add_disk(mddev, rdev);
2843 		if (add_journal)
2844 			mddev_resume(mddev);
2845 		if (err) {
2846 			md_kick_rdev_from_array(rdev);
2847 			return err;
2848 		}
2849 	}
2850 	sysfs_notify_dirent_safe(rdev->sysfs_state);
2851 
2852 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2853 	if (mddev->degraded)
2854 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2855 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2856 	md_new_event();
2857 	md_wakeup_thread(mddev->thread);
2858 	return 0;
2859 }
2860 
2861 /* words written to sysfs files may, or may not, be \n terminated.
2862  * We want to accept either case. For this we use cmd_match.
2863  */
2864 static int cmd_match(const char *cmd, const char *str)
2865 {
2866 	/* See if cmd, written into a sysfs file, matches
2867 	 * str.  They must either be the same, or cmd can
2868 	 * have a trailing newline
2869 	 */
2870 	while (*cmd && *str && *cmd == *str) {
2871 		cmd++;
2872 		str++;
2873 	}
2874 	if (*cmd == '\n')
2875 		cmd++;
2876 	if (*str || *cmd)
2877 		return 0;
2878 	return 1;
2879 }
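/* e.g. cmd_match("faulty\n", "faulty") and cmd_match("faulty", "faulty")
 * both return 1, while cmd_match("faulty2", "faulty") returns 0.
 */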
2880 
2881 struct rdev_sysfs_entry {
2882 	struct attribute attr;
2883 	ssize_t (*show)(struct md_rdev *, char *);
2884 	ssize_t (*store)(struct md_rdev *, const char *, size_t);
2885 };
2886 
2887 static ssize_t
2888 state_show(struct md_rdev *rdev, char *page)
2889 {
2890 	char *sep = ",";
2891 	size_t len = 0;
2892 	unsigned long flags = READ_ONCE(rdev->flags);
2893 
2894 	if (test_bit(Faulty, &flags) ||
2895 	    (!test_bit(ExternalBbl, &flags) &&
2896 	    rdev->badblocks.unacked_exist))
2897 		len += sprintf(page+len, "faulty%s", sep);
2898 	if (test_bit(In_sync, &flags))
2899 		len += sprintf(page+len, "in_sync%s", sep);
2900 	if (test_bit(Journal, &flags))
2901 		len += sprintf(page+len, "journal%s", sep);
2902 	if (test_bit(WriteMostly, &flags))
2903 		len += sprintf(page+len, "write_mostly%s", sep);
2904 	if (test_bit(Blocked, &flags) ||
2905 	    (rdev->badblocks.unacked_exist
2906 	     && !test_bit(Faulty, &flags)))
2907 		len += sprintf(page+len, "blocked%s", sep);
2908 	if (!test_bit(Faulty, &flags) &&
2909 	    !test_bit(Journal, &flags) &&
2910 	    !test_bit(In_sync, &flags))
2911 		len += sprintf(page+len, "spare%s", sep);
2912 	if (test_bit(WriteErrorSeen, &flags))
2913 		len += sprintf(page+len, "write_error%s", sep);
2914 	if (test_bit(WantReplacement, &flags))
2915 		len += sprintf(page+len, "want_replacement%s", sep);
2916 	if (test_bit(Replacement, &flags))
2917 		len += sprintf(page+len, "replacement%s", sep);
2918 	if (test_bit(ExternalBbl, &flags))
2919 		len += sprintf(page+len, "external_bbl%s", sep);
2920 	if (test_bit(FailFast, &flags))
2921 		len += sprintf(page+len, "failfast%s", sep);
2922 
2923 	if (len)
2924 		len -= strlen(sep);
2925 
2926 	return len+sprintf(page+len, "\n");
2927 }
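/* e.g. an in-sync, write-mostly device reads back as
 * "in_sync,write_mostly\n"; the trailing separator is stripped above.
 */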
2928 
2929 static ssize_t
2930 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2931 {
2932 	/* can write
2933 	 *  faulty  - simulates an error
2934 	 *  remove  - disconnects the device
2935 	 *  writemostly - sets write_mostly
2936 	 *  -writemostly - clears write_mostly
2937 	 *  blocked - sets the Blocked flag
2938 	 *  -blocked - clears the Blocked flag and possibly simulates an error
2939 	 *  insync - sets In_sync provided the device isn't active
2940 	 *  -insync - clears In_sync for a device with a slot assigned,
2941 	 *            so that it gets rebuilt based on bitmap
2942 	 *  write_error - sets WriteErrorSeen
2943 	 *  -write_error - clears WriteErrorSeen
2944 	 *  {,-}failfast - set/clear FailFast
2945 	 */
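	/* e.g. writing "want_replacement" to a device's sysfs 'state' file
	 * (typically /sys/block/mdX/md/dev-YYY/state) marks it to be
	 * replaced by a spare */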
2946 
2947 	struct mddev *mddev = rdev->mddev;
2948 	int err = -EINVAL;
2949 	bool need_update_sb = false;
2950 
2951 	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2952 		md_error(rdev->mddev, rdev);
2953 
2954 		if (test_bit(MD_BROKEN, &rdev->mddev->flags))
2955 			err = -EBUSY;
2956 		else
2957 			err = 0;
2958 	} else if (cmd_match(buf, "remove")) {
2959 		if (rdev->mddev->pers) {
2960 			clear_bit(Blocked, &rdev->flags);
2961 			remove_and_add_spares(rdev->mddev, rdev);
2962 		}
2963 		if (rdev->raid_disk >= 0)
2964 			err = -EBUSY;
2965 		else {
2966 			err = 0;
2967 			if (mddev_is_clustered(mddev))
2968 				err = md_cluster_ops->remove_disk(mddev, rdev);
2969 
2970 			if (err == 0) {
2971 				md_kick_rdev_from_array(rdev);
2972 				if (mddev->pers) {
2973 					set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2974 					md_wakeup_thread(mddev->thread);
2975 				}
2976 				md_new_event();
2977 			}
2978 		}
2979 	} else if (cmd_match(buf, "writemostly")) {
2980 		set_bit(WriteMostly, &rdev->flags);
2981 		mddev_create_serial_pool(rdev->mddev, rdev, false);
2982 		need_update_sb = true;
2983 		err = 0;
2984 	} else if (cmd_match(buf, "-writemostly")) {
2985 		mddev_destroy_serial_pool(rdev->mddev, rdev, false);
2986 		clear_bit(WriteMostly, &rdev->flags);
2987 		need_update_sb = true;
2988 		err = 0;
2989 	} else if (cmd_match(buf, "blocked")) {
2990 		set_bit(Blocked, &rdev->flags);
2991 		err = 0;
2992 	} else if (cmd_match(buf, "-blocked")) {
2993 		if (!test_bit(Faulty, &rdev->flags) &&
2994 		    !test_bit(ExternalBbl, &rdev->flags) &&
2995 		    rdev->badblocks.unacked_exist) {
2996 			/* metadata handler doesn't understand badblocks,
2997 			 * so we need to fail the device
2998 			 */
2999 			md_error(rdev->mddev, rdev);
3000 		}
3001 		clear_bit(Blocked, &rdev->flags);
3002 		clear_bit(BlockedBadBlocks, &rdev->flags);
3003 		wake_up(&rdev->blocked_wait);
3004 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3005 		md_wakeup_thread(rdev->mddev->thread);
3006 
3007 		err = 0;
3008 	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3009 		set_bit(In_sync, &rdev->flags);
3010 		err = 0;
3011 	} else if (cmd_match(buf, "failfast")) {
3012 		set_bit(FailFast, &rdev->flags);
3013 		need_update_sb = true;
3014 		err = 0;
3015 	} else if (cmd_match(buf, "-failfast")) {
3016 		clear_bit(FailFast, &rdev->flags);
3017 		need_update_sb = true;
3018 		err = 0;
3019 	} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3020 		   !test_bit(Journal, &rdev->flags)) {
3021 		if (rdev->mddev->pers == NULL) {
3022 			clear_bit(In_sync, &rdev->flags);
3023 			rdev->saved_raid_disk = rdev->raid_disk;
3024 			rdev->raid_disk = -1;
3025 			err = 0;
3026 		}
3027 	} else if (cmd_match(buf, "write_error")) {
3028 		set_bit(WriteErrorSeen, &rdev->flags);
3029 		err = 0;
3030 	} else if (cmd_match(buf, "-write_error")) {
3031 		clear_bit(WriteErrorSeen, &rdev->flags);
3032 		err = 0;
3033 	} else if (cmd_match(buf, "want_replacement")) {
3034 		/* Any non-spare device that is not a replacement can
3035 		 * become want_replacement at any time, but we then need to
3036 		 * check if recovery is needed.
3037 		 */
3038 		if (rdev->raid_disk >= 0 &&
3039 		    !test_bit(Journal, &rdev->flags) &&
3040 		    !test_bit(Replacement, &rdev->flags))
3041 			set_bit(WantReplacement, &rdev->flags);
3042 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3043 		md_wakeup_thread(rdev->mddev->thread);
3044 		err = 0;
3045 	} else if (cmd_match(buf, "-want_replacement")) {
3046 		/* Clearing 'want_replacement' is always allowed.
3047 		 * Once replacement starts it is too late, though.
3048 		 */
3049 		err = 0;
3050 		clear_bit(WantReplacement, &rdev->flags);
3051 	} else if (cmd_match(buf, "replacement")) {
3052 		/* Can only set a device as a replacement when array has not
3053 		 * yet been started.  Once running, replacement is automatic
3054 		 * from spares, or by assigning 'slot'.
3055 		 */
3056 		if (rdev->mddev->pers)
3057 			err = -EBUSY;
3058 		else {
3059 			set_bit(Replacement, &rdev->flags);
3060 			err = 0;
3061 		}
3062 	} else if (cmd_match(buf, "-replacement")) {
3063 		/* Similarly, can only clear Replacement before start */
3064 		if (rdev->mddev->pers)
3065 			err = -EBUSY;
3066 		else {
3067 			clear_bit(Replacement, &rdev->flags);
3068 			err = 0;
3069 		}
3070 	} else if (cmd_match(buf, "re-add")) {
3071 		if (!rdev->mddev->pers)
3072 			err = -EINVAL;
3073 		else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3074 				rdev->saved_raid_disk >= 0) {
3075 			/* clear_bit is performed _after_ all the devices
3076 			 * have their local Faulty bit cleared. If any writes
3077 			 * happen in the meantime in the local node, they
3078 			 * will land in the local bitmap, which will be synced
3079 			 * by this node eventually
3080 			 */
3081 			if (!mddev_is_clustered(rdev->mddev) ||
3082 			    (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3083 				clear_bit(Faulty, &rdev->flags);
3084 				err = add_bound_rdev(rdev);
3085 			}
3086 		} else
3087 			err = -EBUSY;
3088 	} else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3089 		set_bit(ExternalBbl, &rdev->flags);
3090 		rdev->badblocks.shift = 0;
3091 		err = 0;
3092 	} else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3093 		clear_bit(ExternalBbl, &rdev->flags);
3094 		err = 0;
3095 	}
3096 	if (need_update_sb)
3097 		md_update_sb(mddev, 1);
3098 	if (!err)
3099 		sysfs_notify_dirent_safe(rdev->sysfs_state);
3100 	return err ? err : len;
3101 }
3102 static struct rdev_sysfs_entry rdev_state =
3103 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
3104 
3105 static ssize_t
3106 errors_show(struct md_rdev *rdev, char *page)
3107 {
3108 	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3109 }
3110 
3111 static ssize_t
3112 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
3113 {
3114 	unsigned int n;
3115 	int rv;
3116 
3117 	rv = kstrtouint(buf, 10, &n);
3118 	if (rv < 0)
3119 		return rv;
3120 	atomic_set(&rdev->corrected_errors, n);
3121 	return len;
3122 }
3123 static struct rdev_sysfs_entry rdev_errors =
3124 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
3125 
3126 static ssize_t
3127 slot_show(struct md_rdev *rdev, char *page)
3128 {
3129 	if (test_bit(Journal, &rdev->flags))
3130 		return sprintf(page, "journal\n");
3131 	else if (rdev->raid_disk < 0)
3132 		return sprintf(page, "none\n");
3133 	else
3134 		return sprintf(page, "%d\n", rdev->raid_disk);
3135 }
3136 
3137 static ssize_t
3138 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
3139 {
3140 	int slot;
3141 	int err;
3142 
3143 	if (test_bit(Journal, &rdev->flags))
3144 		return -EBUSY;
3145 	if (strncmp(buf, "none", 4)==0)
3146 		slot = -1;
3147 	else {
3148 		err = kstrtouint(buf, 10, (unsigned int *)&slot);
3149 		if (err < 0)
3150 			return err;
3151 		if (slot < 0)
3152 			/* overflow */
3153 			return -ENOSPC;
3154 	}
3155 	if (rdev->mddev->pers && slot == -1) {
3156 		/* Setting 'slot' on an active array requires also
3157 		 * updating the 'rd%d' link, and communicating
3158 		 * with the personality with ->hot_*_disk.
3159 		 * For now we only support removing
3160 		 * failed/spare devices.  This normally happens automatically,
3161 		 * but not when the metadata is externally managed.
3162 		 */
3163 		if (rdev->raid_disk == -1)
3164 			return -EEXIST;
3165 		/* personality does all needed checks */
3166 		if (rdev->mddev->pers->hot_remove_disk == NULL)
3167 			return -EINVAL;
3168 		clear_bit(Blocked, &rdev->flags);
3169 		remove_and_add_spares(rdev->mddev, rdev);
3170 		if (rdev->raid_disk >= 0)
3171 			return -EBUSY;
3172 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3173 		md_wakeup_thread(rdev->mddev->thread);
3174 	} else if (rdev->mddev->pers) {
3175 		/* Activating a spare .. or possibly reactivating
3176 		 * if we ever get bitmaps working here.
3177 		 */
3178 		int err;
3179 
3180 		if (rdev->raid_disk != -1)
3181 			return -EBUSY;
3182 
3183 		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3184 			return -EBUSY;
3185 
3186 		if (rdev->mddev->pers->hot_add_disk == NULL)
3187 			return -EINVAL;
3188 
3189 		if (slot >= rdev->mddev->raid_disks &&
3190 		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3191 			return -ENOSPC;
3192 
3193 		rdev->raid_disk = slot;
3194 		if (test_bit(In_sync, &rdev->flags))
3195 			rdev->saved_raid_disk = slot;
3196 		else
3197 			rdev->saved_raid_disk = -1;
3198 		clear_bit(In_sync, &rdev->flags);
3199 		clear_bit(Bitmap_sync, &rdev->flags);
3200 		err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
3201 		if (err) {
3202 			rdev->raid_disk = -1;
3203 			return err;
3204 		} else
3205 			sysfs_notify_dirent_safe(rdev->sysfs_state);
3206 		/* failure here is OK */;
3207 		sysfs_link_rdev(rdev->mddev, rdev);
3208 		/* don't wakeup anyone, leave that to userspace. */
3209 	} else {
3210 		if (slot >= rdev->mddev->raid_disks &&
3211 		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3212 			return -ENOSPC;
3213 		rdev->raid_disk = slot;
3214 		/* assume it is working */
3215 		clear_bit(Faulty, &rdev->flags);
3216 		clear_bit(WriteMostly, &rdev->flags);
3217 		set_bit(In_sync, &rdev->flags);
3218 		sysfs_notify_dirent_safe(rdev->sysfs_state);
3219 	}
3220 	return len;
3221 }
3222 
3223 static struct rdev_sysfs_entry rdev_slot =
3224 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
3225 
3226 static ssize_t
3227 offset_show(struct md_rdev *rdev, char *page)
3228 {
3229 	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
3230 }
3231 
3232 static ssize_t
3233 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
3234 {
3235 	unsigned long long offset;
3236 	if (kstrtoull(buf, 10, &offset) < 0)
3237 		return -EINVAL;
3238 	if (rdev->mddev->pers && rdev->raid_disk >= 0)
3239 		return -EBUSY;
3240 	if (rdev->sectors && rdev->mddev->external)
3241 		/* Must set offset before size, so overlap checks
3242 		 * can be sane */
3243 		return -EBUSY;
3244 	rdev->data_offset = offset;
3245 	rdev->new_data_offset = offset;
3246 	return len;
3247 }
3248 
3249 static struct rdev_sysfs_entry rdev_offset =
3250 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
3251 
3252 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3253 {
3254 	return sprintf(page, "%llu\n",
3255 		       (unsigned long long)rdev->new_data_offset);
3256 }
3257 
3258 static ssize_t new_offset_store(struct md_rdev *rdev,
3259 				const char *buf, size_t len)
3260 {
3261 	unsigned long long new_offset;
3262 	struct mddev *mddev = rdev->mddev;
3263 
3264 	if (kstrtoull(buf, 10, &new_offset) < 0)
3265 		return -EINVAL;
3266 
3267 	if (mddev->sync_thread ||
3268 	    test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
3269 		return -EBUSY;
3270 	if (new_offset == rdev->data_offset)
3271 		/* reset is always permitted */
3272 		;
3273 	else if (new_offset > rdev->data_offset) {
3274 		/* must not push array size beyond rdev_sectors */
3275 		if (new_offset - rdev->data_offset
3276 		    + mddev->dev_sectors > rdev->sectors)
3277 				return -E2BIG;
3278 	}
3279 	/* Metadata worries about other space details. */
3280 
3281 	/* decreasing the offset is inconsistent with a backwards
3282 	 * reshape.
3283 	 */
3284 	if (new_offset < rdev->data_offset &&
3285 	    mddev->reshape_backwards)
3286 		return -EINVAL;
3287 	/* Increasing offset is inconsistent with forwards
3288 	 * reshape.  reshape_direction should be set to
3289 	 * 'backwards' first.
3290 	 */
3291 	if (new_offset > rdev->data_offset &&
3292 	    !mddev->reshape_backwards)
3293 		return -EINVAL;
3294 
3295 	if (mddev->pers && mddev->persistent &&
3296 	    !super_types[mddev->major_version]
3297 	    .allow_new_offset(rdev, new_offset))
3298 		return -E2BIG;
3299 	rdev->new_data_offset = new_offset;
3300 	if (new_offset > rdev->data_offset)
3301 		mddev->reshape_backwards = 1;
3302 	else if (new_offset < rdev->data_offset)
3303 		mddev->reshape_backwards = 0;
3304 
3305 	return len;
3306 }
3307 static struct rdev_sysfs_entry rdev_new_offset =
3308 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3309 
3310 static ssize_t
3311 rdev_size_show(struct md_rdev *rdev, char *page)
3312 {
3313 	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
3314 }
3315 
3316 static int md_rdevs_overlap(struct md_rdev *a, struct md_rdev *b)
3317 {
3318 	/* check if two start/length pairs overlap */
3319 	if (a->data_offset + a->sectors <= b->data_offset)
3320 		return false;
3321 	if (b->data_offset + b->sectors <= a->data_offset)
3322 		return false;
3323 	return true;
3324 }
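/* e.g. ranges starting at sectors 0 and 100 with lengths 100 and 50 do
 * not overlap (0 + 100 <= 100), whereas lengths 101 and 50 would.
 */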
3325 
3326 static bool md_rdev_overlaps(struct md_rdev *rdev)
3327 {
3328 	struct mddev *mddev;
3329 	struct md_rdev *rdev2;
3330 
3331 	spin_lock(&all_mddevs_lock);
3332 	list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
3333 		if (test_bit(MD_DELETED, &mddev->flags))
3334 			continue;
3335 		rdev_for_each(rdev2, mddev) {
3336 			if (rdev != rdev2 && rdev->bdev == rdev2->bdev &&
3337 			    md_rdevs_overlap(rdev, rdev2)) {
3338 				spin_unlock(&all_mddevs_lock);
3339 				return true;
3340 			}
3341 		}
3342 	}
3343 	spin_unlock(&all_mddevs_lock);
3344 	return false;
3345 }
3346 
3347 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3348 {
3349 	unsigned long long blocks;
3350 	sector_t new;
3351 
3352 	if (kstrtoull(buf, 10, &blocks) < 0)
3353 		return -EINVAL;
3354 
3355 	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3356 		return -EINVAL; /* sector conversion overflow */
3357 
3358 	new = blocks * 2;
3359 	if (new != blocks * 2)
3360 		return -EINVAL; /* unsigned long long to sector_t overflow */
3361 
3362 	*sectors = new;
3363 	return 0;
3364 }
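/* e.g. writing "1024" (1K blocks) stores 2048 sectors; any value with
 * the top bit set is rejected because doubling it would overflow.
 */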
3365 
3366 static ssize_t
3367 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3368 {
3369 	struct mddev *my_mddev = rdev->mddev;
3370 	sector_t oldsectors = rdev->sectors;
3371 	sector_t sectors;
3372 
3373 	if (test_bit(Journal, &rdev->flags))
3374 		return -EBUSY;
3375 	if (strict_blocks_to_sectors(buf, &sectors) < 0)
3376 		return -EINVAL;
3377 	if (rdev->data_offset != rdev->new_data_offset)
3378 		return -EINVAL; /* too confusing */
3379 	if (my_mddev->pers && rdev->raid_disk >= 0) {
3380 		if (my_mddev->persistent) {
3381 			sectors = super_types[my_mddev->major_version].
3382 				rdev_size_change(rdev, sectors);
3383 			if (!sectors)
3384 				return -EBUSY;
3385 		} else if (!sectors)
3386 			sectors = bdev_nr_sectors(rdev->bdev) -
3387 				rdev->data_offset;
3388 		if (!my_mddev->pers->resize)
3389 			/* Cannot change size for RAID0 or Linear etc */
3390 			return -EINVAL;
3391 	}
3392 	if (sectors < my_mddev->dev_sectors)
3393 		return -EINVAL; /* component must fit device */
3394 
3395 	rdev->sectors = sectors;
3396 
3397 	/*
3398 	 * Check that all other rdevs with the same bdev do not overlap.  This
3399 	 * check does not provide a hard guarantee, it just helps avoid
3400 	 * dangerous mistakes.
3401 	 */
3402 	if (sectors > oldsectors && my_mddev->external &&
3403 	    md_rdev_overlaps(rdev)) {
3404 		/*
3405 		 * Someone else could have slipped in a size change here, but
3406 		 * doing so is just silly.  We put oldsectors back because we
3407 		 * know it is safe, and trust userspace not to race with itself.
3408 		 */
3409 		rdev->sectors = oldsectors;
3410 		return -EBUSY;
3411 	}
3412 	return len;
3413 }
3414 
3415 static struct rdev_sysfs_entry rdev_size =
3416 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3417 
3418 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3419 {
3420 	unsigned long long recovery_start = rdev->recovery_offset;
3421 
3422 	if (test_bit(In_sync, &rdev->flags) ||
3423 	    recovery_start == MaxSector)
3424 		return sprintf(page, "none\n");
3425 
3426 	return sprintf(page, "%llu\n", recovery_start);
3427 }
3428 
3429 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3430 {
3431 	unsigned long long recovery_start;
3432 
3433 	if (cmd_match(buf, "none"))
3434 		recovery_start = MaxSector;
3435 	else if (kstrtoull(buf, 10, &recovery_start))
3436 		return -EINVAL;
3437 
3438 	if (rdev->mddev->pers &&
3439 	    rdev->raid_disk >= 0)
3440 		return -EBUSY;
3441 
3442 	rdev->recovery_offset = recovery_start;
3443 	if (recovery_start == MaxSector)
3444 		set_bit(In_sync, &rdev->flags);
3445 	else
3446 		clear_bit(In_sync, &rdev->flags);
3447 	return len;
3448 }
3449 
3450 static struct rdev_sysfs_entry rdev_recovery_start =
3451 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3452 
3453 /* sysfs access to the bad-blocks list.
3454  * We present two files.
3455  * 'bad_blocks' lists sector numbers and lengths of ranges that
3456  *    are recorded as bad.  The list is truncated to fit within
3457  *    the one-page limit of sysfs.
3458  *    Writing "sector length" to this file adds an acknowledged
3459  *    bad block.
3460  * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
3461  *    been acknowledged.  Writing to this file adds bad blocks
3462  *    without acknowledging them.  This is largely for testing.
3463  */
3464 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3465 {
3466 	return badblocks_show(&rdev->badblocks, page, 0);
3467 }
3468 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3469 {
3470 	int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3471 	/* Maybe that ack was all we needed */
3472 	if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3473 		wake_up(&rdev->blocked_wait);
3474 	return rv;
3475 }
3476 static struct rdev_sysfs_entry rdev_bad_blocks =
3477 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3478 
3479 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3480 {
3481 	return badblocks_show(&rdev->badblocks, page, 1);
3482 }
3483 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3484 {
3485 	return badblocks_store(&rdev->badblocks, page, len, 1);
3486 }
3487 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3488 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3489 
3490 static ssize_t
3491 ppl_sector_show(struct md_rdev *rdev, char *page)
3492 {
3493 	return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3494 }
3495 
3496 static ssize_t
3497 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3498 {
3499 	unsigned long long sector;
3500 
3501 	if (kstrtoull(buf, 10, &sector) < 0)
3502 		return -EINVAL;
3503 	if (sector != (sector_t)sector)
3504 		return -EINVAL;
3505 
3506 	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3507 	    rdev->raid_disk >= 0)
3508 		return -EBUSY;
3509 
3510 	if (rdev->mddev->persistent) {
3511 		if (rdev->mddev->major_version == 0)
3512 			return -EINVAL;
3513 		if ((sector > rdev->sb_start &&
3514 		     sector - rdev->sb_start > S16_MAX) ||
3515 		    (sector < rdev->sb_start &&
3516 		     rdev->sb_start - sector > -S16_MIN))
3517 			return -EINVAL;
3518 		rdev->ppl.offset = sector - rdev->sb_start;
3519 	} else if (!rdev->mddev->external) {
3520 		return -EBUSY;
3521 	}
3522 	rdev->ppl.sector = sector;
3523 	return len;
3524 }
3525 
3526 static struct rdev_sysfs_entry rdev_ppl_sector =
3527 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3528 
3529 static ssize_t
3530 ppl_size_show(struct md_rdev *rdev, char *page)
3531 {
3532 	return sprintf(page, "%u\n", rdev->ppl.size);
3533 }
3534 
3535 static ssize_t
3536 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3537 {
3538 	unsigned int size;
3539 
3540 	if (kstrtouint(buf, 10, &size) < 0)
3541 		return -EINVAL;
3542 
3543 	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3544 	    rdev->raid_disk >= 0)
3545 		return -EBUSY;
3546 
3547 	if (rdev->mddev->persistent) {
3548 		if (rdev->mddev->major_version == 0)
3549 			return -EINVAL;
3550 		if (size > U16_MAX)
3551 			return -EINVAL;
3552 	} else if (!rdev->mddev->external) {
3553 		return -EBUSY;
3554 	}
3555 	rdev->ppl.size = size;
3556 	return len;
3557 }
3558 
3559 static struct rdev_sysfs_entry rdev_ppl_size =
3560 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3561 
3562 static struct attribute *rdev_default_attrs[] = {
3563 	&rdev_state.attr,
3564 	&rdev_errors.attr,
3565 	&rdev_slot.attr,
3566 	&rdev_offset.attr,
3567 	&rdev_new_offset.attr,
3568 	&rdev_size.attr,
3569 	&rdev_recovery_start.attr,
3570 	&rdev_bad_blocks.attr,
3571 	&rdev_unack_bad_blocks.attr,
3572 	&rdev_ppl_sector.attr,
3573 	&rdev_ppl_size.attr,
3574 	NULL,
3575 };
3576 ATTRIBUTE_GROUPS(rdev_default);
3577 static ssize_t
3578 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3579 {
3580 	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3581 	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3582 
3583 	if (!entry->show)
3584 		return -EIO;
3585 	if (!rdev->mddev)
3586 		return -ENODEV;
3587 	return entry->show(rdev, page);
3588 }
3589 
3590 static ssize_t
3591 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3592 	      const char *page, size_t length)
3593 {
3594 	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3595 	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3596 	struct kernfs_node *kn = NULL;
3597 	ssize_t rv;
3598 	struct mddev *mddev = rdev->mddev;
3599 
3600 	if (!entry->store)
3601 		return -EIO;
3602 	if (!capable(CAP_SYS_ADMIN))
3603 		return -EACCES;
3604 
3605 	if (entry->store == state_store && cmd_match(page, "remove"))
3606 		kn = sysfs_break_active_protection(kobj, attr);
3607 
3608 	rv = mddev ? mddev_lock(mddev) : -ENODEV;
3609 	if (!rv) {
3610 		if (rdev->mddev == NULL)
3611 			rv = -ENODEV;
3612 		else
3613 			rv = entry->store(rdev, page, length);
3614 		mddev_unlock(mddev);
3615 	}
3616 
3617 	if (kn)
3618 		sysfs_unbreak_active_protection(kn);
3619 
3620 	return rv;
3621 }
3622 
3623 static void rdev_free(struct kobject *ko)
3624 {
3625 	struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3626 	kfree(rdev);
3627 }
3628 static const struct sysfs_ops rdev_sysfs_ops = {
3629 	.show		= rdev_attr_show,
3630 	.store		= rdev_attr_store,
3631 };
3632 static const struct kobj_type rdev_ktype = {
3633 	.release	= rdev_free,
3634 	.sysfs_ops	= &rdev_sysfs_ops,
3635 	.default_groups	= rdev_default_groups,
3636 };
3637 
3638 int md_rdev_init(struct md_rdev *rdev)
3639 {
3640 	rdev->desc_nr = -1;
3641 	rdev->saved_raid_disk = -1;
3642 	rdev->raid_disk = -1;
3643 	rdev->flags = 0;
3644 	rdev->data_offset = 0;
3645 	rdev->new_data_offset = 0;
3646 	rdev->sb_events = 0;
3647 	rdev->last_read_error = 0;
3648 	rdev->sb_loaded = 0;
3649 	rdev->bb_page = NULL;
3650 	atomic_set(&rdev->nr_pending, 0);
3651 	atomic_set(&rdev->read_errors, 0);
3652 	atomic_set(&rdev->corrected_errors, 0);
3653 
3654 	INIT_LIST_HEAD(&rdev->same_set);
3655 	init_waitqueue_head(&rdev->blocked_wait);
3656 
3657 	/* Add space to store bad block list.
3658 	 * This reserves the space even on arrays where it cannot
3659 	 * be used - I wonder if that matters
3660 	 */
3661 	return badblocks_init(&rdev->badblocks, 0);
3662 }
3663 EXPORT_SYMBOL_GPL(md_rdev_init);
3664 
3665 /*
3666  * Import a device. If 'super_format' >= 0, then sanity check the superblock.
3667  *
3668  * Mark the device faulty if:
3669  *
3670  *   - the device is nonexistent (zero size)
3671  *   - the device has no valid superblock
3672  *
3673  * A faulty rdev _never_ has rdev->sb set.
3674  */
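/*
 * Note on 'super_format' as used by the callers below: -1 selects a
 * non-persistent array (no superblock is read), and -2 is used for
 * externally managed metadata, in which case the device is claimed with
 * the shared claim_rdev holder rather than with the rdev itself.
 */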
3675 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3676 {
3677 	struct md_rdev *rdev;
3678 	struct md_rdev *holder;
3679 	sector_t size;
3680 	int err;
3681 
3682 	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3683 	if (!rdev)
3684 		return ERR_PTR(-ENOMEM);
3685 
3686 	err = md_rdev_init(rdev);
3687 	if (err)
3688 		goto out_free_rdev;
3689 	err = alloc_disk_sb(rdev);
3690 	if (err)
3691 		goto out_clear_rdev;
3692 
3693 	if (super_format == -2) {
3694 		holder = &claim_rdev;
3695 	} else {
3696 		holder = rdev;
3697 		set_bit(Holder, &rdev->flags);
3698 	}
3699 
3700 	rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
3701 				       holder, NULL);
3702 	if (IS_ERR(rdev->bdev)) {
3703 		pr_warn("md: could not open device unknown-block(%u,%u).\n",
3704 			MAJOR(newdev), MINOR(newdev));
3705 		err = PTR_ERR(rdev->bdev);
3706 		goto out_clear_rdev;
3707 	}
3708 
3709 	kobject_init(&rdev->kobj, &rdev_ktype);
3710 
3711 	size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
3712 	if (!size) {
3713 		pr_warn("md: %pg has zero or unknown size, marking faulty!\n",
3714 			rdev->bdev);
3715 		err = -EINVAL;
3716 		goto out_blkdev_put;
3717 	}
3718 
3719 	if (super_format >= 0) {
3720 		err = super_types[super_format].
3721 			load_super(rdev, NULL, super_minor);
3722 		if (err == -EINVAL) {
3723 			pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n",
3724 				rdev->bdev,
3725 				super_format, super_minor);
3726 			goto out_blkdev_put;
3727 		}
3728 		if (err < 0) {
3729 			pr_warn("md: could not read %pg's sb, not importing!\n",
3730 				rdev->bdev);
3731 			goto out_blkdev_put;
3732 		}
3733 	}
3734 
3735 	return rdev;
3736 
3737 out_blkdev_put:
3738 	blkdev_put(rdev->bdev, holder);
3739 out_clear_rdev:
3740 	md_rdev_clear(rdev);
3741 out_free_rdev:
3742 	kfree(rdev);
3743 	return ERR_PTR(err);
3744 }
3745 
3746 /*
3747  * Check a full RAID array for plausibility
3748  */
3749 
3750 static int analyze_sbs(struct mddev *mddev)
3751 {
3752 	int i;
3753 	struct md_rdev *rdev, *freshest, *tmp;
3754 
3755 	freshest = NULL;
3756 	rdev_for_each_safe(rdev, tmp, mddev)
3757 		switch (super_types[mddev->major_version].
3758 			load_super(rdev, freshest, mddev->minor_version)) {
3759 		case 1:
3760 			freshest = rdev;
3761 			break;
3762 		case 0:
3763 			break;
3764 		default:
3765 			pr_warn("md: fatal superblock inconsistency in %pg -- removing from array\n",
3766 				rdev->bdev);
3767 			md_kick_rdev_from_array(rdev);
3768 		}
3769 
3770 	/* Cannot find a valid fresh disk */
3771 	if (!freshest) {
3772 		pr_warn("md: cannot find a valid disk\n");
3773 		return -EINVAL;
3774 	}
3775 
3776 	super_types[mddev->major_version].
3777 		validate_super(mddev, freshest);
3778 
3779 	i = 0;
3780 	rdev_for_each_safe(rdev, tmp, mddev) {
3781 		if (mddev->max_disks &&
3782 		    (rdev->desc_nr >= mddev->max_disks ||
3783 		     i > mddev->max_disks)) {
3784 			pr_warn("md: %s: %pg: only %d devices permitted\n",
3785 				mdname(mddev), rdev->bdev,
3786 				mddev->max_disks);
3787 			md_kick_rdev_from_array(rdev);
3788 			continue;
3789 		}
3790 		if (rdev != freshest) {
3791 			if (super_types[mddev->major_version].
3792 			    validate_super(mddev, rdev)) {
3793 				pr_warn("md: kicking non-fresh %pg from array!\n",
3794 					rdev->bdev);
3795 				md_kick_rdev_from_array(rdev);
3796 				continue;
3797 			}
3798 		}
3799 		if (mddev->level == LEVEL_MULTIPATH) {
3800 			rdev->desc_nr = i++;
3801 			rdev->raid_disk = rdev->desc_nr;
3802 			set_bit(In_sync, &rdev->flags);
3803 		} else if (rdev->raid_disk >=
3804 			    (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3805 			   !test_bit(Journal, &rdev->flags)) {
3806 			rdev->raid_disk = -1;
3807 			clear_bit(In_sync, &rdev->flags);
3808 		}
3809 	}
3810 
3811 	return 0;
3812 }
3813 
3814 /* Read a fixed-point number.
3815  * Numbers in sysfs attributes should be in "standard" units where
3816  * possible, so time should be in seconds.
3817  * However, we internally use a much smaller unit such as
3818  * milliseconds or jiffies.
3819  * This function takes a decimal number with a possible fractional
3820  * component, and produces an integer which is the result of
3821  * multiplying that number by 10^'scale',
3822  * all without any floating-point arithmetic.
3823  */
3824 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3825 {
3826 	unsigned long result = 0;
3827 	long decimals = -1;
3828 	while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3829 		if (*cp == '.')
3830 			decimals = 0;
3831 		else if (decimals < scale) {
3832 			unsigned int value;
3833 			value = *cp - '0';
3834 			result = result * 10 + value;
3835 			if (decimals >= 0)
3836 				decimals++;
3837 		}
3838 		cp++;
3839 	}
3840 	if (*cp == '\n')
3841 		cp++;
3842 	if (*cp)
3843 		return -EINVAL;
3844 	if (decimals < 0)
3845 		decimals = 0;
3846 	*res = result * int_pow(10, scale - decimals);
3847 	return 0;
3848 }
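
/*
 * Worked example: strict_strtoul_scaled("1.5", &res, 3) accumulates
 * result=1, sees the '.', then result=15 with one decimal consumed, and
 * finally sets *res = 15 * 10^(3-1) = 1500 - i.e. 1.5 seconds expressed
 * in milliseconds.
 */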
3849 
3850 static ssize_t
3851 safe_delay_show(struct mddev *mddev, char *page)
3852 {
3853 	unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ;
3854 
3855 	return sprintf(page, "%u.%03u\n", msec/1000, msec%1000);
3856 }
3857 static ssize_t
3858 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3859 {
3860 	unsigned long msec;
3861 
3862 	if (mddev_is_clustered(mddev)) {
3863 		pr_warn("md: Safemode is disabled for clustered mode\n");
3864 		return -EINVAL;
3865 	}
3866 
3867 	if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ)
3868 		return -EINVAL;
3869 	if (msec == 0)
3870 		mddev->safemode_delay = 0;
3871 	else {
3872 		unsigned long old_delay = mddev->safemode_delay;
3873 		unsigned long new_delay = (msec*HZ)/1000;
3874 
3875 		if (new_delay == 0)
3876 			new_delay = 1;
3877 		mddev->safemode_delay = new_delay;
3878 		if (new_delay < old_delay || old_delay == 0)
3879 			mod_timer(&mddev->safemode_timer, jiffies+1);
3880 	}
3881 	return len;
3882 }
3883 static struct md_sysfs_entry md_safe_delay =
3884 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);
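
/*
 * Illustrative sketch, not part of the driver: arming a 500ms safemode
 * timer.  "0.5" is parsed by strict_strtoul_scaled() above to msec=500.
 * The md0 path is an assumption of the example.
 *
 *	int fd = open("/sys/block/md0/md/safe_mode_delay", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "0.5", 3);
 *		close(fd);
 *	}
 */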
3885 
3886 static ssize_t
3887 level_show(struct mddev *mddev, char *page)
3888 {
3889 	struct md_personality *p;
3890 	int ret;
3891 	spin_lock(&mddev->lock);
3892 	p = mddev->pers;
3893 	if (p)
3894 		ret = sprintf(page, "%s\n", p->name);
3895 	else if (mddev->clevel[0])
3896 		ret = sprintf(page, "%s\n", mddev->clevel);
3897 	else if (mddev->level != LEVEL_NONE)
3898 		ret = sprintf(page, "%d\n", mddev->level);
3899 	else
3900 		ret = 0;
3901 	spin_unlock(&mddev->lock);
3902 	return ret;
3903 }
3904 
3905 static ssize_t
3906 level_store(struct mddev *mddev, const char *buf, size_t len)
3907 {
3908 	char clevel[16];
3909 	ssize_t rv;
3910 	size_t slen = len;
3911 	struct md_personality *pers, *oldpers;
3912 	long level;
3913 	void *priv, *oldpriv;
3914 	struct md_rdev *rdev;
3915 
3916 	if (slen == 0 || slen >= sizeof(clevel))
3917 		return -EINVAL;
3918 
3919 	rv = mddev_lock(mddev);
3920 	if (rv)
3921 		return rv;
3922 
3923 	if (mddev->pers == NULL) {
3924 		memcpy(mddev->clevel, buf, slen);
3925 		if (mddev->clevel[slen-1] == '\n')
3926 			slen--;
3927 		mddev->clevel[slen] = 0;
3928 		mddev->level = LEVEL_NONE;
3929 		rv = len;
3930 		goto out_unlock;
3931 	}
3932 	rv = -EROFS;
3933 	if (!md_is_rdwr(mddev))
3934 		goto out_unlock;
3935 
3936 	/* request to change the personality.  Need to ensure:
3937 	 *  - array is not engaged in resync/recovery/reshape
3938 	 *  - old personality can be suspended
3939 	 *  - new personality can take over the array.
3940 	 */
3941 
3942 	rv = -EBUSY;
3943 	if (mddev->sync_thread ||
3944 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3945 	    mddev->reshape_position != MaxSector ||
3946 	    mddev->sysfs_active)
3947 		goto out_unlock;
3948 
3949 	rv = -EINVAL;
3950 	if (!mddev->pers->quiesce) {
3951 		pr_warn("md: %s: %s does not support online personality change\n",
3952 			mdname(mddev), mddev->pers->name);
3953 		goto out_unlock;
3954 	}
3955 
3956 	/* Now find the new personality */
3957 	memcpy(clevel, buf, slen);
3958 	if (clevel[slen-1] == '\n')
3959 		slen--;
3960 	clevel[slen] = 0;
3961 	if (kstrtol(clevel, 10, &level))
3962 		level = LEVEL_NONE;
3963 
3964 	if (request_module("md-%s", clevel) != 0)
3965 		request_module("md-level-%s", clevel);
3966 	spin_lock(&pers_lock);
3967 	pers = find_pers(level, clevel);
3968 	if (!pers || !try_module_get(pers->owner)) {
3969 		spin_unlock(&pers_lock);
3970 		pr_warn("md: personality %s not loaded\n", clevel);
3971 		rv = -EINVAL;
3972 		goto out_unlock;
3973 	}
3974 	spin_unlock(&pers_lock);
3975 
3976 	if (pers == mddev->pers) {
3977 		/* Nothing to do! */
3978 		module_put(pers->owner);
3979 		rv = len;
3980 		goto out_unlock;
3981 	}
3982 	if (!pers->takeover) {
3983 		module_put(pers->owner);
3984 		pr_warn("md: %s: %s does not support personality takeover\n",
3985 			mdname(mddev), clevel);
3986 		rv = -EINVAL;
3987 		goto out_unlock;
3988 	}
3989 
3990 	rdev_for_each(rdev, mddev)
3991 		rdev->new_raid_disk = rdev->raid_disk;
3992 
3993 	/* ->takeover must set new_* and/or delta_disks
3994 	 * if it succeeds, and may set them when it fails.
3995 	 */
3996 	priv = pers->takeover(mddev);
3997 	if (IS_ERR(priv)) {
3998 		mddev->new_level = mddev->level;
3999 		mddev->new_layout = mddev->layout;
4000 		mddev->new_chunk_sectors = mddev->chunk_sectors;
4001 		mddev->raid_disks -= mddev->delta_disks;
4002 		mddev->delta_disks = 0;
4003 		mddev->reshape_backwards = 0;
4004 		module_put(pers->owner);
4005 		pr_warn("md: %s: %s would not accept array\n",
4006 			mdname(mddev), clevel);
4007 		rv = PTR_ERR(priv);
4008 		goto out_unlock;
4009 	}
4010 
4011 	/* Looks like we have a winner */
4012 	mddev_suspend(mddev);
4013 	mddev_detach(mddev);
4014 
4015 	spin_lock(&mddev->lock);
4016 	oldpers = mddev->pers;
4017 	oldpriv = mddev->private;
4018 	mddev->pers = pers;
4019 	mddev->private = priv;
4020 	strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4021 	mddev->level = mddev->new_level;
4022 	mddev->layout = mddev->new_layout;
4023 	mddev->chunk_sectors = mddev->new_chunk_sectors;
4024 	mddev->delta_disks = 0;
4025 	mddev->reshape_backwards = 0;
4026 	mddev->degraded = 0;
4027 	spin_unlock(&mddev->lock);
4028 
4029 	if (oldpers->sync_request == NULL &&
4030 	    mddev->external) {
4031 		/* We are converting from a no-redundancy array
4032 		 * to a redundancy array and metadata is managed
4033 		 * externally so we need to be sure that writes
4034 		 * won't block due to a need to transition
4035 		 *      clean->dirty
4036 		 * until external management is started.
4037 		 */
4038 		mddev->in_sync = 0;
4039 		mddev->safemode_delay = 0;
4040 		mddev->safemode = 0;
4041 	}
4042 
4043 	oldpers->free(mddev, oldpriv);
4044 
4045 	if (oldpers->sync_request == NULL &&
4046 	    pers->sync_request != NULL) {
4047 		/* need to add the md_redundancy_group */
4048 		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4049 			pr_warn("md: cannot register extra attributes for %s\n",
4050 				mdname(mddev));
4051 		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
4052 		mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4053 		mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
4054 	}
4055 	if (oldpers->sync_request != NULL &&
4056 	    pers->sync_request == NULL) {
4057 		/* need to remove the md_redundancy_group */
4058 		if (mddev->to_remove == NULL)
4059 			mddev->to_remove = &md_redundancy_group;
4060 	}
4061 
4062 	module_put(oldpers->owner);
4063 
4064 	rdev_for_each(rdev, mddev) {
4065 		if (rdev->raid_disk < 0)
4066 			continue;
4067 		if (rdev->new_raid_disk >= mddev->raid_disks)
4068 			rdev->new_raid_disk = -1;
4069 		if (rdev->new_raid_disk == rdev->raid_disk)
4070 			continue;
4071 		sysfs_unlink_rdev(mddev, rdev);
4072 	}
4073 	rdev_for_each(rdev, mddev) {
4074 		if (rdev->raid_disk < 0)
4075 			continue;
4076 		if (rdev->new_raid_disk == rdev->raid_disk)
4077 			continue;
4078 		rdev->raid_disk = rdev->new_raid_disk;
4079 		if (rdev->raid_disk < 0)
4080 			clear_bit(In_sync, &rdev->flags);
4081 		else {
4082 			if (sysfs_link_rdev(mddev, rdev))
4083 				pr_warn("md: cannot register rd%d for %s after level change\n",
4084 					rdev->raid_disk, mdname(mddev));
4085 		}
4086 	}
4087 
4088 	if (pers->sync_request == NULL) {
4089 		/* this is now an array without redundancy, so
4090 		 * it must always be in_sync
4091 		 */
4092 		mddev->in_sync = 1;
4093 		del_timer_sync(&mddev->safemode_timer);
4094 	}
4095 	blk_set_stacking_limits(&mddev->queue->limits);
4096 	pers->run(mddev);
4097 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4098 	mddev_resume(mddev);
4099 	if (!mddev->thread)
4100 		md_update_sb(mddev, 1);
4101 	sysfs_notify_dirent_safe(mddev->sysfs_level);
4102 	md_new_event();
4103 	rv = len;
4104 out_unlock:
4105 	mddev_unlock(mddev);
4106 	return rv;
4107 }
4108 
4109 static struct md_sysfs_entry md_level =
4110 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
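
/*
 * Illustrative sketch, not part of the driver: requesting a takeover to
 * raid5 by writing the new personality name.  As level_store() enforces,
 * this fails with -EBUSY while a resync/recovery/reshape is running.
 * The md0 path is an assumption of the example.
 *
 *	int fd = open("/sys/block/md0/md/level", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "raid5", 5);
 *		close(fd);
 *	}
 */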
4111 
4112 static ssize_t
4113 layout_show(struct mddev *mddev, char *page)
4114 {
4115 	/* just a number, not meaningful for all levels */
4116 	if (mddev->reshape_position != MaxSector &&
4117 	    mddev->layout != mddev->new_layout)
4118 		return sprintf(page, "%d (%d)\n",
4119 			       mddev->new_layout, mddev->layout);
4120 	return sprintf(page, "%d\n", mddev->layout);
4121 }
4122 
4123 static ssize_t
4124 layout_store(struct mddev *mddev, const char *buf, size_t len)
4125 {
4126 	unsigned int n;
4127 	int err;
4128 
4129 	err = kstrtouint(buf, 10, &n);
4130 	if (err < 0)
4131 		return err;
4132 	err = mddev_lock(mddev);
4133 	if (err)
4134 		return err;
4135 
4136 	if (mddev->pers) {
4137 		if (mddev->pers->check_reshape == NULL)
4138 			err = -EBUSY;
4139 		else if (!md_is_rdwr(mddev))
4140 			err = -EROFS;
4141 		else {
4142 			mddev->new_layout = n;
4143 			err = mddev->pers->check_reshape(mddev);
4144 			if (err)
4145 				mddev->new_layout = mddev->layout;
4146 		}
4147 	} else {
4148 		mddev->new_layout = n;
4149 		if (mddev->reshape_position == MaxSector)
4150 			mddev->layout = n;
4151 	}
4152 	mddev_unlock(mddev);
4153 	return err ?: len;
4154 }
4155 static struct md_sysfs_entry md_layout =
4156 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
4157 
4158 static ssize_t
4159 raid_disks_show(struct mddev *mddev, char *page)
4160 {
4161 	if (mddev->raid_disks == 0)
4162 		return 0;
4163 	if (mddev->reshape_position != MaxSector &&
4164 	    mddev->delta_disks != 0)
4165 		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4166 			       mddev->raid_disks - mddev->delta_disks);
4167 	return sprintf(page, "%d\n", mddev->raid_disks);
4168 }
4169 
4170 static int update_raid_disks(struct mddev *mddev, int raid_disks);
4171 
4172 static ssize_t
4173 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
4174 {
4175 	unsigned int n;
4176 	int err;
4177 
4178 	err = kstrtouint(buf, 10, &n);
4179 	if (err < 0)
4180 		return err;
4181 
4182 	err = mddev_lock(mddev);
4183 	if (err)
4184 		return err;
4185 	if (mddev->pers)
4186 		err = update_raid_disks(mddev, n);
4187 	else if (mddev->reshape_position != MaxSector) {
4188 		struct md_rdev *rdev;
4189 		int olddisks = mddev->raid_disks - mddev->delta_disks;
4190 
4191 		err = -EINVAL;
4192 		rdev_for_each(rdev, mddev) {
4193 			if (olddisks < n &&
4194 			    rdev->data_offset < rdev->new_data_offset)
4195 				goto out_unlock;
4196 			if (olddisks > n &&
4197 			    rdev->data_offset > rdev->new_data_offset)
4198 				goto out_unlock;
4199 		}
4200 		err = 0;
4201 		mddev->delta_disks = n - olddisks;
4202 		mddev->raid_disks = n;
4203 		mddev->reshape_backwards = (mddev->delta_disks < 0);
4204 	} else
4205 		mddev->raid_disks = n;
4206 out_unlock:
4207 	mddev_unlock(mddev);
4208 	return err ? err : len;
4209 }
4210 static struct md_sysfs_entry md_raid_disks =
4211 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
4212 
4213 static ssize_t
4214 uuid_show(struct mddev *mddev, char *page)
4215 {
4216 	return sprintf(page, "%pU\n", mddev->uuid);
4217 }
4218 static struct md_sysfs_entry md_uuid =
4219 __ATTR(uuid, S_IRUGO, uuid_show, NULL);
4220 
4221 static ssize_t
4222 chunk_size_show(struct mddev *mddev, char *page)
4223 {
4224 	if (mddev->reshape_position != MaxSector &&
4225 	    mddev->chunk_sectors != mddev->new_chunk_sectors)
4226 		return sprintf(page, "%d (%d)\n",
4227 			       mddev->new_chunk_sectors << 9,
4228 			       mddev->chunk_sectors << 9);
4229 	return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
4230 }
4231 
4232 static ssize_t
4233 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
4234 {
4235 	unsigned long n;
4236 	int err;
4237 
4238 	err = kstrtoul(buf, 10, &n);
4239 	if (err < 0)
4240 		return err;
4241 
4242 	err = mddev_lock(mddev);
4243 	if (err)
4244 		return err;
4245 	if (mddev->pers) {
4246 		if (mddev->pers->check_reshape == NULL)
4247 			err = -EBUSY;
4248 		else if (!md_is_rdwr(mddev))
4249 			err = -EROFS;
4250 		else {
4251 			mddev->new_chunk_sectors = n >> 9;
4252 			err = mddev->pers->check_reshape(mddev);
4253 			if (err)
4254 				mddev->new_chunk_sectors = mddev->chunk_sectors;
4255 		}
4256 	} else {
4257 		mddev->new_chunk_sectors = n >> 9;
4258 		if (mddev->reshape_position == MaxSector)
4259 			mddev->chunk_sectors = n >> 9;
4260 	}
4261 	mddev_unlock(mddev);
4262 	return err ?: len;
4263 }
4264 static struct md_sysfs_entry md_chunk_size =
4265 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
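
/*
 * Illustrative sketch, not part of the driver: chunk_size is exchanged in
 * bytes but stored in sectors (the n >> 9 above), so writing "524288"
 * requests a 512KiB chunk.  The md0 path is an assumption of the example.
 *
 *	int fd = open("/sys/block/md0/md/chunk_size", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "524288", 6);
 *		close(fd);
 *	}
 */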
4266 
4267 static ssize_t
4268 resync_start_show(struct mddev *mddev, char *page)
4269 {
4270 	if (mddev->recovery_cp == MaxSector)
4271 		return sprintf(page, "none\n");
4272 	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4273 }
4274 
4275 static ssize_t
4276 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
4277 {
4278 	unsigned long long n;
4279 	int err;
4280 
4281 	if (cmd_match(buf, "none"))
4282 		n = MaxSector;
4283 	else {
4284 		err = kstrtoull(buf, 10, &n);
4285 		if (err < 0)
4286 			return err;
4287 		if (n != (sector_t)n)
4288 			return -EINVAL;
4289 	}
4290 
4291 	err = mddev_lock(mddev);
4292 	if (err)
4293 		return err;
4294 	if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
4295 		err = -EBUSY;
4296 
4297 	if (!err) {
4298 		mddev->recovery_cp = n;
4299 		if (mddev->pers)
4300 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
4301 	}
4302 	mddev_unlock(mddev);
4303 	return err ?: len;
4304 }
4305 static struct md_sysfs_entry md_resync_start =
4306 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4307 		resync_start_show, resync_start_store);
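
/*
 * Illustrative sketch, not part of the driver: writing "none" clears the
 * recovery checkpoint so an inactive array will start as fully synced;
 * on an active array it is rejected with -EBUSY unless recovery is
 * frozen.  The md0 path is an assumption of the example.
 *
 *	int fd = open("/sys/block/md0/md/resync_start", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "none", 4);
 *		close(fd);
 *	}
 */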
4308 
4309 /*
4310  * The array state can be:
4311  *
4312  * clear
4313  *     No devices, no size, no level
4314  *     Equivalent to STOP_ARRAY ioctl
4315  * inactive
4316  *     May have some settings, but array is not active
4317  *        all IO results in error
4318  *     When written, doesn't tear down array, but just stops it
4319  * suspended (not supported yet)
4320  *     All IO requests will block. The array can be reconfigured.
4321  *     Writing this, if accepted, will block until array is quiescent
4322  * readonly
4323  *     no resync can happen.  no superblocks get written.
4324  *     write requests fail
4325  * read-auto
4326  *     like readonly, but behaves like 'clean' on a write request.
4327  *
4328  * clean - no pending writes, but otherwise active.
4329  *     When written to inactive array, starts without resync
4330  *     If a write request arrives then
4331  *       if metadata is known, mark 'dirty' and switch to 'active'.
4332  *       if not known, block and switch to write-pending
4333  *     If written to an active array that has pending writes, then fails.
4334  * active
4335  *     fully active: IO and resync can be happening.
4336  *     When written to inactive array, starts with resync
4337  *
4338  * write-pending
4339  *     clean, but writes are blocked waiting for 'active' to be written.
4340  *
4341  * active-idle
4342  *     like active, but no writes have been seen for a while (100msec).
4343  *
4344  * broken
4345  *     Array is failed. It's useful because mounted arrays aren't stopped
4346  *     when the array has failed, so this state will at least alert the user
4347  *     that something is wrong.
4348  */
4349 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
4350 		   write_pending, active_idle, broken, bad_word};
4351 static char *array_states[] = {
4352 	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
4353 	"write-pending", "active-idle", "broken", NULL };
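
/*
 * Illustrative sketch, not part of the driver: the states above are
 * reported as text, so a monitor can simply poll array_state.  The md0
 * path is an assumption of the example.
 *
 *	char state[32];
 *	int fd = open("/sys/block/md0/md/array_state", O_RDONLY);
 *	if (fd >= 0) {
 *		ssize_t n = read(fd, state, sizeof(state) - 1);
 *		if (n > 0)
 *			state[n] = '\0';	// e.g. "clean\n" or "active\n"
 *		close(fd);
 *	}
 */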
4354 
4355 static int match_word(const char *word, char **list)
4356 {
4357 	int n;
4358 	for (n=0; list[n]; n++)
4359 		if (cmd_match(word, list[n]))
4360 			break;
4361 	return n;
4362 }
4363 
4364 static ssize_t
4365 array_state_show(struct mddev *mddev, char *page)
4366 {
4367 	enum array_state st = inactive;
4368 
4369 	if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
4370 		switch(mddev->ro) {
4371 		case MD_RDONLY:
4372 			st = readonly;
4373 			break;
4374 		case MD_AUTO_READ:
4375 			st = read_auto;
4376 			break;
4377 		case MD_RDWR:
4378 			spin_lock(&mddev->lock);
4379 			if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
4380 				st = write_pending;
4381 			else if (mddev->in_sync)
4382 				st = clean;
4383 			else if (mddev->safemode)
4384 				st = active_idle;
4385 			else
4386 				st = active;
4387 			spin_unlock(&mddev->lock);
4388 		}
4389 
4390 		if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4391 			st = broken;
4392 	} else {
4393 		if (list_empty(&mddev->disks) &&
4394 		    mddev->raid_disks == 0 &&
4395 		    mddev->dev_sectors == 0)
4396 			st = clear;
4397 		else
4398 			st = inactive;
4399 	}
4400 	return sprintf(page, "%s\n", array_states[st]);
4401 }
4402 
4403 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4404 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
4405 static int restart_array(struct mddev *mddev);
4406 
4407 static ssize_t
4408 array_state_store(struct mddev *mddev, const char *buf, size_t len)
4409 {
4410 	int err = 0;
4411 	enum array_state st = match_word(buf, array_states);
4412 
4413 	if (mddev->pers && (st == active || st == clean) &&
4414 	    mddev->ro != MD_RDONLY) {
4415 		/* don't take reconfig_mutex when toggling between
4416 		 * clean and active
4417 		 */
4418 		spin_lock(&mddev->lock);
4419 		if (st == active) {
4420 			restart_array(mddev);
4421 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4422 			md_wakeup_thread(mddev->thread);
4423 			wake_up(&mddev->sb_wait);
4424 		} else /* st == clean */ {
4425 			restart_array(mddev);
4426 			if (!set_in_sync(mddev))
4427 				err = -EBUSY;
4428 		}
4429 		if (!err)
4430 			sysfs_notify_dirent_safe(mddev->sysfs_state);
4431 		spin_unlock(&mddev->lock);
4432 		return err ?: len;
4433 	}
4434 	err = mddev_lock(mddev);
4435 	if (err)
4436 		return err;
4437 	err = -EINVAL;
4438 	switch(st) {
4439 	case bad_word:
4440 		break;
4441 	case clear:
4442 		/* stopping an active array */
4443 		err = do_md_stop(mddev, 0, NULL);
4444 		break;
4445 	case inactive:
4446 		/* stop an active array without tearing it down */
4447 		if (mddev->pers)
4448 			err = do_md_stop(mddev, 2, NULL);
4449 		else
4450 			err = 0; /* already inactive */
4451 		break;
4452 	case suspended:
4453 		break; /* not supported yet */
4454 	case readonly:
4455 		if (mddev->pers)
4456 			err = md_set_readonly(mddev, NULL);
4457 		else {
4458 			mddev->ro = MD_RDONLY;
4459 			set_disk_ro(mddev->gendisk, 1);
4460 			err = do_md_run(mddev);
4461 		}
4462 		break;
4463 	case read_auto:
4464 		if (mddev->pers) {
4465 			if (md_is_rdwr(mddev))
4466 				err = md_set_readonly(mddev, NULL);
4467 			else if (mddev->ro == MD_RDONLY)
4468 				err = restart_array(mddev);
4469 			if (err == 0) {
4470 				mddev->ro = MD_AUTO_READ;
4471 				set_disk_ro(mddev->gendisk, 0);
4472 			}
4473 		} else {
4474 			mddev->ro = MD_AUTO_READ;
4475 			err = do_md_run(mddev);
4476 		}
4477 		break;
4478 	case clean:
4479 		if (mddev->pers) {
4480 			err = restart_array(mddev);
4481 			if (err)
4482 				break;
4483 			spin_lock(&mddev->lock);
4484 			if (!set_in_sync(mddev))
4485 				err = -EBUSY;
4486 			spin_unlock(&mddev->lock);
4487 		} else
4488 			err = -EINVAL;
4489 		break;
4490 	case active:
4491 		if (mddev->pers) {
4492 			err = restart_array(mddev);
4493 			if (err)
4494 				break;
4495 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4496 			wake_up(&mddev->sb_wait);
4497 			err = 0;
4498 		} else {
4499 			mddev->ro = MD_RDWR;
4500 			set_disk_ro(mddev->gendisk, 0);
4501 			err = do_md_run(mddev);
4502 		}
4503 		break;
4504 	case write_pending:
4505 	case active_idle:
4506 	case broken:
4507 		/* these cannot be set */
4508 		break;
4509 	}
4510 
4511 	if (!err) {
4512 		if (mddev->hold_active == UNTIL_IOCTL)
4513 			mddev->hold_active = 0;
4514 		sysfs_notify_dirent_safe(mddev->sysfs_state);
4515 	}
4516 	mddev_unlock(mddev);
4517 	return err ?: len;
4518 }
4519 static struct md_sysfs_entry md_array_state =
4520 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
4521 
4522 static ssize_t
4523 max_corrected_read_errors_show(struct mddev *mddev, char *page)
{
4524 	return sprintf(page, "%d\n",
4525 		       atomic_read(&mddev->max_corr_read_errors));
4526 }
4527 
4528 static ssize_t
4529 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4530 {
4531 	unsigned int n;
4532 	int rv;
4533 
4534 	rv = kstrtouint(buf, 10, &n);
4535 	if (rv < 0)
4536 		return rv;
4537 	if (n > INT_MAX)
4538 		return -EINVAL;
4539 	atomic_set(&mddev->max_corr_read_errors, n);
4540 	return len;
4541 }
4542 
4543 static struct md_sysfs_entry max_corr_read_errors =
4544 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4545 	max_corrected_read_errors_store);
4546 
4547 static ssize_t
4548 null_show(struct mddev *mddev, char *page)
4549 {
4550 	return -EINVAL;
4551 }
4552 
4553 static ssize_t
4554 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4555 {
4556 	/* buf must be "%d:%d\n" giving major and minor numbers;
4557 	 * the trailing newline is optional.  The new device is added
4558 	 * to the array.  If the array has a persistent superblock, we
4559 	 * read the superblock to initialise info and check validity.
4560 	 * Otherwise, the only checking done is that in
4561 	 * bind_rdev_to_array, which mainly checks size.
4562 	 */
4563 	char *e;
4564 	int major = simple_strtoul(buf, &e, 10);
4565 	int minor;
4566 	dev_t dev;
4567 	struct md_rdev *rdev;
4568 	int err;
4569 
4570 	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4571 		return -EINVAL;
4572 	minor = simple_strtoul(e+1, &e, 10);
4573 	if (*e && *e != '\n')
4574 		return -EINVAL;
4575 	dev = MKDEV(major, minor);
4576 	if (major != MAJOR(dev) ||
4577 	    minor != MINOR(dev))
4578 		return -EOVERFLOW;
4579 
4580 	err = mddev_lock(mddev);
4581 	if (err)
4582 		return err;
4583 	if (mddev->persistent) {
4584 		rdev = md_import_device(dev, mddev->major_version,
4585 					mddev->minor_version);
4586 		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4587 			struct md_rdev *rdev0
4588 				= list_entry(mddev->disks.next,
4589 					     struct md_rdev, same_set);
4590 			err = super_types[mddev->major_version]
4591 				.load_super(rdev, rdev0, mddev->minor_version);
4592 			if (err < 0)
4593 				goto out;
4594 		}
4595 	} else if (mddev->external)
4596 		rdev = md_import_device(dev, -2, -1);
4597 	else
4598 		rdev = md_import_device(dev, -1, -1);
4599 
4600 	if (IS_ERR(rdev)) {
4601 		mddev_unlock(mddev);
4602 		return PTR_ERR(rdev);
4603 	}
4604 	err = bind_rdev_to_array(rdev, mddev);
4605  out:
4606 	if (err)
4607 		export_rdev(rdev, mddev);
4608 	mddev_unlock(mddev);
4609 	if (!err)
4610 		md_new_event();
4611 	return err ? err : len;
4612 }
4613 
4614 static struct md_sysfs_entry md_new_device =
4615 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
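
/*
 * Illustrative sketch, not part of the driver: hot-adding a component by
 * its major:minor pair, e.g. "8:16" for /dev/sdb.  The md0 path and the
 * device numbers are assumptions of the example.
 *
 *	int fd = open("/sys/block/md0/md/new_dev", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "8:16", 4);
 *		close(fd);
 *	}
 */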
4616 
4617 static ssize_t
4618 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4619 {
4620 	char *end;
4621 	unsigned long chunk, end_chunk;
4622 	int err;
4623 
4624 	err = mddev_lock(mddev);
4625 	if (err)
4626 		return err;
4627 	if (!mddev->bitmap)
4628 		goto out;
4629 	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4630 	while (*buf) {
4631 		chunk = end_chunk = simple_strtoul(buf, &end, 0);
4632 		if (buf == end) break;
4633 		if (*end == '-') { /* range */
4634 			buf = end + 1;
4635 			end_chunk = simple_strtoul(buf, &end, 0);
4636 			if (buf == end) break;
4637 		}
4638 		if (*end && !isspace(*end)) break;
4639 		md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4640 		buf = skip_spaces(end);
4641 	}
4642 	md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4643 out:
4644 	mddev_unlock(mddev);
4645 	return len;
4646 }
4647 
4648 static struct md_sysfs_entry md_bitmap =
4649 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
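
/*
 * Illustrative sketch, not part of the driver: dirtying bitmap chunks
 * 0-15 and chunk 100 using the "<chunk>-<chunk> <chunk>" syntax parsed
 * above.  The md0 path is an assumption of the example.
 *
 *	int fd = open("/sys/block/md0/md/bitmap_set_bits", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "0-15 100", 8);
 *		close(fd);
 *	}
 */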
4650 
4651 static ssize_t
4652 size_show(struct mddev *mddev, char *page)
4653 {
4654 	return sprintf(page, "%llu\n",
4655 		(unsigned long long)mddev->dev_sectors / 2);
4656 }
4657 
4658 static int update_size(struct mddev *mddev, sector_t num_sectors);
4659 
4660 static ssize_t
4661 size_store(struct mddev *mddev, const char *buf, size_t len)
4662 {
4663 	/* If array is inactive, we can reduce the component size, but
4664 	 * not increase it (except from 0).
4665 	 * If array is active, we can try an on-line resize
4666 	 */
4667 	sector_t sectors;
4668 	int err = strict_blocks_to_sectors(buf, &sectors);
4669 
4670 	if (err < 0)
4671 		return err;
4672 	err = mddev_lock(mddev);
4673 	if (err)
4674 		return err;
4675 	if (mddev->pers) {
4676 		err = update_size(mddev, sectors);
4677 		if (err == 0)
4678 			md_update_sb(mddev, 1);
4679 	} else {
4680 		if (mddev->dev_sectors == 0 ||
4681 		    mddev->dev_sectors > sectors)
4682 			mddev->dev_sectors = sectors;
4683 		else
4684 			err = -ENOSPC;
4685 	}
4686 	mddev_unlock(mddev);
4687 	return err ? err : len;
4688 }
4689 
4690 static struct md_sysfs_entry md_size =
4691 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4692 
4693 /* Metadata version.
4694  * This is one of
4695  *   'none' for arrays with no metadata (good luck...)
4696  *   'external' for arrays with externally managed metadata,
4697  * or N.M for internally known formats
4698  */
4699 static ssize_t
4700 metadata_show(struct mddev *mddev, char *page)
4701 {
4702 	if (mddev->persistent)
4703 		return sprintf(page, "%d.%d\n",
4704 			       mddev->major_version, mddev->minor_version);
4705 	else if (mddev->external)
4706 		return sprintf(page, "external:%s\n", mddev->metadata_type);
4707 	else
4708 		return sprintf(page, "none\n");
4709 }
4710 
4711 static ssize_t
4712 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4713 {
4714 	int major, minor;
4715 	char *e;
4716 	int err;
4717 	/* Changing the details of 'external' metadata is
4718 	 * always permitted.  Otherwise there must be
4719 	 * no devices attached to the array.
4720 	 */
4721 
4722 	err = mddev_lock(mddev);
4723 	if (err)
4724 		return err;
4725 	err = -EBUSY;
4726 	if (mddev->external && strncmp(buf, "external:", 9) == 0)
4727 		;
4728 	else if (!list_empty(&mddev->disks))
4729 		goto out_unlock;
4730 
4731 	err = 0;
4732 	if (cmd_match(buf, "none")) {
4733 		mddev->persistent = 0;
4734 		mddev->external = 0;
4735 		mddev->major_version = 0;
4736 		mddev->minor_version = 90;
4737 		goto out_unlock;
4738 	}
4739 	if (strncmp(buf, "external:", 9) == 0) {
4740 		size_t namelen = len-9;
4741 		if (namelen >= sizeof(mddev->metadata_type))
4742 			namelen = sizeof(mddev->metadata_type)-1;
4743 		memcpy(mddev->metadata_type, buf+9, namelen);
4744 		mddev->metadata_type[namelen] = 0;
4745 		if (namelen && mddev->metadata_type[namelen-1] == '\n')
4746 			mddev->metadata_type[--namelen] = 0;
4747 		mddev->persistent = 0;
4748 		mddev->external = 1;
4749 		mddev->major_version = 0;
4750 		mddev->minor_version = 90;
4751 		goto out_unlock;
4752 	}
4753 	major = simple_strtoul(buf, &e, 10);
4754 	err = -EINVAL;
4755 	if (e==buf || *e != '.')
4756 		goto out_unlock;
4757 	buf = e+1;
4758 	minor = simple_strtoul(buf, &e, 10);
4759 	if (e==buf || (*e && *e != '\n') )
4760 		goto out_unlock;
4761 	err = -ENOENT;
4762 	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4763 		goto out_unlock;
4764 	mddev->major_version = major;
4765 	mddev->minor_version = minor;
4766 	mddev->persistent = 1;
4767 	mddev->external = 0;
4768 	err = 0;
4769 out_unlock:
4770 	mddev_unlock(mddev);
4771 	return err ?: len;
4772 }
4773 
4774 static struct md_sysfs_entry md_metadata =
4775 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
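
/*
 * Illustrative sketch, not part of the driver: declaring externally
 * managed metadata before assembly.  The "imsm" handler name and the md0
 * path are assumptions of the example.
 *
 *	int fd = open("/sys/block/md0/md/metadata_version", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "external:imsm", 13);
 *		close(fd);
 *	}
 */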
4776 
4777 static ssize_t
4778 action_show(struct mddev *mddev, char *page)
4779 {
4780 	char *type = "idle";
4781 	unsigned long recovery = mddev->recovery;
4782 	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4783 		type = "frozen";
4784 	else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4785 	    (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4786 		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4787 			type = "reshape";
4788 		else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4789 			if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4790 				type = "resync";
4791 			else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4792 				type = "check";
4793 			else
4794 				type = "repair";
4795 		} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4796 			type = "recover";
4797 		else if (mddev->reshape_position != MaxSector)
4798 			type = "reshape";
4799 	}
4800 	return sprintf(page, "%s\n", type);
4801 }
4802 
4803 static void stop_sync_thread(struct mddev *mddev)
4804 {
4805 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4806 		return;
4807 
4808 	if (mddev_lock(mddev))
4809 		return;
4810 
4811 	/*
4812 	 * Check again in case MD_RECOVERY_RUNNING is cleared before lock is
4813 	 * held.
4814 	 */
4815 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
4816 		mddev_unlock(mddev);
4817 		return;
4818 	}
4819 
4820 	if (work_pending(&mddev->del_work))
4821 		flush_workqueue(md_misc_wq);
4822 
4823 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4824 	/*
4825 	 * Thread might be blocked waiting for metadata update which will now
4826 	 * never happen
4827 	 */
4828 	md_wakeup_thread_directly(mddev->sync_thread);
4829 
4830 	mddev_unlock(mddev);
4831 }
4832 
4833 static void idle_sync_thread(struct mddev *mddev)
4834 {
4835 	int sync_seq = atomic_read(&mddev->sync_seq);
4836 
4837 	mutex_lock(&mddev->sync_mutex);
4838 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4839 	stop_sync_thread(mddev);
4840 
4841 	wait_event(resync_wait, sync_seq != atomic_read(&mddev->sync_seq) ||
4842 			!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
4843 
4844 	mutex_unlock(&mddev->sync_mutex);
4845 }
4846 
4847 static void frozen_sync_thread(struct mddev *mddev)
4848 {
4849 	mutex_lock(&mddev->sync_mutex);
4850 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4851 	stop_sync_thread(mddev);
4852 
4853 	wait_event(resync_wait, mddev->sync_thread == NULL &&
4854 			!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
4855 
4856 	mutex_unlock(&mddev->sync_mutex);
4857 }
4858 
4859 static ssize_t
4860 action_store(struct mddev *mddev, const char *page, size_t len)
4861 {
4862 	if (!mddev->pers || !mddev->pers->sync_request)
4863 		return -EINVAL;
4864 
4866 	if (cmd_match(page, "idle"))
4867 		idle_sync_thread(mddev);
4868 	else if (cmd_match(page, "frozen"))
4869 		frozen_sync_thread(mddev);
4870 	else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4871 		return -EBUSY;
4872 	else if (cmd_match(page, "resync"))
4873 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4874 	else if (cmd_match(page, "recover")) {
4875 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4876 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4877 	} else if (cmd_match(page, "reshape")) {
4878 		int err;
4879 		if (mddev->pers->start_reshape == NULL)
4880 			return -EINVAL;
4881 		err = mddev_lock(mddev);
4882 		if (!err) {
4883 			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
4884 				err = -EBUSY;
4885 			} else if (mddev->reshape_position == MaxSector ||
4886 				   mddev->pers->check_reshape == NULL ||
4887 				   mddev->pers->check_reshape(mddev)) {
4888 				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4889 				err = mddev->pers->start_reshape(mddev);
4890 			} else {
4891 				/*
4892 				 * If reshape is still in progress, and
4893 				 * md_check_recovery() can continue to reshape,
4894 				 * don't restart reshape because data can be
4895 				 * corrupted for raid456.
4896 				 */
4897 				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4898 			}
4899 			mddev_unlock(mddev);
4900 		}
4901 		if (err)
4902 			return err;
4903 		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
4904 	} else {
4905 		if (cmd_match(page, "check"))
4906 			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4907 		else if (!cmd_match(page, "repair"))
4908 			return -EINVAL;
4909 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4910 		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4911 		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4912 	}
4913 	if (mddev->ro == MD_AUTO_READ) {
4914 		/* A write to sync_action is enough to justify
4915 		 * canceling read-auto mode
4916 		 */
4917 		flush_work(&mddev->sync_work);
4918 		mddev->ro = MD_RDWR;
4919 		md_wakeup_thread(mddev->sync_thread);
4920 	}
4921 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4922 	md_wakeup_thread(mddev->thread);
4923 	sysfs_notify_dirent_safe(mddev->sysfs_action);
4924 	return len;
4925 }
4926 
4927 static struct md_sysfs_entry md_scan_mode =
4928 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
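
/*
 * Illustrative sketch, not part of the driver: starting a scrub.  A
 * "check" pass only counts mismatches, while "repair" also rewrites
 * them.  The md0 path is an assumption of the example.
 *
 *	int fd = open("/sys/block/md0/md/sync_action", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "check", 5);
 *		close(fd);
 *	}
 */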
4929 
4930 static ssize_t
4931 last_sync_action_show(struct mddev *mddev, char *page)
4932 {
4933 	return sprintf(page, "%s\n", mddev->last_sync_action);
4934 }
4935 
4936 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4937 
4938 static ssize_t
4939 mismatch_cnt_show(struct mddev *mddev, char *page)
4940 {
4941 	return sprintf(page, "%llu\n",
4942 		       (unsigned long long)
4943 		       atomic64_read(&mddev->resync_mismatches));
4944 }
4945 
4946 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4947 
4948 static ssize_t
4949 sync_min_show(struct mddev *mddev, char *page)
4950 {
4951 	return sprintf(page, "%d (%s)\n", speed_min(mddev),
4952 		       mddev->sync_speed_min ? "local": "system");
4953 }
4954 
4955 static ssize_t
4956 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4957 {
4958 	unsigned int min;
4959 	int rv;
4960 
4961 	if (strncmp(buf, "system", 6)==0) {
4962 		min = 0;
4963 	} else {
4964 		rv = kstrtouint(buf, 10, &min);
4965 		if (rv < 0)
4966 			return rv;
4967 		if (min == 0)
4968 			return -EINVAL;
4969 	}
4970 	mddev->sync_speed_min = min;
4971 	return len;
4972 }
4973 
4974 static struct md_sysfs_entry md_sync_min =
4975 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4976 
4977 static ssize_t
4978 sync_max_show(struct mddev *mddev, char *page)
4979 {
4980 	return sprintf(page, "%d (%s)\n", speed_max(mddev),
4981 		       mddev->sync_speed_max ? "local": "system");
4982 }
4983 
4984 static ssize_t
4985 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4986 {
4987 	unsigned int max;
4988 	int rv;
4989 
4990 	if (strncmp(buf, "system", 6)==0) {
4991 		max = 0;
4992 	} else {
4993 		rv = kstrtouint(buf, 10, &max);
4994 		if (rv < 0)
4995 			return rv;
4996 		if (max == 0)
4997 			return -EINVAL;
4998 	}
4999 	mddev->sync_speed_max = max;
5000 	return len;
5001 }
5002 
5003 static struct md_sysfs_entry md_sync_max =
5004 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
5005 
5006 static ssize_t
5007 degraded_show(struct mddev *mddev, char *page)
5008 {
5009 	return sprintf(page, "%d\n", mddev->degraded);
5010 }
5011 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
5012 
5013 static ssize_t
5014 sync_force_parallel_show(struct mddev *mddev, char *page)
5015 {
5016 	return sprintf(page, "%d\n", mddev->parallel_resync);
5017 }
5018 
5019 static ssize_t
5020 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
5021 {
5022 	long n;
5023 
5024 	if (kstrtol(buf, 10, &n))
5025 		return -EINVAL;
5026 
5027 	if (n != 0 && n != 1)
5028 		return -EINVAL;
5029 
5030 	mddev->parallel_resync = n;
5031 
5032 	if (mddev->sync_thread)
5033 		wake_up(&resync_wait);
5034 
5035 	return len;
5036 }
5037 
5038 /* force parallel resync, even with shared block devices */
5039 static struct md_sysfs_entry md_sync_force_parallel =
5040 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
5041        sync_force_parallel_show, sync_force_parallel_store);
5042 
5043 static ssize_t
5044 sync_speed_show(struct mddev *mddev, char *page)
5045 {
5046 	unsigned long resync, dt, db;
5047 	if (mddev->curr_resync == MD_RESYNC_NONE)
5048 		return sprintf(page, "none\n");
5049 	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
5050 	dt = (jiffies - mddev->resync_mark) / HZ;
5051 	if (!dt) dt++;
5052 	db = resync - mddev->resync_mark_cnt;
5053 	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
5054 }
5055 
5056 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
5057 
5058 static ssize_t
5059 sync_completed_show(struct mddev *mddev, char *page)
5060 {
5061 	unsigned long long max_sectors, resync;
5062 
5063 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5064 		return sprintf(page, "none\n");
5065 
5066 	if (mddev->curr_resync == MD_RESYNC_YIELDED ||
5067 	    mddev->curr_resync == MD_RESYNC_DELAYED)
5068 		return sprintf(page, "delayed\n");
5069 
5070 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
5071 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5072 		max_sectors = mddev->resync_max_sectors;
5073 	else
5074 		max_sectors = mddev->dev_sectors;
5075 
5076 	resync = mddev->curr_resync_completed;
5077 	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
5078 }
5079 
5080 static struct md_sysfs_entry md_sync_completed =
5081 	__ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
5082 
5083 static ssize_t
5084 min_sync_show(struct mddev *mddev, char *page)
5085 {
5086 	return sprintf(page, "%llu\n",
5087 		       (unsigned long long)mddev->resync_min);
5088 }
5089 static ssize_t
5090 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
5091 {
5092 	unsigned long long min;
5093 	int err;
5094 
5095 	if (kstrtoull(buf, 10, &min))
5096 		return -EINVAL;
5097 
5098 	spin_lock(&mddev->lock);
5099 	err = -EINVAL;
5100 	if (min > mddev->resync_max)
5101 		goto out_unlock;
5102 
5103 	err = -EBUSY;
5104 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5105 		goto out_unlock;
5106 
5107 	/* Round down to a multiple of 4K (8 sectors) for safety */
5108 	mddev->resync_min = round_down(min, 8);
5109 	err = 0;
5110 
5111 out_unlock:
5112 	spin_unlock(&mddev->lock);
5113 	return err ?: len;
5114 }
5115 
5116 static struct md_sysfs_entry md_min_sync =
5117 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5118 
5119 static ssize_t
5120 max_sync_show(struct mddev *mddev, char *page)
5121 {
5122 	if (mddev->resync_max == MaxSector)
5123 		return sprintf(page, "max\n");
5124 	else
5125 		return sprintf(page, "%llu\n",
5126 			       (unsigned long long)mddev->resync_max);
5127 }
5128 static ssize_t
5129 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
5130 {
5131 	int err;
5132 	spin_lock(&mddev->lock);
5133 	if (strncmp(buf, "max", 3) == 0)
5134 		mddev->resync_max = MaxSector;
5135 	else {
5136 		unsigned long long max;
5137 		int chunk;
5138 
5139 		err = -EINVAL;
5140 		if (kstrtoull(buf, 10, &max))
5141 			goto out_unlock;
5142 		if (max < mddev->resync_min)
5143 			goto out_unlock;
5144 
5145 		err = -EBUSY;
5146 		if (max < mddev->resync_max && md_is_rdwr(mddev) &&
5147 		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5148 			goto out_unlock;
5149 
5150 		/* Must be a multiple of chunk_size */
5151 		chunk = mddev->chunk_sectors;
5152 		if (chunk) {
5153 			sector_t temp = max;
5154 
5155 			err = -EINVAL;
5156 			if (sector_div(temp, chunk))
5157 				goto out_unlock;
5158 		}
5159 		mddev->resync_max = max;
5160 	}
5161 	wake_up(&mddev->recovery_wait);
5162 	err = 0;
5163 out_unlock:
5164 	spin_unlock(&mddev->lock);
5165 	return err ?: len;
5166 }
5167 
5168 static struct md_sysfs_entry md_max_sync =
5169 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
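
/*
 * Illustrative sketch, not part of the driver: limiting a sync to the
 * first 1GiB of each device.  2097152 sectors of 512 bytes is 1GiB, and
 * max_sync_store() above requires a multiple of the chunk size.  The md0
 * path is an assumption of the example.
 *
 *	int fd = open("/sys/block/md0/md/sync_max", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "2097152", 7);
 *		close(fd);
 *	}
 */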
5170 
5171 static ssize_t
5172 suspend_lo_show(struct mddev *mddev, char *page)
5173 {
5174 	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
5175 }
5176 
5177 static ssize_t
5178 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
5179 {
5180 	unsigned long long new;
5181 	int err;
5182 
5183 	err = kstrtoull(buf, 10, &new);
5184 	if (err < 0)
5185 		return err;
5186 	if (new != (sector_t)new)
5187 		return -EINVAL;
5188 
5189 	err = mddev_lock(mddev);
5190 	if (err)
5191 		return err;
5192 
5193 	mddev_suspend(mddev);
5194 	mddev->suspend_lo = new;
5195 	mddev_resume(mddev);
5196 
5197 	mddev_unlock(mddev);
5198 	return len;
5199 }
5200 static struct md_sysfs_entry md_suspend_lo =
5201 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5202 
5203 static ssize_t
5204 suspend_hi_show(struct mddev *mddev, char *page)
5205 {
5206 	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
5207 }
5208 
5209 static ssize_t
5210 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
5211 {
5212 	unsigned long long new;
5213 	int err;
5214 
5215 	err = kstrtoull(buf, 10, &new);
5216 	if (err < 0)
5217 		return err;
5218 	if (new != (sector_t)new)
5219 		return -EINVAL;
5220 
5221 	err = mddev_lock(mddev);
5222 	if (err)
5223 		return err;
5224 
5225 	mddev_suspend(mddev);
5226 	mddev->suspend_hi = new;
5227 	mddev_resume(mddev);
5228 
5229 	mddev_unlock(mddev);
5230 	return len;
5231 }
5232 static struct md_sysfs_entry md_suspend_hi =
5233 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
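
/*
 * Illustrative sketch, not part of the driver: external metadata managers
 * use the [suspend_lo, suspend_hi) window to hold off writes to a region,
 * e.g. around a reshape checkpoint.  The md0 path is an assumption of the
 * example.
 *
 *	int fd = open("/sys/block/md0/md/suspend_hi", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "8192", 4);	// quiesce writes below sector 8192
 *		close(fd);
 *	}
 */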
5234 
5235 static ssize_t
5236 reshape_position_show(struct mddev *mddev, char *page)
5237 {
5238 	if (mddev->reshape_position != MaxSector)
5239 		return sprintf(page, "%llu\n",
5240 			       (unsigned long long)mddev->reshape_position);
5241 	strcpy(page, "none\n");
5242 	return 5;
5243 }
5244 
5245 static ssize_t
5246 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
5247 {
5248 	struct md_rdev *rdev;
5249 	unsigned long long new;
5250 	int err;
5251 
5252 	err = kstrtoull(buf, 10, &new);
5253 	if (err < 0)
5254 		return err;
5255 	if (new != (sector_t)new)
5256 		return -EINVAL;
5257 	err = mddev_lock(mddev);
5258 	if (err)
5259 		return err;
5260 	err = -EBUSY;
5261 	if (mddev->pers)
5262 		goto unlock;
5263 	mddev->reshape_position = new;
5264 	mddev->delta_disks = 0;
5265 	mddev->reshape_backwards = 0;
5266 	mddev->new_level = mddev->level;
5267 	mddev->new_layout = mddev->layout;
5268 	mddev->new_chunk_sectors = mddev->chunk_sectors;
5269 	rdev_for_each(rdev, mddev)
5270 		rdev->new_data_offset = rdev->data_offset;
5271 	err = 0;
5272 unlock:
5273 	mddev_unlock(mddev);
5274 	return err ?: len;
5275 }
5276 
5277 static struct md_sysfs_entry md_reshape_position =
5278 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5279        reshape_position_store);
5280 
5281 static ssize_t
5282 reshape_direction_show(struct mddev *mddev, char *page)
5283 {
5284 	return sprintf(page, "%s\n",
5285 		       mddev->reshape_backwards ? "backwards" : "forwards");
5286 }
5287 
5288 static ssize_t
5289 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5290 {
5291 	int backwards = 0;
5292 	int err;
5293 
5294 	if (cmd_match(buf, "forwards"))
5295 		backwards = 0;
5296 	else if (cmd_match(buf, "backwards"))
5297 		backwards = 1;
5298 	else
5299 		return -EINVAL;
5300 	if (mddev->reshape_backwards == backwards)
5301 		return len;
5302 
5303 	err = mddev_lock(mddev);
5304 	if (err)
5305 		return err;
5306 	/* check if we are allowed to change */
5307 	if (mddev->delta_disks)
5308 		err = -EBUSY;
5309 	else if (mddev->persistent &&
5310 	    mddev->major_version == 0)
5311 		err = -EINVAL;
5312 	else
5313 		mddev->reshape_backwards = backwards;
5314 	mddev_unlock(mddev);
5315 	return err ?: len;
5316 }
5317 
5318 static struct md_sysfs_entry md_reshape_direction =
5319 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5320        reshape_direction_store);
5321 
5322 static ssize_t
5323 array_size_show(struct mddev *mddev, char *page)
5324 {
5325 	if (mddev->external_size)
5326 		return sprintf(page, "%llu\n",
5327 			       (unsigned long long)mddev->array_sectors/2);
5328 	else
5329 		return sprintf(page, "default\n");
5330 }
5331 
5332 static ssize_t
5333 array_size_store(struct mddev *mddev, const char *buf, size_t len)
5334 {
5335 	sector_t sectors;
5336 	int err;
5337 
5338 	err = mddev_lock(mddev);
5339 	if (err)
5340 		return err;
5341 
5342 	/* cluster raid doesn't support changing array_sectors */
5343 	if (mddev_is_clustered(mddev)) {
5344 		mddev_unlock(mddev);
5345 		return -EINVAL;
5346 	}
5347 
5348 	if (strncmp(buf, "default", 7) == 0) {
5349 		if (mddev->pers)
5350 			sectors = mddev->pers->size(mddev, 0, 0);
5351 		else
5352 			sectors = mddev->array_sectors;
5353 
5354 		mddev->external_size = 0;
5355 	} else {
5356 		if (strict_blocks_to_sectors(buf, &sectors) < 0)
5357 			err = -EINVAL;
5358 		else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5359 			err = -E2BIG;
5360 		else
5361 			mddev->external_size = 1;
5362 	}
5363 
5364 	if (!err) {
5365 		mddev->array_sectors = sectors;
5366 		if (mddev->pers)
5367 			set_capacity_and_notify(mddev->gendisk,
5368 						mddev->array_sectors);
5369 	}
5370 	mddev_unlock(mddev);
5371 	return err ?: len;
5372 }
5373 
5374 static struct md_sysfs_entry md_array_size =
5375 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5376        array_size_store);
5377 
5378 static ssize_t
5379 consistency_policy_show(struct mddev *mddev, char *page)
5380 {
5381 	int ret;
5382 
5383 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5384 		ret = sprintf(page, "journal\n");
5385 	} else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5386 		ret = sprintf(page, "ppl\n");
5387 	} else if (mddev->bitmap) {
5388 		ret = sprintf(page, "bitmap\n");
5389 	} else if (mddev->pers) {
5390 		if (mddev->pers->sync_request)
5391 			ret = sprintf(page, "resync\n");
5392 		else
5393 			ret = sprintf(page, "none\n");
5394 	} else {
5395 		ret = sprintf(page, "unknown\n");
5396 	}
5397 
5398 	return ret;
5399 }
5400 
5401 static ssize_t
5402 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5403 {
5404 	int err = 0;
5405 
5406 	if (mddev->pers) {
5407 		if (mddev->pers->change_consistency_policy)
5408 			err = mddev->pers->change_consistency_policy(mddev, buf);
5409 		else
5410 			err = -EBUSY;
5411 	} else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5412 		set_bit(MD_HAS_PPL, &mddev->flags);
5413 	} else {
5414 		err = -EINVAL;
5415 	}
5416 
5417 	return err ? err : len;
5418 }
5419 
5420 static struct md_sysfs_entry md_consistency_policy =
5421 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5422        consistency_policy_store);
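
/*
 * Usage sketch (assumes md0 is a running raid5 whose personality
 * implements change_consistency_policy):
 *
 *   $ cat /sys/block/md0/md/consistency_policy
 *   resync
 *   $ echo ppl > /sys/block/md0/md/consistency_policy
 */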
5423 
5424 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5425 {
5426 	return sprintf(page, "%d\n", mddev->fail_last_dev);
5427 }
5428 
5429 /*
5430  * Setting fail_last_dev to true allows the last device to be forcibly
5431  * removed from RAID1/RAID10.
5432  */
5433 static ssize_t
5434 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5435 {
5436 	int ret;
5437 	bool value;
5438 
5439 	ret = kstrtobool(buf, &value);
5440 	if (ret)
5441 		return ret;
5442 
5443 	if (value != mddev->fail_last_dev)
5444 		mddev->fail_last_dev = value;
5445 
5446 	return len;
5447 }
5448 static struct md_sysfs_entry md_fail_last_dev =
5449 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5450        fail_last_dev_store);
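
/*
 * Usage sketch (assumes md0 is raid1 or raid10); any boolean spelling
 * understood by kstrtobool works:
 *
 *   $ echo 1 > /sys/block/md0/md/fail_last_dev
 */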
5451 
5452 static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5453 {
5454 	if (mddev->pers == NULL || (mddev->pers->level != 1))
5455 		return sprintf(page, "n/a\n");
5456 	else
5457 		return sprintf(page, "%d\n", mddev->serialize_policy);
5458 }
5459 
5460 /*
5461  * Setting serialize_policy to true enforces that write IO is not
5462  * reordered for raid1.
5463  */
5464 static ssize_t
5465 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5466 {
5467 	int err;
5468 	bool value;
5469 
5470 	err = kstrtobool(buf, &value);
5471 	if (err)
5472 		return err;
5473 
5474 	if (value == mddev->serialize_policy)
5475 		return len;
5476 
5477 	err = mddev_lock(mddev);
5478 	if (err)
5479 		return err;
5480 	if (mddev->pers == NULL || (mddev->pers->level != 1)) {
5481 		pr_err("md: serialize_policy is only effective for raid1\n");
5482 		err = -EINVAL;
5483 		goto unlock;
5484 	}
5485 
5486 	mddev_suspend(mddev);
5487 	if (value)
5488 		mddev_create_serial_pool(mddev, NULL, true);
5489 	else
5490 		mddev_destroy_serial_pool(mddev, NULL, true);
5491 	mddev->serialize_policy = value;
5492 	mddev_resume(mddev);
5493 unlock:
5494 	mddev_unlock(mddev);
5495 	return err ?: len;
5496 }
5497 
5498 static struct md_sysfs_entry md_serialize_policy =
5499 __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5500        serialize_policy_store);
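
/*
 * Usage sketch (assumes md0 is raid1): the store method suspends the
 * array around the serial-pool create/destroy, so the write may briefly
 * stall in-flight IO.
 *
 *   $ echo true > /sys/block/md0/md/serialize_policy
 *   $ echo false > /sys/block/md0/md/serialize_policy
 */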
5501 
5502 
5503 static struct attribute *md_default_attrs[] = {
5504 	&md_level.attr,
5505 	&md_layout.attr,
5506 	&md_raid_disks.attr,
5507 	&md_uuid.attr,
5508 	&md_chunk_size.attr,
5509 	&md_size.attr,
5510 	&md_resync_start.attr,
5511 	&md_metadata.attr,
5512 	&md_new_device.attr,
5513 	&md_safe_delay.attr,
5514 	&md_array_state.attr,
5515 	&md_reshape_position.attr,
5516 	&md_reshape_direction.attr,
5517 	&md_array_size.attr,
5518 	&max_corr_read_errors.attr,
5519 	&md_consistency_policy.attr,
5520 	&md_fail_last_dev.attr,
5521 	&md_serialize_policy.attr,
5522 	NULL,
5523 };
5524 
5525 static const struct attribute_group md_default_group = {
5526 	.attrs = md_default_attrs,
5527 };
5528 
5529 static struct attribute *md_redundancy_attrs[] = {
5530 	&md_scan_mode.attr,
5531 	&md_last_scan_mode.attr,
5532 	&md_mismatches.attr,
5533 	&md_sync_min.attr,
5534 	&md_sync_max.attr,
5535 	&md_sync_speed.attr,
5536 	&md_sync_force_parallel.attr,
5537 	&md_sync_completed.attr,
5538 	&md_min_sync.attr,
5539 	&md_max_sync.attr,
5540 	&md_suspend_lo.attr,
5541 	&md_suspend_hi.attr,
5542 	&md_bitmap.attr,
5543 	&md_degraded.attr,
5544 	NULL,
5545 };
5546 static const struct attribute_group md_redundancy_group = {
5547 	.name = NULL,
5548 	.attrs = md_redundancy_attrs,
5549 };
5550 
5551 static const struct attribute_group *md_attr_groups[] = {
5552 	&md_default_group,
5553 	&md_bitmap_group,
5554 	NULL,
5555 };
5556 
5557 static ssize_t
5558 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5559 {
5560 	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5561 	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5562 	ssize_t rv;
5563 
5564 	if (!entry->show)
5565 		return -EIO;
5566 	spin_lock(&all_mddevs_lock);
5567 	if (!mddev_get(mddev)) {
5568 		spin_unlock(&all_mddevs_lock);
5569 		return -EBUSY;
5570 	}
5571 	spin_unlock(&all_mddevs_lock);
5572 
5573 	rv = entry->show(mddev, page);
5574 	mddev_put(mddev);
5575 	return rv;
5576 }
5577 
5578 static ssize_t
5579 md_attr_store(struct kobject *kobj, struct attribute *attr,
5580 	      const char *page, size_t length)
5581 {
5582 	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5583 	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5584 	ssize_t rv;
5585 
5586 	if (!entry->store)
5587 		return -EIO;
5588 	if (!capable(CAP_SYS_ADMIN))
5589 		return -EACCES;
5590 	spin_lock(&all_mddevs_lock);
5591 	if (!mddev_get(mddev)) {
5592 		spin_unlock(&all_mddevs_lock);
5593 		return -EBUSY;
5594 	}
5595 	spin_unlock(&all_mddevs_lock);
5596 	rv = entry->store(mddev, page, length);
5597 	mddev_put(mddev);
5598 	return rv;
5599 }
5600 
5601 static void md_kobj_release(struct kobject *ko)
5602 {
5603 	struct mddev *mddev = container_of(ko, struct mddev, kobj);
5604 
5605 	if (mddev->sysfs_state)
5606 		sysfs_put(mddev->sysfs_state);
5607 	if (mddev->sysfs_level)
5608 		sysfs_put(mddev->sysfs_level);
5609 
5610 	del_gendisk(mddev->gendisk);
5611 	put_disk(mddev->gendisk);
5612 }
5613 
5614 static const struct sysfs_ops md_sysfs_ops = {
5615 	.show	= md_attr_show,
5616 	.store	= md_attr_store,
5617 };
5618 static const struct kobj_type md_ktype = {
5619 	.release	= md_kobj_release,
5620 	.sysfs_ops	= &md_sysfs_ops,
5621 	.default_groups	= md_attr_groups,
5622 };
5623 
5624 int mdp_major = 0;
5625 
5626 static void mddev_delayed_delete(struct work_struct *ws)
5627 {
5628 	struct mddev *mddev = container_of(ws, struct mddev, del_work);
5629 
5630 	kobject_put(&mddev->kobj);
5631 }
5632 
5633 struct mddev *md_alloc(dev_t dev, char *name)
5634 {
5635 	/*
5636 	 * If dev is zero, name is the name of a device to allocate with
5637 	 * an arbitrary minor number.  It will be "md_???"
5638 	 * If dev is non-zero it must be a device number with a MAJOR of
5639 	 * MD_MAJOR or mdp_major.  In this case, if "name" is NULL, then
5640 	 * the device is being created by opening a node in /dev.
5641 	 * If "name" is not NULL, the device is being created by
5642 	 * writing to /sys/module/md_mod/parameters/new_array.
5643 	 */
5644 	static DEFINE_MUTEX(disks_mutex);
5645 	struct mddev *mddev;
5646 	struct gendisk *disk;
5647 	int partitioned;
5648 	int shift;
5649 	int unit;
5650 	int error;
5651 
5652 	/*
5653 	 * Wait for any previous instance of this device to be completely
5654 	 * removed (mddev_delayed_delete).
5655 	 */
5656 	flush_workqueue(md_misc_wq);
5657 
5658 	mutex_lock(&disks_mutex);
5659 	mddev = mddev_alloc(dev);
5660 	if (IS_ERR(mddev)) {
5661 		error = PTR_ERR(mddev);
5662 		goto out_unlock;
5663 	}
5664 
5665 	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5666 	shift = partitioned ? MdpMinorShift : 0;
5667 	unit = MINOR(mddev->unit) >> shift;
5668 
5669 	if (name && !dev) {
5670 		/* Need to ensure that 'name' is not a duplicate.
5671 		 */
5672 		struct mddev *mddev2;
5673 		spin_lock(&all_mddevs_lock);
5674 
5675 		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5676 			if (mddev2->gendisk &&
5677 			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
5678 				spin_unlock(&all_mddevs_lock);
5679 				error = -EEXIST;
5680 				goto out_free_mddev;
5681 			}
5682 		spin_unlock(&all_mddevs_lock);
5683 	}
5684 	if (name && dev)
5685 		/*
5686 		 * Creating /dev/mdNNN via "new_array", so adjust hold_active.
5687 		 */
5688 		mddev->hold_active = UNTIL_STOP;
5689 
5690 	error = -ENOMEM;
5691 	disk = blk_alloc_disk(NUMA_NO_NODE);
5692 	if (!disk)
5693 		goto out_free_mddev;
5694 
5695 	disk->major = MAJOR(mddev->unit);
5696 	disk->first_minor = unit << shift;
5697 	disk->minors = 1 << shift;
5698 	if (name)
5699 		strcpy(disk->disk_name, name);
5700 	else if (partitioned)
5701 		sprintf(disk->disk_name, "md_d%d", unit);
5702 	else
5703 		sprintf(disk->disk_name, "md%d", unit);
5704 	disk->fops = &md_fops;
5705 	disk->private_data = mddev;
5706 
5707 	mddev->queue = disk->queue;
5708 	blk_set_stacking_limits(&mddev->queue->limits);
5709 	blk_queue_write_cache(mddev->queue, true, true);
5710 	disk->events |= DISK_EVENT_MEDIA_CHANGE;
5711 	mddev->gendisk = disk;
5712 	error = add_disk(disk);
5713 	if (error)
5714 		goto out_put_disk;
5715 
5716 	kobject_init(&mddev->kobj, &md_ktype);
5717 	error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
5718 	if (error) {
5719 		/*
5720 		 * The disk is already live at this point.  Clear the hold flag
5721 		 * and let mddev_put take care of the deletion, as it isn't any
5722 		 * different from a normal close on last release now.
5723 		 */
5724 		mddev->hold_active = 0;
5725 		mutex_unlock(&disks_mutex);
5726 		mddev_put(mddev);
5727 		return ERR_PTR(error);
5728 	}
5729 
5730 	kobject_uevent(&mddev->kobj, KOBJ_ADD);
5731 	mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5732 	mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
5733 	mutex_unlock(&disks_mutex);
5734 	return mddev;
5735 
5736 out_put_disk:
5737 	put_disk(disk);
5738 out_free_mddev:
5739 	mddev_free(mddev);
5740 out_unlock:
5741 	mutex_unlock(&disks_mutex);
5742 	return ERR_PTR(error);
5743 }
5744 
5745 static int md_alloc_and_put(dev_t dev, char *name)
5746 {
5747 	struct mddev *mddev = md_alloc(dev, name);
5748 
5749 	if (IS_ERR(mddev))
5750 		return PTR_ERR(mddev);
5751 	mddev_put(mddev);
5752 	return 0;
5753 }
5754 
5755 static void md_probe(dev_t dev)
5756 {
5757 	if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
5758 		return;
5759 	if (create_on_open)
5760 		md_alloc_and_put(dev, NULL);
5761 }
5762 
5763 static int add_named_array(const char *val, const struct kernel_param *kp)
5764 {
5765 	/*
5766 	 * val must be "md_*" or "mdNNN".
5767 	 * For "md_*" we allocate an array with a large free minor number, and
5768 	 * set the name to val.  val must not already be an active name.
5769 	 * For "mdNNN" we allocate an array with the minor number NNN
5770 	 * which must not already be in use.
5771 	 */
5772 	int len = strlen(val);
5773 	char buf[DISK_NAME_LEN];
5774 	unsigned long devnum;
5775 
5776 	while (len && val[len-1] == '\n')
5777 		len--;
5778 	if (len >= DISK_NAME_LEN)
5779 		return -E2BIG;
5780 	strscpy(buf, val, len+1);
5781 	if (strncmp(buf, "md_", 3) == 0)
5782 		return md_alloc_and_put(0, buf);
5783 	if (strncmp(buf, "md", 2) == 0 &&
5784 	    isdigit(buf[2]) &&
5785 	    kstrtoul(buf+2, 10, &devnum) == 0 &&
5786 	    devnum <= MINORMASK)
5787 		return md_alloc_and_put(MKDEV(MD_MAJOR, devnum), NULL);
5788 
5789 	return -EINVAL;
5790 }
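
/*
 * Usage sketch for the "new_array" module parameter handled above:
 *
 *   $ echo md_home > /sys/module/md_mod/parameters/new_array   # named
 *   $ echo md127 > /sys/module/md_mod/parameters/new_array     # fixed minor
 */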
5791 
5792 static void md_safemode_timeout(struct timer_list *t)
5793 {
5794 	struct mddev *mddev = from_timer(mddev, t, safemode_timer);
5795 
5796 	mddev->safemode = 1;
5797 	if (mddev->external)
5798 		sysfs_notify_dirent_safe(mddev->sysfs_state);
5799 
5800 	md_wakeup_thread(mddev->thread);
5801 }
5802 
5803 static int start_dirty_degraded;
5804 
5805 int md_run(struct mddev *mddev)
5806 {
5807 	int err;
5808 	struct md_rdev *rdev;
5809 	struct md_personality *pers;
5810 	bool nowait = true;
5811 
5812 	if (list_empty(&mddev->disks))
5813 		/* cannot run an array with no devices. */
5814 		return -EINVAL;
5815 
5816 	if (mddev->pers)
5817 		return -EBUSY;
5818 	/* Cannot run until previous stop completes properly */
5819 	if (mddev->sysfs_active)
5820 		return -EBUSY;
5821 
5822 	/*
5823 	 * Analyze all RAID superblock(s)
5824 	 */
5825 	if (!mddev->raid_disks) {
5826 		if (!mddev->persistent)
5827 			return -EINVAL;
5828 		err = analyze_sbs(mddev);
5829 		if (err)
5830 			return -EINVAL;
5831 	}
5832 
5833 	if (mddev->level != LEVEL_NONE)
5834 		request_module("md-level-%d", mddev->level);
5835 	else if (mddev->clevel[0])
5836 		request_module("md-%s", mddev->clevel);
5837 
5838 	/*
5839 	 * Drop all container device buffers, from now on
5840 	 * the only valid external interface is through the md
5841 	 * device.
5842 	 */
5843 	mddev->has_superblocks = false;
5844 	rdev_for_each(rdev, mddev) {
5845 		if (test_bit(Faulty, &rdev->flags))
5846 			continue;
5847 		sync_blockdev(rdev->bdev);
5848 		invalidate_bdev(rdev->bdev);
5849 		if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
5850 			mddev->ro = MD_RDONLY;
5851 			if (mddev->gendisk)
5852 				set_disk_ro(mddev->gendisk, 1);
5853 		}
5854 
5855 		if (rdev->sb_page)
5856 			mddev->has_superblocks = true;
5857 
5858 		/* Perform some consistency tests on the device.
5859 		 * We don't want the data to overlap the metadata.
5860 		 * Internal bitmap issues have been handled elsewhere.
5861 		 */
5862 		if (rdev->meta_bdev) {
5863 			/* Nothing to check */;
5864 		} else if (rdev->data_offset < rdev->sb_start) {
5865 			if (mddev->dev_sectors &&
5866 			    rdev->data_offset + mddev->dev_sectors
5867 			    > rdev->sb_start) {
5868 				pr_warn("md: %s: data overlaps metadata\n",
5869 					mdname(mddev));
5870 				return -EINVAL;
5871 			}
5872 		} else {
5873 			if (rdev->sb_start + rdev->sb_size/512
5874 			    > rdev->data_offset) {
5875 				pr_warn("md: %s: metadata overlaps data\n",
5876 					mdname(mddev));
5877 				return -EINVAL;
5878 			}
5879 		}
5880 		sysfs_notify_dirent_safe(rdev->sysfs_state);
5881 		nowait = nowait && bdev_nowait(rdev->bdev);
5882 	}
5883 
5884 	if (!bioset_initialized(&mddev->bio_set)) {
5885 		err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5886 		if (err)
5887 			return err;
5888 	}
5889 	if (!bioset_initialized(&mddev->sync_set)) {
5890 		err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5891 		if (err)
5892 			goto exit_bio_set;
5893 	}
5894 
5895 	if (!bioset_initialized(&mddev->io_clone_set)) {
5896 		err = bioset_init(&mddev->io_clone_set, BIO_POOL_SIZE,
5897 				  offsetof(struct md_io_clone, bio_clone), 0);
5898 		if (err)
5899 			goto exit_sync_set;
5900 	}
5901 
5902 	spin_lock(&pers_lock);
5903 	pers = find_pers(mddev->level, mddev->clevel);
5904 	if (!pers || !try_module_get(pers->owner)) {
5905 		spin_unlock(&pers_lock);
5906 		if (mddev->level != LEVEL_NONE)
5907 			pr_warn("md: personality for level %d is not loaded!\n",
5908 				mddev->level);
5909 		else
5910 			pr_warn("md: personality for level %s is not loaded!\n",
5911 				mddev->clevel);
5912 		err = -EINVAL;
5913 		goto abort;
5914 	}
5915 	spin_unlock(&pers_lock);
5916 	if (mddev->level != pers->level) {
5917 		mddev->level = pers->level;
5918 		mddev->new_level = pers->level;
5919 	}
5920 	strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5921 
5922 	if (mddev->reshape_position != MaxSector &&
5923 	    pers->start_reshape == NULL) {
5924 		/* This personality cannot handle reshaping... */
5925 		module_put(pers->owner);
5926 		err = -EINVAL;
5927 		goto abort;
5928 	}
5929 
5930 	if (pers->sync_request) {
5931 		/* Warn if this is a potentially silly
5932 		 * configuration.
5933 		 */
5934 		struct md_rdev *rdev2;
5935 		int warned = 0;
5936 
5937 		rdev_for_each(rdev, mddev)
5938 			rdev_for_each(rdev2, mddev) {
5939 				if (rdev < rdev2 &&
5940 				    rdev->bdev->bd_disk ==
5941 				    rdev2->bdev->bd_disk) {
5942 					pr_warn("%s: WARNING: %pg appears to be on the same physical disk as %pg.\n",
5943 						mdname(mddev),
5944 						rdev->bdev,
5945 						rdev2->bdev);
5946 					warned = 1;
5947 				}
5948 			}
5949 
5950 		if (warned)
5951 			pr_warn("True protection against single-disk failure might be compromised.\n");
5952 	}
5953 
5954 	mddev->recovery = 0;
5955 	/* may be overridden by personality */
5956 	mddev->resync_max_sectors = mddev->dev_sectors;
5957 
5958 	mddev->ok_start_degraded = start_dirty_degraded;
5959 
5960 	if (start_readonly && md_is_rdwr(mddev))
5961 		mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */
5962 
5963 	err = pers->run(mddev);
5964 	if (err)
5965 		pr_warn("md: pers->run() failed ...\n");
5966 	else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5967 		WARN_ONCE(!mddev->external_size,
5968 			  "%s: default size too small, but 'external_size' not in effect?\n",
5969 			  __func__);
5970 		pr_warn("md: invalid array_size %llu > default size %llu\n",
5971 			(unsigned long long)mddev->array_sectors / 2,
5972 			(unsigned long long)pers->size(mddev, 0, 0) / 2);
5973 		err = -EINVAL;
5974 	}
5975 	if (err == 0 && pers->sync_request &&
5976 	    (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5977 		struct bitmap *bitmap;
5978 
5979 		bitmap = md_bitmap_create(mddev, -1);
5980 		if (IS_ERR(bitmap)) {
5981 			err = PTR_ERR(bitmap);
5982 			pr_warn("%s: failed to create bitmap (%d)\n",
5983 				mdname(mddev), err);
5984 		} else
5985 			mddev->bitmap = bitmap;
5986 
5987 	}
5988 	if (err)
5989 		goto bitmap_abort;
5990 
5991 	if (mddev->bitmap_info.max_write_behind > 0) {
5992 		bool create_pool = false;
5993 
5994 		rdev_for_each(rdev, mddev) {
5995 			if (test_bit(WriteMostly, &rdev->flags) &&
5996 			    rdev_init_serial(rdev))
5997 				create_pool = true;
5998 		}
5999 		if (create_pool && mddev->serial_info_pool == NULL) {
6000 			mddev->serial_info_pool =
6001 				mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
6002 						    sizeof(struct serial_info));
6003 			if (!mddev->serial_info_pool) {
6004 				err = -ENOMEM;
6005 				goto bitmap_abort;
6006 			}
6007 		}
6008 	}
6009 
6010 	if (mddev->queue) {
6011 		bool nonrot = true;
6012 
6013 		rdev_for_each(rdev, mddev) {
6014 			if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) {
6015 				nonrot = false;
6016 				break;
6017 			}
6018 		}
6019 		if (mddev->degraded)
6020 			nonrot = false;
6021 		if (nonrot)
6022 			blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
6023 		else
6024 			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
6025 		blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
6026 
6027 		/* Set the NOWAIT flag if all underlying devices support it */
6028 		if (nowait)
6029 			blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
6030 	}
6031 	if (pers->sync_request) {
6032 		if (mddev->kobj.sd &&
6033 		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))
6034 			pr_warn("md: cannot register extra attributes for %s\n",
6035 				mdname(mddev));
6036 		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
6037 		mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6038 		mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
6039 	} else if (mddev->ro == MD_AUTO_READ)
6040 		mddev->ro = MD_RDWR;
6041 
6042 	atomic_set(&mddev->max_corr_read_errors,
6043 		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
6044 	mddev->safemode = 0;
6045 	if (mddev_is_clustered(mddev))
6046 		mddev->safemode_delay = 0;
6047 	else
6048 		mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
6049 	mddev->in_sync = 1;
6050 	smp_wmb();
6051 	spin_lock(&mddev->lock);
6052 	mddev->pers = pers;
6053 	spin_unlock(&mddev->lock);
6054 	rdev_for_each(rdev, mddev)
6055 		if (rdev->raid_disk >= 0)
6056 			sysfs_link_rdev(mddev, rdev); /* failure here is OK */
6057 
6058 	if (mddev->degraded && md_is_rdwr(mddev))
6059 		/* This ensures that recovering status is reported immediately
6060 		 * via sysfs - until a lack of spares is confirmed.
6061 		 */
6062 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6063 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6064 
6065 	if (mddev->sb_flags)
6066 		md_update_sb(mddev, 0);
6067 
6068 	md_new_event();
6069 	return 0;
6070 
6071 bitmap_abort:
6072 	mddev_detach(mddev);
6073 	if (mddev->private)
6074 		pers->free(mddev, mddev->private);
6075 	mddev->private = NULL;
6076 	module_put(pers->owner);
6077 	md_bitmap_destroy(mddev);
6078 abort:
6079 	bioset_exit(&mddev->io_clone_set);
6080 exit_sync_set:
6081 	bioset_exit(&mddev->sync_set);
6082 exit_bio_set:
6083 	bioset_exit(&mddev->bio_set);
6084 	return err;
6085 }
6086 EXPORT_SYMBOL_GPL(md_run);
6087 
6088 int do_md_run(struct mddev *mddev)
6089 {
6090 	int err;
6091 
6092 	set_bit(MD_NOT_READY, &mddev->flags);
6093 	err = md_run(mddev);
6094 	if (err)
6095 		goto out;
6096 	err = md_bitmap_load(mddev);
6097 	if (err) {
6098 		md_bitmap_destroy(mddev);
6099 		goto out;
6100 	}
6101 
6102 	if (mddev_is_clustered(mddev))
6103 		md_allow_write(mddev);
6104 
6105 	/* run start up tasks that require md_thread */
6106 	md_start(mddev);
6107 
6108 	md_wakeup_thread(mddev->thread);
6109 	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6110 
6111 	set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
6112 	clear_bit(MD_NOT_READY, &mddev->flags);
6113 	mddev->changed = 1;
6114 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
6115 	sysfs_notify_dirent_safe(mddev->sysfs_state);
6116 	sysfs_notify_dirent_safe(mddev->sysfs_action);
6117 	sysfs_notify_dirent_safe(mddev->sysfs_degraded);
6118 out:
6119 	clear_bit(MD_NOT_READY, &mddev->flags);
6120 	return err;
6121 }
6122 
6123 int md_start(struct mddev *mddev)
6124 {
6125 	int ret = 0;
6126 
6127 	if (mddev->pers->start) {
6128 		set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6129 		md_wakeup_thread(mddev->thread);
6130 		ret = mddev->pers->start(mddev);
6131 		clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6132 		md_wakeup_thread(mddev->sync_thread);
6133 	}
6134 	return ret;
6135 }
6136 EXPORT_SYMBOL_GPL(md_start);
6137 
6138 static int restart_array(struct mddev *mddev)
6139 {
6140 	struct gendisk *disk = mddev->gendisk;
6141 	struct md_rdev *rdev;
6142 	bool has_journal = false;
6143 	bool has_readonly = false;
6144 
6145 	/* Complain if it has no devices */
6146 	if (list_empty(&mddev->disks))
6147 		return -ENXIO;
6148 	if (!mddev->pers)
6149 		return -EINVAL;
6150 	if (md_is_rdwr(mddev))
6151 		return -EBUSY;
6152 
6153 	rcu_read_lock();
6154 	rdev_for_each_rcu(rdev, mddev) {
6155 		if (test_bit(Journal, &rdev->flags) &&
6156 		    !test_bit(Faulty, &rdev->flags))
6157 			has_journal = true;
6158 		if (rdev_read_only(rdev))
6159 			has_readonly = true;
6160 	}
6161 	rcu_read_unlock();
6162 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6163 		/* Don't restart rw with journal missing/faulty */
6164 		return -EINVAL;
6165 	if (has_readonly)
6166 		return -EROFS;
6167 
6168 	mddev->safemode = 0;
6169 	mddev->ro = MD_RDWR;
6170 	set_disk_ro(disk, 0);
6171 	pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
6172 	/* Kick recovery or resync if necessary */
6173 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6174 	md_wakeup_thread(mddev->thread);
6175 	md_wakeup_thread(mddev->sync_thread);
6176 	sysfs_notify_dirent_safe(mddev->sysfs_state);
6177 	return 0;
6178 }
6179 
6180 static void md_clean(struct mddev *mddev)
6181 {
6182 	mddev->array_sectors = 0;
6183 	mddev->external_size = 0;
6184 	mddev->dev_sectors = 0;
6185 	mddev->raid_disks = 0;
6186 	mddev->recovery_cp = 0;
6187 	mddev->resync_min = 0;
6188 	mddev->resync_max = MaxSector;
6189 	mddev->reshape_position = MaxSector;
6190 	/* we still need mddev->external in export_rdev, do not clear it yet */
6191 	mddev->persistent = 0;
6192 	mddev->level = LEVEL_NONE;
6193 	mddev->clevel[0] = 0;
6194 	mddev->flags = 0;
6195 	mddev->sb_flags = 0;
6196 	mddev->ro = MD_RDWR;
6197 	mddev->metadata_type[0] = 0;
6198 	mddev->chunk_sectors = 0;
6199 	mddev->ctime = mddev->utime = 0;
6200 	mddev->layout = 0;
6201 	mddev->max_disks = 0;
6202 	mddev->events = 0;
6203 	mddev->can_decrease_events = 0;
6204 	mddev->delta_disks = 0;
6205 	mddev->reshape_backwards = 0;
6206 	mddev->new_level = LEVEL_NONE;
6207 	mddev->new_layout = 0;
6208 	mddev->new_chunk_sectors = 0;
6209 	mddev->curr_resync = MD_RESYNC_NONE;
6210 	atomic64_set(&mddev->resync_mismatches, 0);
6211 	mddev->suspend_lo = mddev->suspend_hi = 0;
6212 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
6213 	mddev->recovery = 0;
6214 	mddev->in_sync = 0;
6215 	mddev->changed = 0;
6216 	mddev->degraded = 0;
6217 	mddev->safemode = 0;
6218 	mddev->private = NULL;
6219 	mddev->cluster_info = NULL;
6220 	mddev->bitmap_info.offset = 0;
6221 	mddev->bitmap_info.default_offset = 0;
6222 	mddev->bitmap_info.default_space = 0;
6223 	mddev->bitmap_info.chunksize = 0;
6224 	mddev->bitmap_info.daemon_sleep = 0;
6225 	mddev->bitmap_info.max_write_behind = 0;
6226 	mddev->bitmap_info.nodes = 0;
6227 }
6228 
6229 static void __md_stop_writes(struct mddev *mddev)
6230 {
6231 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6232 	if (work_pending(&mddev->del_work))
6233 		flush_workqueue(md_misc_wq);
6234 	if (mddev->sync_thread) {
6235 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6236 		md_reap_sync_thread(mddev);
6237 	}
6238 
6239 	del_timer_sync(&mddev->safemode_timer);
6240 
6241 	if (mddev->pers && mddev->pers->quiesce) {
6242 		mddev->pers->quiesce(mddev, 1);
6243 		mddev->pers->quiesce(mddev, 0);
6244 	}
6245 	md_bitmap_flush(mddev);
6246 
6247 	if (md_is_rdwr(mddev) &&
6248 	    ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
6249 	     mddev->sb_flags)) {
6250 		/* mark array as shutdown cleanly */
6251 		if (!mddev_is_clustered(mddev))
6252 			mddev->in_sync = 1;
6253 		md_update_sb(mddev, 1);
6254 	}
6255 	/* disable policy to guarantee rdevs free resources for serialization */
6256 	mddev->serialize_policy = 0;
6257 	mddev_destroy_serial_pool(mddev, NULL, true);
6258 }
6259 
6260 void md_stop_writes(struct mddev *mddev)
6261 {
6262 	mddev_lock_nointr(mddev);
6263 	__md_stop_writes(mddev);
6264 	mddev_unlock(mddev);
6265 }
6266 EXPORT_SYMBOL_GPL(md_stop_writes);
6267 
6268 static void mddev_detach(struct mddev *mddev)
6269 {
6270 	md_bitmap_wait_behind_writes(mddev);
6271 	if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) {
6272 		mddev->pers->quiesce(mddev, 1);
6273 		mddev->pers->quiesce(mddev, 0);
6274 	}
6275 	md_unregister_thread(mddev, &mddev->thread);
6276 	if (mddev->queue)
6277 		blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
6278 }
6279 
6280 static void __md_stop(struct mddev *mddev)
6281 {
6282 	struct md_personality *pers = mddev->pers;
6283 	md_bitmap_destroy(mddev);
6284 	mddev_detach(mddev);
6285 	/* Ensure ->event_work is done */
6286 	if (mddev->event_work.func)
6287 		flush_workqueue(md_misc_wq);
6288 	spin_lock(&mddev->lock);
6289 	mddev->pers = NULL;
6290 	spin_unlock(&mddev->lock);
6291 	if (mddev->private)
6292 		pers->free(mddev, mddev->private);
6293 	mddev->private = NULL;
6294 	if (pers->sync_request && mddev->to_remove == NULL)
6295 		mddev->to_remove = &md_redundancy_group;
6296 	module_put(pers->owner);
6297 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6298 
6299 	bioset_exit(&mddev->bio_set);
6300 	bioset_exit(&mddev->sync_set);
6301 	bioset_exit(&mddev->io_clone_set);
6302 }
6303 
6304 void md_stop(struct mddev *mddev)
6305 {
6306 	lockdep_assert_held(&mddev->reconfig_mutex);
6307 
6308 	/* Stop the array and free any attached data structures.
6309 	 * This is called from dm-raid.
6310 	 */
6311 	__md_stop_writes(mddev);
6312 	__md_stop(mddev);
6313 }
6314 
6315 EXPORT_SYMBOL_GPL(md_stop);
6316 
6317 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
6318 {
6319 	int err = 0;
6320 	int did_freeze = 0;
6321 
6322 	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6323 		did_freeze = 1;
6324 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6325 		md_wakeup_thread(mddev->thread);
6326 	}
6327 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6328 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6329 
6330 	/*
6331 	 * Thread might be blocked waiting for metadata update which will now
6332 	 * never happen
6333 	 */
6334 	md_wakeup_thread_directly(mddev->sync_thread);
6335 
6336 	if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
6337 		return -EBUSY;
6338 	mddev_unlock(mddev);
6339 	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6340 					  &mddev->recovery));
6341 	wait_event(mddev->sb_wait,
6342 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
6343 	mddev_lock_nointr(mddev);
6344 
6345 	mutex_lock(&mddev->open_mutex);
6346 	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6347 	    mddev->sync_thread ||
6348 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6349 		pr_warn("md: %s still in use.\n", mdname(mddev));
6350 		if (did_freeze) {
6351 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6352 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6353 			md_wakeup_thread(mddev->thread);
6354 		}
6355 		err = -EBUSY;
6356 		goto out;
6357 	}
6358 	if (mddev->pers) {
6359 		__md_stop_writes(mddev);
6360 
6361 		err = -ENXIO;
6362 		if (mddev->ro == MD_RDONLY)
6363 			goto out;
6364 		mddev->ro = MD_RDONLY;
6365 		set_disk_ro(mddev->gendisk, 1);
6366 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6367 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6368 		md_wakeup_thread(mddev->thread);
6369 		sysfs_notify_dirent_safe(mddev->sysfs_state);
6370 		err = 0;
6371 	}
6372 out:
6373 	mutex_unlock(&mddev->open_mutex);
6374 	return err;
6375 }
6376 
6377 /* mode:
6378  *   0 - completely stop and disassemble array
6379  *   2 - stop but do not disassemble array
6380  */
6381 static int do_md_stop(struct mddev *mddev, int mode,
6382 		      struct block_device *bdev)
6383 {
6384 	struct gendisk *disk = mddev->gendisk;
6385 	struct md_rdev *rdev;
6386 	int did_freeze = 0;
6387 
6388 	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6389 		did_freeze = 1;
6390 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6391 		md_wakeup_thread(mddev->thread);
6392 	}
6393 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6394 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6395 
6396 	/*
6397 	 * Thread might be blocked waiting for metadata update which will now
6398 	 * never happen
6399 	 */
6400 	md_wakeup_thread_directly(mddev->sync_thread);
6401 
6402 	mddev_unlock(mddev);
6403 	wait_event(resync_wait, (mddev->sync_thread == NULL &&
6404 				 !test_bit(MD_RECOVERY_RUNNING,
6405 					   &mddev->recovery)));
6406 	mddev_lock_nointr(mddev);
6407 
6408 	mutex_lock(&mddev->open_mutex);
6409 	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6410 	    mddev->sysfs_active ||
6411 	    mddev->sync_thread ||
6412 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6413 		pr_warn("md: %s still in use.\n", mdname(mddev));
6414 		mutex_unlock(&mddev->open_mutex);
6415 		if (did_freeze) {
6416 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6417 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6418 			md_wakeup_thread(mddev->thread);
6419 		}
6420 		return -EBUSY;
6421 	}
6422 	if (mddev->pers) {
6423 		if (!md_is_rdwr(mddev))
6424 			set_disk_ro(disk, 0);
6425 
6426 		__md_stop_writes(mddev);
6427 		__md_stop(mddev);
6428 
6429 		/* tell userspace to handle 'inactive' */
6430 		sysfs_notify_dirent_safe(mddev->sysfs_state);
6431 
6432 		rdev_for_each(rdev, mddev)
6433 			if (rdev->raid_disk >= 0)
6434 				sysfs_unlink_rdev(mddev, rdev);
6435 
6436 		set_capacity_and_notify(disk, 0);
6437 		mutex_unlock(&mddev->open_mutex);
6438 		mddev->changed = 1;
6439 
6440 		if (!md_is_rdwr(mddev))
6441 			mddev->ro = MD_RDWR;
6442 	} else
6443 		mutex_unlock(&mddev->open_mutex);
6444 	/*
6445 	 * Free resources if final stop
6446 	 */
6447 	if (mode == 0) {
6448 		pr_info("md: %s stopped.\n", mdname(mddev));
6449 
6450 		if (mddev->bitmap_info.file) {
6451 			struct file *f = mddev->bitmap_info.file;
6452 			spin_lock(&mddev->lock);
6453 			mddev->bitmap_info.file = NULL;
6454 			spin_unlock(&mddev->lock);
6455 			fput(f);
6456 		}
6457 		mddev->bitmap_info.offset = 0;
6458 
6459 		export_array(mddev);
6460 
6461 		md_clean(mddev);
6462 		if (mddev->hold_active == UNTIL_STOP)
6463 			mddev->hold_active = 0;
6464 	}
6465 	md_new_event();
6466 	sysfs_notify_dirent_safe(mddev->sysfs_state);
6467 	return 0;
6468 }
6469 
6470 #ifndef MODULE
6471 static void autorun_array(struct mddev *mddev)
6472 {
6473 	struct md_rdev *rdev;
6474 	int err;
6475 
6476 	if (list_empty(&mddev->disks))
6477 		return;
6478 
6479 	pr_info("md: running: ");
6480 
6481 	rdev_for_each(rdev, mddev) {
6482 		pr_cont("<%pg>", rdev->bdev);
6483 	}
6484 	pr_cont("\n");
6485 
6486 	err = do_md_run(mddev);
6487 	if (err) {
6488 		pr_warn("md: do_md_run() returned %d\n", err);
6489 		do_md_stop(mddev, 0, NULL);
6490 	}
6491 }
6492 
6493 /*
6494  * Let's try to run arrays based on all disks that have arrived
6495  * until now. (those are in pending_raid_disks)
6496  *
6497  * The method: pick the first pending disk, collect all disks with
6498  * the same UUID, remove all from the pending list and put them into
6499  * the 'same_array' list. Then order this list based on superblock
6500  * update time (freshest comes first), kick out 'old' disks and
6501  * compare superblocks. If everything's fine then run it.
6502  *
6503  * If "unit" is allocated, then bump its reference count
6504  */
6505 static void autorun_devices(int part)
6506 {
6507 	struct md_rdev *rdev0, *rdev, *tmp;
6508 	struct mddev *mddev;
6509 
6510 	pr_info("md: autorun ...\n");
6511 	while (!list_empty(&pending_raid_disks)) {
6512 		int unit;
6513 		dev_t dev;
6514 		LIST_HEAD(candidates);
6515 		rdev0 = list_entry(pending_raid_disks.next,
6516 					 struct md_rdev, same_set);
6517 
6518 		pr_debug("md: considering %pg ...\n", rdev0->bdev);
6519 		INIT_LIST_HEAD(&candidates);
6520 		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
6521 			if (super_90_load(rdev, rdev0, 0) >= 0) {
6522 				pr_debug("md:  adding %pg ...\n",
6523 					 rdev->bdev);
6524 				list_move(&rdev->same_set, &candidates);
6525 			}
6526 		/*
6527 		 * now we have a set of devices, with all of them having
6528 		 * mostly sane superblocks. It's time to allocate the
6529 		 * mddev.
6530 		 */
6531 		if (part) {
6532 			dev = MKDEV(mdp_major,
6533 				    rdev0->preferred_minor << MdpMinorShift);
6534 			unit = MINOR(dev) >> MdpMinorShift;
6535 		} else {
6536 			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6537 			unit = MINOR(dev);
6538 		}
6539 		if (rdev0->preferred_minor != unit) {
6540 			pr_warn("md: unit number in %pg is bad: %d\n",
6541 				rdev0->bdev, rdev0->preferred_minor);
6542 			break;
6543 		}
6544 
6545 		mddev = md_alloc(dev, NULL);
6546 		if (IS_ERR(mddev))
6547 			break;
6548 
6549 		if (mddev_lock(mddev))
6550 			pr_warn("md: %s locked, cannot run\n", mdname(mddev));
6551 		else if (mddev->raid_disks || mddev->major_version
6552 			 || !list_empty(&mddev->disks)) {
6553 			pr_warn("md: %s already running, cannot run %pg\n",
6554 				mdname(mddev), rdev0->bdev);
6555 			mddev_unlock(mddev);
6556 		} else {
6557 			pr_debug("md: created %s\n", mdname(mddev));
6558 			mddev->persistent = 1;
6559 			rdev_for_each_list(rdev, tmp, &candidates) {
6560 				list_del_init(&rdev->same_set);
6561 				if (bind_rdev_to_array(rdev, mddev))
6562 					export_rdev(rdev, mddev);
6563 			}
6564 			autorun_array(mddev);
6565 			mddev_unlock(mddev);
6566 		}
6567 		/* on success, candidates will be empty, on error
6568 		 * it won't...
6569 		 */
6570 		rdev_for_each_list(rdev, tmp, &candidates) {
6571 			list_del_init(&rdev->same_set);
6572 			export_rdev(rdev, mddev);
6573 		}
6574 		mddev_put(mddev);
6575 	}
6576 	pr_info("md: ... autorun DONE.\n");
6577 }
6578 #endif /* !MODULE */
6579 
6580 static int get_version(void __user *arg)
6581 {
6582 	mdu_version_t ver;
6583 
6584 	ver.major = MD_MAJOR_VERSION;
6585 	ver.minor = MD_MINOR_VERSION;
6586 	ver.patchlevel = MD_PATCHLEVEL_VERSION;
6587 
6588 	if (copy_to_user(arg, &ver, sizeof(ver)))
6589 		return -EFAULT;
6590 
6591 	return 0;
6592 }
6593 
6594 static int get_array_info(struct mddev *mddev, void __user *arg)
6595 {
6596 	mdu_array_info_t info;
6597 	int nr, working, insync, failed, spare;
6598 	struct md_rdev *rdev;
6599 
6600 	nr = working = insync = failed = spare = 0;
6601 	rcu_read_lock();
6602 	rdev_for_each_rcu(rdev, mddev) {
6603 		nr++;
6604 		if (test_bit(Faulty, &rdev->flags))
6605 			failed++;
6606 		else {
6607 			working++;
6608 			if (test_bit(In_sync, &rdev->flags))
6609 				insync++;
6610 			else if (test_bit(Journal, &rdev->flags))
6611 				/* TODO: add journal count to md_u.h */
6612 				;
6613 			else
6614 				spare++;
6615 		}
6616 	}
6617 	rcu_read_unlock();
6618 
6619 	info.major_version = mddev->major_version;
6620 	info.minor_version = mddev->minor_version;
6621 	info.patch_version = MD_PATCHLEVEL_VERSION;
6622 	info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
6623 	info.level         = mddev->level;
6624 	info.size          = mddev->dev_sectors / 2;
6625 	if (info.size != mddev->dev_sectors / 2) /* overflow */
6626 		info.size = -1;
6627 	info.nr_disks      = nr;
6628 	info.raid_disks    = mddev->raid_disks;
6629 	info.md_minor      = mddev->md_minor;
6630 	info.not_persistent = !mddev->persistent;
6631 
6632 	info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
6633 	info.state         = 0;
6634 	if (mddev->in_sync)
6635 		info.state = (1<<MD_SB_CLEAN);
6636 	if (mddev->bitmap && mddev->bitmap_info.offset)
6637 		info.state |= (1<<MD_SB_BITMAP_PRESENT);
6638 	if (mddev_is_clustered(mddev))
6639 		info.state |= (1<<MD_SB_CLUSTERED);
6640 	info.active_disks  = insync;
6641 	info.working_disks = working;
6642 	info.failed_disks  = failed;
6643 	info.spare_disks   = spare;
6644 
6645 	info.layout        = mddev->layout;
6646 	info.chunk_size    = mddev->chunk_sectors << 9;
6647 
6648 	if (copy_to_user(arg, &info, sizeof(info)))
6649 		return -EFAULT;
6650 
6651 	return 0;
6652 }
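
/*
 * Userspace sketch (assumes fd is an open descriptor on /dev/md0) of
 * the GET_ARRAY_INFO ioctl that get_array_info() serves:
 *
 *   #include <sys/ioctl.h>
 *   #include <linux/raid/md_u.h>
 *
 *   mdu_array_info_t info;
 *   if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *           printf("level=%d raid_disks=%d active=%d\n",
 *                  info.level, info.raid_disks, info.active_disks);
 */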
6653 
6654 static int get_bitmap_file(struct mddev *mddev, void __user *arg)
6655 {
6656 	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
6657 	char *ptr;
6658 	int err;
6659 
6660 	file = kzalloc(sizeof(*file), GFP_NOIO);
6661 	if (!file)
6662 		return -ENOMEM;
6663 
6664 	err = 0;
6665 	spin_lock(&mddev->lock);
6666 	/* bitmap enabled */
6667 	if (mddev->bitmap_info.file) {
6668 		ptr = file_path(mddev->bitmap_info.file, file->pathname,
6669 				sizeof(file->pathname));
6670 		if (IS_ERR(ptr))
6671 			err = PTR_ERR(ptr);
6672 		else
6673 			memmove(file->pathname, ptr,
6674 				sizeof(file->pathname)-(ptr-file->pathname));
6675 	}
6676 	spin_unlock(&mddev->lock);
6677 
6678 	if (err == 0 &&
6679 	    copy_to_user(arg, file, sizeof(*file)))
6680 		err = -EFAULT;
6681 
6682 	kfree(file);
6683 	return err;
6684 }
6685 
6686 static int get_disk_info(struct mddev *mddev, void __user *arg)
6687 {
6688 	mdu_disk_info_t info;
6689 	struct md_rdev *rdev;
6690 
6691 	if (copy_from_user(&info, arg, sizeof(info)))
6692 		return -EFAULT;
6693 
6694 	rcu_read_lock();
6695 	rdev = md_find_rdev_nr_rcu(mddev, info.number);
6696 	if (rdev) {
6697 		info.major = MAJOR(rdev->bdev->bd_dev);
6698 		info.minor = MINOR(rdev->bdev->bd_dev);
6699 		info.raid_disk = rdev->raid_disk;
6700 		info.state = 0;
6701 		if (test_bit(Faulty, &rdev->flags))
6702 			info.state |= (1<<MD_DISK_FAULTY);
6703 		else if (test_bit(In_sync, &rdev->flags)) {
6704 			info.state |= (1<<MD_DISK_ACTIVE);
6705 			info.state |= (1<<MD_DISK_SYNC);
6706 		}
6707 		if (test_bit(Journal, &rdev->flags))
6708 			info.state |= (1<<MD_DISK_JOURNAL);
6709 		if (test_bit(WriteMostly, &rdev->flags))
6710 			info.state |= (1<<MD_DISK_WRITEMOSTLY);
6711 		if (test_bit(FailFast, &rdev->flags))
6712 			info.state |= (1<<MD_DISK_FAILFAST);
6713 	} else {
6714 		info.major = info.minor = 0;
6715 		info.raid_disk = -1;
6716 		info.state = (1<<MD_DISK_REMOVED);
6717 	}
6718 	rcu_read_unlock();
6719 
6720 	if (copy_to_user(arg, &info, sizeof(info)))
6721 		return -EFAULT;
6722 
6723 	return 0;
6724 }
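
/*
 * Userspace sketch (assumes fd is open on /dev/md0): GET_DISK_INFO is
 * a query by slot; info.number selects which rdev to describe.
 *
 *   mdu_disk_info_t dinfo = { .number = 0 };
 *   if (ioctl(fd, GET_DISK_INFO, &dinfo) == 0 &&
 *       !(dinfo.state & (1 << MD_DISK_REMOVED)))
 *           printf("slot 0 is %d:%d\n", dinfo.major, dinfo.minor);
 */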
6725 
6726 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
6727 {
6728 	struct md_rdev *rdev;
6729 	dev_t dev = MKDEV(info->major, info->minor);
6730 
6731 	if (mddev_is_clustered(mddev) &&
6732 		!(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
6733 		pr_warn("%s: Cannot add to clustered mddev.\n",
6734 			mdname(mddev));
6735 		return -EINVAL;
6736 	}
6737 
6738 	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6739 		return -EOVERFLOW;
6740 
6741 	if (!mddev->raid_disks) {
6742 		int err;
6743 		/* expecting a device which has a superblock */
6744 		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6745 		if (IS_ERR(rdev)) {
6746 			pr_warn("md: md_import_device returned %ld\n",
6747 				PTR_ERR(rdev));
6748 			return PTR_ERR(rdev);
6749 		}
6750 		if (!list_empty(&mddev->disks)) {
6751 			struct md_rdev *rdev0
6752 				= list_entry(mddev->disks.next,
6753 					     struct md_rdev, same_set);
6754 			err = super_types[mddev->major_version]
6755 				.load_super(rdev, rdev0, mddev->minor_version);
6756 			if (err < 0) {
6757 				pr_warn("md: %pg has different UUID to %pg\n",
6758 					rdev->bdev,
6759 					rdev0->bdev);
6760 				export_rdev(rdev, mddev);
6761 				return -EINVAL;
6762 			}
6763 		}
6764 		err = bind_rdev_to_array(rdev, mddev);
6765 		if (err)
6766 			export_rdev(rdev, mddev);
6767 		return err;
6768 	}
6769 
6770 	/*
6771 	 * md_add_new_disk can be used once the array is assembled
6772 	 * to add "hot spares".  They must already have a superblock
6773 	 * written.
6774 	 */
6775 	if (mddev->pers) {
6776 		int err;
6777 		if (!mddev->pers->hot_add_disk) {
6778 			pr_warn("%s: personality does not support diskops!\n",
6779 				mdname(mddev));
6780 			return -EINVAL;
6781 		}
6782 		if (mddev->persistent)
6783 			rdev = md_import_device(dev, mddev->major_version,
6784 						mddev->minor_version);
6785 		else
6786 			rdev = md_import_device(dev, -1, -1);
6787 		if (IS_ERR(rdev)) {
6788 			pr_warn("md: md_import_device returned %ld\n",
6789 				PTR_ERR(rdev));
6790 			return PTR_ERR(rdev);
6791 		}
6792 		/* set saved_raid_disk if appropriate */
6793 		if (!mddev->persistent) {
6794 			if (info->state & (1<<MD_DISK_SYNC)  &&
6795 			    info->raid_disk < mddev->raid_disks) {
6796 				rdev->raid_disk = info->raid_disk;
6797 				clear_bit(Bitmap_sync, &rdev->flags);
6798 			} else
6799 				rdev->raid_disk = -1;
6800 			rdev->saved_raid_disk = rdev->raid_disk;
6801 		} else
6802 			super_types[mddev->major_version].
6803 				validate_super(mddev, rdev);
6804 		if ((info->state & (1<<MD_DISK_SYNC)) &&
6805 		     rdev->raid_disk != info->raid_disk) {
6806 			/* This was a hot-add request, but events doesn't
6807 			 * match, so reject it.
6808 			 */
6809 			export_rdev(rdev, mddev);
6810 			return -EINVAL;
6811 		}
6812 
6813 		clear_bit(In_sync, &rdev->flags); /* just to be sure */
6814 		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6815 			set_bit(WriteMostly, &rdev->flags);
6816 		else
6817 			clear_bit(WriteMostly, &rdev->flags);
6818 		if (info->state & (1<<MD_DISK_FAILFAST))
6819 			set_bit(FailFast, &rdev->flags);
6820 		else
6821 			clear_bit(FailFast, &rdev->flags);
6822 
6823 		if (info->state & (1<<MD_DISK_JOURNAL)) {
6824 			struct md_rdev *rdev2;
6825 			bool has_journal = false;
6826 
6827 			/* make sure no existing journal disk */
6828 			rdev_for_each(rdev2, mddev) {
6829 				if (test_bit(Journal, &rdev2->flags)) {
6830 					has_journal = true;
6831 					break;
6832 				}
6833 			}
6834 			if (has_journal || mddev->bitmap) {
6835 				export_rdev(rdev, mddev);
6836 				return -EBUSY;
6837 			}
6838 			set_bit(Journal, &rdev->flags);
6839 		}
6840 		/*
6841 		 * check whether the device shows up in other nodes
6842 		 */
6843 		if (mddev_is_clustered(mddev)) {
6844 			if (info->state & (1 << MD_DISK_CANDIDATE))
6845 				set_bit(Candidate, &rdev->flags);
6846 			else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6847 				/* --add initiated by this node */
6848 				err = md_cluster_ops->add_new_disk(mddev, rdev);
6849 				if (err) {
6850 					export_rdev(rdev, mddev);
6851 					return err;
6852 				}
6853 			}
6854 		}
6855 
6856 		rdev->raid_disk = -1;
6857 		err = bind_rdev_to_array(rdev, mddev);
6858 
6859 		if (err)
6860 			export_rdev(rdev, mddev);
6861 
6862 		if (mddev_is_clustered(mddev)) {
6863 			if (info->state & (1 << MD_DISK_CANDIDATE)) {
6864 				if (!err) {
6865 					err = md_cluster_ops->new_disk_ack(mddev,
6866 						err == 0);
6867 					if (err)
6868 						md_kick_rdev_from_array(rdev);
6869 				}
6870 			} else {
6871 				if (err)
6872 					md_cluster_ops->add_new_disk_cancel(mddev);
6873 				else
6874 					err = add_bound_rdev(rdev);
6875 			}
6876 
6877 		} else if (!err)
6878 			err = add_bound_rdev(rdev);
6879 
6880 		return err;
6881 	}
6882 
6883 	/* otherwise, md_add_new_disk is only allowed
6884 	 * for major_version==0 superblocks
6885 	 */
6886 	if (mddev->major_version != 0) {
6887 		pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
6888 		return -EINVAL;
6889 	}
6890 
6891 	if (!(info->state & (1<<MD_DISK_FAULTY))) {
6892 		int err;
6893 		rdev = md_import_device(dev, -1, 0);
6894 		if (IS_ERR(rdev)) {
6895 			pr_warn("md: error, md_import_device() returned %ld\n",
6896 				PTR_ERR(rdev));
6897 			return PTR_ERR(rdev);
6898 		}
6899 		rdev->desc_nr = info->number;
6900 		if (info->raid_disk < mddev->raid_disks)
6901 			rdev->raid_disk = info->raid_disk;
6902 		else
6903 			rdev->raid_disk = -1;
6904 
6905 		if (rdev->raid_disk < mddev->raid_disks)
6906 			if (info->state & (1<<MD_DISK_SYNC))
6907 				set_bit(In_sync, &rdev->flags);
6908 
6909 		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6910 			set_bit(WriteMostly, &rdev->flags);
6911 		if (info->state & (1<<MD_DISK_FAILFAST))
6912 			set_bit(FailFast, &rdev->flags);
6913 
6914 		if (!mddev->persistent) {
6915 			pr_debug("md: nonpersistent superblock ...\n");
6916 			rdev->sb_start = bdev_nr_sectors(rdev->bdev);
6917 		} else
6918 			rdev->sb_start = calc_dev_sboffset(rdev);
6919 		rdev->sectors = rdev->sb_start;
6920 
6921 		err = bind_rdev_to_array(rdev, mddev);
6922 		if (err) {
6923 			export_rdev(rdev, mddev);
6924 			return err;
6925 		}
6926 	}
6927 
6928 	return 0;
6929 }
6930 
6931 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6932 {
6933 	struct md_rdev *rdev;
6934 
6935 	if (!mddev->pers)
6936 		return -ENODEV;
6937 
6938 	rdev = find_rdev(mddev, dev);
6939 	if (!rdev)
6940 		return -ENXIO;
6941 
6942 	if (rdev->raid_disk < 0)
6943 		goto kick_rdev;
6944 
6945 	clear_bit(Blocked, &rdev->flags);
6946 	remove_and_add_spares(mddev, rdev);
6947 
6948 	if (rdev->raid_disk >= 0)
6949 		goto busy;
6950 
6951 kick_rdev:
6952 	if (mddev_is_clustered(mddev)) {
6953 		if (md_cluster_ops->remove_disk(mddev, rdev))
6954 			goto busy;
6955 	}
6956 
6957 	md_kick_rdev_from_array(rdev);
6958 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6959 	if (mddev->thread)
6960 		md_wakeup_thread(mddev->thread);
6961 	else
6962 		md_update_sb(mddev, 1);
6963 	md_new_event();
6964 
6965 	return 0;
6966 busy:
6967 	pr_debug("md: cannot remove active disk %pg from %s ...\n",
6968 		 rdev->bdev, mdname(mddev));
6969 	return -EBUSY;
6970 }
6971 
6972 static int hot_add_disk(struct mddev *mddev, dev_t dev)
6973 {
6974 	int err;
6975 	struct md_rdev *rdev;
6976 
6977 	if (!mddev->pers)
6978 		return -ENODEV;
6979 
6980 	if (mddev->major_version != 0) {
6981 		pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
6982 			mdname(mddev));
6983 		return -EINVAL;
6984 	}
6985 	if (!mddev->pers->hot_add_disk) {
6986 		pr_warn("%s: personality does not support diskops!\n",
6987 			mdname(mddev));
6988 		return -EINVAL;
6989 	}
6990 
6991 	rdev = md_import_device(dev, -1, 0);
6992 	if (IS_ERR(rdev)) {
6993 		pr_warn("md: error, md_import_device() returned %ld\n",
6994 			PTR_ERR(rdev));
6995 		return -EINVAL;
6996 	}
6997 
6998 	if (mddev->persistent)
6999 		rdev->sb_start = calc_dev_sboffset(rdev);
7000 	else
7001 		rdev->sb_start = bdev_nr_sectors(rdev->bdev);
7002 
7003 	rdev->sectors = rdev->sb_start;
7004 
7005 	if (test_bit(Faulty, &rdev->flags)) {
7006 		pr_warn("md: can not hot-add faulty %pg disk to %s!\n",
7007 			rdev->bdev, mdname(mddev));
7008 		err = -EINVAL;
7009 		goto abort_export;
7010 	}
7011 
7012 	clear_bit(In_sync, &rdev->flags);
7013 	rdev->desc_nr = -1;
7014 	rdev->saved_raid_disk = -1;
7015 	err = bind_rdev_to_array(rdev, mddev);
7016 	if (err)
7017 		goto abort_export;
7018 
7019 	/*
7020 	 * The rest had better be atomic; disk failures can be
7021 	 * noticed in interrupt context ...
7022 	 */
7023 
7024 	rdev->raid_disk = -1;
7025 
7026 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7027 	if (!mddev->thread)
7028 		md_update_sb(mddev, 1);
7029 	/*
7030 	 * If the new disk does not support REQ_NOWAIT,
7031 	 * disable on the whole MD.
7032 	 */
7033 	if (!bdev_nowait(rdev->bdev)) {
7034 		pr_info("%s: Disabling nowait because %pg does not support nowait\n",
7035 			mdname(mddev), rdev->bdev);
7036 		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
7037 	}
7038 	/*
7039 	 * Kick recovery, maybe this spare has to be added to the
7040 	 * array immediately.
7041 	 */
7042 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7043 	md_wakeup_thread(mddev->thread);
7044 	md_new_event();
7045 	return 0;
7046 
7047 abort_export:
7048 	export_rdev(rdev, mddev);
7049 	return err;
7050 }
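
/*
 * Userspace sketch (assumes fd is open on /dev/md0 and the array uses
 * v0.90 metadata): HOT_ADD_DISK and HOT_REMOVE_DISK take the
 * component's device number as the ioctl argument; the kernel decodes
 * it with new_decode_dev().
 *
 *   ioctl(fd, HOT_ADD_DISK, (unsigned long)dev);
 *   ioctl(fd, HOT_REMOVE_DISK, (unsigned long)dev);
 */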
7051 
7052 static int set_bitmap_file(struct mddev *mddev, int fd)
7053 {
7054 	int err = 0;
7055 
7056 	if (mddev->pers) {
7057 		if (!mddev->pers->quiesce || !mddev->thread)
7058 			return -EBUSY;
7059 		if (mddev->recovery || mddev->sync_thread)
7060 			return -EBUSY;
7061 		/* we should be able to change the bitmap. */
7062 	}
7063 
7064 	if (fd >= 0) {
7065 		struct inode *inode;
7066 		struct file *f;
7067 
7068 		if (mddev->bitmap || mddev->bitmap_info.file)
7069 			return -EEXIST; /* cannot add when bitmap is present */
7070 
7071 		if (!IS_ENABLED(CONFIG_MD_BITMAP_FILE)) {
7072 			pr_warn("%s: bitmap files not supported by this kernel\n",
7073 				mdname(mddev));
7074 			return -EINVAL;
7075 		}
7076 		pr_warn("%s: using deprecated bitmap file support\n",
7077 			mdname(mddev));
7078 
7079 		f = fget(fd);
7080 
7081 		if (f == NULL) {
7082 			pr_warn("%s: error: failed to get bitmap file\n",
7083 				mdname(mddev));
7084 			return -EBADF;
7085 		}
7086 
7087 		inode = f->f_mapping->host;
7088 		if (!S_ISREG(inode->i_mode)) {
7089 			pr_warn("%s: error: bitmap file must be a regular file\n",
7090 				mdname(mddev));
7091 			err = -EBADF;
7092 		} else if (!(f->f_mode & FMODE_WRITE)) {
7093 			pr_warn("%s: error: bitmap file must be opened for write\n",
7094 				mdname(mddev));
7095 			err = -EBADF;
7096 		} else if (atomic_read(&inode->i_writecount) != 1) {
7097 			pr_warn("%s: error: bitmap file is already in use\n",
7098 				mdname(mddev));
7099 			err = -EBUSY;
7100 		}
7101 		if (err) {
7102 			fput(f);
7103 			return err;
7104 		}
7105 		mddev->bitmap_info.file = f;
7106 		mddev->bitmap_info.offset = 0; /* file overrides offset */
7107 	} else if (mddev->bitmap == NULL)
7108 		return -ENOENT; /* cannot remove what isn't there */
7109 	err = 0;
7110 	if (mddev->pers) {
7111 		if (fd >= 0) {
7112 			struct bitmap *bitmap;
7113 
7114 			bitmap = md_bitmap_create(mddev, -1);
7115 			mddev_suspend(mddev);
7116 			if (!IS_ERR(bitmap)) {
7117 				mddev->bitmap = bitmap;
7118 				err = md_bitmap_load(mddev);
7119 			} else
7120 				err = PTR_ERR(bitmap);
7121 			if (err) {
7122 				md_bitmap_destroy(mddev);
7123 				fd = -1;
7124 			}
7125 			mddev_resume(mddev);
7126 		} else if (fd < 0) {
7127 			mddev_suspend(mddev);
7128 			md_bitmap_destroy(mddev);
7129 			mddev_resume(mddev);
7130 		}
7131 	}
7132 	if (fd < 0) {
7133 		struct file *f = mddev->bitmap_info.file;
7134 		if (f) {
7135 			spin_lock(&mddev->lock);
7136 			mddev->bitmap_info.file = NULL;
7137 			spin_unlock(&mddev->lock);
7138 			fput(f);
7139 		}
7140 	}
7141 
7142 	return err;
7143 }
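
/*
 * Userspace sketch (assumes fd is open on /dev/md0 and bfd is a
 * read-write descriptor on a regular file): SET_BITMAP_FILE passes the
 * bitmap file descriptor, or -1 to remove a file-backed bitmap.
 *
 *   ioctl(fd, SET_BITMAP_FILE, bfd);
 *   ioctl(fd, SET_BITMAP_FILE, -1);
 */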
7144 
7145 /*
7146  * md_set_array_info is used in two different ways.
7147  * The original usage is when creating a new array.
7148  * In this usage, raid_disks is > 0 and it, together with
7149  *  level, size, not_persistent, layout, and chunksize, determines the
7150  *  shape of the array.
7151  *  This will always create an array with a type-0.90.0 superblock.
7152  * The newer usage is when assembling an array.
7153  *  In this case raid_disks will be 0, and the major_version field is
7154  *  used to determine which style super-blocks are to be found on the devices.
7155  *  The minor and patch _version numbers are also kept in case the
7156  *  super_block handler wishes to interpret them.
7157  */
7158 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
7159 {
7160 	if (info->raid_disks == 0) {
7161 		/* just setting version number for superblock loading */
7162 		if (info->major_version < 0 ||
7163 		    info->major_version >= ARRAY_SIZE(super_types) ||
7164 		    super_types[info->major_version].name == NULL) {
7165 			/* maybe try to auto-load a module? */
7166 			pr_warn("md: superblock version %d not known\n",
7167 				info->major_version);
7168 			return -EINVAL;
7169 		}
7170 		mddev->major_version = info->major_version;
7171 		mddev->minor_version = info->minor_version;
7172 		mddev->patch_version = info->patch_version;
7173 		mddev->persistent = !info->not_persistent;
7174 		/* ensure mddev_put doesn't delete this now that there
7175 		 * is some minimal configuration.
7176 		 */
7177 		mddev->ctime         = ktime_get_real_seconds();
7178 		return 0;
7179 	}
7180 	mddev->major_version = MD_MAJOR_VERSION;
7181 	mddev->minor_version = MD_MINOR_VERSION;
7182 	mddev->patch_version = MD_PATCHLEVEL_VERSION;
7183 	mddev->ctime         = ktime_get_real_seconds();
7184 
7185 	mddev->level         = info->level;
7186 	mddev->clevel[0]     = 0;
7187 	mddev->dev_sectors   = 2 * (sector_t)info->size;
7188 	mddev->raid_disks    = info->raid_disks;
7189 	/* don't set md_minor, it is determined by which /dev/md* was
7190 	 * opened.
7191 	 */
7192 	if (info->state & (1<<MD_SB_CLEAN))
7193 		mddev->recovery_cp = MaxSector;
7194 	else
7195 		mddev->recovery_cp = 0;
7196 	mddev->persistent    = !info->not_persistent;
7197 	mddev->external	     = 0;
7198 
7199 	mddev->layout        = info->layout;
7200 	if (mddev->level == 0)
7201 		/* Cannot trust RAID0 layout info here */
7202 		mddev->layout = -1;
7203 	mddev->chunk_sectors = info->chunk_size >> 9;
7204 
7205 	if (mddev->persistent) {
7206 		mddev->max_disks = MD_SB_DISKS;
7207 		mddev->flags = 0;
7208 		mddev->sb_flags = 0;
7209 	}
7210 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7211 
7212 	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
7213 	mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
7214 	mddev->bitmap_info.offset = 0;
7215 
7216 	mddev->reshape_position = MaxSector;
7217 
7218 	/*
7219 	 * Generate a 128 bit UUID
7220 	 */
7221 	get_random_bytes(mddev->uuid, 16);
7222 
7223 	mddev->new_level = mddev->level;
7224 	mddev->new_chunk_sectors = mddev->chunk_sectors;
7225 	mddev->new_layout = mddev->layout;
7226 	mddev->delta_disks = 0;
7227 	mddev->reshape_backwards = 0;
7228 
7229 	return 0;
7230 }
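
/*
 * Userspace sketch of the assembly-time usage described above (assumes
 * fd is open on a freshly created md device): raid_disks == 0 selects
 * "set the superblock version for loading" rather than "create".
 *
 *   mdu_array_info_t info = { 0 };
 *   info.major_version = 1;	(look for v1.x superblocks)
 *   ioctl(fd, SET_ARRAY_INFO, &info);
 */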
7231 
7232 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
7233 {
7234 	lockdep_assert_held(&mddev->reconfig_mutex);
7235 
7236 	if (mddev->external_size)
7237 		return;
7238 
7239 	mddev->array_sectors = array_sectors;
7240 }
7241 EXPORT_SYMBOL(md_set_array_sectors);
7242 
7243 static int update_size(struct mddev *mddev, sector_t num_sectors)
7244 {
7245 	struct md_rdev *rdev;
7246 	int rv;
7247 	int fit = (num_sectors == 0);
7248 	sector_t old_dev_sectors = mddev->dev_sectors;
7249 
7250 	if (mddev->pers->resize == NULL)
7251 		return -EINVAL;
7252 	/* The "num_sectors" is the number of sectors of each device that
7253 	 * is used.  This can only make sense for arrays with redundancy.
7254 	 * linear and raid0 always use whatever space is available. We can only
7255 	 * consider changing this number if no resync or reconstruction is
7256 	 * happening, and if the new size is acceptable. It must fit before the
7257 	 * sb_start or, if that is <data_offset, it must fit before the size
7258 	 * of each device.  If num_sectors is zero, we find the largest size
7259 	 * that fits.
7260 	 */
7261 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7262 	    mddev->sync_thread)
7263 		return -EBUSY;
7264 	if (!md_is_rdwr(mddev))
7265 		return -EROFS;
7266 
7267 	rdev_for_each(rdev, mddev) {
7268 		sector_t avail = rdev->sectors;
7269 
7270 		if (fit && (num_sectors == 0 || num_sectors > avail))
7271 			num_sectors = avail;
7272 		if (avail < num_sectors)
7273 			return -ENOSPC;
7274 	}
7275 	rv = mddev->pers->resize(mddev, num_sectors);
7276 	if (!rv) {
7277 		if (mddev_is_clustered(mddev))
7278 			md_cluster_ops->update_size(mddev, old_dev_sectors);
7279 		else if (mddev->queue) {
7280 			set_capacity_and_notify(mddev->gendisk,
7281 						mddev->array_sectors);
7282 		}
7283 	}
7284 	return rv;
7285 }
7286 
7287 static int update_raid_disks(struct mddev *mddev, int raid_disks)
7288 {
7289 	int rv;
7290 	struct md_rdev *rdev;
7291 	/* change the number of raid disks */
7292 	if (mddev->pers->check_reshape == NULL)
7293 		return -EINVAL;
7294 	if (!md_is_rdwr(mddev))
7295 		return -EROFS;
7296 	if (raid_disks <= 0 ||
7297 	    (mddev->max_disks && raid_disks >= mddev->max_disks))
7298 		return -EINVAL;
7299 	if (mddev->sync_thread ||
7300 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7301 	    test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
7302 	    mddev->reshape_position != MaxSector)
7303 		return -EBUSY;
7304 
7305 	rdev_for_each(rdev, mddev) {
7306 		if (mddev->raid_disks < raid_disks &&
7307 		    rdev->data_offset < rdev->new_data_offset)
7308 			return -EINVAL;
7309 		if (mddev->raid_disks > raid_disks &&
7310 		    rdev->data_offset > rdev->new_data_offset)
7311 			return -EINVAL;
7312 	}
7313 
7314 	mddev->delta_disks = raid_disks - mddev->raid_disks;
7315 	if (mddev->delta_disks < 0)
7316 		mddev->reshape_backwards = 1;
7317 	else if (mddev->delta_disks > 0)
7318 		mddev->reshape_backwards = 0;
7319 
7320 	rv = mddev->pers->check_reshape(mddev);
7321 	if (rv < 0) {
7322 		mddev->delta_disks = 0;
7323 		mddev->reshape_backwards = 0;
7324 	}
7325 	return rv;
7326 }
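/*
 * Example (illustrative): growing a 3-disk array to 4 disks above sets
 * delta_disks = +1 and reshape_backwards = 0; shrinking 4 -> 3 sets
 * delta_disks = -1 and reshape_backwards = 1.  In both cases the
 * personality's check_reshape() gets the final say, and the fields are
 * reset if it refuses.
 */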
7327 
7328 /*
7329  * update_array_info is used to change the configuration of an
7330  * on-line array.
7331  * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
7332  * fields in the info are checked against the array.
7333  * Any differences that cannot be handled will cause an error.
7334  * Normally, only one change can be managed at a time.
7335  */
7336 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
7337 {
7338 	int rv = 0;
7339 	int cnt = 0;
7340 	int state = 0;
7341 
7342 	/* calculate expected state, ignoring low bits */
7343 	if (mddev->bitmap && mddev->bitmap_info.offset)
7344 		state |= (1 << MD_SB_BITMAP_PRESENT);
7345 
7346 	if (mddev->major_version != info->major_version ||
7347 	    mddev->minor_version != info->minor_version ||
7348 /*	    mddev->patch_version != info->patch_version || */
7349 	    mddev->ctime         != info->ctime         ||
7350 	    mddev->level         != info->level         ||
7351 /*	    mddev->layout        != info->layout        || */
7352 	    mddev->persistent	 != !info->not_persistent ||
7353 	    mddev->chunk_sectors != info->chunk_size >> 9 ||
7354 	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7355 	    ((state^info->state) & 0xfffffe00)
7356 		)
7357 		return -EINVAL;
7358 	/* Check there is only one change */
7359 	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7360 		cnt++;
7361 	if (mddev->raid_disks != info->raid_disks)
7362 		cnt++;
7363 	if (mddev->layout != info->layout)
7364 		cnt++;
7365 	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7366 		cnt++;
7367 	if (cnt == 0)
7368 		return 0;
7369 	if (cnt > 1)
7370 		return -EINVAL;
7371 
7372 	if (mddev->layout != info->layout) {
7373 		/* Change layout
7374 		 * we don't need to do anything at the md level, the
7375 		 * personality will take care of it all.
7376 		 */
7377 		if (mddev->pers->check_reshape == NULL)
7378 			return -EINVAL;
7379 		else {
7380 			mddev->new_layout = info->layout;
7381 			rv = mddev->pers->check_reshape(mddev);
7382 			if (rv)
7383 				mddev->new_layout = mddev->layout;
7384 			return rv;
7385 		}
7386 	}
7387 	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7388 		rv = update_size(mddev, (sector_t)info->size * 2);
7389 
7390 	if (mddev->raid_disks    != info->raid_disks)
7391 		rv = update_raid_disks(mddev, info->raid_disks);
7392 
7393 	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
7394 		if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7395 			rv = -EINVAL;
7396 			goto err;
7397 		}
7398 		if (mddev->recovery || mddev->sync_thread) {
7399 			rv = -EBUSY;
7400 			goto err;
7401 		}
7402 		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
7403 			struct bitmap *bitmap;
7404 			/* add the bitmap */
7405 			if (mddev->bitmap) {
7406 				rv = -EEXIST;
7407 				goto err;
7408 			}
7409 			if (mddev->bitmap_info.default_offset == 0) {
7410 				rv = -EINVAL;
7411 				goto err;
7412 			}
7413 			mddev->bitmap_info.offset =
7414 				mddev->bitmap_info.default_offset;
7415 			mddev->bitmap_info.space =
7416 				mddev->bitmap_info.default_space;
7417 			bitmap = md_bitmap_create(mddev, -1);
7418 			mddev_suspend(mddev);
7419 			if (!IS_ERR(bitmap)) {
7420 				mddev->bitmap = bitmap;
7421 				rv = md_bitmap_load(mddev);
7422 			} else
7423 				rv = PTR_ERR(bitmap);
7424 			if (rv)
7425 				md_bitmap_destroy(mddev);
7426 			mddev_resume(mddev);
7427 		} else {
7428 			/* remove the bitmap */
7429 			if (!mddev->bitmap) {
7430 				rv = -ENOENT;
7431 				goto err;
7432 			}
7433 			if (mddev->bitmap->storage.file) {
7434 				rv = -EINVAL;
7435 				goto err;
7436 			}
7437 			if (mddev->bitmap_info.nodes) {
7438 			/* hold the PW lock on all the bitmaps */
7439 				if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
7440 					pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
7441 					rv = -EPERM;
7442 					md_cluster_ops->unlock_all_bitmaps(mddev);
7443 					goto err;
7444 				}
7445 
7446 				mddev->bitmap_info.nodes = 0;
7447 				md_cluster_ops->leave(mddev);
7448 				module_put(md_cluster_mod);
7449 				mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
7450 			}
7451 			mddev_suspend(mddev);
7452 			md_bitmap_destroy(mddev);
7453 			mddev_resume(mddev);
7454 			mddev->bitmap_info.offset = 0;
7455 		}
7456 	}
7457 	md_update_sb(mddev, 1);
7458 	return rv;
7459 err:
7460 	return rv;
7461 }
7462 
7463 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
7464 {
7465 	struct md_rdev *rdev;
7466 	int err = 0;
7467 
7468 	if (mddev->pers == NULL)
7469 		return -ENODEV;
7470 
7471 	rcu_read_lock();
7472 	rdev = md_find_rdev_rcu(mddev, dev);
7473 	if (!rdev)
7474 		err =  -ENODEV;
7475 	else {
7476 		md_error(mddev, rdev);
7477 		if (test_bit(MD_BROKEN, &mddev->flags))
7478 			err = -EBUSY;
7479 	}
7480 	rcu_read_unlock();
7481 	return err;
7482 }
7483 
7484 /*
7485  * We have a problem here: there is no easy way to give a CHS
7486  * virtual geometry. We currently pretend that we have 2 heads and
7487  * 4 sectors (with a BIG number of cylinders...). This drives
7488  * dosfs just mad... ;-)
7489  */
7490 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
7491 {
7492 	struct mddev *mddev = bdev->bd_disk->private_data;
7493 
7494 	geo->heads = 2;
7495 	geo->sectors = 4;
7496 	geo->cylinders = mddev->array_sectors / 8;
7497 	return 0;
7498 }
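/*
 * Worked example of the fake geometry above: a 1 GiB array has 2097152
 * 512-byte sectors, so we report 2097152 / 8 = 262144 cylinders, and
 * 2 heads * 4 sectors * 262144 cylinders * 512 bytes recovers the full
 * 1 GiB capacity.
 */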
7499 
7500 static inline bool md_ioctl_valid(unsigned int cmd)
7501 {
7502 	switch (cmd) {
7503 	case ADD_NEW_DISK:
7504 	case GET_ARRAY_INFO:
7505 	case GET_BITMAP_FILE:
7506 	case GET_DISK_INFO:
7507 	case HOT_ADD_DISK:
7508 	case HOT_REMOVE_DISK:
7509 	case RAID_VERSION:
7510 	case RESTART_ARRAY_RW:
7511 	case RUN_ARRAY:
7512 	case SET_ARRAY_INFO:
7513 	case SET_BITMAP_FILE:
7514 	case SET_DISK_FAULTY:
7515 	case STOP_ARRAY:
7516 	case STOP_ARRAY_RO:
7517 	case CLUSTERED_DISK_NACK:
7518 		return true;
7519 	default:
7520 		return false;
7521 	}
7522 }
7523 
7524 static int __md_set_array_info(struct mddev *mddev, void __user *argp)
7525 {
7526 	mdu_array_info_t info;
7527 	int err;
7528 
7529 	if (!argp)
7530 		memset(&info, 0, sizeof(info));
7531 	else if (copy_from_user(&info, argp, sizeof(info)))
7532 		return -EFAULT;
7533 
7534 	if (mddev->pers) {
7535 		err = update_array_info(mddev, &info);
7536 		if (err)
7537 			pr_warn("md: couldn't update array info. %d\n", err);
7538 		return err;
7539 	}
7540 
7541 	if (!list_empty(&mddev->disks)) {
7542 		pr_warn("md: array %s already has disks!\n", mdname(mddev));
7543 		return -EBUSY;
7544 	}
7545 
7546 	if (mddev->raid_disks) {
7547 		pr_warn("md: array %s already initialised!\n", mdname(mddev));
7548 		return -EBUSY;
7549 	}
7550 
7551 	err = md_set_array_info(mddev, &info);
7552 	if (err)
7553 		pr_warn("md: couldn't set array info. %d\n", err);
7554 
7555 	return err;
7556 }
7557 
7558 static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
7559 			unsigned int cmd, unsigned long arg)
7560 {
7561 	int err = 0;
7562 	void __user *argp = (void __user *)arg;
7563 	struct mddev *mddev = NULL;
7564 	bool did_set_md_closing = false;
7565 
7566 	if (!md_ioctl_valid(cmd))
7567 		return -ENOTTY;
7568 
7569 	switch (cmd) {
7570 	case RAID_VERSION:
7571 	case GET_ARRAY_INFO:
7572 	case GET_DISK_INFO:
7573 		break;
7574 	default:
7575 		if (!capable(CAP_SYS_ADMIN))
7576 			return -EACCES;
7577 	}
7578 
7579 	/*
7580 	 * Commands dealing with the RAID driver but not any
7581 	 * particular array:
7582 	 */
7583 	switch (cmd) {
7584 	case RAID_VERSION:
7585 		err = get_version(argp);
7586 		goto out;
7587 	default:;
7588 	}
7589 
7590 	/*
7591 	 * Commands creating/starting a new array:
7592 	 */
7593 
7594 	mddev = bdev->bd_disk->private_data;
7595 
7596 	if (!mddev) {
7597 		BUG();
7598 		goto out;
7599 	}
7600 
7601 	/* Some actions do not require the mutex */
7602 	switch (cmd) {
7603 	case GET_ARRAY_INFO:
7604 		if (!mddev->raid_disks && !mddev->external)
7605 			err = -ENODEV;
7606 		else
7607 			err = get_array_info(mddev, argp);
7608 		goto out;
7609 
7610 	case GET_DISK_INFO:
7611 		if (!mddev->raid_disks && !mddev->external)
7612 			err = -ENODEV;
7613 		else
7614 			err = get_disk_info(mddev, argp);
7615 		goto out;
7616 
7617 	case SET_DISK_FAULTY:
7618 		err = set_disk_faulty(mddev, new_decode_dev(arg));
7619 		goto out;
7620 
7621 	case GET_BITMAP_FILE:
7622 		err = get_bitmap_file(mddev, argp);
7623 		goto out;
7624 
7625 	}
7626 
7627 	if (cmd == HOT_REMOVE_DISK)
7628 		/* need to ensure recovery thread has run */
7629 		wait_event_interruptible_timeout(mddev->sb_wait,
7630 						 !test_bit(MD_RECOVERY_NEEDED,
7631 							   &mddev->recovery),
7632 						 msecs_to_jiffies(5000));
7633 	if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7634 		/* Need to flush page cache, and ensure no-one else opens
7635 		 * and writes
7636 		 */
7637 		mutex_lock(&mddev->open_mutex);
7638 		if (mddev->pers && atomic_read(&mddev->openers) > 1) {
7639 			mutex_unlock(&mddev->open_mutex);
7640 			err = -EBUSY;
7641 			goto out;
7642 		}
7643 		if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
7644 			mutex_unlock(&mddev->open_mutex);
7645 			err = -EBUSY;
7646 			goto out;
7647 		}
7648 		did_set_md_closing = true;
7649 		mutex_unlock(&mddev->open_mutex);
7650 		sync_blockdev(bdev);
7651 	}
7652 
7653 	if (!md_is_rdwr(mddev))
7654 		flush_work(&mddev->sync_work);
7655 
7656 	err = mddev_lock(mddev);
7657 	if (err) {
7658 		pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7659 			 err, cmd);
7660 		goto out;
7661 	}
7662 
7663 	if (cmd == SET_ARRAY_INFO) {
7664 		err = __md_set_array_info(mddev, argp);
7665 		goto unlock;
7666 	}
7667 
7668 	/*
7669 	 * Commands querying/configuring an existing array:
7670 	 */
7671 	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
7672 	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
7673 	if ((!mddev->raid_disks && !mddev->external)
7674 	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7675 	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7676 	    && cmd != GET_BITMAP_FILE) {
7677 		err = -ENODEV;
7678 		goto unlock;
7679 	}
7680 
7681 	/*
7682 	 * Commands even a read-only array can execute:
7683 	 */
7684 	switch (cmd) {
7685 	case RESTART_ARRAY_RW:
7686 		err = restart_array(mddev);
7687 		goto unlock;
7688 
7689 	case STOP_ARRAY:
7690 		err = do_md_stop(mddev, 0, bdev);
7691 		goto unlock;
7692 
7693 	case STOP_ARRAY_RO:
7694 		err = md_set_readonly(mddev, bdev);
7695 		goto unlock;
7696 
7697 	case HOT_REMOVE_DISK:
7698 		err = hot_remove_disk(mddev, new_decode_dev(arg));
7699 		goto unlock;
7700 
7701 	case ADD_NEW_DISK:
7702 		/* We can support ADD_NEW_DISK on read-only arrays
7703 		 * only if we are re-adding a preexisting device.
7704 		 * So require mddev->pers and MD_DISK_SYNC.
7705 		 */
7706 		if (mddev->pers) {
7707 			mdu_disk_info_t info;
7708 			if (copy_from_user(&info, argp, sizeof(info)))
7709 				err = -EFAULT;
7710 			else if (!(info.state & (1<<MD_DISK_SYNC)))
7711 				/* Need to clear read-only for this */
7712 				break;
7713 			else
7714 				err = md_add_new_disk(mddev, &info);
7715 			goto unlock;
7716 		}
7717 		break;
7718 	}
7719 
7720 	/*
7721 	 * The remaining ioctls are changing the state of the
7722 	 * superblock, so we do not allow them on read-only arrays.
7723 	 */
7724 	if (!md_is_rdwr(mddev) && mddev->pers) {
7725 		if (mddev->ro != MD_AUTO_READ) {
7726 			err = -EROFS;
7727 			goto unlock;
7728 		}
7729 		mddev->ro = MD_RDWR;
7730 		sysfs_notify_dirent_safe(mddev->sysfs_state);
7731 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7732 		/* mddev_unlock will wake thread */
7733 		/* If a device failed while we were read-only, we
7734 		 * need to make sure the metadata is updated now.
7735 		 */
7736 		if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
7737 			mddev_unlock(mddev);
7738 			wait_event(mddev->sb_wait,
7739 				   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7740 				   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7741 			mddev_lock_nointr(mddev);
7742 		}
7743 	}
7744 
7745 	switch (cmd) {
7746 	case ADD_NEW_DISK:
7747 	{
7748 		mdu_disk_info_t info;
7749 		if (copy_from_user(&info, argp, sizeof(info)))
7750 			err = -EFAULT;
7751 		else
7752 			err = md_add_new_disk(mddev, &info);
7753 		goto unlock;
7754 	}
7755 
7756 	case CLUSTERED_DISK_NACK:
7757 		if (mddev_is_clustered(mddev))
7758 			md_cluster_ops->new_disk_ack(mddev, false);
7759 		else
7760 			err = -EINVAL;
7761 		goto unlock;
7762 
7763 	case HOT_ADD_DISK:
7764 		err = hot_add_disk(mddev, new_decode_dev(arg));
7765 		goto unlock;
7766 
7767 	case RUN_ARRAY:
7768 		err = do_md_run(mddev);
7769 		goto unlock;
7770 
7771 	case SET_BITMAP_FILE:
7772 		err = set_bitmap_file(mddev, (int)arg);
7773 		goto unlock;
7774 
7775 	default:
7776 		err = -EINVAL;
7777 		goto unlock;
7778 	}
7779 
7780 unlock:
7781 	if (mddev->hold_active == UNTIL_IOCTL &&
7782 	    err != -EINVAL)
7783 		mddev->hold_active = 0;
7784 	mddev_unlock(mddev);
7785 out:
7786 	if (did_set_md_closing)
7787 		clear_bit(MD_CLOSING, &mddev->flags);
7788 	return err;
7789 }
7790 #ifdef CONFIG_COMPAT
7791 static int md_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
7792 		    unsigned int cmd, unsigned long arg)
7793 {
7794 	switch (cmd) {
7795 	case HOT_REMOVE_DISK:
7796 	case HOT_ADD_DISK:
7797 	case SET_DISK_FAULTY:
7798 	case SET_BITMAP_FILE:
7799 		/* These take an integer arg, do not convert */
7800 		break;
7801 	default:
7802 		arg = (unsigned long)compat_ptr(arg);
7803 		break;
7804 	}
7805 
7806 	return md_ioctl(bdev, mode, cmd, arg);
7807 }
7808 #endif /* CONFIG_COMPAT */
7809 
7810 static int md_set_read_only(struct block_device *bdev, bool ro)
7811 {
7812 	struct mddev *mddev = bdev->bd_disk->private_data;
7813 	int err;
7814 
7815 	err = mddev_lock(mddev);
7816 	if (err)
7817 		return err;
7818 
7819 	if (!mddev->raid_disks && !mddev->external) {
7820 		err = -ENODEV;
7821 		goto out_unlock;
7822 	}
7823 
7824 	/*
7825 	 * Transitioning to read-auto need only happen for arrays that call
7826 	 * md_write_start and which are not ready for writes yet.
7827 	 */
7828 	if (!ro && mddev->ro == MD_RDONLY && mddev->pers) {
7829 		err = restart_array(mddev);
7830 		if (err)
7831 			goto out_unlock;
7832 		mddev->ro = MD_AUTO_READ;
7833 	}
7834 
7835 out_unlock:
7836 	mddev_unlock(mddev);
7837 	return err;
7838 }
7839 
7840 static int md_open(struct gendisk *disk, blk_mode_t mode)
7841 {
7842 	struct mddev *mddev;
7843 	int err;
7844 
7845 	spin_lock(&all_mddevs_lock);
7846 	mddev = mddev_get(disk->private_data);
7847 	spin_unlock(&all_mddevs_lock);
7848 	if (!mddev)
7849 		return -ENODEV;
7850 
7851 	err = mutex_lock_interruptible(&mddev->open_mutex);
7852 	if (err)
7853 		goto out;
7854 
7855 	err = -ENODEV;
7856 	if (test_bit(MD_CLOSING, &mddev->flags))
7857 		goto out_unlock;
7858 
7859 	atomic_inc(&mddev->openers);
7860 	mutex_unlock(&mddev->open_mutex);
7861 
7862 	disk_check_media_change(disk);
7863 	return 0;
7864 
7865 out_unlock:
7866 	mutex_unlock(&mddev->open_mutex);
7867 out:
7868 	mddev_put(mddev);
7869 	return err;
7870 }
7871 
7872 static void md_release(struct gendisk *disk)
7873 {
7874 	struct mddev *mddev = disk->private_data;
7875 
7876 	BUG_ON(!mddev);
7877 	atomic_dec(&mddev->openers);
7878 	mddev_put(mddev);
7879 }
7880 
7881 static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
7882 {
7883 	struct mddev *mddev = disk->private_data;
7884 	unsigned int ret = 0;
7885 
7886 	if (mddev->changed)
7887 		ret = DISK_EVENT_MEDIA_CHANGE;
7888 	mddev->changed = 0;
7889 	return ret;
7890 }
7891 
7892 static void md_free_disk(struct gendisk *disk)
7893 {
7894 	struct mddev *mddev = disk->private_data;
7895 
7896 	mddev_free(mddev);
7897 }
7898 
7899 const struct block_device_operations md_fops =
7900 {
7901 	.owner		= THIS_MODULE,
7902 	.submit_bio	= md_submit_bio,
7903 	.open		= md_open,
7904 	.release	= md_release,
7905 	.ioctl		= md_ioctl,
7906 #ifdef CONFIG_COMPAT
7907 	.compat_ioctl	= md_compat_ioctl,
7908 #endif
7909 	.getgeo		= md_getgeo,
7910 	.check_events	= md_check_events,
7911 	.set_read_only	= md_set_read_only,
7912 	.free_disk	= md_free_disk,
7913 };
7914 
7915 static int md_thread(void *arg)
7916 {
7917 	struct md_thread *thread = arg;
7918 
7919 	/*
7920 	 * md_thread is a 'system-thread', its priority should be very
7921 	 * high. We avoid resource deadlocks individually in each
7922 	 * raid personality. (RAID5 does preallocation) We also use RR and
7923 	 * the very same RT priority as kswapd, thus we will never get
7924 	 * into a priority inversion deadlock.
7925 	 *
7926 	 * we definitely have to have equal or higher priority than
7927 	 * bdflush, otherwise bdflush will deadlock if there are too
7928 	 * many dirty RAID5 blocks.
7929 	 */
7930 
7931 	allow_signal(SIGKILL);
7932 	while (!kthread_should_stop()) {
7933 
7934 		/* We need to wait INTERRUPTIBLE so that
7935 		 * we don't add to the load-average.
7936 		 * That means we need to be sure no signals are
7937 		 * pending
7938 		 */
7939 		if (signal_pending(current))
7940 			flush_signals(current);
7941 
7942 		wait_event_interruptible_timeout
7943 			(thread->wqueue,
7944 			 test_bit(THREAD_WAKEUP, &thread->flags)
7945 			 || kthread_should_stop() || kthread_should_park(),
7946 			 thread->timeout);
7947 
7948 		clear_bit(THREAD_WAKEUP, &thread->flags);
7949 		if (kthread_should_park())
7950 			kthread_parkme();
7951 		if (!kthread_should_stop())
7952 			thread->run(thread);
7953 	}
7954 
7955 	return 0;
7956 }
7957 
7958 static void md_wakeup_thread_directly(struct md_thread __rcu *thread)
7959 {
7960 	struct md_thread *t;
7961 
7962 	rcu_read_lock();
7963 	t = rcu_dereference(thread);
7964 	if (t)
7965 		wake_up_process(t->tsk);
7966 	rcu_read_unlock();
7967 }
7968 
7969 void md_wakeup_thread(struct md_thread __rcu *thread)
7970 {
7971 	struct md_thread *t;
7972 
7973 	rcu_read_lock();
7974 	t = rcu_dereference(thread);
7975 	if (t) {
7976 		pr_debug("md: waking up MD thread %s.\n", t->tsk->comm);
7977 		set_bit(THREAD_WAKEUP, &t->flags);
7978 		wake_up(&t->wqueue);
7979 	}
7980 	rcu_read_unlock();
7981 }
7982 EXPORT_SYMBOL(md_wakeup_thread);
7983 
7984 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7985 		struct mddev *mddev, const char *name)
7986 {
7987 	struct md_thread *thread;
7988 
7989 	thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7990 	if (!thread)
7991 		return NULL;
7992 
7993 	init_waitqueue_head(&thread->wqueue);
7994 
7995 	thread->run = run;
7996 	thread->mddev = mddev;
7997 	thread->timeout = MAX_SCHEDULE_TIMEOUT;
7998 	thread->tsk = kthread_run(md_thread, thread,
7999 				  "%s_%s",
8000 				  mdname(thread->mddev),
8001 				  name);
8002 	if (IS_ERR(thread->tsk)) {
8003 		kfree(thread);
8004 		return NULL;
8005 	}
8006 	return thread;
8007 }
8008 EXPORT_SYMBOL(md_register_thread);
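/*
 * Illustrative usage sketch (hypothetical personality code, not part of
 * this file): a personality registers its thread at start-up, pokes it
 * with md_wakeup_thread() from completion context, and tears it down
 * with the reconfig mutex held:
 *
 *	thread = md_register_thread(myraid_do_work, mddev, "myraid");
 *	if (!thread)
 *		return -ENOMEM;
 *	rcu_assign_pointer(conf->thread, thread);
 *	...
 *	md_wakeup_thread(conf->thread);
 *	...
 *	md_unregister_thread(mddev, &conf->thread);
 */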
8009 
8010 void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp)
8011 {
8012 	struct md_thread *thread = rcu_dereference_protected(*threadp,
8013 					lockdep_is_held(&mddev->reconfig_mutex));
8014 
8015 	if (!thread)
8016 		return;
8017 
8018 	rcu_assign_pointer(*threadp, NULL);
8019 	synchronize_rcu();
8020 
8021 	pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
8022 	kthread_stop(thread->tsk);
8023 	kfree(thread);
8024 }
8025 EXPORT_SYMBOL(md_unregister_thread);
8026 
8027 void md_error(struct mddev *mddev, struct md_rdev *rdev)
8028 {
8029 	if (!rdev || test_bit(Faulty, &rdev->flags))
8030 		return;
8031 
8032 	if (!mddev->pers || !mddev->pers->error_handler)
8033 		return;
8034 	mddev->pers->error_handler(mddev, rdev);
8035 
8036 	if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR)
8037 		return;
8038 
8039 	if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
8040 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8041 	sysfs_notify_dirent_safe(rdev->sysfs_state);
8042 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8043 	if (!test_bit(MD_BROKEN, &mddev->flags)) {
8044 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8045 		md_wakeup_thread(mddev->thread);
8046 	}
8047 	if (mddev->event_work.func)
8048 		queue_work(md_misc_wq, &mddev->event_work);
8049 	md_new_event();
8050 }
8051 EXPORT_SYMBOL(md_error);
8052 
8053 /* seq_file implementation /proc/mdstat */
8054 
8055 static void status_unused(struct seq_file *seq)
8056 {
8057 	int i = 0;
8058 	struct md_rdev *rdev;
8059 
8060 	seq_printf(seq, "unused devices: ");
8061 
8062 	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
8063 		i++;
8064 		seq_printf(seq, "%pg ", rdev->bdev);
8065 	}
8066 	if (!i)
8067 		seq_printf(seq, "<none>");
8068 
8069 	seq_printf(seq, "\n");
8070 }
8071 
8072 static int status_resync(struct seq_file *seq, struct mddev *mddev)
8073 {
8074 	sector_t max_sectors, resync, res;
8075 	unsigned long dt, db = 0;
8076 	sector_t rt, curr_mark_cnt, resync_mark_cnt;
8077 	int scale, recovery_active;
8078 	unsigned int per_milli;
8079 
8080 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8081 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8082 		max_sectors = mddev->resync_max_sectors;
8083 	else
8084 		max_sectors = mddev->dev_sectors;
8085 
8086 	resync = mddev->curr_resync;
8087 	if (resync < MD_RESYNC_ACTIVE) {
8088 		if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8089 			/* Still cleaning up */
8090 			resync = max_sectors;
8091 	} else if (resync > max_sectors) {
8092 		resync = max_sectors;
8093 	} else {
8094 		res = atomic_read(&mddev->recovery_active);
8095 		/*
8096 		 * Resync has started, but the subtraction has overflowed or
8097 		 * yielded one of the special values. Force it to active to
8098 		 * ensure the status reports an active resync.
8099 		 */
8100 		if (resync < res || resync - res < MD_RESYNC_ACTIVE)
8101 			resync = MD_RESYNC_ACTIVE;
8102 		else
8103 			resync -= res;
8104 	}
8105 
8106 	if (resync == MD_RESYNC_NONE) {
8107 		if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
8108 			struct md_rdev *rdev;
8109 
8110 			rdev_for_each(rdev, mddev)
8111 				if (rdev->raid_disk >= 0 &&
8112 				    !test_bit(Faulty, &rdev->flags) &&
8113 				    rdev->recovery_offset != MaxSector &&
8114 				    rdev->recovery_offset) {
8115 					seq_printf(seq, "\trecover=REMOTE");
8116 					return 1;
8117 				}
8118 			if (mddev->reshape_position != MaxSector)
8119 				seq_printf(seq, "\treshape=REMOTE");
8120 			else
8121 				seq_printf(seq, "\tresync=REMOTE");
8122 			return 1;
8123 		}
8124 		if (mddev->recovery_cp < MaxSector) {
8125 			seq_printf(seq, "\tresync=PENDING");
8126 			return 1;
8127 		}
8128 		return 0;
8129 	}
8130 	if (resync < MD_RESYNC_ACTIVE) {
8131 		seq_printf(seq, "\tresync=DELAYED");
8132 		return 1;
8133 	}
8134 
8135 	WARN_ON(max_sectors == 0);
8136 	/* Pick 'scale' such that (resync>>scale)*1000 will fit
8137 	 * in a sector_t, and (max_sectors>>scale) will fit in a
8138 	 * u32, as those are the requirements for sector_div.
8139 	 * Thus 'scale' must be at least 10
8140 	 */
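	/*
	 * Worked example (illustrative): with max_sectors = 2^40 and
	 * resync = 2^39 (half done), scale stays at 10, so
	 * res = (2^39 >> 10) * 1000 / ((2^40 >> 10) + 1), which is
	 * roughly 500, i.e. per_milli reports about 50.0%.
	 */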
8141 	scale = 10;
8142 	if (sizeof(sector_t) > sizeof(unsigned long)) {
8143 		while ( max_sectors/2 > (1ULL<<(scale+32)))
8144 			scale++;
8145 	}
8146 	res = (resync>>scale)*1000;
8147 	sector_div(res, (u32)((max_sectors>>scale)+1));
8148 
8149 	per_milli = res;
8150 	{
8151 		int i, x = per_milli/50, y = 20-x;
8152 		seq_printf(seq, "[");
8153 		for (i = 0; i < x; i++)
8154 			seq_printf(seq, "=");
8155 		seq_printf(seq, ">");
8156 		for (i = 0; i < y; i++)
8157 			seq_printf(seq, ".");
8158 		seq_printf(seq, "] ");
8159 	}
8160 	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
8161 		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8162 		    "reshape" :
8163 		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8164 		     "check" :
8165 		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8166 		      "resync" : "recovery"))),
8167 		   per_milli/10, per_milli % 10,
8168 		   (unsigned long long) resync/2,
8169 		   (unsigned long long) max_sectors/2);
8170 
8171 	/*
8172 	 * dt: time from mark until now
8173 	 * db: blocks written from mark until now
8174 	 * rt: remaining time
8175 	 *
8176 	 * rt is a sector_t, which is always 64bit now. We are keeping
8177 	 * the original algorithm, but it is not really necessary.
8178 	 *
8179 	 * Original algorithm:
8180 	 *   So we divide before multiply in case it is 32bit and close
8181 	 *   to the limit.
8182 	 *   We scale the divisor (db) by 32 to avoid losing precision
8183 	 *   near the end of resync when the number of remaining sectors
8184 	 *   is close to 'db'.
8185 	 *   We then divide rt by 32 after multiplying by db to compensate.
8186 	 *   The '+1' avoids division by zero if db is very small.
8187 	 */
8188 	dt = ((jiffies - mddev->resync_mark) / HZ);
8189 	if (!dt) dt++;
8190 
8191 	curr_mark_cnt = mddev->curr_mark_cnt;
8192 	recovery_active = atomic_read(&mddev->recovery_active);
8193 	resync_mark_cnt = mddev->resync_mark_cnt;
8194 
8195 	if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8196 		db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
8197 
8198 	rt = max_sectors - resync;    /* number of remaining sectors */
8199 	rt = div64_u64(rt, db/32+1);
8200 	rt *= dt;
8201 	rt >>= 5;
8202 
8203 	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
8204 		   ((unsigned long)rt % 60)/6);
8205 
8206 	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
8207 	return 1;
8208 }
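/*
 * Taken together, the seq_printf() calls above produce the familiar
 * /proc/mdstat progress line, e.g. (illustrative values, emitted as a
 * single line but wrapped here):
 *
 *	[=========>...........]  resync = 45.2% (123456/273443)
 *		finish=0.7min speed=98765K/sec
 */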
8209 
8210 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
8211 {
8212 	struct list_head *tmp;
8213 	loff_t l = *pos;
8214 	struct mddev *mddev;
8215 
8216 	if (l == 0x10000) {
8217 		++*pos;
8218 		return (void *)2;
8219 	}
8220 	if (l > 0x10000)
8221 		return NULL;
8222 	if (!l--)
8223 		/* header */
8224 		return (void*)1;
8225 
8226 	spin_lock(&all_mddevs_lock);
8227 	list_for_each(tmp,&all_mddevs)
8228 		if (!l--) {
8229 			mddev = list_entry(tmp, struct mddev, all_mddevs);
8230 			if (!mddev_get(mddev))
8231 				continue;
8232 			spin_unlock(&all_mddevs_lock);
8233 			return mddev;
8234 		}
8235 	spin_unlock(&all_mddevs_lock);
8236 	if (!l--)
8237 		return (void*)2;/* tail */
8238 	return NULL;
8239 }
8240 
8241 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8242 {
8243 	struct list_head *tmp;
8244 	struct mddev *next_mddev, *mddev = v;
8245 	struct mddev *to_put = NULL;
8246 
8247 	++*pos;
8248 	if (v == (void*)2)
8249 		return NULL;
8250 
8251 	spin_lock(&all_mddevs_lock);
8252 	if (v == (void*)1) {
8253 		tmp = all_mddevs.next;
8254 	} else {
8255 		to_put = mddev;
8256 		tmp = mddev->all_mddevs.next;
8257 	}
8258 
8259 	for (;;) {
8260 		if (tmp == &all_mddevs) {
8261 			next_mddev = (void*)2;
8262 			*pos = 0x10000;
8263 			break;
8264 		}
8265 		next_mddev = list_entry(tmp, struct mddev, all_mddevs);
8266 		if (mddev_get(next_mddev))
8267 			break;
8268 		mddev = next_mddev;
8269 		tmp = mddev->all_mddevs.next;
8270 	}
8271 	spin_unlock(&all_mddevs_lock);
8272 
8273 	if (to_put)
8274 		mddev_put(to_put);
8275 	return next_mddev;
8276 
8277 }
8278 
8279 static void md_seq_stop(struct seq_file *seq, void *v)
8280 {
8281 	struct mddev *mddev = v;
8282 
8283 	if (mddev && v != (void*)1 && v != (void*)2)
8284 		mddev_put(mddev);
8285 }
8286 
8287 static int md_seq_show(struct seq_file *seq, void *v)
8288 {
8289 	struct mddev *mddev = v;
8290 	sector_t sectors;
8291 	struct md_rdev *rdev;
8292 
8293 	if (v == (void*)1) {
8294 		struct md_personality *pers;
8295 		seq_printf(seq, "Personalities : ");
8296 		spin_lock(&pers_lock);
8297 		list_for_each_entry(pers, &pers_list, list)
8298 			seq_printf(seq, "[%s] ", pers->name);
8299 
8300 		spin_unlock(&pers_lock);
8301 		seq_printf(seq, "\n");
8302 		seq->poll_event = atomic_read(&md_event_count);
8303 		return 0;
8304 	}
8305 	if (v == (void*)2) {
8306 		status_unused(seq);
8307 		return 0;
8308 	}
8309 
8310 	spin_lock(&mddev->lock);
8311 	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8312 		seq_printf(seq, "%s : %sactive", mdname(mddev),
8313 						mddev->pers ? "" : "in");
8314 		if (mddev->pers) {
8315 			if (mddev->ro == MD_RDONLY)
8316 				seq_printf(seq, " (read-only)");
8317 			if (mddev->ro == MD_AUTO_READ)
8318 				seq_printf(seq, " (auto-read-only)");
8319 			seq_printf(seq, " %s", mddev->pers->name);
8320 		}
8321 
8322 		sectors = 0;
8323 		rcu_read_lock();
8324 		rdev_for_each_rcu(rdev, mddev) {
8325 			seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr);
8326 
8327 			if (test_bit(WriteMostly, &rdev->flags))
8328 				seq_printf(seq, "(W)");
8329 			if (test_bit(Journal, &rdev->flags))
8330 				seq_printf(seq, "(J)");
8331 			if (test_bit(Faulty, &rdev->flags)) {
8332 				seq_printf(seq, "(F)");
8333 				continue;
8334 			}
8335 			if (rdev->raid_disk < 0)
8336 				seq_printf(seq, "(S)"); /* spare */
8337 			if (test_bit(Replacement, &rdev->flags))
8338 				seq_printf(seq, "(R)");
8339 			sectors += rdev->sectors;
8340 		}
8341 		rcu_read_unlock();
8342 
8343 		if (!list_empty(&mddev->disks)) {
8344 			if (mddev->pers)
8345 				seq_printf(seq, "\n      %llu blocks",
8346 					   (unsigned long long)
8347 					   mddev->array_sectors / 2);
8348 			else
8349 				seq_printf(seq, "\n      %llu blocks",
8350 					   (unsigned long long)sectors / 2);
8351 		}
8352 		if (mddev->persistent) {
8353 			if (mddev->major_version != 0 ||
8354 			    mddev->minor_version != 90) {
8355 				seq_printf(seq," super %d.%d",
8356 					   mddev->major_version,
8357 					   mddev->minor_version);
8358 			}
8359 		} else if (mddev->external)
8360 			seq_printf(seq, " super external:%s",
8361 				   mddev->metadata_type);
8362 		else
8363 			seq_printf(seq, " super non-persistent");
8364 
8365 		if (mddev->pers) {
8366 			mddev->pers->status(seq, mddev);
8367 			seq_printf(seq, "\n      ");
8368 			if (mddev->pers->sync_request) {
8369 				if (status_resync(seq, mddev))
8370 					seq_printf(seq, "\n      ");
8371 			}
8372 		} else
8373 			seq_printf(seq, "\n       ");
8374 
8375 		md_bitmap_status(seq, mddev->bitmap);
8376 
8377 		seq_printf(seq, "\n");
8378 	}
8379 	spin_unlock(&mddev->lock);
8380 
8381 	return 0;
8382 }
8383 
8384 static const struct seq_operations md_seq_ops = {
8385 	.start  = md_seq_start,
8386 	.next   = md_seq_next,
8387 	.stop   = md_seq_stop,
8388 	.show   = md_seq_show,
8389 };
8390 
8391 static int md_seq_open(struct inode *inode, struct file *file)
8392 {
8393 	struct seq_file *seq;
8394 	int error;
8395 
8396 	error = seq_open(file, &md_seq_ops);
8397 	if (error)
8398 		return error;
8399 
8400 	seq = file->private_data;
8401 	seq->poll_event = atomic_read(&md_event_count);
8402 	return error;
8403 }
8404 
8405 static int md_unloading;
8406 static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
8407 {
8408 	struct seq_file *seq = filp->private_data;
8409 	__poll_t mask;
8410 
8411 	if (md_unloading)
8412 		return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
8413 	poll_wait(filp, &md_event_waiters, wait);
8414 
8415 	/* always allow read */
8416 	mask = EPOLLIN | EPOLLRDNORM;
8417 
8418 	if (seq->poll_event != atomic_read(&md_event_count))
8419 		mask |= EPOLLERR | EPOLLPRI;
8420 	return mask;
8421 }
8422 
8423 static const struct proc_ops mdstat_proc_ops = {
8424 	.proc_open	= md_seq_open,
8425 	.proc_read	= seq_read,
8426 	.proc_lseek	= seq_lseek,
8427 	.proc_release	= seq_release,
8428 	.proc_poll	= mdstat_poll,
8429 };
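/*
 * Illustrative sketch of the user-space side (hypothetical monitor code):
 * tools block in poll() on /proc/mdstat and are woken with
 * POLLERR|POLLPRI whenever md_new_event() bumps md_event_count:
 *
 *	struct pollfd pfd = { .fd = mdstat_fd, .events = POLLPRI };
 *
 *	poll(&pfd, 1, -1);	// returns when an array event occurs
 */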
8430 
8431 int register_md_personality(struct md_personality *p)
8432 {
8433 	pr_debug("md: %s personality registered for level %d\n",
8434 		 p->name, p->level);
8435 	spin_lock(&pers_lock);
8436 	list_add_tail(&p->list, &pers_list);
8437 	spin_unlock(&pers_lock);
8438 	return 0;
8439 }
8440 EXPORT_SYMBOL(register_md_personality);
8441 
8442 int unregister_md_personality(struct md_personality *p)
8443 {
8444 	pr_debug("md: %s personality unregistered\n", p->name);
8445 	spin_lock(&pers_lock);
8446 	list_del_init(&p->list);
8447 	spin_unlock(&pers_lock);
8448 	return 0;
8449 }
8450 EXPORT_SYMBOL(unregister_md_personality);
8451 
8452 int register_md_cluster_operations(struct md_cluster_operations *ops,
8453 				   struct module *module)
8454 {
8455 	int ret = 0;
8456 	spin_lock(&pers_lock);
8457 	if (md_cluster_ops != NULL)
8458 		ret = -EALREADY;
8459 	else {
8460 		md_cluster_ops = ops;
8461 		md_cluster_mod = module;
8462 	}
8463 	spin_unlock(&pers_lock);
8464 	return ret;
8465 }
8466 EXPORT_SYMBOL(register_md_cluster_operations);
8467 
8468 int unregister_md_cluster_operations(void)
8469 {
8470 	spin_lock(&pers_lock);
8471 	md_cluster_ops = NULL;
8472 	spin_unlock(&pers_lock);
8473 	return 0;
8474 }
8475 EXPORT_SYMBOL(unregister_md_cluster_operations);
8476 
8477 int md_setup_cluster(struct mddev *mddev, int nodes)
8478 {
8479 	int ret;
8480 	if (!md_cluster_ops)
8481 		request_module("md-cluster");
8482 	spin_lock(&pers_lock);
8483 	/* ensure module won't be unloaded */
8484 	if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
8485 		pr_warn("can't find md-cluster module or get its reference.\n");
8486 		spin_unlock(&pers_lock);
8487 		return -ENOENT;
8488 	}
8489 	spin_unlock(&pers_lock);
8490 
8491 	ret = md_cluster_ops->join(mddev, nodes);
8492 	if (!ret)
8493 		mddev->safemode_delay = 0;
8494 	return ret;
8495 }
8496 
8497 void md_cluster_stop(struct mddev *mddev)
8498 {
8499 	if (!md_cluster_ops)
8500 		return;
8501 	md_cluster_ops->leave(mddev);
8502 	module_put(md_cluster_mod);
8503 }
8504 
8505 static int is_mddev_idle(struct mddev *mddev, int init)
8506 {
8507 	struct md_rdev *rdev;
8508 	int idle;
8509 	int curr_events;
8510 
8511 	idle = 1;
8512 	rcu_read_lock();
8513 	rdev_for_each_rcu(rdev, mddev) {
8514 		struct gendisk *disk = rdev->bdev->bd_disk;
8515 		curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
8516 			      atomic_read(&disk->sync_io);
8517 		/* sync IO will cause sync_io to increase before the disk_stats
8518 		 * as sync_io is counted when a request starts, and
8519 		 * disk_stats is counted when it completes.
8520 		 * So resync activity will cause curr_events to be smaller than
8521 		 * when there was no such activity.
8522 		 * non-sync IO will cause disk_stat to increase without
8523 		 * increasing sync_io so curr_events will (eventually)
8524 		 * be larger than it was before.  Once it becomes
8525 		 * substantially larger, the test below will cause
8526 		 * the array to appear non-idle, and resync will slow
8527 		 * down.
8528 		 * If there is a lot of outstanding resync activity when
8529 		 * we set last_event to curr_events, then all that activity
8530 		 * completing might cause the array to appear non-idle
8531 		 * and resync will be slowed down even though there might
8532 		 * not have been non-resync activity.  This will only
8533 		 * happen once though.  'last_events' will soon reflect
8534 		 * the state where there is little or no outstanding
8535 		 * resync requests, and further resync activity will
8536 		 * always make curr_events less than last_events.
8537 		 *
8538 		 */
8539 		if (init || curr_events - rdev->last_events > 64) {
8540 			rdev->last_events = curr_events;
8541 			idle = 0;
8542 		}
8543 	}
8544 	rcu_read_unlock();
8545 	return idle;
8546 }
8547 
8548 void md_done_sync(struct mddev *mddev, int blocks, int ok)
8549 {
8550 	/* another "blocks" (512-byte) blocks have been synced */
8551 	atomic_sub(blocks, &mddev->recovery_active);
8552 	wake_up(&mddev->recovery_wait);
8553 	if (!ok) {
8554 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8555 		set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
8556 		md_wakeup_thread(mddev->thread);
8557 		// stop recovery, signal do_sync ....
8558 	}
8559 }
8560 EXPORT_SYMBOL(md_done_sync);
8561 
8562 /* md_write_start(mddev, bi)
8563  * If we need to update some array metadata (e.g. 'active' flag
8564  * in superblock) before writing, schedule a superblock update
8565  * and wait for it to complete.
8566  * A return value of 'false' means that the write wasn't recorded
8567  * and cannot proceed as the array is being suspended.
8568  */
8569 bool md_write_start(struct mddev *mddev, struct bio *bi)
8570 {
8571 	int did_change = 0;
8572 
8573 	if (bio_data_dir(bi) != WRITE)
8574 		return true;
8575 
8576 	BUG_ON(mddev->ro == MD_RDONLY);
8577 	if (mddev->ro == MD_AUTO_READ) {
8578 		/* need to switch to read/write */
8579 		flush_work(&mddev->sync_work);
8580 		mddev->ro = MD_RDWR;
8581 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8582 		md_wakeup_thread(mddev->thread);
8583 		md_wakeup_thread(mddev->sync_thread);
8584 		did_change = 1;
8585 	}
8586 	rcu_read_lock();
8587 	percpu_ref_get(&mddev->writes_pending);
8588 	smp_mb(); /* Match smp_mb in set_in_sync() */
8589 	if (mddev->safemode == 1)
8590 		mddev->safemode = 0;
8591 	/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
8592 	if (mddev->in_sync || mddev->sync_checkers) {
8593 		spin_lock(&mddev->lock);
8594 		if (mddev->in_sync) {
8595 			mddev->in_sync = 0;
8596 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8597 			set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8598 			md_wakeup_thread(mddev->thread);
8599 			did_change = 1;
8600 		}
8601 		spin_unlock(&mddev->lock);
8602 	}
8603 	rcu_read_unlock();
8604 	if (did_change)
8605 		sysfs_notify_dirent_safe(mddev->sysfs_state);
8606 	if (!mddev->has_superblocks)
8607 		return true;
8608 	wait_event(mddev->sb_wait,
8609 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
8610 		   is_md_suspended(mddev));
8611 	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
8612 		percpu_ref_put(&mddev->writes_pending);
8613 		return false;
8614 	}
8615 	return true;
8616 }
8617 EXPORT_SYMBOL(md_write_start);
8618 
8619 /* md_write_inc can only be called when md_write_start() has
8620  * already been called at least once for the current request.
8621  * It increments the counter and is useful when a single request
8622  * is split into several parts.  Each part causes an increment and
8623  * so needs a matching md_write_end().
8624  * Unlike md_write_start(), it is safe to call md_write_inc() inside
8625  * a spinlocked region.
8626  */
8627 void md_write_inc(struct mddev *mddev, struct bio *bi)
8628 {
8629 	if (bio_data_dir(bi) != WRITE)
8630 		return;
8631 	WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev));
8632 	percpu_ref_get(&mddev->writes_pending);
8633 }
8634 EXPORT_SYMBOL(md_write_inc);
8635 
8636 void md_write_end(struct mddev *mddev)
8637 {
8638 	percpu_ref_put(&mddev->writes_pending);
8639 
8640 	if (mddev->safemode == 2)
8641 		md_wakeup_thread(mddev->thread);
8642 	else if (mddev->safemode_delay)
8643 		/* The roundup() ensures this only performs locking once
8644 		 * every ->safemode_delay jiffies
8645 		 */
8646 		mod_timer(&mddev->safemode_timer,
8647 			  roundup(jiffies, mddev->safemode_delay) +
8648 			  mddev->safemode_delay);
8649 }
8650 
8651 EXPORT_SYMBOL(md_write_end);
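/*
 * Illustrative write-path sketch (hypothetical personality code): every
 * write bio is bracketed by md_write_start()/md_write_end(), with an
 * extra md_write_inc()/md_write_end() pair for each additional part a
 * request is split into:
 *
 *	if (!md_write_start(mddev, bio))
 *		return;		// array is suspending, write not recorded
 *	... issue the write to the member devices ...
 *	md_write_end(mddev);	// once the write has completed
 */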
8652 
8653 /* This is used by raid0 and raid10 */
8654 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
8655 			struct bio *bio, sector_t start, sector_t size)
8656 {
8657 	struct bio *discard_bio = NULL;
8658 
8659 	if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO,
8660 			&discard_bio) || !discard_bio)
8661 		return;
8662 
8663 	bio_chain(discard_bio, bio);
8664 	bio_clone_blkg_association(discard_bio, bio);
8665 	if (mddev->gendisk)
8666 		trace_block_bio_remap(discard_bio,
8667 				disk_devt(mddev->gendisk),
8668 				bio->bi_iter.bi_sector);
8669 	submit_bio_noacct(discard_bio);
8670 }
8671 EXPORT_SYMBOL_GPL(md_submit_discard_bio);
8672 
8673 static void md_end_clone_io(struct bio *bio)
8674 {
8675 	struct md_io_clone *md_io_clone = bio->bi_private;
8676 	struct bio *orig_bio = md_io_clone->orig_bio;
8677 	struct mddev *mddev = md_io_clone->mddev;
8678 
8679 	orig_bio->bi_status = bio->bi_status;
8680 
8681 	if (md_io_clone->start_time)
8682 		bio_end_io_acct(orig_bio, md_io_clone->start_time);
8683 
8684 	bio_put(bio);
8685 	bio_endio(orig_bio);
8686 	percpu_ref_put(&mddev->active_io);
8687 }
8688 
8689 static void md_clone_bio(struct mddev *mddev, struct bio **bio)
8690 {
8691 	struct block_device *bdev = (*bio)->bi_bdev;
8692 	struct md_io_clone *md_io_clone;
8693 	struct bio *clone =
8694 		bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_clone_set);
8695 
8696 	md_io_clone = container_of(clone, struct md_io_clone, bio_clone);
8697 	md_io_clone->orig_bio = *bio;
8698 	md_io_clone->mddev = mddev;
8699 	if (blk_queue_io_stat(bdev->bd_disk->queue))
8700 		md_io_clone->start_time = bio_start_io_acct(*bio);
8701 
8702 	clone->bi_end_io = md_end_clone_io;
8703 	clone->bi_private = md_io_clone;
8704 	*bio = clone;
8705 }
8706 
8707 void md_account_bio(struct mddev *mddev, struct bio **bio)
8708 {
8709 	percpu_ref_get(&mddev->active_io);
8710 	md_clone_bio(mddev, bio);
8711 }
8712 EXPORT_SYMBOL_GPL(md_account_bio);
8713 
8714 /* md_allow_write(mddev)
8715  * Calling this ensures that the array is marked 'active' so that writes
8716  * may proceed without blocking.  It is important to call this before
8717  * attempting a GFP_KERNEL allocation while holding the mddev lock.
8718  * Must be called with mddev_lock held.
8719  */
8720 void md_allow_write(struct mddev *mddev)
8721 {
8722 	if (!mddev->pers)
8723 		return;
8724 	if (!md_is_rdwr(mddev))
8725 		return;
8726 	if (!mddev->pers->sync_request)
8727 		return;
8728 
8729 	spin_lock(&mddev->lock);
8730 	if (mddev->in_sync) {
8731 		mddev->in_sync = 0;
8732 		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8733 		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8734 		if (mddev->safemode_delay &&
8735 		    mddev->safemode == 0)
8736 			mddev->safemode = 1;
8737 		spin_unlock(&mddev->lock);
8738 		md_update_sb(mddev, 0);
8739 		sysfs_notify_dirent_safe(mddev->sysfs_state);
8740 		/* wait for the dirty state to be recorded in the metadata */
8741 		wait_event(mddev->sb_wait,
8742 			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
8743 	} else
8744 		spin_unlock(&mddev->lock);
8745 }
8746 EXPORT_SYMBOL_GPL(md_allow_write);
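/*
 * Illustrative sketch: a caller already holding the reconfig mutex calls
 * md_allow_write() before a GFP_KERNEL allocation, so the superblock
 * update that marks the array 'active' cannot deadlock against writeback
 * triggered by the allocation:
 *
 *	md_allow_write(mddev);		// mddev_lock held
 *	new = kzalloc(sizeof(*new), GFP_KERNEL);
 */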
8747 
8748 #define SYNC_MARKS	10
8749 #define	SYNC_MARK_STEP	(3*HZ)
8750 #define UPDATE_FREQUENCY (5*60*HZ)
8751 void md_do_sync(struct md_thread *thread)
8752 {
8753 	struct mddev *mddev = thread->mddev;
8754 	struct mddev *mddev2;
8755 	unsigned int currspeed = 0, window;
8756 	sector_t max_sectors,j, io_sectors, recovery_done;
8757 	unsigned long mark[SYNC_MARKS];
8758 	unsigned long update_time;
8759 	sector_t mark_cnt[SYNC_MARKS];
8760 	int last_mark,m;
8761 	sector_t last_check;
8762 	int skipped = 0;
8763 	struct md_rdev *rdev;
8764 	char *desc, *action = NULL;
8765 	struct blk_plug plug;
8766 	int ret;
8767 
8768 	/* just in case the thread restarts... */
8769 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8770 	    test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
8771 		return;
8772 	if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */
8773 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8774 		return;
8775 	}
8776 
8777 	if (mddev_is_clustered(mddev)) {
8778 		ret = md_cluster_ops->resync_start(mddev);
8779 		if (ret)
8780 			goto skip;
8781 
8782 		set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
8783 		if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8784 			test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
8785 			test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
8786 		     && ((unsigned long long)mddev->curr_resync_completed
8787 			 < (unsigned long long)mddev->resync_max_sectors))
8788 			goto skip;
8789 	}
8790 
8791 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8792 		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
8793 			desc = "data-check";
8794 			action = "check";
8795 		} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8796 			desc = "requested-resync";
8797 			action = "repair";
8798 		} else
8799 			desc = "resync";
8800 	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8801 		desc = "reshape";
8802 	else
8803 		desc = "recovery";
8804 
8805 	mddev->last_sync_action = action ?: desc;
8806 
8807 	/*
8808 	 * Before starting a resync we must have set curr_resync to
8809 	 * MD_RESYNC_DELAYED (2), and then checked that every "conflicting" array
8810 	 * has curr_resync less than ours.  When we find one that is the same or
8811 	 * higher we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
8812 	 * to MD_RESYNC_YIELDED (1) if we choose to yield (based arbitrarily on the
8813 	 * address of the mddev structure), and start checking from the beginning again.
8814 	 *
8815 	 */
8816 
8817 	do {
8818 		int mddev2_minor = -1;
8819 		mddev->curr_resync = MD_RESYNC_DELAYED;
8820 
8821 	try_again:
8822 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8823 			goto skip;
8824 		spin_lock(&all_mddevs_lock);
8825 		list_for_each_entry(mddev2, &all_mddevs, all_mddevs) {
8826 			if (test_bit(MD_DELETED, &mddev2->flags))
8827 				continue;
8828 			if (mddev2 == mddev)
8829 				continue;
8830 			if (!mddev->parallel_resync
8831 			&&  mddev2->curr_resync
8832 			&&  match_mddev_units(mddev, mddev2)) {
8833 				DEFINE_WAIT(wq);
8834 				if (mddev < mddev2 &&
8835 				    mddev->curr_resync == MD_RESYNC_DELAYED) {
8836 					/* arbitrarily yield */
8837 					mddev->curr_resync = MD_RESYNC_YIELDED;
8838 					wake_up(&resync_wait);
8839 				}
8840 				if (mddev > mddev2 &&
8841 				    mddev->curr_resync == MD_RESYNC_YIELDED)
8842 					/* no need to wait here, we can wait the next
8843 					 * time 'round when curr_resync == MD_RESYNC_DELAYED
8844 					 */
8845 					continue;
8846 				/* We need to wait 'interruptible' so as not to
8847 				 * contribute to the load average, and not to
8848 				 * be caught by 'softlockup'
8849 				 */
8850 				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
8851 				if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8852 				    mddev2->curr_resync >= mddev->curr_resync) {
8853 					if (mddev2_minor != mddev2->md_minor) {
8854 						mddev2_minor = mddev2->md_minor;
8855 						pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
8856 							desc, mdname(mddev),
8857 							mdname(mddev2));
8858 					}
8859 					spin_unlock(&all_mddevs_lock);
8860 
8861 					if (signal_pending(current))
8862 						flush_signals(current);
8863 					schedule();
8864 					finish_wait(&resync_wait, &wq);
8865 					goto try_again;
8866 				}
8867 				finish_wait(&resync_wait, &wq);
8868 			}
8869 		}
8870 		spin_unlock(&all_mddevs_lock);
8871 	} while (mddev->curr_resync < MD_RESYNC_DELAYED);
8872 
8873 	j = 0;
8874 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8875 		/* resync follows the size requested by the personality,
8876 		 * which defaults to physical size, but can be virtual size
8877 		 */
8878 		max_sectors = mddev->resync_max_sectors;
8879 		atomic64_set(&mddev->resync_mismatches, 0);
8880 		/* we don't use the checkpoint if there's a bitmap */
8881 		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8882 			j = mddev->resync_min;
8883 		else if (!mddev->bitmap)
8884 			j = mddev->recovery_cp;
8885 
8886 	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
8887 		max_sectors = mddev->resync_max_sectors;
8888 		/*
8889 		 * If the original node aborts reshaping then we continue the
8890 		 * reshaping, so set j again to avoid restarting the reshape
8891 		 * from the very beginning
8892 		 */
8893 		if (mddev_is_clustered(mddev) &&
8894 		    mddev->reshape_position != MaxSector)
8895 			j = mddev->reshape_position;
8896 	} else {
8897 		/* recovery follows the physical size of devices */
8898 		max_sectors = mddev->dev_sectors;
8899 		j = MaxSector;
8900 		rcu_read_lock();
8901 		rdev_for_each_rcu(rdev, mddev)
8902 			if (rdev->raid_disk >= 0 &&
8903 			    !test_bit(Journal, &rdev->flags) &&
8904 			    !test_bit(Faulty, &rdev->flags) &&
8905 			    !test_bit(In_sync, &rdev->flags) &&
8906 			    rdev->recovery_offset < j)
8907 				j = rdev->recovery_offset;
8908 		rcu_read_unlock();
8909 
8910 		/* If there is a bitmap, we need to make sure all
8911 		 * writes that started before we added a spare
8912 		 * complete before we start doing a recovery.
8913 		 * Otherwise the write might complete and (via
8914 		 * bitmap_endwrite) set a bit in the bitmap after the
8915 		 * recovery has checked that bit and skipped that
8916 		 * region.
8917 		 */
8918 		if (mddev->bitmap) {
8919 			mddev->pers->quiesce(mddev, 1);
8920 			mddev->pers->quiesce(mddev, 0);
8921 		}
8922 	}
8923 
8924 	pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8925 	pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
8926 	pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8927 		 speed_max(mddev), desc);
8928 
8929 	is_mddev_idle(mddev, 1); /* this initializes IO event counters */
8930 
8931 	io_sectors = 0;
8932 	for (m = 0; m < SYNC_MARKS; m++) {
8933 		mark[m] = jiffies;
8934 		mark_cnt[m] = io_sectors;
8935 	}
8936 	last_mark = 0;
8937 	mddev->resync_mark = mark[last_mark];
8938 	mddev->resync_mark_cnt = mark_cnt[last_mark];
8939 
8940 	/*
8941 	 * Tune reconstruction:
8942 	 */
8943 	window = 32 * (PAGE_SIZE / 512);
8944 	pr_debug("md: using %dk window, over a total of %lluk.\n",
8945 		 window/2, (unsigned long long)max_sectors/2);
8946 
8947 	atomic_set(&mddev->recovery_active, 0);
8948 	last_check = 0;
8949 
8950 	if (j >= MD_RESYNC_ACTIVE) {
8951 		pr_debug("md: resuming %s of %s from checkpoint.\n",
8952 			 desc, mdname(mddev));
8953 		mddev->curr_resync = j;
8954 	} else
8955 		mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */
8956 	mddev->curr_resync_completed = j;
8957 	sysfs_notify_dirent_safe(mddev->sysfs_completed);
8958 	md_new_event();
8959 	update_time = jiffies;
8960 
8961 	blk_start_plug(&plug);
8962 	while (j < max_sectors) {
8963 		sector_t sectors;
8964 
8965 		skipped = 0;
8966 
8967 		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8968 		    ((mddev->curr_resync > mddev->curr_resync_completed &&
8969 		      (mddev->curr_resync - mddev->curr_resync_completed)
8970 		      > (max_sectors >> 4)) ||
8971 		     time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
8972 		     (j - mddev->curr_resync_completed)*2
8973 		     >= mddev->resync_max - mddev->curr_resync_completed ||
8974 		     mddev->curr_resync_completed > mddev->resync_max
8975 			    )) {
8976 			/* time to update curr_resync_completed */
8977 			wait_event(mddev->recovery_wait,
8978 				   atomic_read(&mddev->recovery_active) == 0);
8979 			mddev->curr_resync_completed = j;
8980 			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8981 			    j > mddev->recovery_cp)
8982 				mddev->recovery_cp = j;
8983 			update_time = jiffies;
8984 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8985 			sysfs_notify_dirent_safe(mddev->sysfs_completed);
8986 		}
8987 
8988 		while (j >= mddev->resync_max &&
8989 		       !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8990 			/* As this condition is controlled by user-space,
8991 			 * we can block indefinitely, so use '_interruptible'
8992 			 * to avoid triggering warnings.
8993 			 */
8994 			flush_signals(current); /* just in case */
8995 			wait_event_interruptible(mddev->recovery_wait,
8996 						 mddev->resync_max > j
8997 						 || test_bit(MD_RECOVERY_INTR,
8998 							     &mddev->recovery));
8999 		}
9000 
9001 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9002 			break;
9003 
9004 		sectors = mddev->pers->sync_request(mddev, j, &skipped);
9005 		if (sectors == 0) {
9006 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9007 			break;
9008 		}
9009 
9010 		if (!skipped) { /* actual IO requested */
9011 			io_sectors += sectors;
9012 			atomic_add(sectors, &mddev->recovery_active);
9013 		}
9014 
9015 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9016 			break;
9017 
9018 		j += sectors;
9019 		if (j > max_sectors)
9020 			/* when skipping, extra large numbers can be returned. */
9021 			j = max_sectors;
9022 		if (j >= MD_RESYNC_ACTIVE)
9023 			mddev->curr_resync = j;
9024 		mddev->curr_mark_cnt = io_sectors;
9025 		if (last_check == 0)
9026 			/* this is the earliest that rebuild will be
9027 			 * visible in /proc/mdstat
9028 			 */
9029 			md_new_event();
9030 
9031 		if (last_check + window > io_sectors || j == max_sectors)
9032 			continue;
9033 
9034 		last_check = io_sectors;
9035 	repeat:
9036 		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
9037 			/* step marks */
9038 			int next = (last_mark+1) % SYNC_MARKS;
9039 
9040 			mddev->resync_mark = mark[next];
9041 			mddev->resync_mark_cnt = mark_cnt[next];
9042 			mark[next] = jiffies;
9043 			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
9044 			last_mark = next;
9045 		}
9046 
9047 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9048 			break;
9049 
9050 		/*
9051 		 * this loop exits only when either we are slower than
9052 		 * the 'hard' speed limit, or the system was IO-idle for
9053 		 * a jiffy.
9054 		 * the system might be non-idle CPU-wise, but we only care
9055 		 * about not overloading the IO subsystem. (things like an
9056 		 * e2fsck being done on the RAID array should execute fast)
9057 		 */
9058 		cond_resched();
9059 
9060 		recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
9061 		currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
9062 			/((jiffies-mddev->resync_mark)/HZ +1) +1;
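		/*
		 * currspeed is thus KiB/sec since the last mark: e.g.
		 * (illustrative) 204800 sectors completed over 10 seconds
		 * gives 204800/2 / (10 + 1) + 1 = 9310 KiB/sec, which is
		 * compared against speed_min()/speed_max() below.
		 */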
9063 
9064 		if (currspeed > speed_min(mddev)) {
9065 			if (currspeed > speed_max(mddev)) {
9066 				msleep(500);
9067 				goto repeat;
9068 			}
9069 			if (!is_mddev_idle(mddev, 0)) {
9070 				/*
9071 				 * Give other IO more of a chance.
9072 				 * The faster the devices, the less we wait.
9073 				 */
9074 				wait_event(mddev->recovery_wait,
9075 					   !atomic_read(&mddev->recovery_active));
9076 			}
9077 		}
9078 	}
9079 	pr_info("md: %s: %s %s.\n", mdname(mddev), desc,
9080 		test_bit(MD_RECOVERY_INTR, &mddev->recovery)
9081 		? "interrupted" : "done");
9082 	/*
9083 	 * this also signals 'finished resyncing' to md_stop
9084 	 */
9085 	blk_finish_plug(&plug);
9086 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
9087 
9088 	if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9089 	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9090 	    mddev->curr_resync >= MD_RESYNC_ACTIVE) {
9091 		mddev->curr_resync_completed = mddev->curr_resync;
9092 		sysfs_notify_dirent_safe(mddev->sysfs_completed);
9093 	}
9094 	mddev->pers->sync_request(mddev, max_sectors, &skipped);
9095 
9096 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
9097 	    mddev->curr_resync > MD_RESYNC_ACTIVE) {
9098 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
9099 			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9100 				if (mddev->curr_resync >= mddev->recovery_cp) {
9101 					pr_debug("md: checkpointing %s of %s.\n",
9102 						 desc, mdname(mddev));
9103 					if (test_bit(MD_RECOVERY_ERROR,
9104 						&mddev->recovery))
9105 						mddev->recovery_cp =
9106 							mddev->curr_resync_completed;
9107 					else
9108 						mddev->recovery_cp =
9109 							mddev->curr_resync;
9110 				}
9111 			} else
9112 				mddev->recovery_cp = MaxSector;
9113 		} else {
9114 			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9115 				mddev->curr_resync = MaxSector;
9116 			if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9117 			    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
9118 				rcu_read_lock();
9119 				rdev_for_each_rcu(rdev, mddev)
9120 					if (rdev->raid_disk >= 0 &&
9121 					    mddev->delta_disks >= 0 &&
9122 					    !test_bit(Journal, &rdev->flags) &&
9123 					    !test_bit(Faulty, &rdev->flags) &&
9124 					    !test_bit(In_sync, &rdev->flags) &&
9125 					    rdev->recovery_offset < mddev->curr_resync)
9126 						rdev->recovery_offset = mddev->curr_resync;
9127 				rcu_read_unlock();
9128 			}
9129 		}
9130 	}
9131  skip:
9132 	/* Set CHANGE_PENDING here since another update may be needed,
9133 	 * so that other nodes are informed. It should be harmless for
9134 	 * normal raid */
9135 	set_mask_bits(&mddev->sb_flags, 0,
9136 		      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
9137 
9138 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9139 			!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9140 			mddev->delta_disks > 0 &&
9141 			mddev->pers->finish_reshape &&
9142 			mddev->pers->size &&
9143 			mddev->queue) {
9144 		mddev_lock_nointr(mddev);
9145 		md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
9146 		mddev_unlock(mddev);
9147 		if (!mddev_is_clustered(mddev))
9148 			set_capacity_and_notify(mddev->gendisk,
9149 						mddev->array_sectors);
9150 	}
9151 
9152 	spin_lock(&mddev->lock);
9153 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9154 		/* We completed so min/max setting can be forgotten if used. */
9155 		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9156 			mddev->resync_min = 0;
9157 		mddev->resync_max = MaxSector;
9158 	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9159 		mddev->resync_min = mddev->curr_resync_completed;
9160 	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
9161 	mddev->curr_resync = MD_RESYNC_NONE;
9162 	spin_unlock(&mddev->lock);
9163 
9164 	wake_up(&resync_wait);
9165 	wake_up(&mddev->sb_wait);
9166 	md_wakeup_thread(mddev->thread);
9167 	return;
9168 }
9169 EXPORT_SYMBOL_GPL(md_do_sync);
9170 
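/*
 * Decide whether an rdev can be hot-removed: it must be a member with
 * no in-flight IO and no unacknowledged error; Faulty members can go,
 * healthy journal devices cannot, and a member that lost In_sync has
 * an active replacement and can be removed as well.
 */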
9171 static bool rdev_removeable(struct md_rdev *rdev)
9172 {
9173 	/* rdev is not used. */
9174 	if (rdev->raid_disk < 0)
9175 		return false;
9176 
9177 	/* There is still inflight IO, don't remove this rdev. */
9178 	if (atomic_read(&rdev->nr_pending))
9179 		return false;
9180 
9181 	/*
9182 	 * An error occurred but has not yet been acknowledged by the metadata
9183 	 * handler, don't remove this rdev.
9184 	 */
9185 	if (test_bit(Blocked, &rdev->flags))
9186 		return false;
9187 
9188 	/* Faulty rdev is not used, it's safe to remove it. */
9189 	if (test_bit(Faulty, &rdev->flags))
9190 		return true;
9191 
9192 	/* Journal disk can only be removed if it's faulty. */
9193 	if (test_bit(Journal, &rdev->flags))
9194 		return false;
9195 
9196 	/*
9197 	 * 'In_sync' is cleared while 'raid_disk' is valid, which means the
9198 	 * replacement has just become active via pers->spare_active(), and
9199 	 * pers->hot_remove_disk() will then swap this rdev for the replacement.
9200 	 */
9201 	if (!test_bit(In_sync, &rdev->flags))
9202 		return true;
9203 
9204 	return false;
9205 }
9206 
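/*
 * A spare here is a non-Candidate member device (raid_disk >= 0) that
 * is not Faulty, not a Journal device, and not yet In_sync, i.e. a
 * device that recovery could be writing to.
 */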
9207 static bool rdev_is_spare(struct md_rdev *rdev)
9208 {
9209 	return !test_bit(Candidate, &rdev->flags) && rdev->raid_disk >= 0 &&
9210 	       !test_bit(In_sync, &rdev->flags) &&
9211 	       !test_bit(Journal, &rdev->flags) &&
9212 	       !test_bit(Faulty, &rdev->flags);
9213 }
9214 
9215 static bool rdev_addable(struct md_rdev *rdev)
9216 {
9217 	/* rdev is already used, don't add it again. */
9218 	if (test_bit(Candidate, &rdev->flags) || rdev->raid_disk >= 0 ||
9219 	    test_bit(Faulty, &rdev->flags))
9220 		return false;
9221 
9222 	/* Allow adding a journal disk. */
9223 	if (test_bit(Journal, &rdev->flags))
9224 		return true;
9225 
9226 	/* Allow adding if the array is read-write. */
9227 	if (md_is_rdwr(rdev->mddev))
9228 		return true;
9229 
9230 	/*
9231 	 * For a read-only array, only allow re-adding an rdev. And if a
9232 	 * bitmap is used, don't allow re-adding an rdev that is too old.
9233 	 */
9234 	if (rdev->saved_raid_disk >= 0 && !test_bit(Bitmap_sync, &rdev->flags))
9235 		return true;
9236 
9237 	return false;
9238 }
9239 
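/*
 * Quick check used by the read-only path in md_check_recovery(): does
 * any rdev currently need to be removed or added?
 */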
9240 static bool md_spares_need_change(struct mddev *mddev)
9241 {
9242 	struct md_rdev *rdev;
9243 
9244 	rdev_for_each(rdev, mddev)
9245 		if (rdev_removeable(rdev) || rdev_addable(rdev))
9246 			return true;
9247 	return false;
9248 }
9249 
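/*
 * Remove failed devices and (re-)add spares. If @this is non-NULL, only
 * that device is considered. Returns the number of active spares that
 * recovery can use (journal devices are not counted).
 */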
9250 static int remove_and_add_spares(struct mddev *mddev,
9251 				 struct md_rdev *this)
9252 {
9253 	struct md_rdev *rdev;
9254 	int spares = 0;
9255 	int removed = 0;
9256 	bool remove_some = false;
9257 
9258 	if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
9259 		/* Mustn't remove devices when resync thread is running */
9260 		return 0;
9261 
9262 	rdev_for_each(rdev, mddev) {
9263 		if ((this == NULL || rdev == this) &&
9264 		    rdev->raid_disk >= 0 &&
9265 		    !test_bit(Blocked, &rdev->flags) &&
9266 		    test_bit(Faulty, &rdev->flags) &&
9267 		    atomic_read(&rdev->nr_pending)==0) {
9268 			/* Faulty non-Blocked devices with nr_pending == 0
9269 			 * never get nr_pending incremented,
9270 			 * never get Faulty cleared, and never get Blocked set.
9271 			 * So we can synchronize_rcu now rather than once per device
9272 			 */
9273 			remove_some = true;
9274 			set_bit(RemoveSynchronized, &rdev->flags);
9275 		}
9276 	}
9277 
9278 	if (remove_some)
9279 		synchronize_rcu();
9280 	rdev_for_each(rdev, mddev) {
9281 		if ((this == NULL || rdev == this) &&
9282 		    (test_bit(RemoveSynchronized, &rdev->flags) ||
9283 		     rdev_removeable(rdev))) {
9284 			if (mddev->pers->hot_remove_disk(
9285 				    mddev, rdev) == 0) {
9286 				sysfs_unlink_rdev(mddev, rdev);
9287 				rdev->saved_raid_disk = rdev->raid_disk;
9288 				rdev->raid_disk = -1;
9289 				removed++;
9290 			}
9291 		}
9292 		if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
9293 			clear_bit(RemoveSynchronized, &rdev->flags);
9294 	}
9295 
9296 	if (removed && mddev->kobj.sd)
9297 		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9298 
9299 	if (this && removed)
9300 		goto no_add;
9301 
9302 	rdev_for_each(rdev, mddev) {
9303 		if (this && this != rdev)
9304 			continue;
9305 		if (rdev_is_spare(rdev))
9306 			spares++;
9307 		if (!rdev_addable(rdev))
9308 			continue;
9309 		if (!test_bit(Journal, &rdev->flags))
9310 			rdev->recovery_offset = 0;
9311 		if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
9312 			/* failure here is OK */
9313 			sysfs_link_rdev(mddev, rdev);
9314 			if (!test_bit(Journal, &rdev->flags))
9315 				spares++;
9316 			md_new_event();
9317 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9318 		}
9319 	}
9320 no_add:
9321 	if (removed)
9322 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9323 	return spares;
9324 }
9325 
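/*
 * Decide which sync action to run, in priority order: resume an
 * interrupted reshape, recover to spares, resync after an unclean
 * shutdown, then any user-requested sync/check/repair. Returns true
 * if a sync thread should be started.
 */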
9326 static bool md_choose_sync_action(struct mddev *mddev, int *spares)
9327 {
9328 	/* Check if reshape is in progress first. */
9329 	if (mddev->reshape_position != MaxSector) {
9330 		if (mddev->pers->check_reshape == NULL ||
9331 		    mddev->pers->check_reshape(mddev) != 0)
9332 			return false;
9333 
9334 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9335 		clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9336 		return true;
9337 	}
9338 
9339 	/*
9340 	 * Remove any failed drives, then add spares if possible. Spares are
9341 	 * also removed and re-added, to allow the personality to fail the
9342 	 * re-add.
9343 	 */
9344 	*spares = remove_and_add_spares(mddev, NULL);
9345 	if (*spares) {
9346 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9347 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9348 		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9349 
9350 		/* Start new recovery. */
9351 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9352 		return true;
9353 	}
9354 
9355 	/* Check if recovery is in progress. */
9356 	if (mddev->recovery_cp < MaxSector) {
9357 		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9358 		clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9359 		return true;
9360 	}
9361 
9362 	/* Delay to choose resync/check/repair in md_do_sync(). */
9363 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
9364 		return true;
9365 
9366 	/* Nothing to be done */
9367 	return false;
9368 }
9369 
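/*
 * Work handler for mddev->sync_work: queued from md_check_recovery()
 * with MD_RECOVERY_RUNNING set, it picks a sync action and starts the
 * sync thread, or clears the recovery flags if nothing is to be done.
 */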
9370 static void md_start_sync(struct work_struct *ws)
9371 {
9372 	struct mddev *mddev = container_of(ws, struct mddev, sync_work);
9373 	int spares = 0;
9374 
9375 	mddev_lock_nointr(mddev);
9376 
9377 	if (!md_is_rdwr(mddev)) {
9378 		/*
9379 		 * On a read-only array we can:
9380 		 * - remove failed devices
9381 		 * - add already-in_sync devices if the array itself is in-sync.
9382 		 * As we only add devices that are already in-sync, we can
9383 		 * activate the spares immediately.
9384 		 */
9385 		remove_and_add_spares(mddev, NULL);
9386 		goto not_running;
9387 	}
9388 
9389 	if (!md_choose_sync_action(mddev, &spares))
9390 		goto not_running;
9391 
9392 	if (!mddev->pers->sync_request)
9393 		goto not_running;
9394 
9395 	/*
9396 	 * We are adding a device or devices to an array which has the bitmap
9397 	 * stored on all devices. So make sure all bitmap pages get written.
9398 	 */
9399 	if (spares)
9400 		md_bitmap_write_all(mddev->bitmap);
9401 
9402 	rcu_assign_pointer(mddev->sync_thread,
9403 			   md_register_thread(md_do_sync, mddev, "resync"));
9404 	if (!mddev->sync_thread) {
9405 		pr_warn("%s: could not start resync thread...\n",
9406 			mdname(mddev));
9407 		/* leave the spares where they are, it shouldn't hurt */
9408 		goto not_running;
9409 	}
9410 
9411 	mddev_unlock(mddev);
9412 	md_wakeup_thread(mddev->sync_thread);
9413 	sysfs_notify_dirent_safe(mddev->sysfs_action);
9414 	md_new_event();
9415 	return;
9416 
9417 not_running:
9418 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9419 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9420 	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9421 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9422 	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9423 	mddev_unlock(mddev);
9424 
9425 	wake_up(&resync_wait);
9426 	if (test_and_clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
9427 	    mddev->sysfs_action)
9428 		sysfs_notify_dirent_safe(mddev->sysfs_action);
9429 }
9430 
9431 /*
9432  * This routine is regularly called by all per-raid-array threads to
9433  * deal with generic issues like resync and super-block update.
9434  * Raid personalities that don't have a thread (linear/raid0) do not
9435  * need this as they never do any recovery or update the superblock.
9436  *
9437  * It does not do any resync itself, but rather "forks" off other threads
9438  * to do that as needed.
9439  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
9440  * "->recovery" and create a thread at ->sync_thread.
9441  * When the thread finishes it sets MD_RECOVERY_DONE
9442  * and wakes up this thread, which will reap it and finish up.
9443  * This thread also removes any faulty devices (with nr_pending == 0).
9444  *
9445  * The overall approach is:
9446  *  1/ if the superblock needs updating, update it.
9447  *  2/ If a recovery thread is running, don't do anything else.
9448  *  3/ If recovery has finished, clean up, possibly marking spares active.
9449  *  4/ If there are any faulty devices, remove them.
9450  *  5/ If the array is degraded, try to add spare devices.
9451  *  6/ If array has spares or is not in-sync, start a resync thread.
9452  */
9453 void md_check_recovery(struct mddev *mddev)
9454 {
9455 	if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9456 		/* Write superblock - thread that called mddev_suspend()
9457 		 * holds reconfig_mutex for us.
9458 		 */
9459 		set_bit(MD_UPDATING_SB, &mddev->flags);
9460 		smp_mb__after_atomic();
9461 		if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9462 			md_update_sb(mddev, 0);
9463 		clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9464 		wake_up(&mddev->sb_wait);
9465 	}
9466 
9467 	if (is_md_suspended(mddev))
9468 		return;
9469 
9470 	if (mddev->bitmap)
9471 		md_bitmap_daemon_work(mddev);
9472 
9473 	if (signal_pending(current)) {
9474 		if (mddev->pers->sync_request && !mddev->external) {
9475 			pr_debug("md: %s in immediate safe mode\n",
9476 				 mdname(mddev));
9477 			mddev->safemode = 2;
9478 		}
9479 		flush_signals(current);
9480 	}
9481 
9482 	if (!md_is_rdwr(mddev) &&
9483 	    !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9484 		return;
9485 	if (!((mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) ||
9486 	      test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9487 	      test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
9488 	      (mddev->external == 0 &&
9489 	       mddev->safemode == 1) ||
9490 	      (mddev->safemode == 2 &&
9491 	       !mddev->in_sync &&
9492 	       mddev->recovery_cp == MaxSector)))
9493 		return;
9494 
9495 	if (mddev_trylock(mddev)) {
9496 		bool try_set_sync = mddev->safemode != 0;
9497 
9498 		if (!mddev->external && mddev->safemode == 1)
9499 			mddev->safemode = 0;
9500 
9501 		if (!md_is_rdwr(mddev)) {
9502 			struct md_rdev *rdev;
9503 
9504 			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
9505 				/* sync_work already queued. */
9506 				clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9507 				goto unlock;
9508 			}
9509 
9510 			if (!mddev->external && mddev->in_sync)
9511 				/*
9512 				 * 'Blocked' flag not needed as failed devices
9513 				 * will be recorded if array switched to read/write.
9514 				 * Leaving it set will prevent the device
9515 				 * from being removed.
9516 				 */
9517 				rdev_for_each(rdev, mddev)
9518 					clear_bit(Blocked, &rdev->flags);
9519 
9520 			/*
9521 			 * There is no thread, but we need to call
9522 			 * ->spare_active and clear saved_raid_disk
9523 			 */
9524 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9525 			md_reap_sync_thread(mddev);
9526 
9527 			/*
9528 			 * Let md_start_sync() remove and add rdevs to the
9529 			 * array.
9530 			 */
9531 			if (md_spares_need_change(mddev)) {
9532 				set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9533 				queue_work(md_misc_wq, &mddev->sync_work);
9534 			}
9535 
9536 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9537 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9538 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
9539 
9540 			goto unlock;
9541 		}
9542 
9543 		if (mddev_is_clustered(mddev)) {
9544 			struct md_rdev *rdev, *tmp;
9545 			/* Kick the device if another node issued a
9546 			 * disk removal.
9547 			 */
9548 			rdev_for_each_safe(rdev, tmp, mddev) {
9549 				if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
9550 						rdev->raid_disk < 0)
9551 					md_kick_rdev_from_array(rdev);
9552 			}
9553 		}
9554 
9555 		if (try_set_sync && !mddev->external && !mddev->in_sync) {
9556 			spin_lock(&mddev->lock);
9557 			set_in_sync(mddev);
9558 			spin_unlock(&mddev->lock);
9559 		}
9560 
9561 		if (mddev->sb_flags)
9562 			md_update_sb(mddev, 0);
9563 
9564 		/*
9565 		 * Never start a new sync thread if MD_RECOVERY_RUNNING is
9566 		 * still set.
9567 		 */
9568 		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
9569 			if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
9570 				/* resync/recovery still happening */
9571 				clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9572 				goto unlock;
9573 			}
9574 
9575 			if (WARN_ON_ONCE(!mddev->sync_thread))
9576 				goto unlock;
9577 
9578 			md_reap_sync_thread(mddev);
9579 			goto unlock;
9580 		}
9581 
9582 		/* Set RUNNING before clearing NEEDED to avoid
9583 		 * any transients in the value of "sync_action".
9584 		 */
9585 		mddev->curr_resync_completed = 0;
9586 		spin_lock(&mddev->lock);
9587 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9588 		spin_unlock(&mddev->lock);
9589 		/* Clear some bits that don't mean anything, but
9590 		 * might be left set
9591 		 */
9592 		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
9593 		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9594 
9595 		if (test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) &&
9596 		    !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
9597 			queue_work(md_misc_wq, &mddev->sync_work);
9598 		} else {
9599 			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9600 			wake_up(&resync_wait);
9601 		}
9602 
9603 	unlock:
9604 		wake_up(&mddev->sb_wait);
9605 		mddev_unlock(mddev);
9606 	}
9607 }
9608 EXPORT_SYMBOL(md_check_recovery);
9609 
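/*
 * Called with reconfig_mutex held once the sync thread has set
 * MD_RECOVERY_DONE: reap the thread, activate any spares, finish a
 * reshape if one was running, and write out the superblocks.
 */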
9610 void md_reap_sync_thread(struct mddev *mddev)
9611 {
9612 	struct md_rdev *rdev;
9613 	sector_t old_dev_sectors = mddev->dev_sectors;
9614 	bool is_reshaped = false;
9615 
9616 	/* resync has finished, collect result */
9617 	md_unregister_thread(mddev, &mddev->sync_thread);
9618 	atomic_inc(&mddev->sync_seq);
9619 
9620 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9621 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
9622 	    mddev->degraded != mddev->raid_disks) {
9623 		/* success...*/
9624 		/* activate any spares */
9625 		if (mddev->pers->spare_active(mddev)) {
9626 			sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9627 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9628 		}
9629 	}
9630 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9631 	    mddev->pers->finish_reshape) {
9632 		mddev->pers->finish_reshape(mddev);
9633 		if (mddev_is_clustered(mddev))
9634 			is_reshaped = true;
9635 	}
9636 
9637 	/* If the array is no longer degraded, then any saved_raid_disk
9638 	 * information must be scrapped.
9639 	 */
9640 	if (!mddev->degraded)
9641 		rdev_for_each(rdev, mddev)
9642 			rdev->saved_raid_disk = -1;
9643 
9644 	md_update_sb(mddev, 1);
9645 	/* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
9646 	 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
9647 	 * clustered raid */
9648 	if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
9649 		md_cluster_ops->resync_finish(mddev);
9650 	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9651 	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9652 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9653 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9654 	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9655 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9656 	/*
9657 	 * We call md_cluster_ops->update_size here because sync_size could
9658 	 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
9659 	 * so it is time to update the size across the cluster.
9660 	 */
9661 	if (mddev_is_clustered(mddev) && is_reshaped
9662 				      && !test_bit(MD_CLOSING, &mddev->flags))
9663 		md_cluster_ops->update_size(mddev, old_dev_sectors);
9664 	/* flag recovery needed just to double check */
9665 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9666 	sysfs_notify_dirent_safe(mddev->sysfs_completed);
9667 	sysfs_notify_dirent_safe(mddev->sysfs_action);
9668 	md_new_event();
9669 	if (mddev->event_work.func)
9670 		queue_work(md_misc_wq, &mddev->event_work);
9671 	wake_up(&resync_wait);
9672 }
9673 EXPORT_SYMBOL(md_reap_sync_thread);
9674 
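/*
 * Wait up to five seconds for @rdev to leave the Blocked /
 * BlockedBadBlocks state, then drop the pending reference the caller
 * took on it.
 */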
9675 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
9676 {
9677 	sysfs_notify_dirent_safe(rdev->sysfs_state);
9678 	wait_event_timeout(rdev->blocked_wait,
9679 			   !test_bit(Blocked, &rdev->flags) &&
9680 			   !test_bit(BlockedBadBlocks, &rdev->flags),
9681 			   msecs_to_jiffies(5000));
9682 	rdev_dec_pending(rdev, mddev);
9683 }
9684 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
9685 
9686 void md_finish_reshape(struct mddev *mddev)
9687 {
9688 	/* Called by the personality module when reshape completes. */
9689 	struct md_rdev *rdev;
9690 
9691 	rdev_for_each(rdev, mddev) {
9692 		if (rdev->data_offset > rdev->new_data_offset)
9693 			rdev->sectors += rdev->data_offset - rdev->new_data_offset;
9694 		else
9695 			rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
9696 		rdev->data_offset = rdev->new_data_offset;
9697 	}
9698 }
9699 EXPORT_SYMBOL(md_finish_reshape);
9700 
9701 /* Bad block management */
9702 
9703 /* Returns 1 on success, 0 on failure */
9704 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9705 		       int is_new)
9706 {
9707 	struct mddev *mddev = rdev->mddev;
9708 	int rv;
9709 	if (is_new)
9710 		s += rdev->new_data_offset;
9711 	else
9712 		s += rdev->data_offset;
9713 	rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
9714 	if (rv == 0) {
9715 		/* Make sure they get written out promptly */
9716 		if (test_bit(ExternalBbl, &rdev->flags))
9717 			sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
9718 		sysfs_notify_dirent_safe(rdev->sysfs_state);
9719 		set_mask_bits(&mddev->sb_flags, 0,
9720 			      BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
9721 		md_wakeup_thread(rdev->mddev->thread);
9722 		return 1;
9723 	} else
9724 		return 0;
9725 }
9726 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
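/*
 * Illustrative (not a real call site in this file): a personality that
 * hits a write error on one sector would typically try to record the
 * bad block and only fail the whole device if that fails:
 *
 *	if (!rdev_set_badblocks(rdev, sector, 1, 0))
 *		md_error(rdev->mddev, rdev);
 */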
9727 
9728 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9729 			 int is_new)
9730 {
9731 	int rv;
9732 	if (is_new)
9733 		s += rdev->new_data_offset;
9734 	else
9735 		s += rdev->data_offset;
9736 	rv = badblocks_clear(&rdev->badblocks, s, sectors);
9737 	if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
9738 		sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
9739 	return rv;
9740 }
9741 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
9742 
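/*
 * Reboot notifier: quiesce writes on every array so superblocks are
 * consistent before the machine restarts.
 */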
9743 static int md_notify_reboot(struct notifier_block *this,
9744 			    unsigned long code, void *x)
9745 {
9746 	struct mddev *mddev, *n;
9747 	int need_delay = 0;
9748 
9749 	spin_lock(&all_mddevs_lock);
9750 	list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
9751 		if (!mddev_get(mddev))
9752 			continue;
9753 		spin_unlock(&all_mddevs_lock);
9754 		if (mddev_trylock(mddev)) {
9755 			if (mddev->pers)
9756 				__md_stop_writes(mddev);
9757 			if (mddev->persistent)
9758 				mddev->safemode = 2;
9759 			mddev_unlock(mddev);
9760 		}
9761 		need_delay = 1;
9762 		mddev_put(mddev);
9763 		spin_lock(&all_mddevs_lock);
9764 	}
9765 	spin_unlock(&all_mddevs_lock);
9766 
9767 	/*
9768 	 * Certain more exotic SCSI devices are known to be
9769 	 * volatile wrt too-early system reboots. While the
9770 	 * right place to handle this issue is the individual
9771 	 * driver, we do want to have a safe RAID driver ...
9772 	 */
9773 	if (need_delay)
9774 		msleep(1000);
9775 
9776 	return NOTIFY_DONE;
9777 }
9778 
9779 static struct notifier_block md_notifier = {
9780 	.notifier_call	= md_notify_reboot,
9781 	.next		= NULL,
9782 	.priority	= INT_MAX, /* before any real devices */
9783 };
9784 
9785 static void md_geninit(void)
9786 {
9787 	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
9788 
9789 	proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
9790 }
9791 
9792 static int __init md_init(void)
9793 {
9794 	int ret = -ENOMEM;
9795 
9796 	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
9797 	if (!md_wq)
9798 		goto err_wq;
9799 
9800 	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
9801 	if (!md_misc_wq)
9802 		goto err_misc_wq;
9803 
9804 	md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND,
9805 				       0);
9806 	if (!md_bitmap_wq)
9807 		goto err_bitmap_wq;
9808 
9809 	ret = __register_blkdev(MD_MAJOR, "md", md_probe);
9810 	if (ret < 0)
9811 		goto err_md;
9812 
9813 	ret = __register_blkdev(0, "mdp", md_probe);
9814 	if (ret < 0)
9815 		goto err_mdp;
9816 	mdp_major = ret;
9817 
9818 	register_reboot_notifier(&md_notifier);
9819 	raid_table_header = register_sysctl("dev/raid", raid_table);
9820 
9821 	md_geninit();
9822 	return 0;
9823 
9824 err_mdp:
9825 	unregister_blkdev(MD_MAJOR, "md");
9826 err_md:
9827 	destroy_workqueue(md_bitmap_wq);
9828 err_bitmap_wq:
9829 	destroy_workqueue(md_misc_wq);
9830 err_misc_wq:
9831 	destroy_workqueue(md_wq);
9832 err_wq:
9833 	return ret;
9834 }
9835 
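/*
 * A cluster peer updated its superblock; apply the differences locally:
 * resize if the size changed, handle role changes (spare activation or
 * device failure) and track reshape progress, then sync our event count.
 */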
9836 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
9837 {
9838 	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
9839 	struct md_rdev *rdev2, *tmp;
9840 	int role, ret;
9841 
9842 	/*
9843 	 * If the size was changed on another node then we need to
9844 	 * resize as well.
9845 	 */
9846 	if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
9847 		ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
9848 		if (ret)
9849 			pr_info("md-cluster: resize failed\n");
9850 		else
9851 			md_bitmap_update_sb(mddev->bitmap);
9852 	}
9853 
9854 	/* Check for change of roles in the active devices */
9855 	rdev_for_each_safe(rdev2, tmp, mddev) {
9856 		if (test_bit(Faulty, &rdev2->flags))
9857 			continue;
9858 
9859 		/* Check if the roles changed */
9860 		role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
9861 
9862 		if (test_bit(Candidate, &rdev2->flags)) {
9863 			if (role == MD_DISK_ROLE_FAULTY) {
9864 				pr_info("md: Removing Candidate device %pg because add failed\n",
9865 					rdev2->bdev);
9866 				md_kick_rdev_from_array(rdev2);
9867 				continue;
9868 			} else {
9869 				clear_bit(Candidate, &rdev2->flags);
9870 			}
9871 		}
9872 
9873 		if (role != rdev2->raid_disk) {
9874 			/*
9875 			 * The device got activated, unless a reshape is happening.
9876 			 */
9877 			if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE &&
9878 			    !(le32_to_cpu(sb->feature_map) &
9879 			      MD_FEATURE_RESHAPE_ACTIVE)) {
9880 				rdev2->saved_raid_disk = role;
9881 				ret = remove_and_add_spares(mddev, rdev2);
9882 				pr_info("Activated spare: %pg\n",
9883 					rdev2->bdev);
9884 				/* Wake up mddev->thread here, so the array can
9885 				 * perform resync with the newly activated disk */
9886 				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9887 				md_wakeup_thread(mddev->thread);
9888 			}
9889 			/* Device is faulty.
9890 			 * We just want to do the minimum to mark the disk
9891 			 * as faulty. The recovery is performed by the
9892 			 * node that initiated the error.
9893 			 */
9894 			if (role == MD_DISK_ROLE_FAULTY ||
9895 			    role == MD_DISK_ROLE_JOURNAL) {
9896 				md_error(mddev, rdev2);
9897 				clear_bit(Blocked, &rdev2->flags);
9898 			}
9899 		}
9900 	}
9901 
9902 	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
9903 		ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
9904 		if (ret)
9905 			pr_warn("md: updating array disks failed: %d\n", ret);
9906 	}
9907 
9908 	/*
9909 	 * Since mddev->delta_disks has already been updated in
9910 	 * update_raid_disks, it is time to check reshape.
9911 	 */
9912 	if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9913 	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9914 		/*
9915 		 * reshape is happening in the remote node, we need to
9916 		 * update reshape_position and call start_reshape.
9917 		 */
9918 		mddev->reshape_position = le64_to_cpu(sb->reshape_position);
9919 		if (mddev->pers->update_reshape_pos)
9920 			mddev->pers->update_reshape_pos(mddev);
9921 		if (mddev->pers->start_reshape)
9922 			mddev->pers->start_reshape(mddev);
9923 	} else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9924 		   mddev->reshape_position != MaxSector &&
9925 		   !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9926 		/* reshape is just done in another node. */
9927 		mddev->reshape_position = MaxSector;
9928 		if (mddev->pers->update_reshape_pos)
9929 			mddev->pers->update_reshape_pos(mddev);
9930 	}
9931 
9932 	/* Finally set the event to be up to date */
9933 	mddev->events = le64_to_cpu(sb->events);
9934 }
9935 
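/*
 * Re-read the superblock of @rdev from disk into a fresh page. On
 * failure the old sb page is restored, so the in-memory metadata stays
 * intact.
 */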
9936 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9937 {
9938 	int err;
9939 	struct page *swapout = rdev->sb_page;
9940 	struct mdp_superblock_1 *sb;
9941 
9942 	/* Store the sb page of the rdev in the swapout temporary
9943 	 * variable in case we fail below and need to restore it
9944 	 */
9945 	rdev->sb_page = NULL;
9946 	err = alloc_disk_sb(rdev);
9947 	if (err == 0) {
9948 		ClearPageUptodate(rdev->sb_page);
9949 		rdev->sb_loaded = 0;
9950 		err = super_types[mddev->major_version].
9951 			load_super(rdev, NULL, mddev->minor_version);
9952 	}
9953 	if (err < 0) {
9954 		pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9955 				__func__, __LINE__, rdev->desc_nr, err);
9956 		if (rdev->sb_page)
9957 			put_page(rdev->sb_page);
9958 		rdev->sb_page = swapout;
9959 		rdev->sb_loaded = 1;
9960 		return err;
9961 	}
9962 
9963 	sb = page_address(rdev->sb_page);
9964 	/* Pick up the recovery offset recorded by the other node, but only
9965 	 * if MD_FEATURE_RECOVERY_OFFSET is set
9966 	 */
9967 
9968 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
9969 		rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9970 
9971 	/* The other node finished recovery, call spare_active to set
9972 	 * the device In_sync and update mddev->degraded
9973 	 */
9974 	if (rdev->recovery_offset == MaxSector &&
9975 	    !test_bit(In_sync, &rdev->flags) &&
9976 	    mddev->pers->spare_active(mddev))
9977 		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9978 
9979 	put_page(swapout);
9980 	return 0;
9981 }
9982 
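/*
 * Called by the md-cluster code when another node signals a metadata
 * update: reload the superblock of device @nr and apply any changes.
 */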
9983 void md_reload_sb(struct mddev *mddev, int nr)
9984 {
9985 	struct md_rdev *rdev = NULL, *iter;
9986 	int err;
9987 
9988 	/* Find the rdev */
9989 	rdev_for_each_rcu(iter, mddev) {
9990 		if (iter->desc_nr == nr) {
9991 			rdev = iter;
9992 			break;
9993 		}
9994 	}
9995 
9996 	if (!rdev) {
9997 		pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9998 		return;
9999 	}
10000 
10001 	err = read_rdev(mddev, rdev);
10002 	if (err < 0)
10003 		return;
10004 
10005 	check_sb_changes(mddev, rdev);
10006 
10007 	/* Read all rdev's to update recovery_offset */
10008 	rdev_for_each_rcu(rdev, mddev) {
10009 		if (!test_bit(Faulty, &rdev->flags))
10010 			read_rdev(mddev, rdev);
10011 	}
10012 }
10013 EXPORT_SYMBOL(md_reload_sb);
10014 
10015 #ifndef MODULE
10016 
10017 /*
10018  * Searches all registered partitions for autorun RAID arrays
10019  * at boot time.
10020  */
10021 
10022 static DEFINE_MUTEX(detected_devices_mutex);
10023 static LIST_HEAD(all_detected_devices);
10024 struct detected_devices_node {
10025 	struct list_head list;
10026 	dev_t dev;
10027 };
10028 
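/*
 * Remember a partition detected at boot so md_autostart_arrays() can
 * try to assemble it later. If the allocation fails the device is
 * silently skipped.
 */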
10029 void md_autodetect_dev(dev_t dev)
10030 {
10031 	struct detected_devices_node *node_detected_dev;
10032 
10033 	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
10034 	if (node_detected_dev) {
10035 		node_detected_dev->dev = dev;
10036 		mutex_lock(&detected_devices_mutex);
10037 		list_add_tail(&node_detected_dev->list, &all_detected_devices);
10038 		mutex_unlock(&detected_devices_mutex);
10039 	}
10040 }
10041 
10042 void md_autostart_arrays(int part)
10043 {
10044 	struct md_rdev *rdev;
10045 	struct detected_devices_node *node_detected_dev;
10046 	dev_t dev;
10047 	int i_scanned, i_passed;
10048 
10049 	i_scanned = 0;
10050 	i_passed = 0;
10051 
10052 	pr_info("md: Autodetecting RAID arrays.\n");
10053 
10054 	mutex_lock(&detected_devices_mutex);
10055 	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
10056 		i_scanned++;
10057 		node_detected_dev = list_entry(all_detected_devices.next,
10058 					struct detected_devices_node, list);
10059 		list_del(&node_detected_dev->list);
10060 		dev = node_detected_dev->dev;
10061 		kfree(node_detected_dev);
10062 		mutex_unlock(&detected_devices_mutex);
10063 		rdev = md_import_device(dev, 0, 90);
10064 		mutex_lock(&detected_devices_mutex);
10065 		if (IS_ERR(rdev))
10066 			continue;
10067 
10068 		if (test_bit(Faulty, &rdev->flags))
10069 			continue;
10070 
10071 		set_bit(AutoDetected, &rdev->flags);
10072 		list_add(&rdev->same_set, &pending_raid_disks);
10073 		i_passed++;
10074 	}
10075 	mutex_unlock(&detected_devices_mutex);
10076 
10077 	pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
10078 
10079 	autorun_devices(part);
10080 }
10081 
10082 #endif /* !MODULE */
10083 
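/*
 * Module unload: unregister everything md_init() set up, wake any
 * pollers of /proc/mdstat so they release their references, export all
 * arrays and finally destroy the workqueues.
 */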
10084 static __exit void md_exit(void)
10085 {
10086 	struct mddev *mddev, *n;
10087 	int delay = 1;
10088 
10089 	unregister_blkdev(MD_MAJOR, "md");
10090 	unregister_blkdev(mdp_major, "mdp");
10091 	unregister_reboot_notifier(&md_notifier);
10092 	unregister_sysctl_table(raid_table_header);
10093 
10094 	/* We cannot unload the modules while some process is
10095 	 * waiting for us in select() or poll() - wake them up
10096 	 */
10097 	md_unloading = 1;
10098 	while (waitqueue_active(&md_event_waiters)) {
10099 		/* not safe to leave yet */
10100 		wake_up(&md_event_waiters);
10101 		msleep(delay);
10102 		delay += delay;
10103 	}
10104 	remove_proc_entry("mdstat", NULL);
10105 
10106 	spin_lock(&all_mddevs_lock);
10107 	list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
10108 		if (!mddev_get(mddev))
10109 			continue;
10110 		spin_unlock(&all_mddevs_lock);
10111 		export_array(mddev);
10112 		mddev->ctime = 0;
10113 		mddev->hold_active = 0;
10114 		/*
10115 		 * As the mddev is now fully clear, mddev_put will schedule
10116 		 * the mddev for destruction by a workqueue, and the
10117 		 * destroy_workqueue() below will wait for that to complete.
10118 		 */
10119 		mddev_put(mddev);
10120 		spin_lock(&all_mddevs_lock);
10121 	}
10122 	spin_unlock(&all_mddevs_lock);
10123 
10124 	destroy_workqueue(md_misc_wq);
10125 	destroy_workqueue(md_bitmap_wq);
10126 	destroy_workqueue(md_wq);
10127 }
10128 
10129 subsys_initcall(md_init);
10130 module_exit(md_exit)
10131 
10132 static int get_ro(char *buffer, const struct kernel_param *kp)
10133 {
10134 	return sprintf(buffer, "%d\n", start_readonly);
10135 }
10136 static int set_ro(const char *val, const struct kernel_param *kp)
10137 {
10138 	return kstrtouint(val, 10, (unsigned int *)&start_readonly);
10139 }
10140 
10141 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
10142 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
10143 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
10144 module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
10145 
10146 MODULE_LICENSE("GPL");
10147 MODULE_DESCRIPTION("MD RAID framework");
10148 MODULE_ALIAS("md");
10149 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
10150