xref: /linux/drivers/md/md.c (revision f990ad67f0febc51274adb604d5bdeab0d06d024)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3    md.c : Multiple Devices driver for Linux
4      Copyright (C) 1998, 1999, 2000 Ingo Molnar
5 
6      completely rewritten, based on the MD driver code from Marc Zyngier
7 
8    Changes:
9 
10    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
11    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
12    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
13    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
14    - kmod support by: Cyrus Durgin
15    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
16    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17 
18    - lots of fixes and improvements to the RAID1/RAID5 and generic
19      RAID code (such as request based resynchronization):
20 
21      Neil Brown <neilb@cse.unsw.edu.au>.
22 
23    - persistent bitmap code
24      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25 
26 
27    Errors, Warnings, etc.
28    Please use:
29      pr_crit() for error conditions that risk data loss
30      pr_err() for error conditions that are unexpected, like an IO error
31          or internal inconsistency
32      pr_warn() for error conditions that could have been predicted, like
33          adding a device to an array when it has incompatible metadata
34      pr_info() for every interesting, very rare event, like an array starting
35          or stopping, or resync starting or stopping
36      pr_debug() for everything else.
37 
38 */
39 
40 #include <linux/sched/mm.h>
41 #include <linux/sched/signal.h>
42 #include <linux/kthread.h>
43 #include <linux/blkdev.h>
44 #include <linux/blk-integrity.h>
45 #include <linux/badblocks.h>
46 #include <linux/sysctl.h>
47 #include <linux/seq_file.h>
48 #include <linux/fs.h>
49 #include <linux/poll.h>
50 #include <linux/ctype.h>
51 #include <linux/string.h>
52 #include <linux/hdreg.h>
53 #include <linux/proc_fs.h>
54 #include <linux/random.h>
55 #include <linux/major.h>
56 #include <linux/module.h>
57 #include <linux/reboot.h>
58 #include <linux/file.h>
59 #include <linux/compat.h>
60 #include <linux/delay.h>
61 #include <linux/raid/md_p.h>
62 #include <linux/raid/md_u.h>
63 #include <linux/raid/detect.h>
64 #include <linux/slab.h>
65 #include <linux/percpu-refcount.h>
66 #include <linux/part_stat.h>
67 
68 #include "md.h"
69 #include "md-bitmap.h"
70 #include "md-cluster.h"
71 
72 static const char *action_name[NR_SYNC_ACTIONS] = {
73 	[ACTION_RESYNC]		= "resync",
74 	[ACTION_RECOVER]	= "recover",
75 	[ACTION_CHECK]		= "check",
76 	[ACTION_REPAIR]		= "repair",
77 	[ACTION_RESHAPE]	= "reshape",
78 	[ACTION_FROZEN]		= "frozen",
79 	[ACTION_IDLE]		= "idle",
80 };
81 
82 static DEFINE_XARRAY(md_submodule);
83 
84 static const struct kobj_type md_ktype;
85 
86 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
87 static struct workqueue_struct *md_wq;
88 
89 /*
90  * This workqueue is used for sync_work to register new sync_thread, and for
91  * del_work to remove rdev, and for event_work that is only set by dm-raid.
92  *
93  * Note that sync_work will grab reconfig_mutex, hence never flush this
94  * workqueue with reconfig_mutex held.
95  */
96 static struct workqueue_struct *md_misc_wq;
97 
98 static int remove_and_add_spares(struct mddev *mddev,
99 				 struct md_rdev *this);
100 static void mddev_detach(struct mddev *mddev);
101 static void export_rdev(struct md_rdev *rdev);
102 static void md_wakeup_thread_directly(struct md_thread __rcu **thread);
103 
104 /*
105  * Default number of read corrections we'll attempt on an rdev
106  * before ejecting it from the array. We divide the read error
107  * count by 2 for every hour elapsed between read errors.
108  */
109 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
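/*
 * Illustrative sketch, not part of the driver: how the per-hour halving
 * described above could be computed. The helper and its arguments are
 * hypothetical; the real bookkeeping lives in the raid1/raid10 read
 * error paths.
 */
static inline int decayed_read_errors(int read_errors, unsigned int hours)
{
	/* halve the recorded count once per elapsed hour */
	while (hours-- && read_errors)
		read_errors >>= 1;
	return read_errors;
}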
110 /* Default safemode delay: 200 msec */
111 #define DEFAULT_SAFEMODE_DELAY ((200 * HZ) / 1000 + 1)
112 /*
113  * Current RAID-1,4,5,6,10 parallel reconstruction 'guaranteed speed limit'
114  * is sysctl_speed_limit_min, 1000 KB/sec by default, so the extra system load
115  * does not show up that much. Increase it if you want to have more guaranteed
116  * speed. Note that the RAID driver will use the maximum bandwidth
117  * sysctl_speed_limit_max, 200 MB/sec by default, if the IO subsystem is idle.
118  *
119  * Background sync IO speed control:
120  *
121  * - below speed min:
122  *   no limit;
123  * - above speed min and below speed max:
124  *   a) if mddev is idle, then no limit;
125  *   b) if mddev is busy handling normal IO, then limit inflight sync IO
126  *   to sync_io_depth;
127  * - above speed max:
128  *   sync IO can't be issued;
129  *
130  * Following configurations can be changed via /proc/sys/dev/raid/ for system
131  * or /sys/block/mdX/md/ for one array.
132  */
133 static int sysctl_speed_limit_min = 1000;
134 static int sysctl_speed_limit_max = 200000;
135 static int sysctl_sync_io_depth = 32;
136 
137 static int speed_min(struct mddev *mddev)
138 {
139 	return mddev->sync_speed_min ?
140 		mddev->sync_speed_min : sysctl_speed_limit_min;
141 }
142 
143 static int speed_max(struct mddev *mddev)
144 {
145 	return mddev->sync_speed_max ?
146 		mddev->sync_speed_max : sysctl_speed_limit_max;
147 }
148 
149 static int sync_io_depth(struct mddev *mddev)
150 {
151 	return mddev->sync_io_depth ?
152 		mddev->sync_io_depth : sysctl_sync_io_depth;
153 }
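/*
 * Minimal sketch of the three-band policy documented above, using the
 * accessors just defined. This is not the md_do_sync() implementation;
 * 'currspeed' (KB/sec), 'mddev_idle' and 'inflight' are hypothetical
 * inputs supplied by the caller.
 */
static inline bool can_issue_sync_io(struct mddev *mddev, int currspeed,
				     bool mddev_idle, int inflight)
{
	if (currspeed <= speed_min(mddev))
		return true;			/* below min: no limit */
	if (currspeed > speed_max(mddev))
		return false;			/* above max: hold off */
	/* between min and max: unlimited if idle, else bounded depth */
	return mddev_idle || inflight < sync_io_depth(mddev);
}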
154 
155 static void rdev_uninit_serial(struct md_rdev *rdev)
156 {
157 	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
158 		return;
159 
160 	kvfree(rdev->serial);
161 	rdev->serial = NULL;
162 }
163 
164 static void rdevs_uninit_serial(struct mddev *mddev)
165 {
166 	struct md_rdev *rdev;
167 
168 	rdev_for_each(rdev, mddev)
169 		rdev_uninit_serial(rdev);
170 }
171 
172 static int rdev_init_serial(struct md_rdev *rdev)
173 {
174 	/* serial_nums equals BARRIER_BUCKETS_NR */
175 	int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
176 	struct serial_in_rdev *serial = NULL;
177 
178 	if (test_bit(CollisionCheck, &rdev->flags))
179 		return 0;
180 
181 	serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
182 			  GFP_KERNEL);
183 	if (!serial)
184 		return -ENOMEM;
185 
186 	for (i = 0; i < serial_nums; i++) {
187 		struct serial_in_rdev *serial_tmp = &serial[i];
188 
189 		spin_lock_init(&serial_tmp->serial_lock);
190 		serial_tmp->serial_rb = RB_ROOT_CACHED;
191 		init_waitqueue_head(&serial_tmp->serial_io_wait);
192 	}
193 
194 	rdev->serial = serial;
195 	set_bit(CollisionCheck, &rdev->flags);
196 
197 	return 0;
198 }
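/*
 * Worked example for the bucket count above, assuming 4K pages: with
 * PAGE_SHIFT == 12 and sizeof(atomic_t) == 4, serial_nums is
 * 1 << (12 - 2) = 1024, matching BARRIER_BUCKETS_NR.
 */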
199 
200 static int rdevs_init_serial(struct mddev *mddev)
201 {
202 	struct md_rdev *rdev;
203 	int ret = 0;
204 
205 	rdev_for_each(rdev, mddev) {
206 		ret = rdev_init_serial(rdev);
207 		if (ret)
208 			break;
209 	}
210 
211 	/* Free all resources if the pool doesn't exist */
212 	if (ret && !mddev->serial_info_pool)
213 		rdevs_uninit_serial(mddev);
214 
215 	return ret;
216 }
217 
218 /*
219  * An rdev needs serialization enabled if it meets both conditions:
220  * 1. it is a multi-queue device flagged with writemostly.
221  * 2. write-behind mode is enabled.
222  */
223 static int rdev_need_serial(struct md_rdev *rdev)
224 {
225 	return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
226 		rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
227 		test_bit(WriteMostly, &rdev->flags));
228 }
229 
230 /*
231  * Init resources for rdev(s), then create serial_info_pool if:
232  * 1. rdev is the first device that returns true from rdev_need_serial.
233  * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
234  */
235 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev)
236 {
237 	int ret = 0;
238 
239 	if (rdev && !rdev_need_serial(rdev) &&
240 	    !test_bit(CollisionCheck, &rdev->flags))
241 		return;
242 
243 	if (!rdev)
244 		ret = rdevs_init_serial(mddev);
245 	else
246 		ret = rdev_init_serial(rdev);
247 	if (ret)
248 		return;
249 
250 	if (mddev->serial_info_pool == NULL) {
251 		/*
252 		 * already in memalloc noio context by
253 		 * mddev_suspend()
254 		 */
255 		mddev->serial_info_pool =
256 			mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
257 						sizeof(struct serial_info));
258 		if (!mddev->serial_info_pool) {
259 			rdevs_uninit_serial(mddev);
260 			pr_err("can't alloc memory pool for serialization\n");
261 		}
262 	}
263 }
264 
265 /*
266  * Free resources from rdev(s), and destroy serial_info_pool under conditions:
267  * 1. rdev is the last device flagged with CollisionCheck.
268  * 2. the bitmap is destroyed while the policy is not enabled.
269  * 3. for disable policy, the pool is destroyed only when no rdev needs it.
270  */
271 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev)
272 {
273 	if (rdev && !test_bit(CollisionCheck, &rdev->flags))
274 		return;
275 
276 	if (mddev->serial_info_pool) {
277 		struct md_rdev *temp;
278 		int num = 0; /* used to track if other rdevs need the pool */
279 
280 		rdev_for_each(temp, mddev) {
281 			if (!rdev) {
282 				if (!test_bit(MD_SERIALIZE_POLICY,
283 					      &mddev->flags) ||
284 				    !rdev_need_serial(temp))
285 					rdev_uninit_serial(temp);
286 				else
287 					num++;
288 			} else if (temp != rdev &&
289 				   test_bit(CollisionCheck, &temp->flags))
290 				num++;
291 		}
292 
293 		if (rdev)
294 			rdev_uninit_serial(rdev);
295 
296 		if (num)
297 			pr_info("The mempool could be used by other devices\n");
298 		else {
299 			mempool_destroy(mddev->serial_info_pool);
300 			mddev->serial_info_pool = NULL;
301 		}
302 	}
303 }
304 
305 static struct ctl_table_header *raid_table_header;
306 
307 static const struct ctl_table raid_table[] = {
308 	{
309 		.procname	= "speed_limit_min",
310 		.data		= &sysctl_speed_limit_min,
311 		.maxlen		= sizeof(int),
312 		.mode		= 0644,
313 		.proc_handler	= proc_dointvec,
314 	},
315 	{
316 		.procname	= "speed_limit_max",
317 		.data		= &sysctl_speed_limit_max,
318 		.maxlen		= sizeof(int),
319 		.mode		= 0644,
320 		.proc_handler	= proc_dointvec,
321 	},
322 	{
323 		.procname	= "sync_io_depth",
324 		.data		= &sysctl_sync_io_depth,
325 		.maxlen		= sizeof(int),
326 		.mode		= 0644,
327 		.proc_handler	= proc_dointvec,
328 	},
329 };
330 
331 static int start_readonly;
332 
333 /*
334  * The original mechanism for creating an md device is to create
335  * a device node in /dev and to open it.  This causes races with device-close.
336  * The preferred method is to write to the "new_array" module parameter.
337  * This can avoid races.
338  * Setting create_on_open to false disables the original mechanism
339  * so all the races disappear.
340  */
341 static bool create_on_open = true;
342 static bool legacy_async_del_gendisk = true;
343 static bool check_new_feature = true;
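/*
 * Usage note for the "new_array" mechanism above (hedged user-space
 * example): an array node can be created race-free with e.g.
 *
 *	echo md127 > /sys/module/md_mod/parameters/new_array
 *
 * instead of relying on create_on_open.
 */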
344 
345 /*
346  * We have a system wide 'event count' that is incremented
347  * on any 'interesting' event, and readers of /proc/mdstat
348  * can use 'poll' or 'select' to find out when the event
349  * count increases.
350  *
351  * Events are:
352  *  start array, stop array, error, add device, remove device,
353  *  start build, activate spare
354  */
355 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
356 static atomic_t md_event_count;
357 void md_new_event(void)
358 {
359 	atomic_inc(&md_event_count);
360 	wake_up(&md_event_waiters);
361 }
362 EXPORT_SYMBOL_GPL(md_new_event);
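/*
 * Hedged user-space sketch of the event interface above: read
 * /proc/mdstat once to arm the wait, then poll. mdstat's poll method
 * raises EPOLLPRI | EPOLLERR when md_event_count has changed since the
 * file was last read.
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	read(fd, buf, sizeof(buf));	// arm: snapshot the event count
 *	poll(&pfd, 1, -1);		// wakes when md_new_event() fires
 */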
363 
364 /*
365  * Enables iteration over all existing md arrays.
366  * all_mddevs_lock protects this list.
367  */
368 static LIST_HEAD(all_mddevs);
369 static DEFINE_SPINLOCK(all_mddevs_lock);
370 
371 static bool is_md_suspended(struct mddev *mddev)
372 {
373 	return percpu_ref_is_dying(&mddev->active_io);
374 }
375 /* Rather than calling directly into the personality make_request function,
376  * IO requests come here first so that we can check if the device is
377  * being suspended pending a reconfiguration.
378  * We hold a refcount over the call to ->make_request.  By the time that
379  * call has finished, the bio has been linked into some internal structure
380  * and so is visible to ->quiesce(), so we don't need the refcount any more.
381  */
382 static bool is_suspended(struct mddev *mddev, struct bio *bio)
383 {
384 	if (is_md_suspended(mddev))
385 		return true;
386 	if (bio_data_dir(bio) != WRITE)
387 		return false;
388 	if (READ_ONCE(mddev->suspend_lo) >= READ_ONCE(mddev->suspend_hi))
389 		return false;
390 	if (bio->bi_iter.bi_sector >= READ_ONCE(mddev->suspend_hi))
391 		return false;
392 	if (bio_end_sector(bio) < READ_ONCE(mddev->suspend_lo))
393 		return false;
394 	return true;
395 }
396 
397 bool md_handle_request(struct mddev *mddev, struct bio *bio)
398 {
399 check_suspended:
400 	if (is_suspended(mddev, bio)) {
401 		DEFINE_WAIT(__wait);
402 		/* Bail out if REQ_NOWAIT is set for the bio */
403 		if (bio->bi_opf & REQ_NOWAIT) {
404 			bio_wouldblock_error(bio);
405 			return true;
406 		}
407 		for (;;) {
408 			prepare_to_wait(&mddev->sb_wait, &__wait,
409 					TASK_UNINTERRUPTIBLE);
410 			if (!is_suspended(mddev, bio))
411 				break;
412 			schedule();
413 		}
414 		finish_wait(&mddev->sb_wait, &__wait);
415 	}
416 	if (!percpu_ref_tryget_live(&mddev->active_io))
417 		goto check_suspended;
418 
419 	if (!mddev->pers->make_request(mddev, bio)) {
420 		percpu_ref_put(&mddev->active_io);
421 		if (!mddev->gendisk && mddev->pers->prepare_suspend)
422 			return false;
423 		goto check_suspended;
424 	}
425 
426 	percpu_ref_put(&mddev->active_io);
427 	return true;
428 }
429 EXPORT_SYMBOL(md_handle_request);
430 
431 static void md_submit_bio(struct bio *bio)
432 {
433 	const int rw = bio_data_dir(bio);
434 	struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
435 
436 	if (mddev == NULL || mddev->pers == NULL) {
437 		bio_io_error(bio);
438 		return;
439 	}
440 
441 	if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
442 		bio_io_error(bio);
443 		return;
444 	}
445 
446 	bio = bio_split_to_limits(bio);
447 	if (!bio)
448 		return;
449 
450 	if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) {
451 		if (bio_sectors(bio) != 0)
452 			bio->bi_status = BLK_STS_IOERR;
453 		bio_endio(bio);
454 		return;
455 	}
456 
457 	/* bio could be mergeable after passing to the underlying layer */
458 	bio->bi_opf &= ~REQ_NOMERGE;
459 
460 	md_handle_request(mddev, bio);
461 }
462 
463 /*
464  * Make sure no new requests are submitted to the device, and any requests that
465  * have been submitted are completely handled.
466  */
467 int mddev_suspend(struct mddev *mddev, bool interruptible)
468 {
469 	int err = 0;
470 
471 	/*
472 	 * Holding reconfig_mutex while waiting for normal IO will deadlock,
473 	 * because other contexts can't update the super_block then, and
474 	 * normal IO can rely on the super_block being updated.
475 	 */
476 	lockdep_assert_not_held(&mddev->reconfig_mutex);
477 
478 	if (interruptible)
479 		err = mutex_lock_interruptible(&mddev->suspend_mutex);
480 	else
481 		mutex_lock(&mddev->suspend_mutex);
482 	if (err)
483 		return err;
484 
485 	if (mddev->suspended) {
486 		WRITE_ONCE(mddev->suspended, mddev->suspended + 1);
487 		mutex_unlock(&mddev->suspend_mutex);
488 		return 0;
489 	}
490 
491 	percpu_ref_kill(&mddev->active_io);
492 	if (interruptible)
493 		err = wait_event_interruptible(mddev->sb_wait,
494 				percpu_ref_is_zero(&mddev->active_io));
495 	else
496 		wait_event(mddev->sb_wait,
497 				percpu_ref_is_zero(&mddev->active_io));
498 	if (err) {
499 		percpu_ref_resurrect(&mddev->active_io);
500 		mutex_unlock(&mddev->suspend_mutex);
501 		return err;
502 	}
503 
504 	/*
505 	 * For raid456, IO might be waiting for reshape to make progress;
506 	 * allow a new reshape to start while waiting for IO to be done, to
507 	 * prevent deadlock.
508 	 */
509 	WRITE_ONCE(mddev->suspended, mddev->suspended + 1);
510 
511 	/* restrict memory reclaim I/O while the raid array is suspended */
512 	mddev->noio_flag = memalloc_noio_save();
513 
514 	mutex_unlock(&mddev->suspend_mutex);
515 	return 0;
516 }
517 EXPORT_SYMBOL_GPL(mddev_suspend);
518 
519 static void __mddev_resume(struct mddev *mddev, bool recovery_needed)
520 {
521 	lockdep_assert_not_held(&mddev->reconfig_mutex);
522 
523 	mutex_lock(&mddev->suspend_mutex);
524 	WRITE_ONCE(mddev->suspended, mddev->suspended - 1);
525 	if (mddev->suspended) {
526 		mutex_unlock(&mddev->suspend_mutex);
527 		return;
528 	}
529 
530 	/* entered the memalloc scope from mddev_suspend() */
531 	memalloc_noio_restore(mddev->noio_flag);
532 
533 	percpu_ref_resurrect(&mddev->active_io);
534 	wake_up(&mddev->sb_wait);
535 
536 	if (recovery_needed)
537 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
538 	md_wakeup_thread(mddev->thread);
539 	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
540 
541 	mutex_unlock(&mddev->suspend_mutex);
542 }
543 
544 void mddev_resume(struct mddev *mddev)
545 {
546 	return __mddev_resume(mddev, true);
547 }
548 EXPORT_SYMBOL_GPL(mddev_resume);
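/*
 * Hedged usage sketch for the suspend/resume pair above: quiesce the
 * array around a reconfiguration step. mddev_suspend() nests via the
 * 'suspended' counter, so this is safe even if a caller higher up has
 * already suspended the array. 'reconfigure' stands in for any
 * caller-supplied operation.
 */
static int __maybe_unused example_quiesced_update(struct mddev *mddev,
		void (*reconfigure)(struct mddev *))
{
	int err = mddev_suspend(mddev, true);

	if (err)
		return err;
	/* no normal IO is in flight past this point */
	reconfigure(mddev);
	mddev_resume(mddev);
	return 0;
}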
549 
550 /* sync bdev before setting device to readonly or stopping raid */
551 static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev, int opener_num)
552 {
553 	mutex_lock(&mddev->open_mutex);
554 	if (mddev->pers && atomic_read(&mddev->openers) > opener_num) {
555 		mutex_unlock(&mddev->open_mutex);
556 		return -EBUSY;
557 	}
558 	if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
559 		mutex_unlock(&mddev->open_mutex);
560 		return -EBUSY;
561 	}
562 	mutex_unlock(&mddev->open_mutex);
563 
564 	sync_blockdev(mddev->gendisk->part0);
565 	return 0;
566 }
567 
568 /*
569  * The only difference from bio_chain_endio() is that the current
570  * bi_status of bio does not affect the bi_status of parent.
571  */
572 static void md_end_flush(struct bio *bio)
573 {
574 	struct bio *parent = bio->bi_private;
575 
576 	/*
577 	 * If any flush IO fails before a power failure,
578 	 * disk data may be lost.
579 	 */
580 	if (bio->bi_status)
581 		pr_err("md: %pg flush io error %d\n", bio->bi_bdev,
582 			blk_status_to_errno(bio->bi_status));
583 
584 	bio_put(bio);
585 	bio_endio(parent);
586 }
587 
588 bool md_flush_request(struct mddev *mddev, struct bio *bio)
589 {
590 	struct md_rdev *rdev;
591 	struct bio *new;
592 
593 	 * md_flush_request() should be called under md_handle_request() and
594 	 * md_flush_reqeust() should be called under md_handle_request() and
595 	 * 'active_io' is already grabbed. Hence it's safe to get rdev directly
596 	 * without rcu protection.
597 	 */
598 	WARN_ON(percpu_ref_is_zero(&mddev->active_io));
599 
600 	rdev_for_each(rdev, mddev) {
601 		if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags))
602 			continue;
603 
604 		new = bio_alloc_bioset(rdev->bdev, 0,
605 				       REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO,
606 				       &mddev->bio_set);
607 		new->bi_private = bio;
608 		new->bi_end_io = md_end_flush;
609 		bio_inc_remaining(bio);
610 		submit_bio(new);
611 	}
612 
613 	if (bio_sectors(bio) == 0) {
614 		bio_endio(bio);
615 		return true;
616 	}
617 
618 	bio->bi_opf &= ~REQ_PREFLUSH;
619 	return false;
620 }
621 EXPORT_SYMBOL(md_flush_request);
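/*
 * Hedged sketch of how a personality consumes md_flush_request() at the
 * top of its make_request method (raid1 follows this shape): true means
 * the bio was fully handled (an empty flush), false means REQ_PREFLUSH
 * was stripped and the data portion still needs handling.
 */
static bool __maybe_unused example_make_request(struct mddev *mddev,
		struct bio *bio)
{
	if (unlikely(bio->bi_opf & REQ_PREFLUSH) &&
	    md_flush_request(mddev, bio))
		return true;
	/* ... map and submit the data portion of the bio here ... */
	return true;
}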
622 
623 static inline struct mddev *mddev_get(struct mddev *mddev)
624 {
625 	lockdep_assert_held(&all_mddevs_lock);
626 
627 	if (test_bit(MD_DELETED, &mddev->flags))
628 		return NULL;
629 	atomic_inc(&mddev->active);
630 	return mddev;
631 }
632 
633 static void mddev_delayed_delete(struct work_struct *ws);
634 
635 static void __mddev_put(struct mddev *mddev)
636 {
637 	if (mddev->raid_disks || !list_empty(&mddev->disks) ||
638 	    mddev->ctime || mddev->hold_active)
639 		return;
640 
641 	/*
642 	 * If the array is freed by stopping it, MD_DELETED is set by
643 	 * do_md_stop(); MD_DELETED is still set here in case the mddev is
644 	 * freed directly by closing an mddev created by create_on_open.
645 	 */
646 	set_bit(MD_DELETED, &mddev->flags);
647 	/*
648 	 * Call queue_work inside the spinlock so that flush_workqueue() after
649 	 * mddev_find will succeed in waiting for the work to be done.
650 	 */
651 	queue_work(md_misc_wq, &mddev->del_work);
652 }
653 
654 static void mddev_put_locked(struct mddev *mddev)
655 {
656 	if (atomic_dec_and_test(&mddev->active))
657 		__mddev_put(mddev);
658 }
659 
660 void mddev_put(struct mddev *mddev)
661 {
662 	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
663 		return;
664 
665 	__mddev_put(mddev);
666 	spin_unlock(&all_mddevs_lock);
667 }
668 
669 static void md_safemode_timeout(struct timer_list *t);
670 static void md_start_sync(struct work_struct *ws);
671 
672 static void active_io_release(struct percpu_ref *ref)
673 {
674 	struct mddev *mddev = container_of(ref, struct mddev, active_io);
675 
676 	wake_up(&mddev->sb_wait);
677 }
678 
679 static void no_op(struct percpu_ref *r) {}
680 
681 static bool mddev_set_bitmap_ops(struct mddev *mddev)
682 {
683 	struct bitmap_operations *old = mddev->bitmap_ops;
684 	struct md_submodule_head *head;
685 
686 	if (mddev->bitmap_id == ID_BITMAP_NONE ||
687 	    (old && old->head.id == mddev->bitmap_id))
688 		return true;
689 
690 	xa_lock(&md_submodule);
691 	head = xa_load(&md_submodule, mddev->bitmap_id);
692 
693 	if (!head) {
694 		pr_warn("md: can't find bitmap id %d\n", mddev->bitmap_id);
695 		goto err;
696 	}
697 
698 	if (head->type != MD_BITMAP) {
699 		pr_warn("md: invalid bitmap id %d\n", mddev->bitmap_id);
700 		goto err;
701 	}
702 
703 	mddev->bitmap_ops = (void *)head;
704 	xa_unlock(&md_submodule);
705 
706 	if (!mddev_is_dm(mddev) && mddev->bitmap_ops->group) {
707 		if (sysfs_create_group(&mddev->kobj, mddev->bitmap_ops->group))
708 			pr_warn("md: cannot register extra bitmap attributes for %s\n",
709 				mdname(mddev));
710 		else
711 			/*
712 			 * Inform user with KOBJ_CHANGE about new bitmap
713 			 * attributes.
714 			 */
715 			kobject_uevent(&mddev->kobj, KOBJ_CHANGE);
716 	}
717 	return true;
718 
719 err:
720 	xa_unlock(&md_submodule);
721 	return false;
722 }
723 
724 static void mddev_clear_bitmap_ops(struct mddev *mddev)
725 {
726 	if (!mddev_is_dm(mddev) && mddev->bitmap_ops &&
727 	    mddev->bitmap_ops->group)
728 		sysfs_remove_group(&mddev->kobj, mddev->bitmap_ops->group);
729 
730 	mddev->bitmap_ops = NULL;
731 }
732 
733 int mddev_init(struct mddev *mddev)
734 {
735 	int err = 0;
736 
737 	if (!IS_ENABLED(CONFIG_MD_BITMAP))
738 		mddev->bitmap_id = ID_BITMAP_NONE;
739 	else
740 		mddev->bitmap_id = ID_BITMAP;
741 
742 	if (percpu_ref_init(&mddev->active_io, active_io_release,
743 			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
744 		return -ENOMEM;
745 
746 	if (percpu_ref_init(&mddev->writes_pending, no_op,
747 			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
748 		err = -ENOMEM;
749 		goto exit_active_io;
750 	}
751 
752 	err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
753 	if (err)
754 		goto exit_writes_pending;
755 
756 	err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
757 	if (err)
758 		goto exit_bio_set;
759 
760 	err = bioset_init(&mddev->io_clone_set, BIO_POOL_SIZE,
761 			  offsetof(struct md_io_clone, bio_clone), 0);
762 	if (err)
763 		goto exit_sync_set;
764 
765 	/* We want to start with the refcount at zero */
766 	percpu_ref_put(&mddev->writes_pending);
767 
768 	mutex_init(&mddev->open_mutex);
769 	mutex_init(&mddev->reconfig_mutex);
770 	mutex_init(&mddev->suspend_mutex);
771 	mutex_init(&mddev->bitmap_info.mutex);
772 	INIT_LIST_HEAD(&mddev->disks);
773 	INIT_LIST_HEAD(&mddev->all_mddevs);
774 	INIT_LIST_HEAD(&mddev->deleting);
775 	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
776 	atomic_set(&mddev->active, 1);
777 	atomic_set(&mddev->openers, 0);
778 	atomic_set(&mddev->sync_seq, 0);
779 	spin_lock_init(&mddev->lock);
780 	init_waitqueue_head(&mddev->sb_wait);
781 	init_waitqueue_head(&mddev->recovery_wait);
782 	mddev->reshape_position = MaxSector;
783 	mddev->reshape_backwards = 0;
784 	mddev->last_sync_action = ACTION_IDLE;
785 	mddev->resync_min = 0;
786 	mddev->resync_max = MaxSector;
787 	mddev->level = LEVEL_NONE;
788 
789 	INIT_WORK(&mddev->sync_work, md_start_sync);
790 	INIT_WORK(&mddev->del_work, mddev_delayed_delete);
791 
792 	return 0;
793 
794 exit_sync_set:
795 	bioset_exit(&mddev->sync_set);
796 exit_bio_set:
797 	bioset_exit(&mddev->bio_set);
798 exit_writes_pending:
799 	percpu_ref_exit(&mddev->writes_pending);
800 exit_active_io:
801 	percpu_ref_exit(&mddev->active_io);
802 	return err;
803 }
804 EXPORT_SYMBOL_GPL(mddev_init);
805 
806 void mddev_destroy(struct mddev *mddev)
807 {
808 	bioset_exit(&mddev->bio_set);
809 	bioset_exit(&mddev->sync_set);
810 	bioset_exit(&mddev->io_clone_set);
811 	percpu_ref_exit(&mddev->active_io);
812 	percpu_ref_exit(&mddev->writes_pending);
813 }
814 EXPORT_SYMBOL_GPL(mddev_destroy);
815 
816 static struct mddev *mddev_find_locked(dev_t unit)
817 {
818 	struct mddev *mddev;
819 
820 	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
821 		if (mddev->unit == unit)
822 			return mddev;
823 
824 	return NULL;
825 }
826 
827 /* find an unused unit number */
828 static dev_t mddev_alloc_unit(void)
829 {
830 	static int next_minor = 512;
831 	int start = next_minor;
832 	bool is_free = false;
833 	dev_t dev = 0;
834 
835 	while (!is_free) {
836 		dev = MKDEV(MD_MAJOR, next_minor);
837 		next_minor++;
838 		if (next_minor > MINORMASK)
839 			next_minor = 0;
840 		if (next_minor == start)
841 			return 0;		/* Oh dear, all in use. */
842 		is_free = !mddev_find_locked(dev);
843 	}
844 
845 	return dev;
846 }
847 
848 static struct mddev *mddev_alloc(dev_t unit)
849 {
850 	struct mddev *new;
851 	int error;
852 
853 	if (unit && MAJOR(unit) != MD_MAJOR)
854 		unit &= ~((1 << MdpMinorShift) - 1);
855 
856 	new = kzalloc_obj(*new);
857 	if (!new)
858 		return ERR_PTR(-ENOMEM);
859 
860 	error = mddev_init(new);
861 	if (error)
862 		goto out_free_new;
863 
864 	spin_lock(&all_mddevs_lock);
865 	if (unit) {
866 		error = -EEXIST;
867 		if (mddev_find_locked(unit))
868 			goto out_destroy_new;
869 		new->unit = unit;
870 		if (MAJOR(unit) == MD_MAJOR)
871 			new->md_minor = MINOR(unit);
872 		else
873 			new->md_minor = MINOR(unit) >> MdpMinorShift;
874 		new->hold_active = UNTIL_IOCTL;
875 	} else {
876 		error = -ENODEV;
877 		new->unit = mddev_alloc_unit();
878 		if (!new->unit)
879 			goto out_destroy_new;
880 		new->md_minor = MINOR(new->unit);
881 		new->hold_active = UNTIL_STOP;
882 	}
883 
884 	list_add(&new->all_mddevs, &all_mddevs);
885 	spin_unlock(&all_mddevs_lock);
886 	return new;
887 
888 out_destroy_new:
889 	spin_unlock(&all_mddevs_lock);
890 	mddev_destroy(new);
891 out_free_new:
892 	kfree(new);
893 	return ERR_PTR(error);
894 }
895 
896 static void mddev_free(struct mddev *mddev)
897 {
898 	spin_lock(&all_mddevs_lock);
899 	list_del(&mddev->all_mddevs);
900 	spin_unlock(&all_mddevs_lock);
901 
902 	mddev_destroy(mddev);
903 	kfree(mddev);
904 }
905 
906 static const struct attribute_group md_redundancy_group;
907 
908 void mddev_unlock(struct mddev *mddev)
909 {
910 	struct md_rdev *rdev;
911 	struct md_rdev *tmp;
912 	LIST_HEAD(delete);
913 
914 	if (!list_empty(&mddev->deleting))
915 		list_splice_init(&mddev->deleting, &delete);
916 
917 	if (mddev->to_remove) {
918 		/* These cannot be removed under reconfig_mutex as
919 		 * an access to the files will try to take reconfig_mutex
920 		 * while holding the file unremovable, which leads to
921 		 * a deadlock.
922 		 * So set sysfs_active while the removal is happening,
923 		 * and anything else which might set ->to_remove or might
924 		 * otherwise change the sysfs namespace will fail with
925 		 * -EBUSY if sysfs_active is still set.
926 		 * We set sysfs_active under reconfig_mutex and elsewhere
927 		 * test it under the same mutex to ensure its correct value
928 		 * is seen.
929 		 */
930 		const struct attribute_group *to_remove = mddev->to_remove;
931 		mddev->to_remove = NULL;
932 		mddev->sysfs_active = 1;
933 		mutex_unlock(&mddev->reconfig_mutex);
934 
935 		if (mddev->kobj.sd) {
936 			if (to_remove != &md_redundancy_group)
937 				sysfs_remove_group(&mddev->kobj, to_remove);
938 			if (mddev->pers == NULL ||
939 			    mddev->pers->sync_request == NULL) {
940 				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
941 				if (mddev->sysfs_action)
942 					sysfs_put(mddev->sysfs_action);
943 				if (mddev->sysfs_completed)
944 					sysfs_put(mddev->sysfs_completed);
945 				if (mddev->sysfs_degraded)
946 					sysfs_put(mddev->sysfs_degraded);
947 				mddev->sysfs_action = NULL;
948 				mddev->sysfs_completed = NULL;
949 				mddev->sysfs_degraded = NULL;
950 			}
951 		}
952 		mddev->sysfs_active = 0;
953 	} else
954 		mutex_unlock(&mddev->reconfig_mutex);
955 
956 	md_wakeup_thread(mddev->thread);
957 	wake_up(&mddev->sb_wait);
958 
959 	list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
960 		list_del_init(&rdev->same_set);
961 		kobject_del(&rdev->kobj);
962 		export_rdev(rdev);
963 	}
964 
965 	if (!legacy_async_del_gendisk) {
966 		/*
967 		 * Call del_gendisk after releasing reconfig_mutex to avoid
968 		 * deadlock (e.g. calling del_gendisk under the lock while an
969 		 * access to sysfs files waits for the lock).
970 		 * MD_DELETED is only used for md raid and is set in do_md_stop.
971 		 * dm raid only uses md_stop to stop, so it doesn't need to
972 		 * check MD_DELETED when taking the reconfig lock.
973 		 */
974 		if (test_bit(MD_DELETED, &mddev->flags) &&
975 		    !test_and_set_bit(MD_DO_DELETE, &mddev->flags)) {
976 			kobject_del(&mddev->kobj);
977 			del_gendisk(mddev->gendisk);
978 		}
979 	}
980 }
981 EXPORT_SYMBOL_GPL(mddev_unlock);
982 
983 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
984 {
985 	struct md_rdev *rdev;
986 
987 	rdev_for_each_rcu(rdev, mddev)
988 		if (rdev->desc_nr == nr)
989 			return rdev;
990 
991 	return NULL;
992 }
993 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
994 
995 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
996 {
997 	struct md_rdev *rdev;
998 
999 	rdev_for_each(rdev, mddev)
1000 		if (rdev->bdev->bd_dev == dev)
1001 			return rdev;
1002 
1003 	return NULL;
1004 }
1005 
1006 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
1007 {
1008 	struct md_rdev *rdev;
1009 
1010 	rdev_for_each_rcu(rdev, mddev)
1011 		if (rdev->bdev->bd_dev == dev)
1012 			return rdev;
1013 
1014 	return NULL;
1015 }
1016 EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
1017 
1018 static struct md_personality *get_pers(int level, char *clevel)
1019 {
1020 	struct md_personality *ret = NULL;
1021 	struct md_submodule_head *head;
1022 	unsigned long i;
1023 
1024 	xa_lock(&md_submodule);
1025 	xa_for_each(&md_submodule, i, head) {
1026 		if (head->type != MD_PERSONALITY)
1027 			continue;
1028 		if ((level != LEVEL_NONE && head->id == level) ||
1029 		    !strcmp(head->name, clevel)) {
1030 			if (try_module_get(head->owner))
1031 				ret = (void *)head;
1032 			break;
1033 		}
1034 	}
1035 	xa_unlock(&md_submodule);
1036 
1037 	if (!ret) {
1038 		if (level != LEVEL_NONE)
1039 			pr_warn("md: personality for level %d is not loaded!\n",
1040 				level);
1041 		else
1042 			pr_warn("md: personality for level %s is not loaded!\n",
1043 				clevel);
1044 	}
1045 
1046 	return ret;
1047 }
1048 
1049 static void put_pers(struct md_personality *pers)
1050 {
1051 	module_put(pers->head.owner);
1052 }
1053 
1054 /* return the offset of the superblock in 512-byte sectors */
1055 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
1056 {
1057 	return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
1058 }
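/*
 * Worked example for calc_dev_sboffset(), assuming the v0.90 layout where
 * MD_NEW_SIZE_SECTORS() rounds down to a 64K boundary and then reserves
 * the final 64K (128 sectors): a 1000000-sector device yields
 * (1000000 & ~127) - 128 = 999936 - 128 = 999808.
 */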
1059 
1060 static int alloc_disk_sb(struct md_rdev *rdev)
1061 {
1062 	rdev->sb_page = alloc_page(GFP_KERNEL);
1063 	if (!rdev->sb_page)
1064 		return -ENOMEM;
1065 	return 0;
1066 }
1067 
1068 void md_rdev_clear(struct md_rdev *rdev)
1069 {
1070 	if (rdev->sb_page) {
1071 		put_page(rdev->sb_page);
1072 		rdev->sb_loaded = 0;
1073 		rdev->sb_page = NULL;
1074 		rdev->sb_start = 0;
1075 		rdev->sectors = 0;
1076 	}
1077 	if (rdev->bb_page) {
1078 		put_page(rdev->bb_page);
1079 		rdev->bb_page = NULL;
1080 	}
1081 	badblocks_exit(&rdev->badblocks);
1082 }
1083 EXPORT_SYMBOL_GPL(md_rdev_clear);
1084 
1085 static void super_written(struct bio *bio)
1086 {
1087 	struct md_rdev *rdev = bio->bi_private;
1088 	struct mddev *mddev = rdev->mddev;
1089 
1090 	if (bio->bi_status) {
1091 		pr_err("md: %s gets error=%d\n", __func__,
1092 		       blk_status_to_errno(bio->bi_status));
1093 		md_error(mddev, rdev);
1094 		if (!test_bit(Faulty, &rdev->flags)
1095 		    && (bio->bi_opf & MD_FAILFAST)) {
1096 			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
1097 			set_bit(LastDev, &rdev->flags);
1098 		}
1099 	} else
1100 		clear_bit(LastDev, &rdev->flags);
1101 
1102 	bio_put(bio);
1103 
1104 	rdev_dec_pending(rdev, mddev);
1105 
1106 	if (atomic_dec_and_test(&mddev->pending_writes))
1107 		wake_up(&mddev->sb_wait);
1108 }
1109 
1110 /**
1111  * md_write_metadata - write metadata to underlying disk, including
1112  * array superblock, badblocks, bitmap superblock and bitmap bits.
1113  * @mddev:	the array to write
1114  * @rdev:	the underlying disk to write
1115  * @sector:	the offset to @rdev
1116  * @size:	the length of the metadata
1117  * @page:	the metadata
1118  * @offset:	the offset to @page
1119  *
1120  * Write @size bytes of @page, starting at @offset, to @sector of @rdev.
1121  * Increment mddev->pending_writes before returning, and decrement it on
1122  * completion, waking up sb_wait. The caller must call md_super_wait() after
1123  * issuing IO to all rdevs. If an error occurs, md_error() will be called, and
1124  * the @rdev will be kicked out of @mddev.
1125  */
1126 void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev,
1127 		       sector_t sector, int size, struct page *page,
1128 		       unsigned int offset)
1129 {
1130 	struct bio *bio;
1131 
1132 	if (!page)
1133 		return;
1134 
1135 	if (test_bit(Faulty, &rdev->flags))
1136 		return;
1137 
1138 	bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
1139 			      1,
1140 			      REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_META
1141 				  | REQ_PREFLUSH | REQ_FUA,
1142 			      GFP_NOIO, &mddev->sync_set);
1143 
1144 	atomic_inc(&rdev->nr_pending);
1145 
1146 	bio->bi_iter.bi_sector = sector;
1147 	__bio_add_page(bio, page, size, offset);
1148 	bio->bi_private = rdev;
1149 	bio->bi_end_io = super_written;
1150 
1151 	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
1152 	    test_bit(FailFast, &rdev->flags) &&
1153 	    !test_bit(LastDev, &rdev->flags))
1154 		bio->bi_opf |= MD_FAILFAST;
1155 
1156 	atomic_inc(&mddev->pending_writes);
1157 	submit_bio(bio);
1158 }
1159 
1160 int md_super_wait(struct mddev *mddev)
1161 {
1162 	/* wait for all superblock writes that were scheduled to complete */
1163 	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
1164 	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
1165 		return -EAGAIN;
1166 	return 0;
1167 }
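/*
 * Hedged usage sketch for md_write_metadata()/md_super_wait(): write the
 * prepared superblock page to every rdev, then wait. A negative return
 * from md_super_wait() (-EAGAIN) requests a rewrite after a failfast
 * failure, mirroring the loop in super_90_rdev_size_change() below.
 */
static void __maybe_unused example_write_all_sbs(struct mddev *mddev)
{
	struct md_rdev *rdev;

	do {
		rdev_for_each(rdev, mddev)
			md_write_metadata(mddev, rdev, rdev->sb_start,
					  rdev->sb_size, rdev->sb_page, 0);
	} while (md_super_wait(mddev) < 0);
}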
1168 
1169 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
1170 		 struct page *page, blk_opf_t opf, bool metadata_op)
1171 {
1172 	struct bio bio;
1173 	struct bio_vec bvec;
1174 
1175 	if (metadata_op && rdev->meta_bdev)
1176 		bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
1177 	else
1178 		bio_init(&bio, rdev->bdev, &bvec, 1, opf);
1179 
1180 	if (metadata_op)
1181 		bio.bi_iter.bi_sector = sector + rdev->sb_start;
1182 	else if (rdev->mddev->reshape_position != MaxSector &&
1183 		 (rdev->mddev->reshape_backwards ==
1184 		  (sector >= rdev->mddev->reshape_position)))
1185 		bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
1186 	else
1187 		bio.bi_iter.bi_sector = sector + rdev->data_offset;
1188 	__bio_add_page(&bio, page, size, 0);
1189 
1190 	submit_bio_wait(&bio);
1191 
1192 	return !bio.bi_status;
1193 }
1194 EXPORT_SYMBOL_GPL(sync_page_io);
1195 
1196 static int read_disk_sb(struct md_rdev *rdev, int size)
1197 {
1198 	if (rdev->sb_loaded)
1199 		return 0;
1200 
1201 	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true))
1202 		goto fail;
1203 	rdev->sb_loaded = 1;
1204 	return 0;
1205 
1206 fail:
1207 	pr_err("md: disabled device %pg, could not read superblock.\n",
1208 	       rdev->bdev);
1209 	return -EINVAL;
1210 }
1211 
1212 static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1213 {
1214 	return	sb1->set_uuid0 == sb2->set_uuid0 &&
1215 		sb1->set_uuid1 == sb2->set_uuid1 &&
1216 		sb1->set_uuid2 == sb2->set_uuid2 &&
1217 		sb1->set_uuid3 == sb2->set_uuid3;
1218 }
1219 
1220 static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1221 {
1222 	int ret;
1223 	mdp_super_t *tmp1, *tmp2;
1224 
1225 	tmp1 = kmalloc_obj(*tmp1);
1226 	tmp2 = kmalloc_obj(*tmp2);
1227 
1228 	if (!tmp1 || !tmp2) {
1229 		ret = 0;
1230 		goto abort;
1231 	}
1232 
1233 	*tmp1 = *sb1;
1234 	*tmp2 = *sb2;
1235 
1236 	/*
1237 	 * nr_disks is not constant
1238 	 */
1239 	tmp1->nr_disks = 0;
1240 	tmp2->nr_disks = 0;
1241 
1242 	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
1243 abort:
1244 	kfree(tmp1);
1245 	kfree(tmp2);
1246 	return ret;
1247 }
1248 
1249 static u32 md_csum_fold(u32 csum)
1250 {
1251 	csum = (csum & 0xffff) + (csum >> 16);
1252 	return (csum & 0xffff) + (csum >> 16);
1253 }
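/*
 * Worked example for md_csum_fold(): 0x12345678 folds to
 * 0x5678 + 0x1234 = 0x68ac. The second pass absorbs a possible carry:
 * 0xffff0001 first folds to 0x0001 + 0xffff = 0x10000, then to
 * 0x0000 + 0x0001 = 0x0001.
 */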
1254 
1255 static unsigned int calc_sb_csum(mdp_super_t *sb)
1256 {
1257 	u64 newcsum = 0;
1258 	u32 *sb32 = (u32*)sb;
1259 	int i;
1260 	unsigned int disk_csum, csum;
1261 
1262 	disk_csum = sb->sb_csum;
1263 	sb->sb_csum = 0;
1264 
1265 	for (i = 0; i < MD_SB_BYTES/4 ; i++)
1266 		newcsum += sb32[i];
1267 	csum = (newcsum & 0xffffffff) + (newcsum>>32);
1268 
1269 #ifdef CONFIG_ALPHA
1270 	/* This used to use csum_partial, which was wrong for several
1271 	 * reasons including that different results are returned on
1272 	 * different architectures.  It isn't critical that we get exactly
1273 	 * the same return value as before (we always csum_fold before
1274 	 * testing, and that removes any differences).  However as we
1275 	 * know that csum_partial always returned a 16bit value on
1276 	 * alphas, do a fold to maximise conformity to previous behaviour.
1277 	 */
1278 	sb->sb_csum = md_csum_fold(disk_csum);
1279 #else
1280 	sb->sb_csum = disk_csum;
1281 #endif
1282 	return csum;
1283 }
1284 
1285 /*
1286  * Handle superblock details.
1287  * We want to be able to handle multiple superblock formats
1288  * so we have a common interface to them all, and an array of
1289  * different handlers.
1290  * We rely on user-space to write the initial superblock, and support
1291  * reading and updating of superblocks.
1292  * Interface methods are:
1293  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
1294  *      loads and validates a superblock on dev.
1295  *      if refdev != NULL, compare superblocks on both devices
1296  *    Return:
1297  *      0 - dev has a superblock that is compatible with refdev
1298  *      1 - dev has a superblock that is compatible and newer than refdev
1299  *          so dev should be used as the refdev in future
1300  *     -EINVAL superblock incompatible or invalid
1301  *     -othererror e.g. -EIO
1302  *
1303  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
1304  *      Verify that dev is acceptable into mddev.
1305  *       The first time, mddev->raid_disks will be 0, and data from
1306  *       dev should be merged in.  Subsequent calls check that dev
1307  *       is new enough.  Return 0 or -EINVAL
1308  *
1309  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
1310  *     Update the superblock for rdev with data in mddev
1311  *     This does not write to disc.
1312  *
1313  */
1314 
1315 struct super_type  {
1316 	char		    *name;
1317 	struct module	    *owner;
1318 	int		    (*load_super)(struct md_rdev *rdev,
1319 					  struct md_rdev *refdev,
1320 					  int minor_version);
1321 	int		    (*validate_super)(struct mddev *mddev,
1322 					      struct md_rdev *freshest,
1323 					      struct md_rdev *rdev);
1324 	void		    (*sync_super)(struct mddev *mddev,
1325 					  struct md_rdev *rdev);
1326 	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
1327 						sector_t num_sectors);
1328 	int		    (*allow_new_offset)(struct md_rdev *rdev,
1329 						unsigned long long new_offset);
1330 };
1331 
1332 /*
1333  * Check that the given mddev has no bitmap.
1334  *
1335  * This function is called from the run method of all personalities that do not
1336  * support bitmaps. It prints an error message and returns non-zero if mddev
1337  * has a bitmap. Otherwise, it returns 0.
1338  *
1339  */
1340 int md_check_no_bitmap(struct mddev *mddev)
1341 {
1342 	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
1343 		return 0;
1344 	pr_warn("%s: bitmaps are not supported for %s\n",
1345 		mdname(mddev), mddev->pers->head.name);
1346 	return 1;
1347 }
1348 EXPORT_SYMBOL(md_check_no_bitmap);
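/*
 * Hedged usage sketch: a personality without bitmap support calls this
 * from its run method and refuses to assemble (raid0 uses this shape).
 */
static int __maybe_unused example_run(struct mddev *mddev)
{
	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	/* ... personality-specific setup ... */
	return 0;
}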
1349 
1350 /*
1351  * load_super for 0.90.0
1352  */
1353 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1354 {
1355 	mdp_super_t *sb;
1356 	int ret;
1357 	bool spare_disk = true;
1358 
1359 	/*
1360 	 * Calculate the position of the superblock (in 512-byte sectors);
1361 	 * it's at the end of the disk.
1362 	 *
1363 	 * It also happens to be a multiple of 4KB.
1364 	 */
1365 	rdev->sb_start = calc_dev_sboffset(rdev);
1366 
1367 	ret = read_disk_sb(rdev, MD_SB_BYTES);
1368 	if (ret)
1369 		return ret;
1370 
1371 	ret = -EINVAL;
1372 
1373 	sb = page_address(rdev->sb_page);
1374 
1375 	if (sb->md_magic != MD_SB_MAGIC) {
1376 		pr_warn("md: invalid raid superblock magic on %pg\n",
1377 			rdev->bdev);
1378 		goto abort;
1379 	}
1380 
1381 	if (sb->major_version != 0 ||
1382 	    sb->minor_version < 90 ||
1383 	    sb->minor_version > 91) {
1384 		pr_warn("Bad version number %d.%d on %pg\n",
1385 			sb->major_version, sb->minor_version, rdev->bdev);
1386 		goto abort;
1387 	}
1388 
1389 	if (sb->raid_disks <= 0)
1390 		goto abort;
1391 
1392 	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
1393 		pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev);
1394 		goto abort;
1395 	}
1396 
1397 	rdev->preferred_minor = sb->md_minor;
1398 	rdev->data_offset = 0;
1399 	rdev->new_data_offset = 0;
1400 	rdev->sb_size = MD_SB_BYTES;
1401 	rdev->badblocks.shift = -1;
1402 
1403 	rdev->desc_nr = sb->this_disk.number;
1404 
1405 	/* not spare disk */
1406 	if (rdev->desc_nr >= 0 && rdev->desc_nr < MD_SB_DISKS &&
1407 	    sb->disks[rdev->desc_nr].state & ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1408 		spare_disk = false;
1409 
1410 	if (!refdev) {
1411 		if (!spare_disk)
1412 			ret = 1;
1413 		else
1414 			ret = 0;
1415 	} else {
1416 		__u64 ev1, ev2;
1417 		mdp_super_t *refsb = page_address(refdev->sb_page);
1418 		if (!md_uuid_equal(refsb, sb)) {
1419 			pr_warn("md: %pg has different UUID to %pg\n",
1420 				rdev->bdev, refdev->bdev);
1421 			goto abort;
1422 		}
1423 		if (!md_sb_equal(refsb, sb)) {
1424 			pr_warn("md: %pg has same UUID but different superblock to %pg\n",
1425 				rdev->bdev, refdev->bdev);
1426 			goto abort;
1427 		}
1428 		ev1 = md_event(sb);
1429 		ev2 = md_event(refsb);
1430 
1431 		if (!spare_disk && ev1 > ev2)
1432 			ret = 1;
1433 		else
1434 			ret = 0;
1435 	}
1436 	rdev->sectors = rdev->sb_start;
1437 	/* Limit to 4TB as metadata cannot record more than that.
1438 	 * (not needed for Linear and RAID0 as metadata doesn't
1439 	 * record this size)
1440 	 */
1441 	if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1442 		rdev->sectors = (sector_t)(2ULL << 32) - 2;
1443 
1444 	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1445 		/* "this cannot possibly happen" ... */
1446 		ret = -EINVAL;
1447 
1448  abort:
1449 	return ret;
1450 }
1451 
1452 static u64 md_bitmap_events_cleared(struct mddev *mddev)
1453 {
1454 	struct md_bitmap_stats stats;
1455 	int err;
1456 
1457 	if (!md_bitmap_enabled(mddev, false))
1458 		return 0;
1459 
1460 	err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
1461 	if (err)
1462 		return 0;
1463 
1464 	return stats.events_cleared;
1465 }
1466 
1467 /*
1468  * validate_super for 0.90.0
1469  * note: we are not using "freshest" for 0.9 superblock
1470  */
1471 static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
1472 {
1473 	mdp_disk_t *desc;
1474 	mdp_super_t *sb = page_address(rdev->sb_page);
1475 	__u64 ev1 = md_event(sb);
1476 
1477 	rdev->raid_disk = -1;
1478 	clear_bit(Faulty, &rdev->flags);
1479 	clear_bit(In_sync, &rdev->flags);
1480 	clear_bit(Bitmap_sync, &rdev->flags);
1481 	clear_bit(WriteMostly, &rdev->flags);
1482 
1483 	if (mddev->raid_disks == 0) {
1484 		mddev->major_version = 0;
1485 		mddev->minor_version = sb->minor_version;
1486 		mddev->patch_version = sb->patch_version;
1487 		mddev->external = 0;
1488 		mddev->chunk_sectors = sb->chunk_size >> 9;
1489 		mddev->ctime = sb->ctime;
1490 		mddev->utime = sb->utime;
1491 		mddev->level = sb->level;
1492 		mddev->clevel[0] = 0;
1493 		mddev->layout = sb->layout;
1494 		mddev->raid_disks = sb->raid_disks;
1495 		mddev->dev_sectors = ((sector_t)sb->size) * 2;
1496 		mddev->events = ev1;
1497 		mddev->bitmap_info.offset = 0;
1498 		mddev->bitmap_info.space = 0;
1499 		/* bitmap can use 60K after the 4K superblock */
1500 		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1501 		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1502 		mddev->reshape_backwards = 0;
1503 
1504 		if (mddev->minor_version >= 91) {
1505 			mddev->reshape_position = sb->reshape_position;
1506 			mddev->delta_disks = sb->delta_disks;
1507 			mddev->new_level = sb->new_level;
1508 			mddev->new_layout = sb->new_layout;
1509 			mddev->new_chunk_sectors = sb->new_chunk >> 9;
1510 			if (mddev->delta_disks < 0)
1511 				mddev->reshape_backwards = 1;
1512 		} else {
1513 			mddev->reshape_position = MaxSector;
1514 			mddev->delta_disks = 0;
1515 			mddev->new_level = mddev->level;
1516 			mddev->new_layout = mddev->layout;
1517 			mddev->new_chunk_sectors = mddev->chunk_sectors;
1518 		}
1519 		if (mddev->level == 0)
1520 			mddev->layout = -1;
1521 
1522 		if (sb->state & (1<<MD_SB_CLEAN))
1523 			mddev->resync_offset = MaxSector;
1524 		else {
1525 			if (sb->events_hi == sb->cp_events_hi &&
1526 				sb->events_lo == sb->cp_events_lo) {
1527 				mddev->resync_offset = sb->recovery_cp;
1528 			} else
1529 				mddev->resync_offset = 0;
1530 		}
1531 
1532 		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1533 		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1534 		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1535 		memcpy(mddev->uuid+12, &sb->set_uuid3, 4);
1536 
1537 		mddev->max_disks = MD_SB_DISKS;
1538 
1539 		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1540 		    mddev->bitmap_info.file == NULL) {
1541 			mddev->bitmap_info.offset =
1542 				mddev->bitmap_info.default_offset;
1543 			mddev->bitmap_info.space =
1544 				mddev->bitmap_info.default_space;
1545 		}
1546 
1547 	} else if (mddev->pers == NULL) {
1548 		/* Insist on good event counter while assembling, except
1549 		 * for spares (which don't need an event count) */
1550 		++ev1;
1551 		if (sb->disks[rdev->desc_nr].state & (
1552 			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1553 			if (ev1 < mddev->events)
1554 				return -EINVAL;
1555 	} else if (mddev->bitmap) {
1556 		/* if adding to array with a bitmap, then we can accept an
1557 		 * older device ... but not too old.
1558 		 */
1559 		if (ev1 < md_bitmap_events_cleared(mddev))
1560 			return 0;
1561 		if (ev1 < mddev->events)
1562 			set_bit(Bitmap_sync, &rdev->flags);
1563 	} else {
1564 		if (ev1 < mddev->events)
1565 			/* just a hot-add of a new device, leave raid_disk at -1 */
1566 			return 0;
1567 	}
1568 
1569 	desc = sb->disks + rdev->desc_nr;
1570 
1571 	if (desc->state & (1<<MD_DISK_FAULTY))
1572 		set_bit(Faulty, &rdev->flags);
1573 	else if (desc->state & (1<<MD_DISK_SYNC)) {
1574 		set_bit(In_sync, &rdev->flags);
1575 		rdev->raid_disk = desc->raid_disk;
1576 		rdev->saved_raid_disk = desc->raid_disk;
1577 	} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1578 		/* active but not in sync implies recovery up to
1579 		 * reshape position.  We don't know exactly where
1580 		 * that is, so set to zero for now
1581 		 */
1582 		if (mddev->minor_version >= 91) {
1583 			rdev->recovery_offset = 0;
1584 			rdev->raid_disk = desc->raid_disk;
1585 		}
1586 	}
1587 	if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1588 		set_bit(WriteMostly, &rdev->flags);
1589 	if (desc->state & (1<<MD_DISK_FAILFAST))
1590 		set_bit(FailFast, &rdev->flags);
1591 	return 0;
1592 }
1593 
1594 /*
1595  * sync_super for 0.90.0
1596  */
1597 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1598 {
1599 	mdp_super_t *sb;
1600 	struct md_rdev *rdev2;
1601 	int next_spare = mddev->raid_disks;
1602 
1603 	/* make rdev->sb match mddev data..
1604 	 *
1605 	 * 1/ zero out disks
1606 	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1607 	 * 3/ any empty disks < next_spare become removed
1608 	 *
1609 	 * disks[0] gets initialised to REMOVED because
1610 	 * we cannot be sure from other fields if it has
1611 	 * been initialised or not.
1612 	 */
1613 	int i;
1614 	int active = 0, working = 0, failed = 0, spare = 0, nr_disks = 0;
1615 
1616 	rdev->sb_size = MD_SB_BYTES;
1617 
1618 	sb = page_address(rdev->sb_page);
1619 
1620 	memset(sb, 0, sizeof(*sb));
1621 
1622 	sb->md_magic = MD_SB_MAGIC;
1623 	sb->major_version = mddev->major_version;
1624 	sb->patch_version = mddev->patch_version;
1625 	sb->gvalid_words  = 0; /* ignored */
1626 	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1627 	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1628 	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1629 	memcpy(&sb->set_uuid3, mddev->uuid+12, 4);
1630 
1631 	sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1632 	sb->level = mddev->level;
1633 	sb->size = mddev->dev_sectors / 2;
1634 	sb->raid_disks = mddev->raid_disks;
1635 	sb->md_minor = mddev->md_minor;
1636 	sb->not_persistent = 0;
1637 	sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1638 	sb->state = 0;
1639 	sb->events_hi = (mddev->events>>32);
1640 	sb->events_lo = (u32)mddev->events;
1641 
1642 	if (mddev->reshape_position == MaxSector)
1643 		sb->minor_version = 90;
1644 	else {
1645 		sb->minor_version = 91;
1646 		sb->reshape_position = mddev->reshape_position;
1647 		sb->new_level = mddev->new_level;
1648 		sb->delta_disks = mddev->delta_disks;
1649 		sb->new_layout = mddev->new_layout;
1650 		sb->new_chunk = mddev->new_chunk_sectors << 9;
1651 	}
1652 	mddev->minor_version = sb->minor_version;
1653 	if (mddev->in_sync)
1654 	{
1655 		sb->recovery_cp = mddev->resync_offset;
1656 		sb->cp_events_hi = (mddev->events>>32);
1657 		sb->cp_events_lo = (u32)mddev->events;
1658 		if (mddev->resync_offset == MaxSector)
1659 			sb->state = (1<< MD_SB_CLEAN);
1660 	} else
1661 		sb->recovery_cp = 0;
1662 
1663 	sb->layout = mddev->layout;
1664 	sb->chunk_size = mddev->chunk_sectors << 9;
1665 
1666 	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1667 		sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1668 
1669 	sb->disks[0].state = (1<<MD_DISK_REMOVED);
1670 	rdev_for_each(rdev2, mddev) {
1671 		mdp_disk_t *d;
1672 		int desc_nr;
1673 		int is_active = test_bit(In_sync, &rdev2->flags);
1674 
1675 		if (rdev2->raid_disk >= 0 &&
1676 		    sb->minor_version >= 91)
1677 			/* we have nowhere to store the recovery_offset,
1678 			 * but if it is not below the reshape_position,
1679 			 * we can piggy-back on that.
1680 			 */
1681 			is_active = 1;
1682 		if (rdev2->raid_disk < 0 ||
1683 		    test_bit(Faulty, &rdev2->flags))
1684 			is_active = 0;
1685 		if (is_active)
1686 			desc_nr = rdev2->raid_disk;
1687 		else
1688 			desc_nr = next_spare++;
1689 		rdev2->desc_nr = desc_nr;
1690 		d = &sb->disks[rdev2->desc_nr];
1691 		nr_disks++;
1692 		d->number = rdev2->desc_nr;
1693 		d->major = MAJOR(rdev2->bdev->bd_dev);
1694 		d->minor = MINOR(rdev2->bdev->bd_dev);
1695 		if (is_active)
1696 			d->raid_disk = rdev2->raid_disk;
1697 		else
1698 			d->raid_disk = rdev2->desc_nr; /* compatibility */
1699 		if (test_bit(Faulty, &rdev2->flags))
1700 			d->state = (1<<MD_DISK_FAULTY);
1701 		else if (is_active) {
1702 			d->state = (1<<MD_DISK_ACTIVE);
1703 			if (test_bit(In_sync, &rdev2->flags))
1704 				d->state |= (1<<MD_DISK_SYNC);
1705 			active++;
1706 			working++;
1707 		} else {
1708 			d->state = 0;
1709 			spare++;
1710 			working++;
1711 		}
1712 		if (test_bit(WriteMostly, &rdev2->flags))
1713 			d->state |= (1<<MD_DISK_WRITEMOSTLY);
1714 		if (test_bit(FailFast, &rdev2->flags))
1715 			d->state |= (1<<MD_DISK_FAILFAST);
1716 	}
1717 	/* now set the "removed" and "faulty" bits on any missing devices */
1718 	for (i=0 ; i < mddev->raid_disks ; i++) {
1719 		mdp_disk_t *d = &sb->disks[i];
1720 		if (d->state == 0 && d->number == 0) {
1721 			d->number = i;
1722 			d->raid_disk = i;
1723 			d->state = (1<<MD_DISK_REMOVED);
1724 			d->state |= (1<<MD_DISK_FAULTY);
1725 			failed++;
1726 		}
1727 	}
1728 	sb->nr_disks = nr_disks;
1729 	sb->active_disks = active;
1730 	sb->working_disks = working;
1731 	sb->failed_disks = failed;
1732 	sb->spare_disks = spare;
1733 
1734 	sb->this_disk = sb->disks[rdev->desc_nr];
1735 	sb->sb_csum = calc_sb_csum(sb);
1736 }
1737 
1738 /*
1739  * rdev_size_change for 0.90.0
1740  */
1741 static unsigned long long
1742 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1743 {
1744 	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1745 		return 0; /* component must fit device */
1746 	if (rdev->mddev->bitmap_info.offset)
1747 		return 0; /* can't move bitmap */
1748 	rdev->sb_start = calc_dev_sboffset(rdev);
1749 	if (!num_sectors || num_sectors > rdev->sb_start)
1750 		num_sectors = rdev->sb_start;
1751 	/* Limit to 4TB as metadata cannot record more than that.
1752 	 * 4TB == 2^32 KB, or 2*2^32 sectors.
1753 	 */
1754 	if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1755 		num_sectors = (sector_t)(2ULL << 32) - 2;
1756 	do {
1757 		md_write_metadata(rdev->mddev, rdev, rdev->sb_start,
1758 				  rdev->sb_size, rdev->sb_page, 0);
1759 	} while (md_super_wait(rdev->mddev) < 0);
1760 	return num_sectors;
1761 }
1762 
1763 static int
1764 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1765 {
1766 	/* non-zero offset changes not possible with v0.90 */
1767 	return new_offset == 0;
1768 }
1769 
1770 /*
1771  * version 1 superblock
1772  */
1773 
1774 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1775 {
1776 	__le32 disk_csum;
1777 	u32 csum;
1778 	unsigned long long newcsum;
1779 	int size = 256 + le32_to_cpu(sb->max_dev)*2;
1780 	__le32 *isuper = (__le32*)sb;
1781 
1782 	disk_csum = sb->sb_csum;
1783 	sb->sb_csum = 0;
1784 	newcsum = 0;
1785 	for (; size >= 4; size -= 4)
1786 		newcsum += le32_to_cpu(*isuper++);
1787 
1788 	if (size == 2)
1789 		newcsum += le16_to_cpu(*(__le16*) isuper);
1790 
1791 	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1792 	sb->sb_csum = disk_csum;
1793 	return cpu_to_le32(csum);
1794 }
1795 
1796 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1797 {
1798 	struct mdp_superblock_1 *sb;
1799 	int ret;
1800 	sector_t sb_start;
1801 	sector_t sectors;
1802 	int bmask;
1803 	bool spare_disk = true;
1804 
1805 	/*
1806 	 * Calculate the position of the superblock in 512byte sectors.
1807 	 * It is always aligned to a 4K boundary and
1808 	 * depending on minor_version, it can be:
1809 	 * 0: At least 8K, but less than 12K, from end of device
1810 	 * 1: At start of device
1811 	 * 2: 4K from start of device.
1812 	 */
1813 	switch(minor_version) {
1814 	case 0:
1815 		sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
1816 		sb_start &= ~(sector_t)(4*2-1);
1817 		break;
1818 	case 1:
1819 		sb_start = 0;
1820 		break;
1821 	case 2:
1822 		sb_start = 8;
1823 		break;
1824 	default:
1825 		return -EINVAL;
1826 	}
1827 	rdev->sb_start = sb_start;
1828 
1829 	/* superblock is rarely larger than 1K, but it can be larger,
1830 	 * and it is safe to read 4k, so we do that
1831 	 */
1832 	ret = read_disk_sb(rdev, 4096);
1833 	if (ret) return ret;
1834 
1835 	sb = page_address(rdev->sb_page);
1836 
1837 	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1838 	    sb->major_version != cpu_to_le32(1) ||
1839 	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1840 	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1841 	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1842 		return -EINVAL;
1843 
1844 	if (calc_sb_1_csum(sb) != sb->sb_csum) {
1845 		pr_warn("md: invalid superblock checksum on %pg\n",
1846 			rdev->bdev);
1847 		return -EINVAL;
1848 	}
1849 	if (le64_to_cpu(sb->data_size) < 10) {
1850 		pr_warn("md: data_size too small on %pg\n",
1851 			rdev->bdev);
1852 		return -EINVAL;
1853 	}
1854 	if (sb->pad0 ||
1855 	    sb->pad3[0] ||
1856 	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1]))) {
1857 		/* Some padding is non-zero: might be a new feature
1858 		 * this kernel does not understand, so refuse to load.
1859 		 */
1860 		pr_warn("md: unknown non-zero padding on %pg\n", rdev->bdev);
1861 		return -EINVAL;
1862 	}
1863 
1864 	rdev->preferred_minor = 0xffff;
1865 	rdev->data_offset = le64_to_cpu(sb->data_offset);
1866 	rdev->new_data_offset = rdev->data_offset;
1867 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1868 	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1869 		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1870 	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1871 
1872 	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1873 	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1874 	if (rdev->sb_size & bmask)
1875 		rdev->sb_size = (rdev->sb_size | bmask) + 1;
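	/* e.g. an sb_size of 1024 on a 4K-logical-block device rounds up to 4096 */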
1876 
1877 	if (minor_version
1878 	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
1879 		return -EINVAL;
1880 	if (minor_version
1881 	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1882 		return -EINVAL;
1883 
1884 	rdev->desc_nr = le32_to_cpu(sb->dev_number);
1885 
1886 	if (!rdev->bb_page) {
1887 		rdev->bb_page = alloc_page(GFP_KERNEL);
1888 		if (!rdev->bb_page)
1889 			return -ENOMEM;
1890 	}
1891 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1892 	    rdev->badblocks.count == 0) {
1893 		/* need to load the bad block list.
1894 		 * Currently we limit it to one page.
1895 		 */
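		/*
		 * On-disk entry format, as decoded below: bits 63..10 hold
		 * the start sector and bits 9..0 the length, both scaled by
		 * bblog_shift.  E.g. (illustrative) sector 0x1234 with
		 * length 8 is stored as (0x1234 << 10) | 8 == 0x48d008;
		 * an all-ones entry terminates the list.
		 */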
1896 		s32 offset;
1897 		sector_t bb_sector;
1898 		__le64 *bbp;
1899 		int i;
1900 		int sectors = le16_to_cpu(sb->bblog_size);
1901 		if (sectors > (PAGE_SIZE / 512))
1902 			return -EINVAL;
1903 		offset = le32_to_cpu(sb->bblog_offset);
1904 		if (offset == 0)
1905 			return -EINVAL;
1906 		bb_sector = (long long)offset;
1907 		if (!sync_page_io(rdev, bb_sector, sectors << 9,
1908 				  rdev->bb_page, REQ_OP_READ, true))
1909 			return -EIO;
1910 		bbp = (__le64 *)page_address(rdev->bb_page);
1911 		rdev->badblocks.shift = sb->bblog_shift;
1912 		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1913 			u64 bb = le64_to_cpu(*bbp);
1914 			int count = bb & (0x3ff);
1915 			u64 sector = bb >> 10;
1916 			sector <<= sb->bblog_shift;
1917 			count <<= sb->bblog_shift;
1918 			if (bb + 1 == 0)
1919 				break;
1920 			if (!badblocks_set(&rdev->badblocks, sector, count, 1))
1921 				return -EINVAL;
1922 		}
1923 	} else if (sb->bblog_offset != 0)
1924 		rdev->badblocks.shift = 0;
1925 
1926 	if ((le32_to_cpu(sb->feature_map) &
1927 	    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
1928 		rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1929 		rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1930 		rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1931 	}
1932 
1933 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1934 	    sb->level != 0)
1935 		return -EINVAL;
1936 
1937 	/* not spare disk */
1938 	if (rdev->desc_nr >= 0 && rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1939 	    (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1940 	     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1941 		spare_disk = false;
1942 
1943 	if (!refdev) {
1944 		if (!spare_disk)
1945 			ret = 1;
1946 		else
1947 			ret = 0;
1948 	} else {
1949 		__u64 ev1, ev2;
1950 		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1951 
1952 		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1953 		    sb->level != refsb->level ||
1954 		    sb->layout != refsb->layout ||
1955 		    sb->chunksize != refsb->chunksize) {
1956 			pr_warn("md: %pg has strangely different superblock to %pg\n",
1957 				rdev->bdev,
1958 				refdev->bdev);
1959 			return -EINVAL;
1960 		}
1961 		ev1 = le64_to_cpu(sb->events);
1962 		ev2 = le64_to_cpu(refsb->events);
1963 
1964 		if (!spare_disk && ev1 > ev2)
1965 			ret = 1;
1966 		else
1967 			ret = 0;
1968 	}
1969 	if (minor_version)
1970 		sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
1971 	else
1972 		sectors = rdev->sb_start;
1973 	if (sectors < le64_to_cpu(sb->data_size))
1974 		return -EINVAL;
1975 	rdev->sectors = le64_to_cpu(sb->data_size);
1976 	return ret;
1977 }
1978 
1979 static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
1980 {
1981 	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1982 	__u64 ev1 = le64_to_cpu(sb->events);
1983 	int role;
1984 
1985 	rdev->raid_disk = -1;
1986 	clear_bit(Faulty, &rdev->flags);
1987 	clear_bit(In_sync, &rdev->flags);
1988 	clear_bit(Bitmap_sync, &rdev->flags);
1989 	clear_bit(WriteMostly, &rdev->flags);
1990 
1991 	if (mddev->raid_disks == 0) {
1992 		mddev->major_version = 1;
1993 		mddev->patch_version = 0;
1994 		mddev->external = 0;
1995 		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1996 		mddev->ctime = le64_to_cpu(sb->ctime);
1997 		mddev->utime = le64_to_cpu(sb->utime);
1998 		mddev->level = le32_to_cpu(sb->level);
1999 		mddev->clevel[0] = 0;
2000 		mddev->layout = le32_to_cpu(sb->layout);
2001 		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
2002 		mddev->dev_sectors = le64_to_cpu(sb->size);
2003 		mddev->events = ev1;
2004 		mddev->bitmap_info.offset = 0;
2005 		mddev->bitmap_info.space = 0;
2006 		/* Default location for the bitmap is 1K after the superblock,
2007 		 * using 3K, for a total of 4K
2008 		 */
2009 		mddev->bitmap_info.default_offset = 1024 >> 9;
2010 		mddev->bitmap_info.default_space = (4096-1024) >> 9;
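		/* i.e. default_offset == 2 and default_space == 6 sectors */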
2011 		mddev->reshape_backwards = 0;
2012 
2013 		mddev->resync_offset = le64_to_cpu(sb->resync_offset);
2014 		memcpy(mddev->uuid, sb->set_uuid, 16);
2015 
2016 		mddev->max_disks = (4096-256)/2;
2017 
2018 		if (!mddev->logical_block_size)
2019 			mddev->logical_block_size = le32_to_cpu(sb->logical_block_size);
2020 
2021 		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
2022 		    mddev->bitmap_info.file == NULL) {
2023 			mddev->bitmap_info.offset =
2024 				(__s32)le32_to_cpu(sb->bitmap_offset);
2025 			/* Metadata doesn't record how much space is available.
2026 			 * For 1.0, we assume the bitmap can extend up to the
2027 			 * superblock if placed before it, else 4K beyond it.
2028 			 * For others, assume no change is possible.
2029 			 */
2030 			if (mddev->minor_version > 0)
2031 				mddev->bitmap_info.space = 0;
2032 			else if (mddev->bitmap_info.offset > 0)
2033 				mddev->bitmap_info.space =
2034 					8 - mddev->bitmap_info.offset;
2035 			else
2036 				mddev->bitmap_info.space =
2037 					-mddev->bitmap_info.offset;
2038 		}
2039 
2040 		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
2041 			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
2042 			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
2043 			mddev->new_level = le32_to_cpu(sb->new_level);
2044 			mddev->new_layout = le32_to_cpu(sb->new_layout);
2045 			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
2046 			if (mddev->delta_disks < 0 ||
2047 			    (mddev->delta_disks == 0 &&
2048 			     (le32_to_cpu(sb->feature_map)
2049 			      & MD_FEATURE_RESHAPE_BACKWARDS)))
2050 				mddev->reshape_backwards = 1;
2051 		} else {
2052 			mddev->reshape_position = MaxSector;
2053 			mddev->delta_disks = 0;
2054 			mddev->new_level = mddev->level;
2055 			mddev->new_layout = mddev->layout;
2056 			mddev->new_chunk_sectors = mddev->chunk_sectors;
2057 		}
2058 
2059 		if (mddev->level == 0 &&
2060 		    !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
2061 			mddev->layout = -1;
2062 
2063 		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
2064 			set_bit(MD_HAS_JOURNAL, &mddev->flags);
2065 
2066 		if (le32_to_cpu(sb->feature_map) &
2067 		    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
2068 			if (le32_to_cpu(sb->feature_map) &
2069 			    (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
2070 				return -EINVAL;
2071 			if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
2072 			    (le32_to_cpu(sb->feature_map) &
2073 					    MD_FEATURE_MULTIPLE_PPLS))
2074 				return -EINVAL;
2075 			set_bit(MD_HAS_PPL, &mddev->flags);
2076 		}
2077 	} else if (mddev->pers == NULL) {
2078 		/* Insist on a good event counter while assembling, except for
2079 		 * spares (which don't need an event count).
2080 		 * Similar to mdadm, we allow an event counter difference of 1
2081 		 * from the freshest device.
2082 		 */
2083 		if (rdev->desc_nr >= 0 &&
2084 		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
2085 		    (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
2086 		     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
2087 			if (ev1 + 1 < mddev->events)
2088 				return -EINVAL;
2089 	} else if (mddev->bitmap) {
2090 		/* If adding to array with a bitmap, then we can accept an
2091 		 * older device, but not too old.
2092 		 */
2093 		if (ev1 < md_bitmap_events_cleared(mddev))
2094 			return 0;
2095 		if (ev1 < mddev->events)
2096 			set_bit(Bitmap_sync, &rdev->flags);
2097 	} else {
2098 		if (ev1 < mddev->events)
2099 			/* just a hot-add of a new device, leave raid_disk at -1 */
2100 			return 0;
2101 	}
2102 
2103 	if (rdev->desc_nr < 0 ||
2104 	    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
2105 		role = MD_DISK_ROLE_SPARE;
2106 		rdev->desc_nr = -1;
2107 	} else if (mddev->pers == NULL && freshest && ev1 < mddev->events) {
2108 		/*
2109 		 * If we are assembling, and our event counter is smaller than the
2110 		 * highest event counter, we cannot trust our superblock about the role.
2111 		 * It could happen that our rdev was marked as Faulty, and all other
2112 		 * superblocks were updated with +1 event counter.
2113 		 * Then, before the next superblock update, which typically happens when
2114 		 * remove_and_add_spares() removes the device from the array, there was
2115 		 * a crash or reboot.
2116 		 * If we allowed the current rdev without consulting the freshest
2117 		 * superblock, we could cause data corruption.
2118 		 * Note that in this case our event counter is smaller by 1 than the
2119 		 * highest; otherwise, this rdev would not be allowed into the array,
2120 		 * as both kernel and mdadm allow an event counter difference of 1.
2121 		 */
2122 		struct mdp_superblock_1 *freshest_sb = page_address(freshest->sb_page);
2123 		u32 freshest_max_dev = le32_to_cpu(freshest_sb->max_dev);
2124 
2125 		if (rdev->desc_nr >= freshest_max_dev) {
2126 			/* this is unexpected, better not proceed */
2127 			pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n",
2128 				mdname(mddev), rdev->bdev, rdev->desc_nr,
2129 				freshest->bdev, freshest_max_dev);
2130 			return -EUCLEAN;
2131 		}
2132 
2133 		role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]);
2134 		pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n",
2135 			 mdname(mddev), rdev->bdev, role, role, freshest->bdev);
2136 	} else {
2137 		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2138 	}
2139 	switch (role) {
2140 	case MD_DISK_ROLE_SPARE: /* spare */
2141 		break;
2142 	case MD_DISK_ROLE_FAULTY: /* faulty */
2143 		set_bit(Faulty, &rdev->flags);
2144 		break;
2145 	case MD_DISK_ROLE_JOURNAL: /* journal device */
2146 		if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
2147 			/* journal device without journal feature */
2148 			pr_warn("md: journal device provided without journal feature, ignoring the device\n");
2149 			return -EINVAL;
2150 		}
2151 		set_bit(Journal, &rdev->flags);
2152 		rdev->journal_tail = le64_to_cpu(sb->journal_tail);
2153 		rdev->raid_disk = 0;
2154 		break;
2155 	default:
2156 		rdev->saved_raid_disk = role;
2157 		if ((le32_to_cpu(sb->feature_map) &
2158 		     MD_FEATURE_RECOVERY_OFFSET)) {
2159 			rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
2160 			if (!(le32_to_cpu(sb->feature_map) &
2161 			      MD_FEATURE_RECOVERY_BITMAP))
2162 				rdev->saved_raid_disk = -1;
2163 		} else {
2164 			/*
2165 			 * If the array is FROZEN, then the device can't
2166 			 * be in_sync with rest of array.
2167 			 */
2168 			if (!test_bit(MD_RECOVERY_FROZEN,
2169 				      &mddev->recovery))
2170 				set_bit(In_sync, &rdev->flags);
2171 		}
2172 		rdev->raid_disk = role;
2173 		break;
2174 	}
2175 	if (sb->devflags & WriteMostly1)
2176 		set_bit(WriteMostly, &rdev->flags);
2177 	if (sb->devflags & FailFast1)
2178 		set_bit(FailFast, &rdev->flags);
2179 	if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
2180 		set_bit(Replacement, &rdev->flags);
2181 
2182 	return 0;
2183 }
2184 
2185 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
2186 {
2187 	struct mdp_superblock_1 *sb;
2188 	struct md_rdev *rdev2;
2189 	int max_dev, i;
2190 	/* make rdev->sb match mddev and rdev data. */
2191 
2192 	sb = page_address(rdev->sb_page);
2193 
2194 	sb->feature_map = 0;
2195 	sb->pad0 = 0;
2196 	sb->recovery_offset = cpu_to_le64(0);
2197 	memset(sb->pad3, 0, sizeof(sb->pad3));
2198 
2199 	sb->utime = cpu_to_le64((__u64)mddev->utime);
2200 	sb->events = cpu_to_le64(mddev->events);
2201 	if (mddev->in_sync)
2202 		sb->resync_offset = cpu_to_le64(mddev->resync_offset);
2203 	else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
2204 		sb->resync_offset = cpu_to_le64(MaxSector);
2205 	else
2206 		sb->resync_offset = cpu_to_le64(0);
2207 
2208 	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
2209 
2210 	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
2211 	sb->size = cpu_to_le64(mddev->dev_sectors);
2212 	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
2213 	sb->level = cpu_to_le32(mddev->level);
2214 	sb->layout = cpu_to_le32(mddev->layout);
2215 	sb->logical_block_size = cpu_to_le32(mddev->logical_block_size);
2216 	if (test_bit(FailFast, &rdev->flags))
2217 		sb->devflags |= FailFast1;
2218 	else
2219 		sb->devflags &= ~FailFast1;
2220 
2221 	if (test_bit(WriteMostly, &rdev->flags))
2222 		sb->devflags |= WriteMostly1;
2223 	else
2224 		sb->devflags &= ~WriteMostly1;
2225 	sb->data_offset = cpu_to_le64(rdev->data_offset);
2226 	sb->data_size = cpu_to_le64(rdev->sectors);
2227 
2228 	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2229 		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
2230 		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
2231 	}
2232 
2233 	if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
2234 	    !test_bit(In_sync, &rdev->flags)) {
2235 		sb->feature_map |=
2236 			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2237 		sb->recovery_offset =
2238 			cpu_to_le64(rdev->recovery_offset);
2239 		if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2240 			sb->feature_map |=
2241 				cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
2242 	}
2243 	/* Note: recovery_offset and journal_tail share space */
2244 	if (test_bit(Journal, &rdev->flags))
2245 		sb->journal_tail = cpu_to_le64(rdev->journal_tail);
2246 	if (test_bit(Replacement, &rdev->flags))
2247 		sb->feature_map |=
2248 			cpu_to_le32(MD_FEATURE_REPLACEMENT);
2249 
2250 	if (mddev->reshape_position != MaxSector) {
2251 		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2252 		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2253 		sb->new_layout = cpu_to_le32(mddev->new_layout);
2254 		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2255 		sb->new_level = cpu_to_le32(mddev->new_level);
2256 		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
2257 		if (mddev->delta_disks == 0 &&
2258 		    mddev->reshape_backwards)
2259 			sb->feature_map
2260 				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
2261 		if (rdev->new_data_offset != rdev->data_offset) {
2262 			sb->feature_map
2263 				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2264 			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2265 							     - rdev->data_offset));
2266 		}
2267 	}
2268 
2269 	if (mddev_is_clustered(mddev))
2270 		sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2271 
2272 	if (rdev->badblocks.count == 0)
2273 		/* Nothing to do for bad blocks */ ;
2274 	else if (sb->bblog_offset == 0)
2275 		/* Cannot record bad blocks on this device */
2276 		md_error(mddev, rdev);
2277 	else {
2278 		struct badblocks *bb = &rdev->badblocks;
2279 		__le64 *bbp = (__le64 *)page_address(rdev->bb_page);
2280 		u64 *p = bb->page;
2281 		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
2282 		if (bb->changed) {
2283 			unsigned seq;
2284 
2285 retry:
2286 			seq = read_seqbegin(&bb->lock);
2287 
2288 			memset(bbp, 0xff, PAGE_SIZE);
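			/*
			 * The 0xff fill leaves unused slots as all-ones;
			 * super_1_load treats an all-ones entry
			 * (bb + 1 == 0) as end-of-list.
			 */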
2289 
2290 			for (i = 0 ; i < bb->count ; i++) {
2291 				u64 internal_bb = p[i];
2292 				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2293 						| BB_LEN(internal_bb));
2294 				bbp[i] = cpu_to_le64(store_bb);
2295 			}
2296 			bb->changed = 0;
2297 			if (read_seqretry(&bb->lock, seq))
2298 				goto retry;
2299 
2300 			bb->sector = (rdev->sb_start +
2301 				      (int)le32_to_cpu(sb->bblog_offset));
2302 			bb->size = le16_to_cpu(sb->bblog_size);
2303 		}
2304 	}
2305 
2306 	max_dev = 0;
2307 	rdev_for_each(rdev2, mddev)
2308 		if (rdev2->desc_nr+1 > max_dev)
2309 			max_dev = rdev2->desc_nr+1;
2310 
2311 	if (max_dev > le32_to_cpu(sb->max_dev)) {
2312 		int bmask;
2313 		sb->max_dev = cpu_to_le32(max_dev);
2314 		rdev->sb_size = max_dev * 2 + 256;
2315 		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2316 		if (rdev->sb_size & bmask)
2317 			rdev->sb_size = (rdev->sb_size | bmask) + 1;
2318 	} else
2319 		max_dev = le32_to_cpu(sb->max_dev);
2320 
2321 	for (i = 0; i < max_dev; i++)
2322 		sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2323 
2324 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2325 		sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
2326 
2327 	if (test_bit(MD_HAS_PPL, &mddev->flags)) {
2328 		if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2329 			sb->feature_map |=
2330 			    cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2331 		else
2332 			sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
2333 		sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2334 		sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2335 	}
2336 
2337 	rdev_for_each(rdev2, mddev) {
2338 		i = rdev2->desc_nr;
2339 		if (test_bit(Faulty, &rdev2->flags))
2340 			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
2341 		else if (test_bit(In_sync, &rdev2->flags))
2342 			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2343 		else if (test_bit(Journal, &rdev2->flags))
2344 			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
2345 		else if (rdev2->raid_disk >= 0)
2346 			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2347 		else
2348 			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2349 	}
2350 
2351 	sb->sb_csum = calc_sb_1_csum(sb);
2352 }
2353 
2354 static sector_t super_1_choose_bm_space(sector_t dev_size)
2355 {
2356 	sector_t bm_space;
2357 
2358 	/* if the device is bigger than 8Gig, save 64k for bitmap
2359 	 * usage, if bigger than 200Gig, save 128k
2360 	 */
2361 	if (dev_size < 64*2)
2362 		bm_space = 0;
2363 	else if (dev_size - 64*2 >= 200*1024*1024*2)
2364 		bm_space = 128*2;
2365 	else if (dev_size - 4*2 > 8*1024*1024*2)
2366 		bm_space = 64*2;
2367 	else
2368 		bm_space = 4*2;
2369 	return bm_space;
2370 }
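/*
 * Example (hypothetical sizes): a 16GiB component (33554432 sectors) is
 * above the 8GiB threshold but below 200GiB, so 128 sectors (64K) are
 * reserved; a 1TiB component crosses the 200GiB threshold and gets 256
 * sectors (128K).
 */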
2371 
2372 static unsigned long long
2373 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
2374 {
2375 	struct mdp_superblock_1 *sb;
2376 	sector_t max_sectors;
2377 	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
2378 		return 0; /* component must fit device */
2379 	if (rdev->data_offset != rdev->new_data_offset)
2380 		return 0; /* too confusing */
2381 	if (rdev->sb_start < rdev->data_offset) {
2382 		/* minor versions 1 and 2; superblock before data */
2383 		max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
2384 		if (!num_sectors || num_sectors > max_sectors)
2385 			num_sectors = max_sectors;
2386 	} else if (rdev->mddev->bitmap_info.offset) {
2387 		/* minor version 0 with bitmap we can't move */
2388 		return 0;
2389 	} else {
2390 		/* minor version 0; superblock after data */
2391 		sector_t sb_start, bm_space;
2392 		sector_t dev_size = bdev_nr_sectors(rdev->bdev);
2393 
2394 		/* 8K is for superblock */
2395 		sb_start = dev_size - 8*2;
2396 		sb_start &= ~(sector_t)(4*2 - 1);
2397 
2398 		bm_space = super_1_choose_bm_space(dev_size);
2399 
2400 		/* Space that can be used to store data must exclude the
2401 		 * superblock, the bitmap space and the bad block space (4K)
2402 		 */
2403 		max_sectors = sb_start - bm_space - 4*2;
2404 
2405 		if (!num_sectors || num_sectors > max_sectors)
2406 			num_sectors = max_sectors;
2407 		rdev->sb_start = sb_start;
2408 	}
2409 	sb = page_address(rdev->sb_page);
2410 	sb->data_size = cpu_to_le64(num_sectors);
2411 	sb->super_offset = cpu_to_le64(rdev->sb_start);
2412 	sb->sb_csum = calc_sb_1_csum(sb);
2413 	do {
2414 		md_write_metadata(rdev->mddev, rdev, rdev->sb_start,
2415 				  rdev->sb_size, rdev->sb_page, 0);
2416 	} while (md_super_wait(rdev->mddev) < 0);
2417 	return num_sectors;
2419 }
2420 
2421 static int
2422 super_1_allow_new_offset(struct md_rdev *rdev,
2423 			 unsigned long long new_offset)
2424 {
2425 	struct mddev *mddev = rdev->mddev;
2426 
2427 	/* All necessary checks on new >= old have been done */
2428 	if (new_offset >= rdev->data_offset)
2429 		return 1;
2430 
2431 	/* with 1.0 metadata, there is no metadata to tread on
2432 	 * so we can always move back */
2433 	if (mddev->minor_version == 0)
2434 		return 1;
2435 
2436 	/* otherwise we must be sure not to step on
2437 	 * any metadata, so stay:
2438 	 * 36K beyond start of superblock
2439 	 * beyond end of badblocks
2440 	 * beyond write-intent bitmap
2441 	 */
2442 	if (rdev->sb_start + (32+4)*2 > new_offset)
2443 		return 0;
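	/* (32 + 4) * 2 == 72 sectors == 36K, assuming 512-byte sectors */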
2444 
2445 	if (md_bitmap_registered(mddev) && !mddev->bitmap_info.file) {
2446 		struct md_bitmap_stats stats;
2447 		int err;
2448 
2449 		err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
2450 		if (!err && rdev->sb_start + mddev->bitmap_info.offset +
2451 		    stats.file_pages * (PAGE_SIZE >> 9) > new_offset)
2452 			return 0;
2453 	}
2454 
2455 	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2456 		return 0;
2457 
2458 	return 1;
2459 }
2460 
2461 static struct super_type super_types[] = {
2462 	[0] = {
2463 		.name	= "0.90.0",
2464 		.owner	= THIS_MODULE,
2465 		.load_super	    = super_90_load,
2466 		.validate_super	    = super_90_validate,
2467 		.sync_super	    = super_90_sync,
2468 		.rdev_size_change   = super_90_rdev_size_change,
2469 		.allow_new_offset   = super_90_allow_new_offset,
2470 	},
2471 	[1] = {
2472 		.name	= "md-1",
2473 		.owner	= THIS_MODULE,
2474 		.load_super	    = super_1_load,
2475 		.validate_super	    = super_1_validate,
2476 		.sync_super	    = super_1_sync,
2477 		.rdev_size_change   = super_1_rdev_size_change,
2478 		.allow_new_offset   = super_1_allow_new_offset,
2479 	},
2480 };
2481 
2482 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
2483 {
2484 	if (mddev->sync_super) {
2485 		mddev->sync_super(mddev, rdev);
2486 		return;
2487 	}
2488 
2489 	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2490 
2491 	super_types[mddev->major_version].sync_super(mddev, rdev);
2492 }
2493 
2494 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
2495 {
2496 	struct md_rdev *rdev, *rdev2;
2497 
2498 	rcu_read_lock();
2499 	rdev_for_each_rcu(rdev, mddev1) {
2500 		if (test_bit(Faulty, &rdev->flags) ||
2501 		    test_bit(Journal, &rdev->flags) ||
2502 		    rdev->raid_disk == -1)
2503 			continue;
2504 		rdev_for_each_rcu(rdev2, mddev2) {
2505 			if (test_bit(Faulty, &rdev2->flags) ||
2506 			    test_bit(Journal, &rdev2->flags) ||
2507 			    rdev2->raid_disk == -1)
2508 				continue;
2509 			if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
2510 				rcu_read_unlock();
2511 				return 1;
2512 			}
2513 		}
2514 	}
2515 	rcu_read_unlock();
2516 	return 0;
2517 }
2518 
2519 static LIST_HEAD(pending_raid_disks);
2520 
2521 /*
2522  * Try to register data integrity profile for an mddev
2523  *
2524  * This is called when an array is started and after a disk has been kicked
2525  * from the array. It only succeeds if all working and active component devices
2526  * are integrity capable with matching profiles.
2527  */
2528 int md_integrity_register(struct mddev *mddev)
2529 {
2530 	if (list_empty(&mddev->disks))
2531 		return 0; /* nothing to do */
2532 	if (mddev_is_dm(mddev) || !blk_get_integrity(mddev->gendisk))
2533 		return 0; /* shouldn't register */
2534 
2535 	pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2536 	return 0;
2537 }
2538 EXPORT_SYMBOL(md_integrity_register);
2539 
2540 static bool rdev_read_only(struct md_rdev *rdev)
2541 {
2542 	return bdev_read_only(rdev->bdev) ||
2543 		(rdev->meta_bdev && bdev_read_only(rdev->meta_bdev));
2544 }
2545 
2546 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2547 {
2548 	char b[BDEVNAME_SIZE];
2549 	int err;
2550 
2551 	/* prevent duplicates */
2552 	if (find_rdev(mddev, rdev->bdev->bd_dev))
2553 		return -EEXIST;
2554 
2555 	if (rdev_read_only(rdev) && mddev->pers)
2556 		return -EROFS;
2557 
2558 	/* make sure rdev->sectors is at least mddev->dev_sectors */
2559 	if (!test_bit(Journal, &rdev->flags) &&
2560 	    rdev->sectors &&
2561 	    (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2562 		if (mddev->pers) {
2563 			/* Cannot change size, so fail.
2564 			 * If mddev->level <= 0, then we don't care
2565 			 * about aligning sizes (e.g. linear)
2566 			 */
2567 			if (mddev->level > 0)
2568 				return -ENOSPC;
2569 		} else
2570 			mddev->dev_sectors = rdev->sectors;
2571 	}
2572 
2573 	/* Verify rdev->desc_nr is unique.
2574 	 * If it is -1, assign a free number, else
2575 	 * check that the number is not in use
2576 	 */
2577 	rcu_read_lock();
2578 	if (rdev->desc_nr < 0) {
2579 		int choice = 0;
2580 		if (mddev->pers)
2581 			choice = mddev->raid_disks;
2582 		while (md_find_rdev_nr_rcu(mddev, choice))
2583 			choice++;
2584 		rdev->desc_nr = choice;
2585 	} else {
2586 		if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2587 			rcu_read_unlock();
2588 			return -EBUSY;
2589 		}
2590 	}
2591 	rcu_read_unlock();
2592 	if (!test_bit(Journal, &rdev->flags) &&
2593 	    mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2594 		pr_warn("md: %s: array is limited to %d devices\n",
2595 			mdname(mddev), mddev->max_disks);
2596 		return -EBUSY;
2597 	}
2598 	snprintf(b, sizeof(b), "%pg", rdev->bdev);
2599 	strreplace(b, '/', '!');
2600 
2601 	rdev->mddev = mddev;
2602 	pr_debug("md: bind<%s>\n", b);
2603 
2604 	if (mddev->raid_disks)
2605 		mddev_create_serial_pool(mddev, rdev);
2606 
2607 	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2608 		goto fail;
2609 
2610 	/* failure here is OK */
2611 	err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
2612 	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2613 	rdev->sysfs_unack_badblocks =
2614 		sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2615 	rdev->sysfs_badblocks =
2616 		sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
2617 
2618 	list_add_rcu(&rdev->same_set, &mddev->disks);
2619 	bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2620 
2621 	return 0;
2622 
2623  fail:
2624 	pr_warn("md: failed to register dev-%s for %s\n",
2625 		b, mdname(mddev));
2626 	mddev_destroy_serial_pool(mddev, rdev);
2627 	return err;
2628 }
2629 
2630 void md_autodetect_dev(dev_t dev);
2631 
2632 /* just for claiming the bdev */
2633 static struct md_rdev claim_rdev;
2634 
2635 static void export_rdev(struct md_rdev *rdev)
2636 {
2637 	pr_debug("md: export_rdev(%pg)\n", rdev->bdev);
2638 	md_rdev_clear(rdev);
2639 #ifndef MODULE
2640 	if (test_bit(AutoDetected, &rdev->flags))
2641 		md_autodetect_dev(rdev->bdev->bd_dev);
2642 #endif
2643 	fput(rdev->bdev_file);
2644 	rdev->bdev = NULL;
2645 	kobject_put(&rdev->kobj);
2646 }
2647 
2648 static void md_kick_rdev_from_array(struct md_rdev *rdev)
2649 {
2650 	struct mddev *mddev = rdev->mddev;
2651 
2652 	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2653 	list_del_rcu(&rdev->same_set);
2654 	pr_debug("md: unbind<%pg>\n", rdev->bdev);
2655 	mddev_destroy_serial_pool(rdev->mddev, rdev);
2656 	WRITE_ONCE(rdev->mddev, NULL);
2657 	sysfs_remove_link(&rdev->kobj, "block");
2658 	sysfs_put(rdev->sysfs_state);
2659 	sysfs_put(rdev->sysfs_unack_badblocks);
2660 	sysfs_put(rdev->sysfs_badblocks);
2661 	rdev->sysfs_state = NULL;
2662 	rdev->sysfs_unack_badblocks = NULL;
2663 	rdev->sysfs_badblocks = NULL;
2664 	rdev->badblocks.count = 0;
2665 
2666 	synchronize_rcu();
2667 
2668 	/*
2669 	 * kobject_del() will wait for all in-progress writers to be done,
2670 	 * and those writers run with reconfig_mutex held; hence it can't be
2671 	 * called under reconfig_mutex and is delayed until mddev_unlock().
2672 	 */
2673 	list_add(&rdev->same_set, &mddev->deleting);
2674 }
2675 
2676 static void export_array(struct mddev *mddev)
2677 {
2678 	struct md_rdev *rdev;
2679 
2680 	while (!list_empty(&mddev->disks)) {
2681 		rdev = list_first_entry(&mddev->disks, struct md_rdev,
2682 					same_set);
2683 		md_kick_rdev_from_array(rdev);
2684 	}
2685 	mddev->raid_disks = 0;
2686 	mddev->major_version = 0;
2687 }
2688 
2689 static bool set_in_sync(struct mddev *mddev)
2690 {
2691 	lockdep_assert_held(&mddev->lock);
2692 	if (!mddev->in_sync) {
2693 		mddev->sync_checkers++;
2694 		spin_unlock(&mddev->lock);
2695 		percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2696 		spin_lock(&mddev->lock);
2697 		if (!mddev->in_sync &&
2698 		    percpu_ref_is_zero(&mddev->writes_pending)) {
2699 			mddev->in_sync = 1;
2700 			/*
2701 			 * Ensure ->in_sync is visible before we clear
2702 			 * ->sync_checkers.
2703 			 */
2704 			smp_mb();
2705 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2706 			sysfs_notify_dirent_safe(mddev->sysfs_state);
2707 		}
2708 		if (--mddev->sync_checkers == 0)
2709 			percpu_ref_switch_to_percpu(&mddev->writes_pending);
2710 	}
2711 	if (mddev->safemode == 1)
2712 		mddev->safemode = 0;
2713 	return mddev->in_sync;
2714 }
2715 
2716 static void sync_sbs(struct mddev *mddev, int nospares)
2717 {
2718 	/* Update each superblock (in-memory image), but
2719 	 * if we are allowed to, skip spares which already
2720 	 * have the right event counter, or have one earlier
2721 	 * (which would mean they aren't being marked as dirty
2722 	 * with the rest of the array)
2723 	 */
2724 	struct md_rdev *rdev;
2725 	rdev_for_each(rdev, mddev) {
2726 		if (rdev->sb_events == mddev->events ||
2727 		    (nospares &&
2728 		     rdev->raid_disk < 0 &&
2729 		     rdev->sb_events+1 == mddev->events)) {
2730 			/* Don't update this superblock */
2731 			rdev->sb_loaded = 2;
2732 		} else {
2733 			sync_super(mddev, rdev);
2734 			rdev->sb_loaded = 1;
2735 		}
2736 	}
2737 }
2738 
2739 static bool does_sb_need_changing(struct mddev *mddev)
2740 {
2741 	struct md_rdev *rdev = NULL, *iter;
2742 	struct mdp_superblock_1 *sb;
2743 	int role;
2744 
2745 	/* Find a good rdev */
2746 	rdev_for_each(iter, mddev)
2747 		if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
2748 			rdev = iter;
2749 			break;
2750 		}
2751 
2752 	/* No good device found. */
2753 	if (!rdev)
2754 		return false;
2755 
2756 	sb = page_address(rdev->sb_page);
2757 	/* Check if a device has become faulty or a spare become active */
2758 	rdev_for_each(rdev, mddev) {
2759 		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2760 		/* Device activated? */
2761 		if (role == MD_DISK_ROLE_SPARE && rdev->raid_disk >= 0 &&
2762 		    !test_bit(Faulty, &rdev->flags))
2763 			return true;
2764 		/* Device turned faulty? */
2765 		if (test_bit(Faulty, &rdev->flags) && (role < MD_DISK_ROLE_MAX))
2766 			return true;
2767 	}
2768 
2769 	/* Check if any mddev parameters have changed */
2770 	if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2771 	    (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2772 	    (mddev->layout != le32_to_cpu(sb->layout)) ||
2773 	    (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2774 	    (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2775 		return true;
2776 
2777 	return false;
2778 }
2779 
2780 void md_update_sb(struct mddev *mddev, int force_change)
2781 {
2782 	struct md_rdev *rdev;
2783 	int sync_req;
2784 	int nospares = 0;
2785 	int any_badblocks_changed = 0;
2786 	int ret = -1;
2787 
2788 	if (!md_is_rdwr(mddev)) {
2789 		if (force_change)
2790 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2791 		if (!mddev_is_dm(mddev))
2792 			pr_err_ratelimited("%s: can't update sb for read-only array %s\n",
2793 					   __func__, mdname(mddev));
2794 		return;
2795 	}
2796 
2797 repeat:
2798 	if (mddev_is_clustered(mddev)) {
2799 		if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2800 			force_change = 1;
2801 		if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2802 			nospares = 1;
2803 		ret = mddev->cluster_ops->metadata_update_start(mddev);
2804 		/* Has someone else updated the sb? */
2805 		if (!does_sb_need_changing(mddev)) {
2806 			if (ret == 0)
2807 				mddev->cluster_ops->metadata_update_cancel(mddev);
2808 			bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2809 							 BIT(MD_SB_CHANGE_DEVS) |
2810 							 BIT(MD_SB_CHANGE_CLEAN));
2811 			return;
2812 		}
2813 	}
2814 
2815 	/*
2816 	 * First make sure individual recovery_offsets are correct.
2817 	 * curr_resync_completed can only be used during recovery;
2818 	 * during reshape/resync it might use array addresses rather
2819 	 * than device addresses.
2820 	 */
2821 	rdev_for_each(rdev, mddev) {
2822 		if (rdev->raid_disk >= 0 &&
2823 		    mddev->delta_disks >= 0 &&
2824 		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2825 		    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2826 		    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2827 		    !test_bit(Journal, &rdev->flags) &&
2828 		    !test_bit(In_sync, &rdev->flags) &&
2829 		    mddev->curr_resync_completed > rdev->recovery_offset)
2830 				rdev->recovery_offset = mddev->curr_resync_completed;
2831 
2832 	}
2833 	if (!mddev->persistent) {
2834 		clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2835 		clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2836 		if (!mddev->external) {
2837 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2838 			rdev_for_each(rdev, mddev) {
2839 				if (rdev->badblocks.changed) {
2840 					rdev->badblocks.changed = 0;
2841 					ack_all_badblocks(&rdev->badblocks);
2842 					md_error(mddev, rdev);
2843 				}
2844 				clear_bit(Blocked, &rdev->flags);
2845 				clear_bit(BlockedBadBlocks, &rdev->flags);
2846 				wake_up(&rdev->blocked_wait);
2847 			}
2848 		}
2849 		wake_up(&mddev->sb_wait);
2850 		return;
2851 	}
2852 
2853 	spin_lock(&mddev->lock);
2854 
2855 	mddev->utime = ktime_get_real_seconds();
2856 
2857 	if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2858 		force_change = 1;
2859 	if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2860 		/* just a clean <-> dirty transition, so possibly leave spares
2861 		 * alone, though if the events count isn't the right even/odd
2862 		 * value we will have to update the spares after all
2863 		 */
2864 		nospares = 1;
2865 	if (force_change)
2866 		nospares = 0;
2867 	if (mddev->degraded)
2868 		/* If the array is degraded, then skipping spares is both
2869 		 * dangerous and fairly pointless.
2870 		 * Dangerous because a device that was removed from the array
2871 		 * might have an event count that still looks up-to-date,
2872 		 * so it can be re-added without a resync.
2873 		 * Pointless because if there are any spares to skip,
2874 		 * then a recovery will happen and soon that array won't
2875 		 * be degraded any more and the spare can go back to sleep then.
2876 		 */
2877 		nospares = 0;
2878 
2879 	sync_req = mddev->in_sync;
2880 
2881 	/* If this is just a dirty<->clean transition, and the array is clean
2882 	 * and 'events' is odd, we can roll back to the previous clean state */
2883 	if (nospares
2884 	    && (mddev->in_sync && mddev->resync_offset == MaxSector)
2885 	    && mddev->can_decrease_events
2886 	    && mddev->events != 1) {
2887 		mddev->events--;
2888 		mddev->can_decrease_events = 0;
2889 	} else {
2890 		/* otherwise we have to go forward and ... */
2891 		mddev->events++;
2892 		mddev->can_decrease_events = nospares;
2893 	}
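	/*
	 * Illustrative: an array that went dirty at events == 6 (spares
	 * still recorded at 5) can roll back to 5 on this clean transition
	 * instead of advancing to 7, so the spare superblocks never need
	 * rewriting.
	 */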
2894 
2895 	/*
2896 	 * This 64-bit counter should never wrap.
2897 	 * Either we are in around ~1 trillion A.C., assuming
2898 	 * 1 reboot per second, or we have a bug...
2899 	 */
2900 	WARN_ON(mddev->events == 0);
2901 
2902 	rdev_for_each(rdev, mddev) {
2903 		if (rdev->badblocks.changed)
2904 			any_badblocks_changed++;
2905 		if (test_bit(Faulty, &rdev->flags))
2906 			set_bit(FaultRecorded, &rdev->flags);
2907 	}
2908 
2909 	sync_sbs(mddev, nospares);
2910 	spin_unlock(&mddev->lock);
2911 
2912 	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2913 		 mdname(mddev), mddev->in_sync);
2914 
2915 	mddev_add_trace_msg(mddev, "md md_update_sb");
2916 rewrite:
2917 	if (md_bitmap_enabled(mddev, false))
2918 		mddev->bitmap_ops->update_sb(mddev->bitmap);
2919 	rdev_for_each(rdev, mddev) {
2920 		if (rdev->sb_loaded != 1)
2921 			continue; /* no noise on spare devices */
2922 
2923 		if (!test_bit(Faulty, &rdev->flags)) {
2924 			md_write_metadata(mddev, rdev, rdev->sb_start,
2925 					  rdev->sb_size, rdev->sb_page, 0);
2926 			pr_debug("md: (write) %pg's sb offset: %llu\n",
2927 				 rdev->bdev,
2928 				 (unsigned long long)rdev->sb_start);
2929 			rdev->sb_events = mddev->events;
2930 			if (rdev->badblocks.size) {
2931 				md_write_metadata(mddev, rdev,
2932 						  rdev->badblocks.sector,
2933 						  rdev->badblocks.size << 9,
2934 						  rdev->bb_page, 0);
2935 				rdev->badblocks.size = 0;
2936 			}
2937 
2938 		} else
2939 			pr_debug("md: %pg (skipping faulty)\n",
2940 				 rdev->bdev);
2941 	}
2942 	if (md_super_wait(mddev) < 0)
2943 		goto rewrite;
2944 	/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
2945 
2946 	if (mddev_is_clustered(mddev) && ret == 0)
2947 		mddev->cluster_ops->metadata_update_finish(mddev);
2948 
2949 	if (mddev->in_sync != sync_req ||
2950 	    !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2951 			       BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
2952 		/* have to write it out again */
2953 		goto repeat;
2954 	wake_up(&mddev->sb_wait);
2955 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2956 		sysfs_notify_dirent_safe(mddev->sysfs_completed);
2957 
2958 	rdev_for_each(rdev, mddev) {
2959 		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2960 			clear_bit(Blocked, &rdev->flags);
2961 
2962 		if (any_badblocks_changed)
2963 			ack_all_badblocks(&rdev->badblocks);
2964 		clear_bit(BlockedBadBlocks, &rdev->flags);
2965 		wake_up(&rdev->blocked_wait);
2966 	}
2967 }
2968 EXPORT_SYMBOL(md_update_sb);
2969 
2970 static int add_bound_rdev(struct md_rdev *rdev)
2971 {
2972 	struct mddev *mddev = rdev->mddev;
2973 	int err = 0;
2974 	bool add_journal = test_bit(Journal, &rdev->flags);
2975 
2976 	if (!mddev->pers->hot_remove_disk || add_journal) {
2977 		/* If there is hot_add_disk but no hot_remove_disk
2978 		 * then added disks are for geometry changes,
2979 		 * and should be added immediately.
2980 		 */
2981 		super_types[mddev->major_version].
2982 			validate_super(mddev, NULL/*freshest*/, rdev);
2983 		err = mddev->pers->hot_add_disk(mddev, rdev);
2984 		if (err) {
2985 			md_kick_rdev_from_array(rdev);
2986 			return err;
2987 		}
2988 	}
2989 	sysfs_notify_dirent_safe(rdev->sysfs_state);
2990 
2991 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2992 	if (mddev->degraded)
2993 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2994 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2995 	md_new_event();
2996 	return 0;
2997 }
2998 
2999 /* words written to sysfs files may, or may not, be \n terminated.
3000  * We want to accept them either way. For this we use cmd_match.
3001  */
3002 static int cmd_match(const char *cmd, const char *str)
3003 {
3004 	/* See if cmd, written into a sysfs file, matches
3005 	 * str.  They must either be the same, or cmd can
3006 	 * have a trailing newline
3007 	 */
3008 	while (*cmd && *str && *cmd == *str) {
3009 		cmd++;
3010 		str++;
3011 	}
3012 	if (*cmd == '\n')
3013 		cmd++;
3014 	if (*str || *cmd)
3015 		return 0;
3016 	return 1;
3017 }
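/*
 * For example, cmd_match("faulty\n", "faulty") and
 * cmd_match("faulty", "faulty") both return 1, while the bare prefix
 * cmd_match("fault", "faulty") returns 0.
 */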
3018 
3019 struct rdev_sysfs_entry {
3020 	struct attribute attr;
3021 	ssize_t (*show)(struct md_rdev *, char *);
3022 	ssize_t (*store)(struct md_rdev *, const char *, size_t);
3023 };
3024 
3025 static ssize_t
3026 state_show(struct md_rdev *rdev, char *page)
3027 {
3028 	char *sep = ",";
3029 	size_t len = 0;
3030 	unsigned long flags = READ_ONCE(rdev->flags);
3031 
3032 	if (test_bit(Faulty, &flags) ||
3033 	    (!test_bit(ExternalBbl, &flags) &&
3034 	    rdev->badblocks.unacked_exist))
3035 		len += sprintf(page+len, "faulty%s", sep);
3036 	if (test_bit(In_sync, &flags))
3037 		len += sprintf(page+len, "in_sync%s", sep);
3038 	if (test_bit(Journal, &flags))
3039 		len += sprintf(page+len, "journal%s", sep);
3040 	if (test_bit(WriteMostly, &flags))
3041 		len += sprintf(page+len, "write_mostly%s", sep);
3042 	if (test_bit(Blocked, &flags) ||
3043 	    (rdev->badblocks.unacked_exist
3044 	     && !test_bit(Faulty, &flags)))
3045 		len += sprintf(page+len, "blocked%s", sep);
3046 	if (!test_bit(Faulty, &flags) &&
3047 	    !test_bit(Journal, &flags) &&
3048 	    !test_bit(In_sync, &flags))
3049 		len += sprintf(page+len, "spare%s", sep);
3050 	if (test_bit(WriteErrorSeen, &flags))
3051 		len += sprintf(page+len, "write_error%s", sep);
3052 	if (test_bit(WantReplacement, &flags))
3053 		len += sprintf(page+len, "want_replacement%s", sep);
3054 	if (test_bit(Replacement, &flags))
3055 		len += sprintf(page+len, "replacement%s", sep);
3056 	if (test_bit(ExternalBbl, &flags))
3057 		len += sprintf(page+len, "external_bbl%s", sep);
3058 	if (test_bit(FailFast, &flags))
3059 		len += sprintf(page+len, "failfast%s", sep);
3060 
3061 	if (len)
3062 		len -= strlen(sep);
3063 
3064 	return len+sprintf(page+len, "\n");
3065 }
3066 
3067 static ssize_t
3068 state_store(struct md_rdev *rdev, const char *buf, size_t len)
3069 {
3070 	/* can write
3071 	 *  faulty  - simulates an error
3072 	 *  remove  - disconnects the device
3073 	 *  writemostly - sets write_mostly
3074 	 *  -writemostly - clears write_mostly
3075 	 *  blocked - sets the Blocked flags
3076 	 *  -blocked - clears the Blocked and possibly simulates an error
3077 	 *  insync - sets Insync, provided the device isn't active
3078 	 *  -insync - clears Insync for a device with a slot assigned,
3079 	 *            so that it gets rebuilt based on the bitmap
3080 	 *  write_error - sets WriteErrorSeen
3081 	 *  -write_error - clears WriteErrorSeen
3082 	 *  {,-}failfast - set/clear FailFast
3083 	 */
3084 
3085 	struct mddev *mddev = rdev->mddev;
3086 	int err = -EINVAL;
3087 	bool need_update_sb = false;
3088 
3089 	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
3090 		md_error(rdev->mddev, rdev);
3091 
3092 		if (test_bit(MD_BROKEN, &rdev->mddev->flags))
3093 			err = -EBUSY;
3094 		else
3095 			err = 0;
3096 	} else if (cmd_match(buf, "remove")) {
3097 		if (rdev->mddev->pers) {
3098 			clear_bit(Blocked, &rdev->flags);
3099 			remove_and_add_spares(rdev->mddev, rdev);
3100 		}
3101 		if (rdev->raid_disk >= 0)
3102 			err = -EBUSY;
3103 		else {
3104 			err = 0;
3105 			if (mddev_is_clustered(mddev))
3106 				err = mddev->cluster_ops->remove_disk(mddev, rdev);
3107 
3108 			if (err == 0) {
3109 				md_kick_rdev_from_array(rdev);
3110 				if (mddev->pers)
3111 					set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3112 				md_new_event();
3113 			}
3114 		}
3115 	} else if (cmd_match(buf, "writemostly")) {
3116 		set_bit(WriteMostly, &rdev->flags);
3117 		mddev_create_serial_pool(rdev->mddev, rdev);
3118 		need_update_sb = true;
3119 		err = 0;
3120 	} else if (cmd_match(buf, "-writemostly")) {
3121 		mddev_destroy_serial_pool(rdev->mddev, rdev);
3122 		clear_bit(WriteMostly, &rdev->flags);
3123 		need_update_sb = true;
3124 		err = 0;
3125 	} else if (cmd_match(buf, "blocked")) {
3126 		set_bit(Blocked, &rdev->flags);
3127 		err = 0;
3128 	} else if (cmd_match(buf, "-blocked")) {
3129 		if (!test_bit(Faulty, &rdev->flags) &&
3130 		    !test_bit(ExternalBbl, &rdev->flags) &&
3131 		    rdev->badblocks.unacked_exist) {
3132 			/* metadata handler doesn't understand badblocks,
3133 			 * so we need to fail the device
3134 			 */
3135 			md_error(rdev->mddev, rdev);
3136 		}
3137 		clear_bit(Blocked, &rdev->flags);
3138 		clear_bit(BlockedBadBlocks, &rdev->flags);
3139 		wake_up(&rdev->blocked_wait);
3140 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3141 
3142 		err = 0;
3143 	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3144 		set_bit(In_sync, &rdev->flags);
3145 		err = 0;
3146 	} else if (cmd_match(buf, "failfast")) {
3147 		set_bit(FailFast, &rdev->flags);
3148 		need_update_sb = true;
3149 		err = 0;
3150 	} else if (cmd_match(buf, "-failfast")) {
3151 		clear_bit(FailFast, &rdev->flags);
3152 		need_update_sb = true;
3153 		err = 0;
3154 	} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3155 		   !test_bit(Journal, &rdev->flags)) {
3156 		if (rdev->mddev->pers == NULL) {
3157 			clear_bit(In_sync, &rdev->flags);
3158 			rdev->saved_raid_disk = rdev->raid_disk;
3159 			rdev->raid_disk = -1;
3160 			err = 0;
3161 		}
3162 	} else if (cmd_match(buf, "write_error")) {
3163 		set_bit(WriteErrorSeen, &rdev->flags);
3164 		err = 0;
3165 	} else if (cmd_match(buf, "-write_error")) {
3166 		clear_bit(WriteErrorSeen, &rdev->flags);
3167 		err = 0;
3168 	} else if (cmd_match(buf, "want_replacement")) {
3169 		/* Any non-spare device that is not a replacement can
3170 		 * become want_replacement at any time, but we then need to
3171 		 * check if recovery is needed.
3172 		 */
3173 		if (rdev->raid_disk >= 0 &&
3174 		    !test_bit(Journal, &rdev->flags) &&
3175 		    !test_bit(Replacement, &rdev->flags))
3176 			set_bit(WantReplacement, &rdev->flags);
3177 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3178 		err = 0;
3179 	} else if (cmd_match(buf, "-want_replacement")) {
3180 		/* Clearing 'want_replacement' is always allowed.
3181 		 * Once replacements starts it is too late though.
3182 		 * Once replacement starts, it is too late though.
3183 		err = 0;
3184 		clear_bit(WantReplacement, &rdev->flags);
3185 	} else if (cmd_match(buf, "replacement")) {
3186 		/* Can only set a device as a replacement when array has not
3187 		 * yet been started.  Once running, replacement is automatic
3188 		 * from spares, or by assigning 'slot'.
3189 		 */
3190 		if (rdev->mddev->pers)
3191 			err = -EBUSY;
3192 		else {
3193 			set_bit(Replacement, &rdev->flags);
3194 			err = 0;
3195 		}
3196 	} else if (cmd_match(buf, "-replacement")) {
3197 		/* Similarly, can only clear Replacement before start */
3198 		if (rdev->mddev->pers)
3199 			err = -EBUSY;
3200 		else {
3201 			clear_bit(Replacement, &rdev->flags);
3202 			err = 0;
3203 		}
3204 	} else if (cmd_match(buf, "re-add")) {
3205 		if (!rdev->mddev->pers)
3206 			err = -EINVAL;
3207 		else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3208 				rdev->saved_raid_disk >= 0) {
3209 			/* clear_bit is performed _after_ all the devices
3210 			 * have their local Faulty bit cleared. If any writes
3211 			 * happen in the meantime in the local node, they
3212 			 * will land in the local bitmap, which will be synced
3213 			 * by this node eventually
3214 			 */
3215 			if (!mddev_is_clustered(rdev->mddev) ||
3216 			    (err = mddev->cluster_ops->gather_bitmaps(rdev)) == 0) {
3217 				clear_bit(Faulty, &rdev->flags);
3218 				err = add_bound_rdev(rdev);
3219 			}
3220 		} else
3221 			err = -EBUSY;
3222 	} else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3223 		set_bit(ExternalBbl, &rdev->flags);
3224 		rdev->badblocks.shift = 0;
3225 		err = 0;
3226 	} else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3227 		clear_bit(ExternalBbl, &rdev->flags);
3228 		err = 0;
3229 	}
3230 	if (need_update_sb)
3231 		md_update_sb(mddev, 1);
3232 	if (!err)
3233 		sysfs_notify_dirent_safe(rdev->sysfs_state);
3234 	return err ? err : len;
3235 }
3236 static struct rdev_sysfs_entry rdev_state =
3237 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
3238 
3239 static ssize_t
3240 errors_show(struct md_rdev *rdev, char *page)
3241 {
3242 	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3243 }
3244 
3245 static ssize_t
3246 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
3247 {
3248 	unsigned int n;
3249 	int rv;
3250 
3251 	rv = kstrtouint(buf, 10, &n);
3252 	if (rv < 0)
3253 		return rv;
3254 	atomic_set(&rdev->corrected_errors, n);
3255 	return len;
3256 }
3257 static struct rdev_sysfs_entry rdev_errors =
3258 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
3259 
3260 static ssize_t
3261 slot_show(struct md_rdev *rdev, char *page)
3262 {
3263 	if (test_bit(Journal, &rdev->flags))
3264 		return sprintf(page, "journal\n");
3265 	else if (rdev->raid_disk < 0)
3266 		return sprintf(page, "none\n");
3267 	else
3268 		return sprintf(page, "%d\n", rdev->raid_disk);
3269 }
3270 
3271 static ssize_t
3272 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
3273 {
3274 	int slot;
3275 	int err;
3276 
3277 	if (test_bit(Journal, &rdev->flags))
3278 		return -EBUSY;
3279 	if (strncmp(buf, "none", 4)==0)
3280 		slot = -1;
3281 	else {
3282 		err = kstrtouint(buf, 10, (unsigned int *)&slot);
3283 		if (err < 0)
3284 			return err;
3285 		if (slot < 0)
3286 			/* overflow */
3287 			return -ENOSPC;
3288 	}
3289 	if (rdev->mddev->pers && slot == -1) {
3290 		/* Setting 'slot' on an active array requires also
3291 		 * updating the 'rd%d' link, and communicating
3292 		 * with the personality with ->hot_*_disk.
3293 		 * For now we only support removing
3294 		 * failed/spare devices.  This normally happens automatically,
3295 		 * but not when the metadata is externally managed.
3296 		 */
3297 		if (rdev->raid_disk == -1)
3298 			return -EEXIST;
3299 		/* personality does all needed checks */
3300 		if (rdev->mddev->pers->hot_remove_disk == NULL)
3301 			return -EINVAL;
3302 		clear_bit(Blocked, &rdev->flags);
3303 		remove_and_add_spares(rdev->mddev, rdev);
3304 		if (rdev->raid_disk >= 0)
3305 			return -EBUSY;
3306 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3307 	} else if (rdev->mddev->pers) {
3308 		/* Activating a spare .. or possibly reactivating
3309 		 * if we ever get bitmaps working here.
3310 		 */
3311 		int err;
3312 
3313 		if (rdev->raid_disk != -1)
3314 			return -EBUSY;
3315 
3316 		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3317 			return -EBUSY;
3318 
3319 		if (rdev->mddev->pers->hot_add_disk == NULL)
3320 			return -EINVAL;
3321 
3322 		if (slot >= rdev->mddev->raid_disks &&
3323 		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3324 			return -ENOSPC;
3325 
3326 		rdev->raid_disk = slot;
3327 		if (test_bit(In_sync, &rdev->flags))
3328 			rdev->saved_raid_disk = slot;
3329 		else
3330 			rdev->saved_raid_disk = -1;
3331 		clear_bit(In_sync, &rdev->flags);
3332 		clear_bit(Bitmap_sync, &rdev->flags);
3333 		err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
3334 		if (err) {
3335 			rdev->raid_disk = -1;
3336 			return err;
3337 		} else
3338 			sysfs_notify_dirent_safe(rdev->sysfs_state);
3339 		/* failure here is OK */;
3340 		sysfs_link_rdev(rdev->mddev, rdev);
3341 		/* don't wakeup anyone, leave that to userspace. */
3342 	} else {
3343 		if (slot >= rdev->mddev->raid_disks &&
3344 		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3345 			return -ENOSPC;
3346 		rdev->raid_disk = slot;
3347 		/* assume it is working */
3348 		clear_bit(Faulty, &rdev->flags);
3349 		clear_bit(WriteMostly, &rdev->flags);
3350 		set_bit(In_sync, &rdev->flags);
3351 		sysfs_notify_dirent_safe(rdev->sysfs_state);
3352 	}
3353 	return len;
3354 }
3355 
3356 static struct rdev_sysfs_entry rdev_slot =
3357 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
3358 
3359 static ssize_t
3360 offset_show(struct md_rdev *rdev, char *page)
3361 {
3362 	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
3363 }
3364 
3365 static ssize_t
3366 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
3367 {
3368 	unsigned long long offset;
3369 	if (kstrtoull(buf, 10, &offset) < 0)
3370 		return -EINVAL;
3371 	if (rdev->mddev->pers && rdev->raid_disk >= 0)
3372 		return -EBUSY;
3373 	if (rdev->sectors && rdev->mddev->external)
3374 		/* Must set offset before size, so overlap checks
3375 		 * can be sane */
3376 		return -EBUSY;
3377 	rdev->data_offset = offset;
3378 	rdev->new_data_offset = offset;
3379 	return len;
3380 }
3381 
3382 static struct rdev_sysfs_entry rdev_offset =
3383 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
3384 
3385 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3386 {
3387 	return sprintf(page, "%llu\n",
3388 		       (unsigned long long)rdev->new_data_offset);
3389 }
3390 
3391 static ssize_t new_offset_store(struct md_rdev *rdev,
3392 				const char *buf, size_t len)
3393 {
3394 	unsigned long long new_offset;
3395 	struct mddev *mddev = rdev->mddev;
3396 
3397 	if (kstrtoull(buf, 10, &new_offset) < 0)
3398 		return -EINVAL;
3399 
3400 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3401 		return -EBUSY;
3402 	if (new_offset == rdev->data_offset)
3403 		/* reset is always permitted */
3404 		;
3405 	else if (new_offset > rdev->data_offset) {
3406 		/* must not push array size beyond rdev_sectors */
3407 		if (new_offset - rdev->data_offset
3408 		    + mddev->dev_sectors > rdev->sectors)
3409 				return -E2BIG;
3410 	}
3411 	/* Metadata worries about other space details. */
3412 
3413 	/* decreasing the offset is inconsistent with a backwards
3414 	 * reshape.
3415 	 */
3416 	if (new_offset < rdev->data_offset &&
3417 	    mddev->reshape_backwards)
3418 		return -EINVAL;
3419 	/* Increasing offset is inconsistent with forwards
3420 	 * reshape.  reshape_direction should be set to
3421 	 * 'backwards' first.
3422 	 */
3423 	if (new_offset > rdev->data_offset &&
3424 	    !mddev->reshape_backwards)
3425 		return -EINVAL;
3426 
3427 	if (mddev->pers && mddev->persistent &&
3428 	    !super_types[mddev->major_version]
3429 	    .allow_new_offset(rdev, new_offset))
3430 		return -E2BIG;
3431 	rdev->new_data_offset = new_offset;
3432 	if (new_offset > rdev->data_offset)
3433 		mddev->reshape_backwards = 1;
3434 	else if (new_offset < rdev->data_offset)
3435 		mddev->reshape_backwards = 0;
3436 
3437 	return len;
3438 }
3439 static struct rdev_sysfs_entry rdev_new_offset =
3440 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3441 
3442 static ssize_t
3443 rdev_size_show(struct md_rdev *rdev, char *page)
3444 {
3445 	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
3446 }
3447 
3448 static int md_rdevs_overlap(struct md_rdev *a, struct md_rdev *b)
3449 {
3450 	/* check if two start/length pairs overlap */
3451 	if (a->data_offset + a->sectors <= b->data_offset)
3452 		return false;
3453 	if (b->data_offset + b->sectors <= a->data_offset)
3454 		return false;
3455 	return true;
3456 }
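/*
 * E.g. (data_offset 0, 1024 sectors) and (data_offset 1024, 1024 sectors)
 * on the same bdev do not overlap, while (0, 1025) and (1024, 1024) do.
 */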
3457 
3458 static bool md_rdev_overlaps(struct md_rdev *rdev)
3459 {
3460 	struct mddev *mddev;
3461 	struct md_rdev *rdev2;
3462 
3463 	spin_lock(&all_mddevs_lock);
3464 	list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
3465 		if (test_bit(MD_DELETED, &mddev->flags))
3466 			continue;
3467 		rdev_for_each(rdev2, mddev) {
3468 			if (rdev != rdev2 && rdev->bdev == rdev2->bdev &&
3469 			    md_rdevs_overlap(rdev, rdev2)) {
3470 				spin_unlock(&all_mddevs_lock);
3471 				return true;
3472 			}
3473 		}
3474 	}
3475 	spin_unlock(&all_mddevs_lock);
3476 	return false;
3477 }
3478 
3479 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3480 {
3481 	unsigned long long blocks;
3482 	sector_t new;
3483 
3484 	if (kstrtoull(buf, 10, &blocks) < 0)
3485 		return -EINVAL;
3486 
3487 	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3488 		return -EINVAL; /* sector conversion overflow */
3489 
3490 	new = blocks * 2;
3491 	if (new != blocks * 2)
3492 		return -EINVAL; /* unsigned long long to sector_t overflow */
3493 
3494 	*sectors = new;
3495 	return 0;
3496 }
3497 
3498 static ssize_t
3499 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3500 {
3501 	struct mddev *my_mddev = rdev->mddev;
3502 	sector_t oldsectors = rdev->sectors;
3503 	sector_t sectors;
3504 
3505 	if (test_bit(Journal, &rdev->flags))
3506 		return -EBUSY;
3507 	if (strict_blocks_to_sectors(buf, &sectors) < 0)
3508 		return -EINVAL;
3509 	if (rdev->data_offset != rdev->new_data_offset)
3510 		return -EINVAL; /* too confusing */
3511 	if (my_mddev->pers && rdev->raid_disk >= 0) {
3512 		if (my_mddev->persistent) {
3513 			sectors = super_types[my_mddev->major_version].
3514 				rdev_size_change(rdev, sectors);
3515 			if (!sectors)
3516 				return -EBUSY;
3517 		} else if (!sectors)
3518 			sectors = bdev_nr_sectors(rdev->bdev) -
3519 				rdev->data_offset;
3520 		if (!my_mddev->pers->resize)
3521 			/* Cannot change size for RAID0 or Linear etc */
3522 			return -EINVAL;
3523 	}
3524 	if (sectors < my_mddev->dev_sectors)
3525 		return -EINVAL; /* component must fit device */
3526 
3527 	rdev->sectors = sectors;
3528 
3529 	/*
3530 	 * Check that all other rdevs with the same bdev do not overlap.  This
3531 	 * check does not provide a hard guarantee, it just helps avoid
3532 	 * dangerous mistakes.
3533 	 */
3534 	if (sectors > oldsectors && my_mddev->external &&
3535 	    md_rdev_overlaps(rdev)) {
3536 		/*
3537 		 * Someone else could have slipped in a size change here, but
3538 		 * doing so is just silly.  We put oldsectors back because we
3539 		 * know it is safe, and trust userspace not to race with itself.
3540 		 */
3541 		rdev->sectors = oldsectors;
3542 		return -EBUSY;
3543 	}
3544 	return len;
3545 }
3546 
3547 static struct rdev_sysfs_entry rdev_size =
3548 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3549 
3550 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3551 {
3552 	unsigned long long recovery_start = rdev->recovery_offset;
3553 
3554 	if (test_bit(In_sync, &rdev->flags) ||
3555 	    recovery_start == MaxSector)
3556 		return sprintf(page, "none\n");
3557 
3558 	return sprintf(page, "%llu\n", recovery_start);
3559 }
3560 
3561 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3562 {
3563 	unsigned long long recovery_start;
3564 
3565 	if (cmd_match(buf, "none"))
3566 		recovery_start = MaxSector;
3567 	else if (kstrtoull(buf, 10, &recovery_start))
3568 		return -EINVAL;
3569 
3570 	if (rdev->mddev->pers &&
3571 	    rdev->raid_disk >= 0)
3572 		return -EBUSY;
3573 
3574 	rdev->recovery_offset = recovery_start;
3575 	if (recovery_start == MaxSector)
3576 		set_bit(In_sync, &rdev->flags);
3577 	else
3578 		clear_bit(In_sync, &rdev->flags);
3579 	return len;
3580 }
3581 
3582 static struct rdev_sysfs_entry rdev_recovery_start =
3583 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3584 
3585 /* sysfs access to the bad-blocks list.
3586  * We present two files.
3587  * 'bad_blocks' lists sector numbers and lengths of ranges that
3588  *    are recorded as bad.  The list is truncated to fit within
3589  *    the one-page limit of sysfs.
3590  *    Writing "sector length" to this file adds an acknowledged
3591  *    bad block to the list.
3592  * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
3593  *    been acknowledged.  Writing to this file adds bad blocks
3594  *    without acknowledging them.  This is largely for testing.
3595  */
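/*
 * Illustrative usage from userspace (hypothetical device names):
 * recording an acknowledged 16-sector bad range starting at sector
 * 2048 on a member device looks like
 *
 *   echo "2048 16" > /sys/block/md0/md/dev-sda/bad_blocks
 *
 * and reading the file back lists "sector length" pairs, one per line.
 */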
3596 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3597 {
3598 	return badblocks_show(&rdev->badblocks, page, 0);
3599 }
3600 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3601 {
3602 	int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3603 	/* Maybe that ack was all we needed */
3604 	if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3605 		wake_up(&rdev->blocked_wait);
3606 	return rv;
3607 }
3608 static struct rdev_sysfs_entry rdev_bad_blocks =
3609 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3610 
3611 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3612 {
3613 	return badblocks_show(&rdev->badblocks, page, 1);
3614 }
3615 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3616 {
3617 	return badblocks_store(&rdev->badblocks, page, len, 1);
3618 }
3619 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3620 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3621 
3622 static ssize_t
3623 ppl_sector_show(struct md_rdev *rdev, char *page)
3624 {
3625 	return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3626 }
3627 
3628 static ssize_t
3629 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3630 {
3631 	unsigned long long sector;
3632 
3633 	if (kstrtoull(buf, 10, &sector) < 0)
3634 		return -EINVAL;
3635 	if (sector != (sector_t)sector)
3636 		return -EINVAL;
3637 
3638 	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3639 	    rdev->raid_disk >= 0)
3640 		return -EBUSY;
3641 
3642 	if (rdev->mddev->persistent) {
3643 		if (rdev->mddev->major_version == 0)
3644 			return -EINVAL;
3645 		if ((sector > rdev->sb_start &&
3646 		     sector - rdev->sb_start > S16_MAX) ||
3647 		    (sector < rdev->sb_start &&
3648 		     rdev->sb_start - sector > -S16_MIN))
3649 			return -EINVAL;
3650 		rdev->ppl.offset = sector - rdev->sb_start;
3651 	} else if (!rdev->mddev->external) {
3652 		return -EBUSY;
3653 	}
3654 	rdev->ppl.sector = sector;
3655 	return len;
3656 }
3657 
3658 static struct rdev_sysfs_entry rdev_ppl_sector =
3659 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3660 
3661 static ssize_t
3662 ppl_size_show(struct md_rdev *rdev, char *page)
3663 {
3664 	return sprintf(page, "%u\n", rdev->ppl.size);
3665 }
3666 
3667 static ssize_t
3668 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3669 {
3670 	unsigned int size;
3671 
3672 	if (kstrtouint(buf, 10, &size) < 0)
3673 		return -EINVAL;
3674 
3675 	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3676 	    rdev->raid_disk >= 0)
3677 		return -EBUSY;
3678 
3679 	if (rdev->mddev->persistent) {
3680 		if (rdev->mddev->major_version == 0)
3681 			return -EINVAL;
3682 		if (size > U16_MAX)
3683 			return -EINVAL;
3684 	} else if (!rdev->mddev->external) {
3685 		return -EBUSY;
3686 	}
3687 	rdev->ppl.size = size;
3688 	return len;
3689 }
3690 
3691 static struct rdev_sysfs_entry rdev_ppl_size =
3692 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3693 
3694 static struct attribute *rdev_default_attrs[] = {
3695 	&rdev_state.attr,
3696 	&rdev_errors.attr,
3697 	&rdev_slot.attr,
3698 	&rdev_offset.attr,
3699 	&rdev_new_offset.attr,
3700 	&rdev_size.attr,
3701 	&rdev_recovery_start.attr,
3702 	&rdev_bad_blocks.attr,
3703 	&rdev_unack_bad_blocks.attr,
3704 	&rdev_ppl_sector.attr,
3705 	&rdev_ppl_size.attr,
3706 	NULL,
3707 };
3708 ATTRIBUTE_GROUPS(rdev_default);
3709 static ssize_t
3710 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3711 {
3712 	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3713 	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3714 
3715 	if (!entry->show)
3716 		return -EIO;
3717 	if (!rdev->mddev)
3718 		return -ENODEV;
3719 	return entry->show(rdev, page);
3720 }
3721 
3722 static ssize_t
3723 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3724 	      const char *page, size_t length)
3725 {
3726 	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3727 	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3728 	struct kernfs_node *kn = NULL;
3729 	bool suspend = false;
3730 	ssize_t rv;
3731 	struct mddev *mddev = READ_ONCE(rdev->mddev);
3732 
3733 	if (!entry->store)
3734 		return -EIO;
3735 	if (!capable(CAP_SYS_ADMIN))
3736 		return -EACCES;
3737 	if (!mddev)
3738 		return -ENODEV;
3739 
3740 	if (entry->store == state_store) {
3741 		if (cmd_match(page, "remove"))
3742 			kn = sysfs_break_active_protection(kobj, attr);
3743 		if (cmd_match(page, "remove") || cmd_match(page, "re-add") ||
3744 		    cmd_match(page, "writemostly") ||
3745 		    cmd_match(page, "-writemostly"))
3746 			suspend = true;
3747 	}
3748 
3749 	rv = suspend ? mddev_suspend_and_lock(mddev) : mddev_lock(mddev);
3750 	if (!rv) {
3751 		if (rdev->mddev == NULL)
3752 			rv = -ENODEV;
3753 		else
3754 			rv = entry->store(rdev, page, length);
3755 		suspend ? mddev_unlock_and_resume(mddev) : mddev_unlock(mddev);
3756 	}
3757 
3758 	if (kn)
3759 		sysfs_unbreak_active_protection(kn);
3760 
3761 	return rv;
3762 }
3763 
3764 static void rdev_free(struct kobject *ko)
3765 {
3766 	struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3767 	kfree(rdev);
3768 }
3769 static const struct sysfs_ops rdev_sysfs_ops = {
3770 	.show		= rdev_attr_show,
3771 	.store		= rdev_attr_store,
3772 };
3773 static const struct kobj_type rdev_ktype = {
3774 	.release	= rdev_free,
3775 	.sysfs_ops	= &rdev_sysfs_ops,
3776 	.default_groups	= rdev_default_groups,
3777 };
3778 
3779 int md_rdev_init(struct md_rdev *rdev)
3780 {
3781 	rdev->desc_nr = -1;
3782 	rdev->saved_raid_disk = -1;
3783 	rdev->raid_disk = -1;
3784 	rdev->flags = 0;
3785 	rdev->data_offset = 0;
3786 	rdev->new_data_offset = 0;
3787 	rdev->sb_events = 0;
3788 	rdev->last_read_error = 0;
3789 	rdev->sb_loaded = 0;
3790 	rdev->bb_page = NULL;
3791 	atomic_set(&rdev->nr_pending, 0);
3792 	atomic_set(&rdev->read_errors, 0);
3793 	atomic_set(&rdev->corrected_errors, 0);
3794 
3795 	INIT_LIST_HEAD(&rdev->same_set);
3796 	init_waitqueue_head(&rdev->blocked_wait);
3797 
3798 	/* Add space to store bad block list.
3799 	 * This reserves the space even on arrays where it cannot
3800 	 * be used - I wonder if that matters
3801 	 */
3802 	return badblocks_init(&rdev->badblocks, 0);
3803 }
3804 EXPORT_SYMBOL_GPL(md_rdev_init);
3805 
3806 /*
3807  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3808  *
3809  * mark the device faulty if:
3810  *
3811  *   - the device is nonexistent (zero size)
3812  *   - the device has no valid superblock
3813  *
3814  * a faulty rdev _never_ has rdev->sb set.
3815  */
3816 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3817 {
3818 	struct md_rdev *rdev;
3819 	sector_t size;
3820 	int err;
3821 
3822 	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3823 	if (!rdev)
3824 		return ERR_PTR(-ENOMEM);
3825 
3826 	err = md_rdev_init(rdev);
3827 	if (err)
3828 		goto out_free_rdev;
3829 	err = alloc_disk_sb(rdev);
3830 	if (err)
3831 		goto out_clear_rdev;
3832 
3833 	rdev->bdev_file = bdev_file_open_by_dev(newdev,
3834 			BLK_OPEN_READ | BLK_OPEN_WRITE,
3835 			super_format == -2 ? &claim_rdev : rdev, NULL);
3836 	if (IS_ERR(rdev->bdev_file)) {
3837 		pr_warn("md: could not open device unknown-block(%u,%u).\n",
3838 			MAJOR(newdev), MINOR(newdev));
3839 		err = PTR_ERR(rdev->bdev_file);
3840 		goto out_clear_rdev;
3841 	}
3842 	rdev->bdev = file_bdev(rdev->bdev_file);
3843 
3844 	kobject_init(&rdev->kobj, &rdev_ktype);
3845 
3846 	size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
3847 	if (!size) {
3848 		pr_warn("md: %pg has zero or unknown size, marking faulty!\n",
3849 			rdev->bdev);
3850 		err = -EINVAL;
3851 		goto out_blkdev_put;
3852 	}
3853 
3854 	if (super_format >= 0) {
3855 		err = super_types[super_format].
3856 			load_super(rdev, NULL, super_minor);
3857 		if (err == -EINVAL) {
3858 			pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n",
3859 				rdev->bdev,
3860 				super_format, super_minor);
3861 			goto out_blkdev_put;
3862 		}
3863 		if (err < 0) {
3864 			pr_warn("md: could not read %pg's sb, not importing!\n",
3865 				rdev->bdev);
3866 			goto out_blkdev_put;
3867 		}
3868 	}
3869 
3870 	return rdev;
3871 
3872 out_blkdev_put:
3873 	fput(rdev->bdev_file);
3874 out_clear_rdev:
3875 	md_rdev_clear(rdev);
3876 out_free_rdev:
3877 	kfree(rdev);
3878 	return ERR_PTR(err);
3879 }
3880 
3881 /*
3882  * Check a full RAID array for plausibility
3883  */
3884 
3885 static int analyze_sbs(struct mddev *mddev)
3886 {
3887 	struct md_rdev *rdev, *freshest, *tmp;
3888 
3889 	freshest = NULL;
3890 	rdev_for_each_safe(rdev, tmp, mddev)
3891 		switch (super_types[mddev->major_version].
3892 			load_super(rdev, freshest, mddev->minor_version)) {
3893 		case 1:
3894 			freshest = rdev;
3895 			break;
3896 		case 0:
3897 			break;
3898 		default:
3899 			pr_warn("md: fatal superblock inconsistency in %pg -- removing from array\n",
3900 				rdev->bdev);
3901 			md_kick_rdev_from_array(rdev);
3902 		}
3903 
3904 	/* Cannot find a valid fresh disk */
3905 	if (!freshest) {
3906 		pr_warn("md: cannot find a valid disk\n");
3907 		return -EINVAL;
3908 	}
3909 
3910 	super_types[mddev->major_version].
3911 		validate_super(mddev, NULL/*freshest*/, freshest);
3912 
3913 	rdev_for_each_safe(rdev, tmp, mddev) {
3914 		if (mddev->max_disks &&
3915 		    rdev->desc_nr >= mddev->max_disks) {
3916 			pr_warn("md: %s: %pg: only %d devices permitted\n",
3917 				mdname(mddev), rdev->bdev,
3918 				mddev->max_disks);
3919 			md_kick_rdev_from_array(rdev);
3920 			continue;
3921 		}
3922 		if (rdev != freshest) {
3923 			if (super_types[mddev->major_version].
3924 			    validate_super(mddev, freshest, rdev)) {
3925 				pr_warn("md: kicking non-fresh %pg from array!\n",
3926 					rdev->bdev);
3927 				md_kick_rdev_from_array(rdev);
3928 				continue;
3929 			}
3930 		}
3931 		if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3932 		    !test_bit(Journal, &rdev->flags)) {
3933 			rdev->raid_disk = -1;
3934 			clear_bit(In_sync, &rdev->flags);
3935 		}
3936 	}
3937 
3938 	return 0;
3939 }
3940 
3941 /* Read a fixed-point number.
3942  * Numbers in sysfs attributes should be in "standard" units where
3943  * possible, so time should be in seconds.
3944  * However we internally use a much smaller unit such as
3945  * milliseconds or jiffies.
3946  * This function takes a decimal number with a possible fractional
3947  * component, and produces an integer which is the result of
3948  * multiplying that number by 10^'scale',
3949  * all without any floating-point arithmetic.
3950  */
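/*
 * For example, with scale == 3 (seconds scaled to milliseconds):
 * "5.3" yields 5300, "0.05" yields 50 and "12" yields 12000.  Digits
 * beyond 'scale' decimal places are ignored.
 */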
3951 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3952 {
3953 	unsigned long result = 0;
3954 	long decimals = -1;
3955 	while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3956 		if (*cp == '.')
3957 			decimals = 0;
3958 		else if (decimals < scale) {
3959 			unsigned int value;
3960 			value = *cp - '0';
3961 			result = result * 10 + value;
3962 			if (decimals >= 0)
3963 				decimals++;
3964 		}
3965 		cp++;
3966 	}
3967 	if (*cp == '\n')
3968 		cp++;
3969 	if (*cp)
3970 		return -EINVAL;
3971 	if (decimals < 0)
3972 		decimals = 0;
3973 	*res = result * int_pow(10, scale - decimals);
3974 	return 0;
3975 }
3976 
3977 static ssize_t
3978 safe_delay_show(struct mddev *mddev, char *page)
3979 {
3980 	unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ;
3981 
3982 	return sprintf(page, "%u.%03u\n", msec/1000, msec%1000);
3983 }
3984 static ssize_t
3985 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3986 {
3987 	unsigned long msec;
3988 
3989 	if (mddev_is_clustered(mddev)) {
3990 		pr_warn("md: Safemode is disabled for clustered mode\n");
3991 		return -EINVAL;
3992 	}
3993 
3994 	if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ)
3995 		return -EINVAL;
3996 	if (msec == 0)
3997 		mddev->safemode_delay = 0;
3998 	else {
3999 		unsigned long old_delay = mddev->safemode_delay;
4000 		unsigned long new_delay = (msec*HZ)/1000;
4001 
4002 		if (new_delay == 0)
4003 			new_delay = 1;
4004 		mddev->safemode_delay = new_delay;
4005 		if (new_delay < old_delay || old_delay == 0)
4006 			mod_timer(&mddev->safemode_timer, jiffies+1);
4007 	}
4008 	return len;
4009 }
4010 static struct md_sysfs_entry md_safe_delay =
4011 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);
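
/*
 * Example: writing "0.200" to safe_mode_delay sets the idle time after
 * the last write before the array is marked clean to 200ms (stored in
 * jiffies and rounded up to at least 1).
 */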
4012 
4013 static ssize_t
4014 level_show(struct mddev *mddev, char *page)
4015 {
4016 	struct md_personality *p;
4017 	int ret;
4018 	spin_lock(&mddev->lock);
4019 	p = mddev->pers;
4020 	if (p)
4021 		ret = sprintf(page, "%s\n", p->head.name);
4022 	else if (mddev->clevel[0])
4023 		ret = sprintf(page, "%s\n", mddev->clevel);
4024 	else if (mddev->level != LEVEL_NONE)
4025 		ret = sprintf(page, "%d\n", mddev->level);
4026 	else
4027 		ret = 0;
4028 	spin_unlock(&mddev->lock);
4029 	return ret;
4030 }
4031 
4032 static ssize_t
4033 level_store(struct mddev *mddev, const char *buf, size_t len)
4034 {
4035 	char clevel[16];
4036 	ssize_t rv;
4037 	size_t slen = len;
4038 	struct md_personality *pers, *oldpers;
4039 	long level;
4040 	void *priv, *oldpriv;
4041 	struct md_rdev *rdev;
4042 
4043 	if (slen == 0 || slen >= sizeof(clevel))
4044 		return -EINVAL;
4045 
4046 	rv = mddev_suspend_and_lock(mddev);
4047 	if (rv)
4048 		return rv;
4049 
4050 	if (mddev->pers == NULL) {
4051 		memcpy(mddev->clevel, buf, slen);
4052 		if (mddev->clevel[slen-1] == '\n')
4053 			slen--;
4054 		mddev->clevel[slen] = 0;
4055 		mddev->level = LEVEL_NONE;
4056 		rv = len;
4057 		goto out_unlock;
4058 	}
4059 	rv = -EROFS;
4060 	if (!md_is_rdwr(mddev))
4061 		goto out_unlock;
4062 
4063 	/* request to change the personality.  Need to ensure:
4064 	 *  - array is not engaged in resync/recovery/reshape
4065 	 *  - old personality can be suspended
4066 	 *  - new personality will access other array.
4067 	 */
4068 
4069 	rv = -EBUSY;
4070 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
4071 	    mddev->reshape_position != MaxSector ||
4072 	    mddev->sysfs_active)
4073 		goto out_unlock;
4074 
4075 	rv = -EINVAL;
4076 	if (!mddev->pers->quiesce) {
4077 		pr_warn("md: %s: %s does not support online personality change\n",
4078 			mdname(mddev), mddev->pers->head.name);
4079 		goto out_unlock;
4080 	}
4081 
4082 	/* Now find the new personality */
4083 	memcpy(clevel, buf, slen);
4084 	if (clevel[slen-1] == '\n')
4085 		slen--;
4086 	clevel[slen] = 0;
4087 	if (kstrtol(clevel, 10, &level))
4088 		level = LEVEL_NONE;
4089 
4090 	if (request_module("md-%s", clevel) != 0)
4091 		request_module("md-level-%s", clevel);
4092 	pers = get_pers(level, clevel);
4093 	if (!pers) {
4094 		rv = -EINVAL;
4095 		goto out_unlock;
4096 	}
4097 
4098 	if (pers == mddev->pers) {
4099 		/* Nothing to do! */
4100 		put_pers(pers);
4101 		rv = len;
4102 		goto out_unlock;
4103 	}
4104 	if (!pers->takeover) {
4105 		put_pers(pers);
4106 		pr_warn("md: %s: %s does not support personality takeover\n",
4107 			mdname(mddev), clevel);
4108 		rv = -EINVAL;
4109 		goto out_unlock;
4110 	}
4111 
4112 	rdev_for_each(rdev, mddev)
4113 		rdev->new_raid_disk = rdev->raid_disk;
4114 
4115 	/* ->takeover must set new_* and/or delta_disks
4116 	 * if it succeeds, and may set them when it fails.
4117 	 */
4118 	priv = pers->takeover(mddev);
4119 	if (IS_ERR(priv)) {
4120 		mddev->new_level = mddev->level;
4121 		mddev->new_layout = mddev->layout;
4122 		mddev->new_chunk_sectors = mddev->chunk_sectors;
4123 		mddev->raid_disks -= mddev->delta_disks;
4124 		mddev->delta_disks = 0;
4125 		mddev->reshape_backwards = 0;
4126 		put_pers(pers);
4127 		pr_warn("md: %s: %s would not accept array\n",
4128 			mdname(mddev), clevel);
4129 		rv = PTR_ERR(priv);
4130 		goto out_unlock;
4131 	}
4132 
4133 	/* Looks like we have a winner */
4134 	mddev_detach(mddev);
4135 
4136 	spin_lock(&mddev->lock);
4137 	oldpers = mddev->pers;
4138 	oldpriv = mddev->private;
4139 	mddev->pers = pers;
4140 	mddev->private = priv;
4141 	strscpy(mddev->clevel, pers->head.name, sizeof(mddev->clevel));
4142 	mddev->level = mddev->new_level;
4143 	mddev->layout = mddev->new_layout;
4144 	mddev->chunk_sectors = mddev->new_chunk_sectors;
4145 	mddev->delta_disks = 0;
4146 	mddev->reshape_backwards = 0;
4147 	mddev->degraded = 0;
4148 	spin_unlock(&mddev->lock);
4149 
4150 	if (oldpers->sync_request == NULL &&
4151 	    mddev->external) {
4152 		/* We are converting from a no-redundancy array
4153 		 * to a redundancy array and metadata is managed
4154 		 * externally so we need to be sure that writes
4155 		 * won't block due to a need to transition
4156 		 *      clean->dirty
4157 		 * until external management is started.
4158 		 */
4159 		mddev->in_sync = 0;
4160 		mddev->safemode_delay = 0;
4161 		mddev->safemode = 0;
4162 	}
4163 
4164 	oldpers->free(mddev, oldpriv);
4165 
4166 	if (oldpers->sync_request == NULL &&
4167 	    pers->sync_request != NULL) {
4168 		/* need to add the md_redundancy_group */
4169 		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4170 			pr_warn("md: cannot register extra attributes for %s\n",
4171 				mdname(mddev));
4172 		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
4173 		mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4174 		mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
4175 	}
4176 	if (oldpers->sync_request != NULL &&
4177 	    pers->sync_request == NULL) {
4178 		/* need to remove the md_redundancy_group */
4179 		if (mddev->to_remove == NULL)
4180 			mddev->to_remove = &md_redundancy_group;
4181 	}
4182 
4183 	put_pers(oldpers);
4184 
4185 	rdev_for_each(rdev, mddev) {
4186 		if (rdev->raid_disk < 0)
4187 			continue;
4188 		if (rdev->new_raid_disk >= mddev->raid_disks)
4189 			rdev->new_raid_disk = -1;
4190 		if (rdev->new_raid_disk == rdev->raid_disk)
4191 			continue;
4192 		sysfs_unlink_rdev(mddev, rdev);
4193 	}
4194 	rdev_for_each(rdev, mddev) {
4195 		if (rdev->raid_disk < 0)
4196 			continue;
4197 		if (rdev->new_raid_disk == rdev->raid_disk)
4198 			continue;
4199 		rdev->raid_disk = rdev->new_raid_disk;
4200 		if (rdev->raid_disk < 0)
4201 			clear_bit(In_sync, &rdev->flags);
4202 		else {
4203 			if (sysfs_link_rdev(mddev, rdev))
4204 				pr_warn("md: cannot register rd%d for %s after level change\n",
4205 					rdev->raid_disk, mdname(mddev));
4206 		}
4207 	}
4208 
4209 	if (pers->sync_request == NULL) {
4210 		/* this is now an array without redundancy, so
4211 		 * it must always be in_sync
4212 		 */
4213 		mddev->in_sync = 1;
4214 		timer_delete_sync(&mddev->safemode_timer);
4215 	}
4216 	pers->run(mddev);
4217 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4218 	if (!mddev->thread)
4219 		md_update_sb(mddev, 1);
4220 	sysfs_notify_dirent_safe(mddev->sysfs_level);
4221 	md_new_event();
4222 	rv = len;
4223 out_unlock:
4224 	mddev_unlock_and_resume(mddev);
4225 	return rv;
4226 }
4227 
4228 static struct md_sysfs_entry md_level =
4229 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
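
/*
 * Illustrative online takeover (hypothetical array): with the array
 * assembled and idle,
 *
 *   echo raid5 > /sys/block/md0/md/level
 *
 * asks the raid5 personality's ->takeover() to adopt the array; the
 * write fails with -EBUSY while resync/recovery/reshape is running.
 */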
4230 
4231 static ssize_t
4232 new_level_show(struct mddev *mddev, char *page)
4233 {
4234 	return sprintf(page, "%d\n", mddev->new_level);
4235 }
4236 
4237 static ssize_t
4238 new_level_store(struct mddev *mddev, const char *buf, size_t len)
4239 {
4240 	unsigned int n;
4241 	int err;
4242 
4243 	err = kstrtouint(buf, 10, &n);
4244 	if (err < 0)
4245 		return err;
4246 	err = mddev_lock(mddev);
4247 	if (err)
4248 		return err;
4249 
4250 	mddev->new_level = n;
4251 	md_update_sb(mddev, 1);
4252 
4253 	mddev_unlock(mddev);
4254 	return len;
4255 }
4256 static struct md_sysfs_entry md_new_level =
4257 __ATTR(new_level, 0664, new_level_show, new_level_store);
4258 
4259 static ssize_t
4260 bitmap_type_show(struct mddev *mddev, char *page)
4261 {
4262 	struct md_submodule_head *head;
4263 	unsigned long i;
4264 	ssize_t len = 0;
4265 
4266 	if (mddev->bitmap_id == ID_BITMAP_NONE)
4267 		len += sprintf(page + len, "[none] ");
4268 	else
4269 		len += sprintf(page + len, "none ");
4270 
4271 	xa_lock(&md_submodule);
4272 	xa_for_each(&md_submodule, i, head) {
4273 		if (head->type != MD_BITMAP)
4274 			continue;
4275 
4276 		if (mddev->bitmap_id == head->id)
4277 			len += sprintf(page + len, "[%s] ", head->name);
4278 		else
4279 			len += sprintf(page + len, "%s ", head->name);
4280 	}
4281 	xa_unlock(&md_submodule);
4282 
4283 	len += sprintf(page + len, "\n");
4284 	return len;
4285 }
4286 
4287 static ssize_t
4288 bitmap_type_store(struct mddev *mddev, const char *buf, size_t len)
4289 {
4290 	struct md_submodule_head *head;
4291 	int id;		/* numeric md_submodule_id */
4292 	unsigned long i;
4293 	int err = 0;
4294 
4295 	xa_lock(&md_submodule);
4296 
4297 	if (mddev->bitmap_ops) {
4298 		err = -EBUSY;
4299 		goto out;
4300 	}
4301 
4302 	if (cmd_match(buf, "none")) {
4303 		mddev->bitmap_id = ID_BITMAP_NONE;
4304 		goto out;
4305 	}
4306 
4307 	xa_for_each(&md_submodule, i, head) {
4308 		if (head->type == MD_BITMAP && cmd_match(buf, head->name)) {
4309 			mddev->bitmap_id = head->id;
4310 			goto out;
4311 		}
4312 	}
4313 
4314 	err = kstrtoint(buf, 10, &id);
4315 	if (err)
4316 		goto out;
4317 
4318 	if (id == ID_BITMAP_NONE) {
4319 		mddev->bitmap_id = id;
4320 		goto out;
4321 	}
4322 
4323 	head = xa_load(&md_submodule, id);
4324 	if (head && head->type == MD_BITMAP) {
4325 		mddev->bitmap_id = id;
4326 		goto out;
4327 	}
4328 
4329 	err = -ENOENT;
4330 
4331 out:
4332 	xa_unlock(&md_submodule);
4333 	return err ? err : len;
4334 }
4335 
4336 static struct md_sysfs_entry md_bitmap_type =
4337 __ATTR(bitmap_type, 0664, bitmap_type_show, bitmap_type_store);
4338 
4339 static ssize_t
4340 layout_show(struct mddev *mddev, char *page)
4341 {
4342 	/* just a number, not meaningful for all levels */
4343 	if (mddev->reshape_position != MaxSector &&
4344 	    mddev->layout != mddev->new_layout)
4345 		return sprintf(page, "%d (%d)\n",
4346 			       mddev->new_layout, mddev->layout);
4347 	return sprintf(page, "%d\n", mddev->layout);
4348 }
4349 
4350 static ssize_t
4351 layout_store(struct mddev *mddev, const char *buf, size_t len)
4352 {
4353 	unsigned int n;
4354 	int err;
4355 
4356 	err = kstrtouint(buf, 10, &n);
4357 	if (err < 0)
4358 		return err;
4359 	err = mddev_lock(mddev);
4360 	if (err)
4361 		return err;
4362 
4363 	if (mddev->pers) {
4364 		if (mddev->pers->check_reshape == NULL)
4365 			err = -EBUSY;
4366 		else if (!md_is_rdwr(mddev))
4367 			err = -EROFS;
4368 		else {
4369 			mddev->new_layout = n;
4370 			err = mddev->pers->check_reshape(mddev);
4371 			if (err)
4372 				mddev->new_layout = mddev->layout;
4373 		}
4374 	} else {
4375 		mddev->new_layout = n;
4376 		if (mddev->reshape_position == MaxSector)
4377 			mddev->layout = n;
4378 	}
4379 	mddev_unlock(mddev);
4380 	return err ?: len;
4381 }
4382 static struct md_sysfs_entry md_layout =
4383 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
4384 
4385 static ssize_t
4386 raid_disks_show(struct mddev *mddev, char *page)
4387 {
4388 	if (mddev->raid_disks == 0)
4389 		return 0;
4390 	if (mddev->reshape_position != MaxSector &&
4391 	    mddev->delta_disks != 0)
4392 		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4393 			       mddev->raid_disks - mddev->delta_disks);
4394 	return sprintf(page, "%d\n", mddev->raid_disks);
4395 }
4396 
4397 static int update_raid_disks(struct mddev *mddev, int raid_disks);
4398 
4399 static ssize_t
4400 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
4401 {
4402 	unsigned int n;
4403 	int err;
4404 
4405 	err = kstrtouint(buf, 10, &n);
4406 	if (err < 0)
4407 		return err;
4408 
4409 	err = mddev_suspend_and_lock(mddev);
4410 	if (err)
4411 		return err;
4412 	if (mddev->pers)
4413 		err = update_raid_disks(mddev, n);
4414 	else if (mddev->reshape_position != MaxSector) {
4415 		struct md_rdev *rdev;
4416 		int olddisks = mddev->raid_disks - mddev->delta_disks;
4417 
4418 		err = -EINVAL;
4419 		rdev_for_each(rdev, mddev) {
4420 			if (olddisks < n &&
4421 			    rdev->data_offset < rdev->new_data_offset)
4422 				goto out_unlock;
4423 			if (olddisks > n &&
4424 			    rdev->data_offset > rdev->new_data_offset)
4425 				goto out_unlock;
4426 		}
4427 		err = 0;
4428 		mddev->delta_disks = n - olddisks;
4429 		mddev->raid_disks = n;
4430 		mddev->reshape_backwards = (mddev->delta_disks < 0);
4431 	} else
4432 		mddev->raid_disks = n;
4433 out_unlock:
4434 	mddev_unlock_and_resume(mddev);
4435 	return err ? err : len;
4436 }
4437 static struct md_sysfs_entry md_raid_disks =
4438 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
4439 
4440 static ssize_t
4441 uuid_show(struct mddev *mddev, char *page)
4442 {
4443 	return sprintf(page, "%pU\n", mddev->uuid);
4444 }
4445 static struct md_sysfs_entry md_uuid =
4446 __ATTR(uuid, S_IRUGO, uuid_show, NULL);
4447 
4448 static ssize_t
4449 chunk_size_show(struct mddev *mddev, char *page)
4450 {
4451 	if (mddev->reshape_position != MaxSector &&
4452 	    mddev->chunk_sectors != mddev->new_chunk_sectors)
4453 		return sprintf(page, "%d (%d)\n",
4454 			       mddev->new_chunk_sectors << 9,
4455 			       mddev->chunk_sectors << 9);
4456 	return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
4457 }
4458 
4459 static ssize_t
4460 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
4461 {
4462 	unsigned long n;
4463 	int err;
4464 
4465 	err = kstrtoul(buf, 10, &n);
4466 	if (err < 0)
4467 		return err;
4468 
4469 	err = mddev_lock(mddev);
4470 	if (err)
4471 		return err;
4472 	if (mddev->pers) {
4473 		if (mddev->pers->check_reshape == NULL)
4474 			err = -EBUSY;
4475 		else if (!md_is_rdwr(mddev))
4476 			err = -EROFS;
4477 		else {
4478 			mddev->new_chunk_sectors = n >> 9;
4479 			err = mddev->pers->check_reshape(mddev);
4480 			if (err)
4481 				mddev->new_chunk_sectors = mddev->chunk_sectors;
4482 		}
4483 	} else {
4484 		mddev->new_chunk_sectors = n >> 9;
4485 		if (mddev->reshape_position == MaxSector)
4486 			mddev->chunk_sectors = n >> 9;
4487 	}
4488 	mddev_unlock(mddev);
4489 	return err ?: len;
4490 }
4491 static struct md_sysfs_entry md_chunk_size =
4492 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
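
/*
 * Note that chunk_size is exposed in bytes while mddev->chunk_sectors
 * holds 512-byte sectors: writing "524288" stores 1024 sectors, and
 * reading the file reports 524288 again.
 */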
4493 
4494 static ssize_t
4495 resync_start_show(struct mddev *mddev, char *page)
4496 {
4497 	if (mddev->resync_offset == MaxSector)
4498 		return sprintf(page, "none\n");
4499 	return sprintf(page, "%llu\n", (unsigned long long)mddev->resync_offset);
4500 }
4501 
4502 static ssize_t
4503 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
4504 {
4505 	unsigned long long n;
4506 	int err;
4507 
4508 	if (cmd_match(buf, "none"))
4509 		n = MaxSector;
4510 	else {
4511 		err = kstrtoull(buf, 10, &n);
4512 		if (err < 0)
4513 			return err;
4514 		if (n != (sector_t)n)
4515 			return -EINVAL;
4516 	}
4517 
4518 	err = mddev_lock(mddev);
4519 	if (err)
4520 		return err;
4521 	if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
4522 		err = -EBUSY;
4523 
4524 	if (!err) {
4525 		mddev->resync_offset = n;
4526 		if (mddev->pers)
4527 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
4528 	}
4529 	mddev_unlock(mddev);
4530 	return err ?: len;
4531 }
4532 static struct md_sysfs_entry md_resync_start =
4533 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4534 		resync_start_show, resync_start_store);
4535 
4536 /*
4537  * The array state can be:
4538  *
4539  * clear
4540  *     No devices, no size, no level
4541  *     Equivalent to STOP_ARRAY ioctl
4542  * inactive
4543  *     May have some settings, but array is not active
4544  *        all IO results in error
4545  *     When written, doesn't tear down array, but just stops it
4546  * suspended (not supported yet)
4547  *     All IO requests will block. The array can be reconfigured.
4548  *     Writing this, if accepted, will block until array is quiescent
4549  * readonly
4550  *     no resync can happen.  no superblocks get written.
4551  *     write requests fail
4552  * read-auto
4553  *     like readonly, but behaves like 'clean' on a write request.
4554  *
4555  * clean - no pending writes, but otherwise active.
4556  *     When written to inactive array, starts without resync
4557  *     If a write request arrives then
4558  *       if metadata is known, mark 'dirty' and switch to 'active'.
4559  *       if not known, block and switch to write-pending
4560  *     If written to an active array that has pending writes, then fails.
4561  * active
4562  *     fully active: IO and resync can be happening.
4563  *     When written to inactive array, starts with resync
4564  *
4565  * write-pending
4566  *     clean, but writes are blocked waiting for 'active' to be written.
4567  *
4568  * active-idle
4569  *     like active, but no writes have been seen for a while (100msec).
4570  *
4571  * broken
4572  *     Array is failed. It's useful because mounted-arrays aren't stopped
4573  *     when array is failed, so this state will at least alert the user that
4574  *     something is wrong.
4575  */
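/*
 * For example (hypothetical array), "echo readonly > array_state"
 * quiesces md0 and stops superblock updates, and a later
 * "echo active > array_state" restarts it read-write.
 */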
4576 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
4577 		   write_pending, active_idle, broken, bad_word};
4578 static char *array_states[] = {
4579 	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
4580 	"write-pending", "active-idle", "broken", NULL };
4581 
4582 static int match_word(const char *word, char **list)
4583 {
4584 	int n;
4585 	for (n=0; list[n]; n++)
4586 		if (cmd_match(word, list[n]))
4587 			break;
4588 	return n;
4589 }
4590 
4591 static ssize_t
4592 array_state_show(struct mddev *mddev, char *page)
4593 {
4594 	enum array_state st = inactive;
4595 
4596 	if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
4597 		switch(mddev->ro) {
4598 		case MD_RDONLY:
4599 			st = readonly;
4600 			break;
4601 		case MD_AUTO_READ:
4602 			st = read_auto;
4603 			break;
4604 		case MD_RDWR:
4605 			spin_lock(&mddev->lock);
4606 			if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
4607 				st = write_pending;
4608 			else if (mddev->in_sync)
4609 				st = clean;
4610 			else if (mddev->safemode)
4611 				st = active_idle;
4612 			else
4613 				st = active;
4614 			spin_unlock(&mddev->lock);
4615 		}
4616 
4617 		if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4618 			st = broken;
4619 	} else {
4620 		if (list_empty(&mddev->disks) &&
4621 		    mddev->raid_disks == 0 &&
4622 		    mddev->dev_sectors == 0)
4623 			st = clear;
4624 		else
4625 			st = inactive;
4626 	}
4627 	return sprintf(page, "%s\n", array_states[st]);
4628 }
4629 
4630 static int do_md_stop(struct mddev *mddev, int ro);
4631 static int md_set_readonly(struct mddev *mddev);
4632 static int restart_array(struct mddev *mddev);
4633 
4634 static ssize_t
4635 array_state_store(struct mddev *mddev, const char *buf, size_t len)
4636 {
4637 	int err = 0;
4638 	enum array_state st = match_word(buf, array_states);
4639 
4640 	/* No lock dependent actions */
4641 	switch (st) {
4642 	case suspended:		/* not supported yet */
4643 	case write_pending:	/* cannot be set */
4644 	case active_idle:	/* cannot be set */
4645 	case broken:		/* cannot be set */
4646 	case bad_word:
4647 		return -EINVAL;
4648 	case clear:
4649 	case readonly:
4650 	case inactive:
4651 	case read_auto:
4652 		if (!mddev->pers || !md_is_rdwr(mddev))
4653 			break;
4654 		/* writing via sysfs does not open mddev, so openers should be 0 */
4655 		err = mddev_set_closing_and_sync_blockdev(mddev, 0);
4656 		if (err)
4657 			return err;
4658 		break;
4659 	default:
4660 		break;
4661 	}
4662 
4663 	if (mddev->pers && (st == active || st == clean) &&
4664 	    mddev->ro != MD_RDONLY) {
4665 		/* don't take reconfig_mutex when toggling between
4666 		 * clean and active
4667 		 */
4668 		spin_lock(&mddev->lock);
4669 		if (st == active) {
4670 			restart_array(mddev);
4671 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4672 			md_wakeup_thread(mddev->thread);
4673 			wake_up(&mddev->sb_wait);
4674 		} else /* st == clean */ {
4675 			restart_array(mddev);
4676 			if (!set_in_sync(mddev))
4677 				err = -EBUSY;
4678 		}
4679 		if (!err)
4680 			sysfs_notify_dirent_safe(mddev->sysfs_state);
4681 		spin_unlock(&mddev->lock);
4682 		return err ?: len;
4683 	}
4684 	err = mddev_lock(mddev);
4685 	if (err)
4686 		return err;
4687 
4688 	switch (st) {
4689 	case inactive:
4690 		/* stop an active array, return 0 otherwise */
4691 		if (mddev->pers)
4692 			err = do_md_stop(mddev, 2);
4693 		break;
4694 	case clear:
4695 		err = do_md_stop(mddev, 0);
4696 		break;
4697 	case readonly:
4698 		if (mddev->pers)
4699 			err = md_set_readonly(mddev);
4700 		else {
4701 			mddev->ro = MD_RDONLY;
4702 			set_disk_ro(mddev->gendisk, 1);
4703 			err = do_md_run(mddev);
4704 		}
4705 		break;
4706 	case read_auto:
4707 		if (mddev->pers) {
4708 			if (md_is_rdwr(mddev))
4709 				err = md_set_readonly(mddev);
4710 			else if (mddev->ro == MD_RDONLY)
4711 				err = restart_array(mddev);
4712 			if (err == 0) {
4713 				mddev->ro = MD_AUTO_READ;
4714 				set_disk_ro(mddev->gendisk, 0);
4715 			}
4716 		} else {
4717 			mddev->ro = MD_AUTO_READ;
4718 			err = do_md_run(mddev);
4719 		}
4720 		break;
4721 	case clean:
4722 		if (mddev->pers) {
4723 			err = restart_array(mddev);
4724 			if (err)
4725 				break;
4726 			spin_lock(&mddev->lock);
4727 			if (!set_in_sync(mddev))
4728 				err = -EBUSY;
4729 			spin_unlock(&mddev->lock);
4730 		} else
4731 			err = -EINVAL;
4732 		break;
4733 	case active:
4734 		if (mddev->pers) {
4735 			err = restart_array(mddev);
4736 			if (err)
4737 				break;
4738 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4739 			wake_up(&mddev->sb_wait);
4740 			err = 0;
4741 		} else {
4742 			mddev->ro = MD_RDWR;
4743 			set_disk_ro(mddev->gendisk, 0);
4744 			err = do_md_run(mddev);
4745 		}
4746 		break;
4747 	default:
4748 		err = -EINVAL;
4749 		break;
4750 	}
4751 
4752 	if (!err) {
4753 		if (mddev->hold_active == UNTIL_IOCTL)
4754 			mddev->hold_active = 0;
4755 		sysfs_notify_dirent_safe(mddev->sysfs_state);
4756 	}
4757 	mddev_unlock(mddev);
4758 
4759 	if (st == readonly || st == read_auto || st == inactive ||
4760 	    (err && st == clear))
4761 		clear_bit(MD_CLOSING, &mddev->flags);
4762 
4763 	return err ?: len;
4764 }
4765 static struct md_sysfs_entry md_array_state =
4766 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
4767 
4768 static ssize_t
4769 max_corrected_read_errors_show(struct mddev *mddev, char *page)
{
4770 	return sprintf(page, "%d\n",
4771 		       atomic_read(&mddev->max_corr_read_errors));
4772 }
4773 
4774 static ssize_t
4775 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4776 {
4777 	unsigned int n;
4778 	int rv;
4779 
4780 	rv = kstrtouint(buf, 10, &n);
4781 	if (rv < 0)
4782 		return rv;
4783 	if (n > INT_MAX)
4784 		return -EINVAL;
4785 	atomic_set(&mddev->max_corr_read_errors, n);
4786 	return len;
4787 }
4788 
4789 static struct md_sysfs_entry max_corr_read_errors =
4790 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4791 	max_corrected_read_errors_store);
4792 
4793 static ssize_t
4794 null_show(struct mddev *mddev, char *page)
4795 {
4796 	return -EINVAL;
4797 }
4798 
4799 static ssize_t
4800 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4801 {
4802 	/* buf must be %d:%d\n? giving major and minor numbers */
4803 	/* The new device is added to the array.
4804 	 * If the array has a persistent superblock, we read the
4805 	 * superblock to initialise info and check validity.
4806 	 * Otherwise, only checking done is that in bind_rdev_to_array,
4807 	 * which mainly checks size.
4808 	 */
4809 	char *e;
4810 	int major = simple_strtoul(buf, &e, 10);
4811 	int minor;
4812 	dev_t dev;
4813 	struct md_rdev *rdev;
4814 	int err;
4815 
4816 	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4817 		return -EINVAL;
4818 	minor = simple_strtoul(e+1, &e, 10);
4819 	if (*e && *e != '\n')
4820 		return -EINVAL;
4821 	dev = MKDEV(major, minor);
4822 	if (major != MAJOR(dev) ||
4823 	    minor != MINOR(dev))
4824 		return -EOVERFLOW;
4825 
4826 	err = mddev_suspend_and_lock(mddev);
4827 	if (err)
4828 		return err;
4829 	if (mddev->persistent) {
4830 		rdev = md_import_device(dev, mddev->major_version,
4831 					mddev->minor_version);
4832 		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4833 			struct md_rdev *rdev0
4834 				= list_entry(mddev->disks.next,
4835 					     struct md_rdev, same_set);
4836 			err = super_types[mddev->major_version]
4837 				.load_super(rdev, rdev0, mddev->minor_version);
4838 			if (err < 0)
4839 				goto out;
4840 		}
4841 	} else if (mddev->external)
4842 		rdev = md_import_device(dev, -2, -1);
4843 	else
4844 		rdev = md_import_device(dev, -1, -1);
4845 
4846 	if (IS_ERR(rdev)) {
4847 		mddev_unlock_and_resume(mddev);
4848 		return PTR_ERR(rdev);
4849 	}
4850 	err = bind_rdev_to_array(rdev, mddev);
4851  out:
4852 	if (err)
4853 		export_rdev(rdev);
4854 	mddev_unlock_and_resume(mddev);
4855 	if (!err)
4856 		md_new_event();
4857 	return err ? err : len;
4858 }
4859 
4860 static struct md_sysfs_entry md_new_device =
4861 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
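
/*
 * Example (hypothetical major:minor): the block device with dev_t 8:16
 * is added to the array with
 *
 *   echo "8:16" > /sys/block/md0/md/new_dev
 */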
4862 
4863 static ssize_t
4864 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4865 {
4866 	char *end;
4867 	unsigned long chunk, end_chunk;
4868 	int err;
4869 
4870 	if (!md_bitmap_enabled(mddev, false))
4871 		return len;
4872 
4873 	err = mddev_lock(mddev);
4874 	if (err)
4875 		return err;
4876 	if (!mddev->bitmap)
4877 		goto out;
4878 	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4879 	while (*buf) {
4880 		chunk = end_chunk = simple_strtoul(buf, &end, 0);
4881 		if (buf == end)
4882 			break;
4883 
4884 		if (*end == '-') { /* range */
4885 			buf = end + 1;
4886 			end_chunk = simple_strtoul(buf, &end, 0);
4887 			if (buf == end)
4888 				break;
4889 		}
4890 
4891 		if (*end && !isspace(*end))
4892 			break;
4893 
4894 		mddev->bitmap_ops->dirty_bits(mddev, chunk, end_chunk);
4895 		buf = skip_spaces(end);
4896 	}
4897 	mddev->bitmap_ops->unplug(mddev, true); /* flush the bits to disk */
4898 out:
4899 	mddev_unlock(mddev);
4900 	return len;
4901 }
4902 
4903 static struct md_sysfs_entry md_bitmap =
4904 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
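
/*
 * Example: "echo '100-200 300' > bitmap_set_bits" marks bitmap chunks
 * 100 through 200 and chunk 300 dirty so those regions will be
 * resynced; entries are single chunk numbers or first-last ranges.
 */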
4905 
4906 static ssize_t
4907 size_show(struct mddev *mddev, char *page)
4908 {
4909 	return sprintf(page, "%llu\n",
4910 		(unsigned long long)mddev->dev_sectors / 2);
4911 }
4912 
4913 static int update_size(struct mddev *mddev, sector_t num_sectors);
4914 
4915 static ssize_t
4916 size_store(struct mddev *mddev, const char *buf, size_t len)
4917 {
4918 	/* If array is inactive, we can reduce the component size, but
4919 	 * not increase it (except from 0).
4920 	 * If array is active, we can try an on-line resize
4921 	 */
4922 	sector_t sectors;
4923 	int err = strict_blocks_to_sectors(buf, &sectors);
4924 
4925 	if (err < 0)
4926 		return err;
4927 	err = mddev_lock(mddev);
4928 	if (err)
4929 		return err;
4930 	if (mddev->pers) {
4931 		err = update_size(mddev, sectors);
4932 		if (err == 0)
4933 			md_update_sb(mddev, 1);
4934 	} else {
4935 		if (mddev->dev_sectors == 0 ||
4936 		    mddev->dev_sectors > sectors)
4937 			mddev->dev_sectors = sectors;
4938 		else
4939 			err = -ENOSPC;
4940 	}
4941 	mddev_unlock(mddev);
4942 	return err ? err : len;
4943 }
4944 
4945 static struct md_sysfs_entry md_size =
4946 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4947 
4948 /* Metadata version.
4949  * This is one of
4950  *   'none' for arrays with no metadata (good luck...)
4951  *   'external' for arrays with externally managed metadata,
4952  * or N.M for internally known formats
4953  */
4954 static ssize_t
4955 metadata_show(struct mddev *mddev, char *page)
4956 {
4957 	if (mddev->persistent)
4958 		return sprintf(page, "%d.%d\n",
4959 			       mddev->major_version, mddev->minor_version);
4960 	else if (mddev->external)
4961 		return sprintf(page, "external:%s\n", mddev->metadata_type);
4962 	else
4963 		return sprintf(page, "none\n");
4964 }
4965 
4966 static ssize_t
4967 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4968 {
4969 	int major, minor;
4970 	char *e;
4971 	int err;
4972 	/* Changing the details of 'external' metadata is
4973 	 * always permitted.  Otherwise there must be
4974 	 * no devices attached to the array.
4975 	 */
4976 
4977 	err = mddev_lock(mddev);
4978 	if (err)
4979 		return err;
4980 	err = -EBUSY;
4981 	if (mddev->external && strncmp(buf, "external:", 9) == 0)
4982 		;
4983 	else if (!list_empty(&mddev->disks))
4984 		goto out_unlock;
4985 
4986 	err = 0;
4987 	if (cmd_match(buf, "none")) {
4988 		mddev->persistent = 0;
4989 		mddev->external = 0;
4990 		mddev->major_version = 0;
4991 		mddev->minor_version = 90;
4992 		goto out_unlock;
4993 	}
4994 	if (strncmp(buf, "external:", 9) == 0) {
4995 		size_t namelen = len-9;
4996 		if (namelen >= sizeof(mddev->metadata_type))
4997 			namelen = sizeof(mddev->metadata_type)-1;
4998 		memcpy(mddev->metadata_type, buf+9, namelen);
4999 		mddev->metadata_type[namelen] = 0;
5000 		if (namelen && mddev->metadata_type[namelen-1] == '\n')
5001 			mddev->metadata_type[--namelen] = 0;
5002 		mddev->persistent = 0;
5003 		mddev->external = 1;
5004 		mddev->major_version = 0;
5005 		mddev->minor_version = 90;
5006 		goto out_unlock;
5007 	}
5008 	major = simple_strtoul(buf, &e, 10);
5009 	err = -EINVAL;
5010 	if (e == buf || *e != '.')
5011 		goto out_unlock;
5012 	buf = e + 1;
5013 	minor = simple_strtoul(buf, &e, 10);
5014 	if (e == buf || (*e && *e != '\n'))
5015 		goto out_unlock;
5016 	err = -ENOENT;
5017 	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
5018 		goto out_unlock;
5019 	mddev->major_version = major;
5020 	mddev->minor_version = minor;
5021 	mddev->persistent = 1;
5022 	mddev->external = 0;
5023 	err = 0;
5024 out_unlock:
5025 	mddev_unlock(mddev);
5026 	return err ?: len;
5027 }
5028 
5029 static struct md_sysfs_entry md_metadata =
5030 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
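
/*
 * Examples: "echo 1.2 > metadata_version" selects internal v1.2
 * superblocks and "echo external:imsm > metadata_version" hands
 * metadata management to userspace ("imsm" is only an illustrative
 * name here); apart from renaming an existing external type, the
 * write requires an array with no attached devices.
 */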
5031 
5032 static bool rdev_needs_recovery(struct md_rdev *rdev, sector_t sectors)
5033 {
5034 	return rdev->raid_disk >= 0 &&
5035 	       !test_bit(Journal, &rdev->flags) &&
5036 	       !test_bit(Faulty, &rdev->flags) &&
5037 	       !test_bit(In_sync, &rdev->flags) &&
5038 	       rdev->recovery_offset < sectors;
5039 }
5040 
5041 static enum sync_action md_get_active_sync_action(struct mddev *mddev)
5042 {
5043 	struct md_rdev *rdev;
5044 	bool is_recover = false;
5045 
5046 	if (mddev->resync_offset < MaxSector)
5047 		return ACTION_RESYNC;
5048 
5049 	if (mddev->reshape_position != MaxSector)
5050 		return ACTION_RESHAPE;
5051 
5052 	rcu_read_lock();
5053 	rdev_for_each_rcu(rdev, mddev) {
5054 		if (rdev_needs_recovery(rdev, MaxSector)) {
5055 			is_recover = true;
5056 			break;
5057 		}
5058 	}
5059 	rcu_read_unlock();
5060 
5061 	return is_recover ? ACTION_RECOVER : ACTION_IDLE;
5062 }
5063 
5064 enum sync_action md_sync_action(struct mddev *mddev)
5065 {
5066 	unsigned long recovery = mddev->recovery;
5067 	enum sync_action active_action;
5068 
5069 	/*
5070 	 * frozen has the highest priority, meaning a running sync_thread will
5071 	 * be stopped immediately and no new sync_thread can start.
5072 	 */
5073 	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
5074 		return ACTION_FROZEN;
5075 
5076 	/*
5077 	 * read-only array can't register sync_thread, and it can only
5078 	 * add/remove spares.
5079 	 */
5080 	if (!md_is_rdwr(mddev))
5081 		return ACTION_IDLE;
5082 
5083 	/*
5084 	 * idle means no sync_thread is running, and no new sync_thread is
5085 	 * requested.
5086 	 */
5087 	if (!test_bit(MD_RECOVERY_RUNNING, &recovery) &&
5088 	    !test_bit(MD_RECOVERY_NEEDED, &recovery))
5089 		return ACTION_IDLE;
5090 
5091 	/*
5092 	 * Check if any sync operation (resync/recover/reshape) is
5093 	 * currently active. This ensures that only one sync operation
5094 	 * can run at a time. Returns the type of active operation, or
5095 	 * ACTION_IDLE if none are active.
5096 	 */
5097 	active_action = md_get_active_sync_action(mddev);
5098 	if (active_action != ACTION_IDLE)
5099 		return active_action;
5100 
5101 	if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
5102 		return ACTION_RESHAPE;
5103 
5104 	if (test_bit(MD_RECOVERY_RECOVER, &recovery))
5105 		return ACTION_RECOVER;
5106 
5107 	if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
5108 		/*
5109 		 * MD_RECOVERY_CHECK must be paired with
5110 		 * MD_RECOVERY_REQUESTED.
5111 		 */
5112 		if (test_bit(MD_RECOVERY_CHECK, &recovery))
5113 			return ACTION_CHECK;
5114 		if (test_bit(MD_RECOVERY_REQUESTED, &recovery))
5115 			return ACTION_REPAIR;
5116 		return ACTION_RESYNC;
5117 	}
5118 
5119 	/*
5120 	 * MD_RECOVERY_NEEDED or MD_RECOVERY_RUNNING is set, but no
5121 	 * sync_action is specified.
5122 	 */
5123 	return ACTION_IDLE;
5124 }
5125 
5126 enum sync_action md_sync_action_by_name(const char *page)
5127 {
5128 	enum sync_action action;
5129 
5130 	for (action = 0; action < NR_SYNC_ACTIONS; ++action) {
5131 		if (cmd_match(page, action_name[action]))
5132 			return action;
5133 	}
5134 
5135 	return NR_SYNC_ACTIONS;
5136 }
5137 
5138 const char *md_sync_action_name(enum sync_action action)
5139 {
5140 	return action_name[action];
5141 }
5142 
5143 static ssize_t
5144 action_show(struct mddev *mddev, char *page)
5145 {
5146 	enum sync_action action = md_sync_action(mddev);
5147 
5148 	return sprintf(page, "%s\n", md_sync_action_name(action));
5149 }
5150 
5151 /**
5152  * stop_sync_thread() - wait for sync_thread to stop if it's running.
5153  * @mddev:	the array.
5154  * @locked:	if set, reconfig_mutex will still be held after this function
5155  *		returns; if not set, reconfig_mutex will be released after this
5156  *		function returns.
5157  */
5158 static void stop_sync_thread(struct mddev *mddev, bool locked)
5159 {
5160 	int sync_seq = atomic_read(&mddev->sync_seq);
5161 
5162 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5163 		if (!locked)
5164 			mddev_unlock(mddev);
5165 		return;
5166 	}
5167 
5168 	mddev_unlock(mddev);
5169 
5170 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5171 	/*
5172 	 * Thread might be blocked waiting for metadata update which will now
5173 	 * never happen
5174 	 */
5175 	md_wakeup_thread_directly(&mddev->sync_thread);
5176 	if (work_pending(&mddev->sync_work))
5177 		flush_work(&mddev->sync_work);
5178 
5179 	wait_event(resync_wait,
5180 		   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
5181 		   (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery) &&
5182 		    sync_seq != atomic_read(&mddev->sync_seq)));
5183 
5184 	if (locked)
5185 		mddev_lock_nointr(mddev);
5186 }
5187 
5188 void md_idle_sync_thread(struct mddev *mddev)
5189 {
5190 	lockdep_assert_held(&mddev->reconfig_mutex);
5191 
5192 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5193 	stop_sync_thread(mddev, true);
5194 }
5195 EXPORT_SYMBOL_GPL(md_idle_sync_thread);
5196 
5197 void md_frozen_sync_thread(struct mddev *mddev)
5198 {
5199 	lockdep_assert_held(&mddev->reconfig_mutex);
5200 
5201 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5202 	stop_sync_thread(mddev, true);
5203 }
5204 EXPORT_SYMBOL_GPL(md_frozen_sync_thread);
5205 
5206 void md_unfrozen_sync_thread(struct mddev *mddev)
5207 {
5208 	lockdep_assert_held(&mddev->reconfig_mutex);
5209 
5210 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5211 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5212 	md_wakeup_thread(mddev->thread);
5213 	sysfs_notify_dirent_safe(mddev->sysfs_action);
5214 }
5215 EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread);
5216 
5217 static int mddev_start_reshape(struct mddev *mddev)
5218 {
5219 	int ret;
5220 
5221 	if (mddev->pers->start_reshape == NULL)
5222 		return -EINVAL;
5223 
5224 	if (mddev->reshape_position == MaxSector ||
5225 	    mddev->pers->check_reshape == NULL ||
5226 	    mddev->pers->check_reshape(mddev)) {
5227 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5228 		ret = mddev->pers->start_reshape(mddev);
5229 		if (ret)
5230 			return ret;
5231 	} else {
5232 		/*
5233 		 * If reshape is still in progress, and md_check_recovery() can
5234 		 * continue to reshape, don't restart reshape because data can
5235 		 * be corrupted for raid456.
5236 		 */
5237 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5238 	}
5239 
5240 	sysfs_notify_dirent_safe(mddev->sysfs_degraded);
5241 	return 0;
5242 }
5243 
5244 static ssize_t
5245 action_store(struct mddev *mddev, const char *page, size_t len)
5246 {
5247 	int ret;
5248 	enum sync_action action;
5249 
5250 	if (!mddev->pers || !mddev->pers->sync_request)
5251 		return -EINVAL;
5252 
5253 retry:
5254 	if (work_busy(&mddev->sync_work))
5255 		flush_work(&mddev->sync_work);
5256 
5257 	ret = mddev_lock(mddev);
5258 	if (ret)
5259 		return ret;
5260 
5261 	if (work_busy(&mddev->sync_work)) {
5262 		mddev_unlock(mddev);
5263 		goto retry;
5264 	}
5265 
5266 	action = md_sync_action_by_name(page);
5267 
5268 	/* TODO: mdadm relies on "idle" to start sync_thread. */
5269 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5270 		switch (action) {
5271 		case ACTION_FROZEN:
5272 			md_frozen_sync_thread(mddev);
5273 			ret = len;
5274 			goto out;
5275 		case ACTION_IDLE:
5276 			md_idle_sync_thread(mddev);
5277 			break;
5278 		case ACTION_RESHAPE:
5279 		case ACTION_RECOVER:
5280 		case ACTION_CHECK:
5281 		case ACTION_REPAIR:
5282 		case ACTION_RESYNC:
5283 			ret = -EBUSY;
5284 			goto out;
5285 		default:
5286 			ret = -EINVAL;
5287 			goto out;
5288 		}
5289 	} else {
5290 		switch (action) {
5291 		case ACTION_FROZEN:
5292 			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5293 			ret = len;
5294 			goto out;
5295 		case ACTION_RESHAPE:
5296 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5297 			ret = mddev_start_reshape(mddev);
5298 			if (ret)
5299 				goto out;
5300 			break;
5301 		case ACTION_RECOVER:
5302 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5303 			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5304 			break;
5305 		case ACTION_CHECK:
5306 			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5307 			fallthrough;
5308 		case ACTION_REPAIR:
5309 			set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
5310 			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5311 			fallthrough;
5312 		case ACTION_RESYNC:
5313 		case ACTION_IDLE:
5314 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5315 			break;
5316 		default:
5317 			ret = -EINVAL;
5318 			goto out;
5319 		}
5320 	}
5321 
5322 	if (mddev->ro == MD_AUTO_READ) {
5323 		/* A write to sync_action is enough to justify
5324 		 * canceling read-auto mode
5325 		 */
5326 		mddev->ro = MD_RDWR;
5327 		md_wakeup_thread(mddev->sync_thread);
5328 	}
5329 
5330 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5331 	md_wakeup_thread(mddev->thread);
5332 	sysfs_notify_dirent_safe(mddev->sysfs_action);
5333 	ret = len;
5334 
5335 out:
5336 	mddev_unlock(mddev);
5337 	return ret;
5338 }
5339 
5340 static struct md_sysfs_entry md_scan_mode =
5341 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
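
/*
 * Typical sync_action usage: "echo check > sync_action" starts a
 * read-only scrub whose mismatches are counted in mismatch_cnt,
 * "echo repair > sync_action" also corrects them, and
 * "echo idle > sync_action" interrupts a running sync_thread.
 */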
5342 
5343 static ssize_t
5344 last_sync_action_show(struct mddev *mddev, char *page)
5345 {
5346 	return sprintf(page, "%s\n",
5347 		       md_sync_action_name(mddev->last_sync_action));
5348 }
5349 
5350 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
5351 
5352 static ssize_t
5353 mismatch_cnt_show(struct mddev *mddev, char *page)
5354 {
5355 	return sprintf(page, "%llu\n",
5356 		       (unsigned long long)
5357 		       atomic64_read(&mddev->resync_mismatches));
5358 }
5359 
5360 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
5361 
5362 static ssize_t
5363 sync_min_show(struct mddev *mddev, char *page)
5364 {
5365 	return sprintf(page, "%d (%s)\n", speed_min(mddev),
5366 		       mddev->sync_speed_min ? "local" : "system");
5367 }
5368 
5369 static ssize_t
5370 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
5371 {
5372 	unsigned int min;
5373 	int rv;
5374 
5375 	if (strncmp(buf, "system", 6) == 0) {
5376 		min = 0;
5377 	} else {
5378 		rv = kstrtouint(buf, 10, &min);
5379 		if (rv < 0)
5380 			return rv;
5381 		if (min == 0)
5382 			return -EINVAL;
5383 	}
5384 	mddev->sync_speed_min = min;
5385 	return len;
5386 }
5387 
5388 static struct md_sysfs_entry md_sync_min =
5389 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
5390 
5391 static ssize_t
5392 sync_max_show(struct mddev *mddev, char *page)
5393 {
5394 	return sprintf(page, "%d (%s)\n", speed_max(mddev),
5395 		       mddev->sync_speed_max ? "local" : "system");
5396 }
5397 
5398 static ssize_t
5399 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
5400 {
5401 	unsigned int max;
5402 	int rv;
5403 
5404 	if (strncmp(buf, "system", 6) == 0) {
5405 		max = 0;
5406 	} else {
5407 		rv = kstrtouint(buf, 10, &max);
5408 		if (rv < 0)
5409 			return rv;
5410 		if (max == 0)
5411 			return -EINVAL;
5412 	}
5413 	mddev->sync_speed_max = max;
5414 	return len;
5415 }
5416 
5417 static struct md_sysfs_entry md_sync_max =
5418 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
5419 
5420 static ssize_t
5421 sync_io_depth_show(struct mddev *mddev, char *page)
5422 {
5423 	return sprintf(page, "%d (%s)\n", sync_io_depth(mddev),
5424 		       mddev->sync_io_depth ? "local" : "system");
5425 }
5426 
5427 static ssize_t
5428 sync_io_depth_store(struct mddev *mddev, const char *buf, size_t len)
5429 {
5430 	unsigned int max;
5431 	int rv;
5432 
5433 	if (strncmp(buf, "system", 6) == 0) {
5434 		max = 0;
5435 	} else {
5436 		rv = kstrtouint(buf, 10, &max);
5437 		if (rv < 0)
5438 			return rv;
5439 		if (max == 0)
5440 			return -EINVAL;
5441 	}
5442 	mddev->sync_io_depth = max;
5443 	return len;
5444 }
5445 
5446 static struct md_sysfs_entry md_sync_io_depth =
5447 __ATTR_RW(sync_io_depth);
5448 
5449 static ssize_t
5450 degraded_show(struct mddev *mddev, char *page)
5451 {
5452 	return sprintf(page, "%d\n", mddev->degraded);
5453 }
5454 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
5455 
5456 static ssize_t
5457 sync_force_parallel_show(struct mddev *mddev, char *page)
5458 {
5459 	return sprintf(page, "%d\n", mddev->parallel_resync);
5460 }
5461 
5462 static ssize_t
5463 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
5464 {
5465 	long n;
5466 
5467 	if (kstrtol(buf, 10, &n))
5468 		return -EINVAL;
5469 
5470 	if (n != 0 && n != 1)
5471 		return -EINVAL;
5472 
5473 	mddev->parallel_resync = n;
5474 
5475 	if (mddev->sync_thread)
5476 		wake_up(&resync_wait);
5477 
5478 	return len;
5479 }
5480 
5481 /* force parallel resync, even with shared block devices */
5482 static struct md_sysfs_entry md_sync_force_parallel =
5483 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
5484        sync_force_parallel_show, sync_force_parallel_store);
5485 
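/*
 * Report the current rate of the running sync in KiB/sec: db is the number
 * of 512-byte sectors completed since the last mark, so db/dt/2 converts
 * sectors per second into KiB per second.
 */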
5486 static ssize_t
5487 sync_speed_show(struct mddev *mddev, char *page)
5488 {
5489 	unsigned long resync, dt, db;
5490 	if (mddev->curr_resync == MD_RESYNC_NONE)
5491 		return sprintf(page, "none\n");
5492 	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
5493 	dt = (jiffies - mddev->resync_mark) / HZ;
5494 	if (!dt) dt++;
5495 	db = resync - mddev->resync_mark_cnt;
5496 	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
5497 }
5498 
5499 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
5500 
5501 static ssize_t
5502 sync_completed_show(struct mddev *mddev, char *page)
5503 {
5504 	unsigned long long max_sectors, resync;
5505 
5506 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5507 		return sprintf(page, "none\n");
5508 
5509 	if (mddev->curr_resync == MD_RESYNC_YIELDED ||
5510 	    mddev->curr_resync == MD_RESYNC_DELAYED)
5511 		return sprintf(page, "delayed\n");
5512 
5513 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
5514 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5515 		max_sectors = mddev->resync_max_sectors;
5516 	else
5517 		max_sectors = mddev->dev_sectors;
5518 
5519 	resync = mddev->curr_resync_completed;
5520 	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
5521 }
5522 
5523 static struct md_sysfs_entry md_sync_completed =
5524 	__ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
5525 
5526 static ssize_t
5527 min_sync_show(struct mddev *mddev, char *page)
5528 {
5529 	return sprintf(page, "%llu\n",
5530 		       (unsigned long long)mddev->resync_min);
5531 }
5532 static ssize_t
5533 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
5534 {
5535 	unsigned long long min;
5536 	int err;
5537 
5538 	if (kstrtoull(buf, 10, &min))
5539 		return -EINVAL;
5540 
5541 	spin_lock(&mddev->lock);
5542 	err = -EINVAL;
5543 	if (min > mddev->resync_max)
5544 		goto out_unlock;
5545 
5546 	err = -EBUSY;
5547 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5548 		goto out_unlock;
5549 
5550 	/* Round down to multiple of 4K for safety */
5551 	mddev->resync_min = round_down(min, 8);
5552 	err = 0;
5553 
5554 out_unlock:
5555 	spin_unlock(&mddev->lock);
5556 	return err ?: len;
5557 }
5558 
5559 static struct md_sysfs_entry md_min_sync =
5560 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5561 
5562 static ssize_t
5563 max_sync_show(struct mddev *mddev, char *page)
5564 {
5565 	if (mddev->resync_max == MaxSector)
5566 		return sprintf(page, "max\n");
5567 	else
5568 		return sprintf(page, "%llu\n",
5569 			       (unsigned long long)mddev->resync_max);
5570 }
5571 static ssize_t
5572 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
5573 {
5574 	int err;
5575 	spin_lock(&mddev->lock);
5576 	if (strncmp(buf, "max", 3) == 0)
5577 		mddev->resync_max = MaxSector;
5578 	else {
5579 		unsigned long long max;
5580 		int chunk;
5581 
5582 		err = -EINVAL;
5583 		if (kstrtoull(buf, 10, &max))
5584 			goto out_unlock;
5585 		if (max < mddev->resync_min)
5586 			goto out_unlock;
5587 
5588 		err = -EBUSY;
5589 		if (max < mddev->resync_max && md_is_rdwr(mddev) &&
5590 		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5591 			goto out_unlock;
5592 
5593 		/* Must be a multiple of chunk_size */
5594 		chunk = mddev->chunk_sectors;
5595 		if (chunk) {
5596 			sector_t temp = max;
5597 
5598 			err = -EINVAL;
5599 			if (sector_div(temp, chunk))
5600 				goto out_unlock;
5601 		}
5602 		mddev->resync_max = max;
5603 	}
5604 	wake_up(&mddev->recovery_wait);
5605 	err = 0;
5606 out_unlock:
5607 	spin_unlock(&mddev->lock);
5608 	return err ?: len;
5609 }
5610 
5611 static struct md_sysfs_entry md_max_sync =
5612 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
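/*
 * Together, sync_min and sync_max bound the region a requested sync acts
 * on. For example (hypothetical device, values in sectors), restricting a
 * check to the first 1 GiB:
 *
 *     echo 0 > /sys/block/md0/md/sync_min
 *     echo 2097152 > /sys/block/md0/md/sync_max
 *
 * sync_max must be a multiple of the chunk size when one is set.
 */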
5613 
5614 static ssize_t
5615 suspend_lo_show(struct mddev *mddev, char *page)
5616 {
5617 	return sprintf(page, "%llu\n",
5618 		       (unsigned long long)READ_ONCE(mddev->suspend_lo));
5619 }
5620 
5621 static ssize_t
5622 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
5623 {
5624 	unsigned long long new;
5625 	int err;
5626 
5627 	err = kstrtoull(buf, 10, &new);
5628 	if (err < 0)
5629 		return err;
5630 	if (new != (sector_t)new)
5631 		return -EINVAL;
5632 
5633 	err = mddev_suspend(mddev, true);
5634 	if (err)
5635 		return err;
5636 
5637 	WRITE_ONCE(mddev->suspend_lo, new);
5638 	mddev_resume(mddev);
5639 
5640 	return len;
5641 }
5642 static struct md_sysfs_entry md_suspend_lo =
5643 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5644 
5645 static ssize_t
5646 suspend_hi_show(struct mddev *mddev, char *page)
5647 {
5648 	return sprintf(page, "%llu\n",
5649 		       (unsigned long long)READ_ONCE(mddev->suspend_hi));
5650 }
5651 
5652 static ssize_t
5653 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
5654 {
5655 	unsigned long long new;
5656 	int err;
5657 
5658 	err = kstrtoull(buf, 10, &new);
5659 	if (err < 0)
5660 		return err;
5661 	if (new != (sector_t)new)
5662 		return -EINVAL;
5663 
5664 	err = mddev_suspend(mddev, true);
5665 	if (err)
5666 		return err;
5667 
5668 	WRITE_ONCE(mddev->suspend_hi, new);
5669 	mddev_resume(mddev);
5670 
5671 	return len;
5672 }
5673 static struct md_sysfs_entry md_suspend_hi =
5674 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
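/*
 * Note that a write to suspend_lo or suspend_hi suspends the whole array,
 * updates the stored bound, and resumes it, so the personality never
 * observes a torn update of either value.
 */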
5675 
5676 static ssize_t
5677 reshape_position_show(struct mddev *mddev, char *page)
5678 {
5679 	if (mddev->reshape_position != MaxSector)
5680 		return sprintf(page, "%llu\n",
5681 			       (unsigned long long)mddev->reshape_position);
5682 	strcpy(page, "none\n");
5683 	return 5;
5684 }
5685 
5686 static ssize_t
5687 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
5688 {
5689 	struct md_rdev *rdev;
5690 	unsigned long long new;
5691 	int err;
5692 
5693 	err = kstrtoull(buf, 10, &new);
5694 	if (err < 0)
5695 		return err;
5696 	if (new != (sector_t)new)
5697 		return -EINVAL;
5698 	err = mddev_lock(mddev);
5699 	if (err)
5700 		return err;
5701 	err = -EBUSY;
5702 	if (mddev->pers)
5703 		goto unlock;
5704 	mddev->reshape_position = new;
5705 	mddev->delta_disks = 0;
5706 	mddev->reshape_backwards = 0;
5707 	mddev->new_level = mddev->level;
5708 	mddev->new_layout = mddev->layout;
5709 	mddev->new_chunk_sectors = mddev->chunk_sectors;
5710 	rdev_for_each(rdev, mddev)
5711 		rdev->new_data_offset = rdev->data_offset;
5712 	err = 0;
5713 unlock:
5714 	mddev_unlock(mddev);
5715 	return err ?: len;
5716 }
5717 
5718 static struct md_sysfs_entry md_reshape_position =
5719 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5720        reshape_position_store);
5721 
5722 static ssize_t
5723 reshape_direction_show(struct mddev *mddev, char *page)
5724 {
5725 	return sprintf(page, "%s\n",
5726 		       mddev->reshape_backwards ? "backwards" : "forwards");
5727 }
5728 
5729 static ssize_t
5730 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5731 {
5732 	int backwards = 0;
5733 	int err;
5734 
5735 	if (cmd_match(buf, "forwards"))
5736 		backwards = 0;
5737 	else if (cmd_match(buf, "backwards"))
5738 		backwards = 1;
5739 	else
5740 		return -EINVAL;
5741 	if (mddev->reshape_backwards == backwards)
5742 		return len;
5743 
5744 	err = mddev_lock(mddev);
5745 	if (err)
5746 		return err;
5747 	/* check if we are allowed to change */
5748 	if (mddev->delta_disks)
5749 		err = -EBUSY;
5750 	else if (mddev->persistent &&
5751 	    mddev->major_version == 0)
5752 		err =  -EINVAL;
5753 	else
5754 		mddev->reshape_backwards = backwards;
5755 	mddev_unlock(mddev);
5756 	return err ?: len;
5757 }
5758 
5759 static struct md_sysfs_entry md_reshape_direction =
5760 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5761        reshape_direction_store);
5762 
5763 static ssize_t
5764 array_size_show(struct mddev *mddev, char *page)
5765 {
5766 	if (mddev->external_size)
5767 		return sprintf(page, "%llu\n",
5768 			       (unsigned long long)mddev->array_sectors/2);
5769 	else
5770 		return sprintf(page, "default\n");
5771 }
5772 
5773 static ssize_t
5774 array_size_store(struct mddev *mddev, const char *buf, size_t len)
5775 {
5776 	sector_t sectors;
5777 	int err;
5778 
5779 	err = mddev_lock(mddev);
5780 	if (err)
5781 		return err;
5782 
5783 	/* cluster raid doesn't support change array_sectors */
5784 	if (mddev_is_clustered(mddev)) {
5785 		mddev_unlock(mddev);
5786 		return -EINVAL;
5787 	}
5788 
5789 	if (strncmp(buf, "default", 7) == 0) {
5790 		if (mddev->pers)
5791 			sectors = mddev->pers->size(mddev, 0, 0);
5792 		else
5793 			sectors = mddev->array_sectors;
5794 
5795 		mddev->external_size = 0;
5796 	} else {
5797 		if (strict_blocks_to_sectors(buf, &sectors) < 0)
5798 			err = -EINVAL;
5799 		else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5800 			err = -E2BIG;
5801 		else
5802 			mddev->external_size = 1;
5803 	}
5804 
5805 	if (!err) {
5806 		mddev->array_sectors = sectors;
5807 		if (mddev->pers)
5808 			set_capacity_and_notify(mddev->gendisk,
5809 						mddev->array_sectors);
5810 	}
5811 	mddev_unlock(mddev);
5812 	return err ?: len;
5813 }
5814 
5815 static struct md_sysfs_entry md_array_size =
5816 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5817        array_size_store);
5818 
5819 static ssize_t
5820 consistency_policy_show(struct mddev *mddev, char *page)
5821 {
5822 	int ret;
5823 
5824 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5825 		ret = sprintf(page, "journal\n");
5826 	} else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5827 		ret = sprintf(page, "ppl\n");
5828 	} else if (mddev->bitmap) {
5829 		ret = sprintf(page, "bitmap\n");
5830 	} else if (mddev->pers) {
5831 		if (mddev->pers->sync_request)
5832 			ret = sprintf(page, "resync\n");
5833 		else
5834 			ret = sprintf(page, "none\n");
5835 	} else {
5836 		ret = sprintf(page, "unknown\n");
5837 	}
5838 
5839 	return ret;
5840 }
5841 
5842 static ssize_t
5843 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5844 {
5845 	int err = 0;
5846 
5847 	if (mddev->pers) {
5848 		if (mddev->pers->change_consistency_policy)
5849 			err = mddev->pers->change_consistency_policy(mddev, buf);
5850 		else
5851 			err = -EBUSY;
5852 	} else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5853 		set_bit(MD_HAS_PPL, &mddev->flags);
5854 	} else {
5855 		err = -EINVAL;
5856 	}
5857 
5858 	return err ? err : len;
5859 }
5860 
5861 static struct md_sysfs_entry md_consistency_policy =
5862 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5863        consistency_policy_store);
5864 
5865 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5866 {
5867 	return sprintf(page, "%d\n", test_bit(MD_FAILLAST_DEV, &mddev->flags));
5868 }
5869 
5870 /*
5871  * Setting MD_FAILLAST_DEV to allow last device to be forcibly removed
5872  * from RAID1/RAID10.
5873  */
5874 static ssize_t
5875 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5876 {
5877 	int ret;
5878 	bool value;
5879 
5880 	ret = kstrtobool(buf, &value);
5881 	if (ret)
5882 		return ret;
5883 
5884 	if (value)
5885 		set_bit(MD_FAILLAST_DEV, &mddev->flags);
5886 	else
5887 		clear_bit(MD_FAILLAST_DEV, &mddev->flags);
5888 
5889 	return len;
5890 }
5891 static struct md_sysfs_entry md_fail_last_dev =
5892 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5893        fail_last_dev_store);
5894 
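/*
 * Example (hypothetical device): "echo 1 > /sys/block/md0/md/fail_last_dev"
 * permits the final member of a RAID1/RAID10 array to be failed; any value
 * accepted by kstrtobool() ("1"/"0", "y"/"n", ...) may be written.
 */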
5895 static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5896 {
5897 	if (mddev->pers == NULL || (mddev->pers->head.id != ID_RAID1))
5898 		return sprintf(page, "n/a\n");
5899 	else
5900 		return sprintf(page, "%d\n",
5901 			       test_bit(MD_SERIALIZE_POLICY, &mddev->flags));
5902 }
5903 
5904 /*
5905  * Setting MD_SERIALIZE_POLICY enforces that write IO is not reordered
5906  * for raid1.
5907  */
5908 static ssize_t
5909 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5910 {
5911 	int err;
5912 	bool value;
5913 
5914 	err = kstrtobool(buf, &value);
5915 	if (err)
5916 		return err;
5917 
5918 	if (value == test_bit(MD_SERIALIZE_POLICY, &mddev->flags))
5919 		return len;
5920 
5921 	err = mddev_suspend_and_lock(mddev);
5922 	if (err)
5923 		return err;
5924 	if (mddev->pers == NULL || (mddev->pers->head.id != ID_RAID1)) {
5925 		pr_err("md: serialize_policy is only effective for raid1\n");
5926 		err = -EINVAL;
5927 		goto unlock;
5928 	}
5929 
5930 	if (value) {
5931 		mddev_create_serial_pool(mddev, NULL);
5932 		set_bit(MD_SERIALIZE_POLICY, &mddev->flags);
5933 	} else {
5934 		mddev_destroy_serial_pool(mddev, NULL);
5935 		clear_bit(MD_SERIALIZE_POLICY, &mddev->flags);
5936 	}
5937 unlock:
5938 	mddev_unlock_and_resume(mddev);
5939 	return err ?: len;
5940 }
5941 
5942 static struct md_sysfs_entry md_serialize_policy =
5943 __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5944        serialize_policy_store);
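/*
 * Example (raid1 only, device name assumed): enabling with
 * "echo 1 > /sys/block/md0/md/serialize_policy" creates the serial pool
 * while the array is suspended; writing 0 tears it down again.
 */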
5945 
5946 static int mddev_set_logical_block_size(struct mddev *mddev,
5947 				unsigned int lbs)
5948 {
5949 	int err = 0;
5950 	struct queue_limits lim;
5951 
5952 	if (queue_logical_block_size(mddev->gendisk->queue) >= lbs) {
5953 		pr_err("%s: new LBS %u must be larger than the current LBS\n",
5954 		       mdname(mddev), lbs);
5955 		return -EINVAL;
5956 	}
5957 
5958 	lim = queue_limits_start_update(mddev->gendisk->queue);
5959 	lim.logical_block_size = lbs;
5960 	pr_info("%s: logical_block_size is being changed, data may be lost\n",
5961 		mdname(mddev));
5962 	err = queue_limits_commit_update(mddev->gendisk->queue, &lim);
5963 	if (err)
5964 		return err;
5965 
5966 	mddev->logical_block_size = lbs;
5967 	/* New lbs will be written to superblock after array is running */
5968 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
5969 	return 0;
5970 }
5971 
5972 static ssize_t
5973 lbs_show(struct mddev *mddev, char *page)
5974 {
5975 	return sprintf(page, "%u\n", mddev->logical_block_size);
5976 }
5977 
5978 static ssize_t
5979 lbs_store(struct mddev *mddev, const char *buf, size_t len)
5980 {
5981 	unsigned int lbs;
5982 	int err;
5983 
5984 	/* Only 1.x meta supports configurable LBS */
5985 	if (mddev->major_version == 0)
5986 		return -EINVAL;
5987 
5988 	err = kstrtouint(buf, 10, &lbs);
5989 	if (err < 0)
5990 		return -EINVAL;
5991 
5992 	if (mddev->pers) {
5993 		unsigned int curr_lbs;
5994 
5995 		if (mddev->logical_block_size)
5996 			return -EBUSY;
5997 		/*
5998 		 * To avoid forward-compatibility issues, LBS is not configured
5999 		 * by default for arrays created by old kernels (<= 6.18). If
6000 		 * the user confirms no rollback to an old kernel, LBS can be
6001 		 * enabled by writing the current LBS; requiring the current
6002 		 * value prevents data loss from an LBS change.
6003 		 */
6004 		curr_lbs = queue_logical_block_size(mddev->gendisk->queue);
6005 		if (lbs != curr_lbs)
6006 			return -EINVAL;
6007 
6008 		mddev->logical_block_size = curr_lbs;
6009 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6010 		pr_info("%s: logical block size configured successfully, array will not be assembled in old kernels (<= 6.18)\n",
6011 			mdname(mddev));
6012 		return len;
6013 	}
6014 
6015 	err = mddev_lock(mddev);
6016 	if (err)
6017 		return err;
6018 
6019 	err = mddev_set_logical_block_size(mddev, lbs);
6020 	mddev_unlock(mddev);
6023 	return err ?: len;
6024 }
6025 
6026 static struct md_sysfs_entry md_logical_block_size =
6027 __ATTR(logical_block_size, 0644, lbs_show, lbs_store);
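/*
 * Illustrative opt-in for a running array (device name assumed): read the
 * queue's current LBS and write the same value back, e.g.
 *
 *     lbs=$(cat /sys/block/md0/queue/logical_block_size)
 *     echo "$lbs" > /sys/block/md0/md/logical_block_size
 *
 * Any other value is rejected with -EINVAL while the array is running.
 */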
6028 
6029 static struct attribute *md_default_attrs[] = {
6030 	&md_level.attr,
6031 	&md_new_level.attr,
6032 	&md_bitmap_type.attr,
6033 	&md_layout.attr,
6034 	&md_raid_disks.attr,
6035 	&md_uuid.attr,
6036 	&md_chunk_size.attr,
6037 	&md_size.attr,
6038 	&md_resync_start.attr,
6039 	&md_metadata.attr,
6040 	&md_new_device.attr,
6041 	&md_safe_delay.attr,
6042 	&md_array_state.attr,
6043 	&md_reshape_position.attr,
6044 	&md_reshape_direction.attr,
6045 	&md_array_size.attr,
6046 	&max_corr_read_errors.attr,
6047 	&md_consistency_policy.attr,
6048 	&md_fail_last_dev.attr,
6049 	&md_serialize_policy.attr,
6050 	&md_logical_block_size.attr,
6051 	NULL,
6052 };
6053 
6054 static const struct attribute_group md_default_group = {
6055 	.attrs = md_default_attrs,
6056 };
6057 
6058 static struct attribute *md_redundancy_attrs[] = {
6059 	&md_scan_mode.attr,
6060 	&md_last_scan_mode.attr,
6061 	&md_mismatches.attr,
6062 	&md_sync_min.attr,
6063 	&md_sync_max.attr,
6064 	&md_sync_io_depth.attr,
6065 	&md_sync_speed.attr,
6066 	&md_sync_force_parallel.attr,
6067 	&md_sync_completed.attr,
6068 	&md_min_sync.attr,
6069 	&md_max_sync.attr,
6070 	&md_suspend_lo.attr,
6071 	&md_suspend_hi.attr,
6072 	&md_bitmap.attr,
6073 	&md_degraded.attr,
6074 	NULL,
6075 };
6076 static const struct attribute_group md_redundancy_group = {
6077 	.name = NULL,
6078 	.attrs = md_redundancy_attrs,
6079 };
6080 
6081 static const struct attribute_group *md_attr_groups[] = {
6082 	&md_default_group,
6083 	NULL,
6084 };
6085 
6086 static ssize_t
6087 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
6088 {
6089 	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
6090 	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
6091 	ssize_t rv;
6092 
6093 	if (!entry->show)
6094 		return -EIO;
6095 	spin_lock(&all_mddevs_lock);
6096 	if (!mddev_get(mddev)) {
6097 		spin_unlock(&all_mddevs_lock);
6098 		return -EBUSY;
6099 	}
6100 	spin_unlock(&all_mddevs_lock);
6101 
6102 	rv = entry->show(mddev, page);
6103 	mddev_put(mddev);
6104 	return rv;
6105 }
6106 
6107 static ssize_t
6108 md_attr_store(struct kobject *kobj, struct attribute *attr,
6109 	      const char *page, size_t length)
6110 {
6111 	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
6112 	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
6113 	ssize_t rv;
6114 	struct kernfs_node *kn = NULL;
6115 
6116 	if (!entry->store)
6117 		return -EIO;
6118 	if (!capable(CAP_SYS_ADMIN))
6119 		return -EACCES;
6120 
6121 	if (entry->store == array_state_store && cmd_match(page, "clear"))
6122 		kn = sysfs_break_active_protection(kobj, attr);
6123 
6124 	spin_lock(&all_mddevs_lock);
6125 	if (!mddev_get(mddev)) {
6126 		spin_unlock(&all_mddevs_lock);
6127 		if (kn)
6128 			sysfs_unbreak_active_protection(kn);
6129 		return -EBUSY;
6130 	}
6131 	spin_unlock(&all_mddevs_lock);
6132 	rv = entry->store(mddev, page, length);
6133 	mddev_put(mddev);
6134 
6135 	if (kn)
6136 		sysfs_unbreak_active_protection(kn);
6137 
6138 	return rv;
6139 }
6140 
6141 static void md_kobj_release(struct kobject *ko)
6142 {
6143 	struct mddev *mddev = container_of(ko, struct mddev, kobj);
6144 
6145 	if (legacy_async_del_gendisk) {
6146 		if (mddev->sysfs_state)
6147 			sysfs_put(mddev->sysfs_state);
6148 		if (mddev->sysfs_level)
6149 			sysfs_put(mddev->sysfs_level);
6150 		del_gendisk(mddev->gendisk);
6151 	}
6152 	put_disk(mddev->gendisk);
6153 }
6154 
6155 static const struct sysfs_ops md_sysfs_ops = {
6156 	.show	= md_attr_show,
6157 	.store	= md_attr_store,
6158 };
6159 static const struct kobj_type md_ktype = {
6160 	.release	= md_kobj_release,
6161 	.sysfs_ops	= &md_sysfs_ops,
6162 	.default_groups	= md_attr_groups,
6163 };
6164 
6165 int mdp_major = 0;
6166 
6167 /* stack the limit for all rdevs into lim */
6168 int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
6169 		unsigned int flags)
6170 {
6171 	struct md_rdev *rdev;
6172 
6173 	rdev_for_each(rdev, mddev) {
6174 		queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset,
6175 					mddev->gendisk->disk_name);
6176 		if ((flags & MDDEV_STACK_INTEGRITY) &&
6177 		    !queue_limits_stack_integrity_bdev(lim, rdev->bdev))
6178 			return -EINVAL;
6179 	}
6180 
6181 	/*
6182 	 * Until RAID gains folio support, the logical_block_size must not
6183 	 * be larger than the page size.
6184 	 */
6185 	if (lim->logical_block_size > PAGE_SIZE) {
6186 		pr_err("%s: logical_block_size must not be larger than PAGE_SIZE\n",
6187 			mdname(mddev));
6188 		return -EINVAL;
6189 	}
6190 
6191 	/* Only 1.x meta needs to set logical block size */
6192 	if (mddev->major_version == 0)
6193 		return 0;
6194 
6195 	/*
6196 	 * Avoid a forward-compatibility issue: only set LBS by default for
6197 	 * new arrays (mddev->events == 0 indicates the array was just
6198 	 * created). When assembling an array, read LBS from the superblock
6199 	 * instead — LBS is 0 in superblocks created by old kernels.
6200 	 */
6201 	if (!mddev->events) {
6202 		pr_info("%s: array will not be assembled in old kernels that lack configurable LBS support (<= 6.18)\n",
6203 			mdname(mddev));
6204 		mddev->logical_block_size = lim->logical_block_size;
6205 	}
6206 
6207 	if (!mddev->logical_block_size)
6208 		pr_warn("%s: echo the current LBS to md/logical_block_size to prevent data loss from LBS changes.\n"
6209 			"\tNote: after setting, the array will not be assembled in old kernels (<= 6.18)\n",
6210 			mdname(mddev));
6211 
6212 	return 0;
6213 }
6214 EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits);
6215 
6216 /* apply the extra stacking limits from a new rdev into mddev */
6217 int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev)
6218 {
6219 	struct queue_limits lim;
6220 
6221 	if (mddev_is_dm(mddev))
6222 		return 0;
6223 
6224 	if (queue_logical_block_size(rdev->bdev->bd_disk->queue) >
6225 	    queue_logical_block_size(mddev->gendisk->queue)) {
6226 		pr_err("%s: incompatible logical_block_size, can not add\n",
6227 		       mdname(mddev));
6228 		return -EINVAL;
6229 	}
6230 
6231 	lim = queue_limits_start_update(mddev->gendisk->queue);
6232 	queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset,
6233 				mddev->gendisk->disk_name);
6234 
6235 	if (!queue_limits_stack_integrity_bdev(&lim, rdev->bdev)) {
6236 		pr_err("%s: incompatible integrity profile for %pg\n",
6237 		       mdname(mddev), rdev->bdev);
6238 		queue_limits_cancel_update(mddev->gendisk->queue);
6239 		return -ENXIO;
6240 	}
6241 
6242 	return queue_limits_commit_update(mddev->gendisk->queue, &lim);
6243 }
6244 EXPORT_SYMBOL_GPL(mddev_stack_new_rdev);
6245 
6246 /* update the optimal I/O size after a reshape */
6247 void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes)
6248 {
6249 	struct queue_limits lim;
6250 
6251 	if (mddev_is_dm(mddev))
6252 		return;
6253 
6254 	/* don't bother updating io_opt if we can't suspend the array */
6255 	if (mddev_suspend(mddev, false) < 0)
6256 		return;
6257 	lim = queue_limits_start_update(mddev->gendisk->queue);
6258 	lim.io_opt = lim.io_min * nr_stripes;
6259 	queue_limits_commit_update(mddev->gendisk->queue, &lim);
6260 	mddev_resume(mddev);
6261 }
6262 EXPORT_SYMBOL_GPL(mddev_update_io_opt);
6263 
6264 static void mddev_delayed_delete(struct work_struct *ws)
6265 {
6266 	struct mddev *mddev = container_of(ws, struct mddev, del_work);
6267 
6268 	kobject_put(&mddev->kobj);
6269 }
6270 
6271 void md_init_stacking_limits(struct queue_limits *lim)
6272 {
6273 	blk_set_stacking_limits(lim);
6274 	lim->features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
6275 			BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
6276 }
6277 EXPORT_SYMBOL_GPL(md_init_stacking_limits);
6278 
6279 struct mddev *md_alloc(dev_t dev, char *name)
6280 {
6281 	/*
6282 	 * If dev is zero, name is the name of a device to allocate with
6283 	 * an arbitrary minor number.  It will be "md_???"
6284 	 * If dev is non-zero it must be a device number with a MAJOR of
6285 	 * MD_MAJOR or mdp_major.  In this case, if "name" is NULL, then
6286 	 * the device is being created by opening a node in /dev.
6287 	 * If "name" is not NULL, the device is being created by
6288 	 * writing to /sys/module/md_mod/parameters/new_array.
6289 	 */
6290 	static DEFINE_MUTEX(disks_mutex);
6291 	struct mddev *mddev;
6292 	struct gendisk *disk;
6293 	int partitioned;
6294 	int shift;
6295 	int unit;
6296 	int error;
6297 
6298 	/*
6299 	 * Wait for any previous instance of this device to be completely
6300 	 * removed (mddev_delayed_delete).
6301 	 */
6302 	flush_workqueue(md_misc_wq);
6303 
6304 	mutex_lock(&disks_mutex);
6305 	mddev = mddev_alloc(dev);
6306 	if (IS_ERR(mddev)) {
6307 		error = PTR_ERR(mddev);
6308 		goto out_unlock;
6309 	}
6310 
6311 	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
6312 	shift = partitioned ? MdpMinorShift : 0;
6313 	unit = MINOR(mddev->unit) >> shift;
6314 
6315 	if (name && !dev) {
6316 		/* Need to ensure that 'name' is not a duplicate.
6317 		 */
6318 		struct mddev *mddev2;
6319 		spin_lock(&all_mddevs_lock);
6320 
6321 		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
6322 			if (mddev2->gendisk &&
6323 			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
6324 				spin_unlock(&all_mddevs_lock);
6325 				error = -EEXIST;
6326 				goto out_free_mddev;
6327 			}
6328 		spin_unlock(&all_mddevs_lock);
6329 	}
6330 	if (name && dev)
6331 		/*
6332 		 * Creating /dev/mdNNN via "new_array", so adjust hold_active.
6333 		 */
6334 		mddev->hold_active = UNTIL_STOP;
6335 
6336 	disk = blk_alloc_disk(NULL, NUMA_NO_NODE);
6337 	if (IS_ERR(disk)) {
6338 		error = PTR_ERR(disk);
6339 		goto out_free_mddev;
6340 	}
6341 
6342 	disk->major = MAJOR(mddev->unit);
6343 	disk->first_minor = unit << shift;
6344 	disk->minors = 1 << shift;
6345 	if (name)
6346 		strcpy(disk->disk_name, name);
6347 	else if (partitioned)
6348 		sprintf(disk->disk_name, "md_d%d", unit);
6349 	else
6350 		sprintf(disk->disk_name, "md%d", unit);
6351 	disk->fops = &md_fops;
6352 	disk->private_data = mddev;
6353 
6354 	disk->events |= DISK_EVENT_MEDIA_CHANGE;
6355 	mddev->gendisk = disk;
6356 	error = add_disk(disk);
6357 	if (error)
6358 		goto out_put_disk;
6359 
6360 	kobject_init(&mddev->kobj, &md_ktype);
6361 	error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
6362 	if (error) {
6363 		/*
6364 		 * The disk is already live at this point.  Clear the hold flag
6365 		 * and let mddev_put take care of the deletion, as it isn't any
6366 		 * different from a normal close on last release now.
6367 		 */
6368 		mddev->hold_active = 0;
6369 		mutex_unlock(&disks_mutex);
6370 		mddev_put(mddev);
6371 		return ERR_PTR(error);
6372 	}
6373 
6374 	kobject_uevent(&mddev->kobj, KOBJ_ADD);
6375 	mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
6376 	mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
6377 	mutex_unlock(&disks_mutex);
6378 	return mddev;
6379 
6380 out_put_disk:
6381 	put_disk(disk);
6382 out_free_mddev:
6383 	mddev_free(mddev);
6384 out_unlock:
6385 	mutex_unlock(&disks_mutex);
6386 	return ERR_PTR(error);
6387 }
6388 
6389 static int md_alloc_and_put(dev_t dev, char *name)
6390 {
6391 	struct mddev *mddev = md_alloc(dev, name);
6392 
6393 	if (legacy_async_del_gendisk)
6394 		pr_warn("md: async del_gendisk mode will be removed in future, please upgrade to mdadm-4.5+\n");
6395 
6396 	if (IS_ERR(mddev))
6397 		return PTR_ERR(mddev);
6398 	mddev_put(mddev);
6399 	return 0;
6400 }
6401 
6402 static void md_probe(dev_t dev)
6403 {
6404 	if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
6405 		return;
6406 	if (create_on_open)
6407 		md_alloc_and_put(dev, NULL);
6408 }
6409 
6410 static int add_named_array(const char *val, const struct kernel_param *kp)
6411 {
6412 	/*
6413 	 * val must be "md_*" or "mdNNN".
6414 	 * For "md_*" we allocate an array with a large free minor number, and
6415 	 * set the name to val.  val must not already be an active name.
6416 	 * For "mdNNN" we allocate an array with the minor number NNN
6417 	 * which must not already be in use.
6418 	 */
6419 	int len = strlen(val);
6420 	char buf[DISK_NAME_LEN];
6421 	unsigned long devnum;
6422 
6423 	while (len && val[len-1] == '\n')
6424 		len--;
6425 	if (len >= DISK_NAME_LEN)
6426 		return -E2BIG;
6427 	strscpy(buf, val, len+1);
6428 	if (strncmp(buf, "md_", 3) == 0)
6429 		return md_alloc_and_put(0, buf);
6430 	if (strncmp(buf, "md", 2) == 0 &&
6431 	    isdigit(buf[2]) &&
6432 	    kstrtoul(buf+2, 10, &devnum) == 0 &&
6433 	    devnum <= MINORMASK)
6434 		return md_alloc_and_put(MKDEV(MD_MAJOR, devnum), NULL);
6435 
6436 	return -EINVAL;
6437 }
6438 
6439 static void md_safemode_timeout(struct timer_list *t)
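/*
 * Safemode timer: fires after a period without writes so the md thread can
 * mark the array clean again in the superblock.
 */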
6440 {
6441 	struct mddev *mddev = timer_container_of(mddev, t, safemode_timer);
6442 
6443 	mddev->safemode = 1;
6444 	if (mddev->external)
6445 		sysfs_notify_dirent_safe(mddev->sysfs_state);
6446 
6447 	md_wakeup_thread(mddev->thread);
6448 }
6449 
6450 static int start_dirty_degraded;
6451 
6452 static int md_bitmap_create(struct mddev *mddev)
6453 {
6454 	if (mddev->bitmap_id == ID_BITMAP_NONE)
6455 		return -EINVAL;
6456 
6457 	if (!mddev_set_bitmap_ops(mddev))
6458 		return -ENOENT;
6459 
6460 	return mddev->bitmap_ops->create(mddev);
6461 }
6462 
6463 static void md_bitmap_destroy(struct mddev *mddev)
6464 {
6465 	if (!md_bitmap_registered(mddev))
6466 		return;
6467 
6468 	mddev->bitmap_ops->destroy(mddev);
6469 	mddev_clear_bitmap_ops(mddev);
6470 }
6471 
6472 int md_run(struct mddev *mddev)
6473 {
6474 	int err;
6475 	struct md_rdev *rdev;
6476 	struct md_personality *pers;
6477 	bool nowait = true;
6478 
6479 	if (list_empty(&mddev->disks))
6480 		/* cannot run an array with no devices. */
6481 		return -EINVAL;
6482 
6483 	if (mddev->pers)
6484 		return -EBUSY;
6485 	/* Cannot run until previous stop completes properly */
6486 	if (mddev->sysfs_active)
6487 		return -EBUSY;
6488 
6489 	/*
6490 	 * Analyze all RAID superblock(s)
6491 	 */
6492 	if (!mddev->raid_disks) {
6493 		if (!mddev->persistent)
6494 			return -EINVAL;
6495 		err = analyze_sbs(mddev);
6496 		if (err)
6497 			return -EINVAL;
6498 	}
6499 
6500 	if (mddev->level != LEVEL_NONE)
6501 		request_module("md-level-%d", mddev->level);
6502 	else if (mddev->clevel[0])
6503 		request_module("md-%s", mddev->clevel);
6504 
6505 	/*
6506 	 * Drop all container device buffers, from now on
6507 	 * the only valid external interface is through the md
6508 	 * device.
6509 	 */
6510 	clear_bit(MD_HAS_SUPERBLOCK, &mddev->flags);
6511 	rdev_for_each(rdev, mddev) {
6512 		if (test_bit(Faulty, &rdev->flags))
6513 			continue;
6514 		sync_blockdev(rdev->bdev);
6515 		invalidate_bdev(rdev->bdev);
6516 		if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
6517 			mddev->ro = MD_RDONLY;
6518 			if (!mddev_is_dm(mddev))
6519 				set_disk_ro(mddev->gendisk, 1);
6520 		}
6521 
6522 		if (rdev->sb_page)
6523 			set_bit(MD_HAS_SUPERBLOCK, &mddev->flags);
6524 
6525 		/* perform some consistency tests on the device.
6526 		 * We don't want the data to overlap the metadata;
6527 		 * internal bitmap issues have been handled elsewhere.
6528 		 */
6529 		if (rdev->meta_bdev) {
6530 			/* Nothing to check */;
6531 		} else if (rdev->data_offset < rdev->sb_start) {
6532 			if (mddev->dev_sectors &&
6533 			    rdev->data_offset + mddev->dev_sectors
6534 			    > rdev->sb_start) {
6535 				pr_warn("md: %s: data overlaps metadata\n",
6536 					mdname(mddev));
6537 				return -EINVAL;
6538 			}
6539 		} else {
6540 			if (rdev->sb_start + rdev->sb_size/512
6541 			    > rdev->data_offset) {
6542 				pr_warn("md: %s: metadata overlaps data\n",
6543 					mdname(mddev));
6544 				return -EINVAL;
6545 			}
6546 		}
6547 		sysfs_notify_dirent_safe(rdev->sysfs_state);
6548 		nowait = nowait && bdev_nowait(rdev->bdev);
6549 	}
6550 
6551 	pers = get_pers(mddev->level, mddev->clevel);
6552 	if (!pers)
6553 		return -EINVAL;
6554 	if (mddev->level != pers->head.id) {
6555 		mddev->level = pers->head.id;
6556 		mddev->new_level = pers->head.id;
6557 	}
6558 	strscpy(mddev->clevel, pers->head.name, sizeof(mddev->clevel));
6559 
6560 	if (mddev->reshape_position != MaxSector &&
6561 	    pers->start_reshape == NULL) {
6562 		/* This personality cannot handle reshaping... */
6563 		put_pers(pers);
6564 		return -EINVAL;
6565 	}
6566 
6567 	if (pers->sync_request) {
6568 		/* Warn if this is a potentially silly
6569 		 * configuration.
6570 		 */
6571 		struct md_rdev *rdev2;
6572 		int warned = 0;
6573 
6574 		rdev_for_each(rdev, mddev)
6575 			rdev_for_each(rdev2, mddev) {
6576 				if (rdev < rdev2 &&
6577 				    rdev->bdev->bd_disk ==
6578 				    rdev2->bdev->bd_disk) {
6579 					pr_warn("%s: WARNING: %pg appears to be on the same physical disk as %pg.\n",
6580 						mdname(mddev),
6581 						rdev->bdev,
6582 						rdev2->bdev);
6583 					warned = 1;
6584 				}
6585 			}
6586 
6587 		if (warned)
6588 			pr_warn("True protection against single-disk failure might be compromised.\n");
6589 	}
6590 
6591 	/* dm-raid expects sync_thread to be frozen until resume */
6592 	if (mddev->gendisk)
6593 		mddev->recovery = 0;
6594 
6595 	/* may be overridden by the personality */
6596 	mddev->resync_max_sectors = mddev->dev_sectors;
6597 
6598 	mddev->ok_start_degraded = start_dirty_degraded;
6599 
6600 	if (start_readonly && md_is_rdwr(mddev))
6601 		mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */
6602 
6603 	err = pers->run(mddev);
6604 	if (err)
6605 		pr_warn("md: pers->run() failed ...\n");
6606 	else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
6607 		WARN_ONCE(!mddev->external_size,
6608 			  "%s: default size too small, but 'external_size' not in effect?\n",
6609 			  __func__);
6610 		pr_warn("md: invalid array_size %llu > default size %llu\n",
6611 			(unsigned long long)mddev->array_sectors / 2,
6612 			(unsigned long long)pers->size(mddev, 0, 0) / 2);
6613 		err = -EINVAL;
6614 	}
6615 	if (err == 0 && pers->sync_request &&
6616 	    (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
6617 		err = md_bitmap_create(mddev);
6618 		if (err)
6619 			pr_warn("%s: failed to create bitmap (%d)\n",
6620 				mdname(mddev), err);
6621 	}
6622 	if (err)
6623 		goto bitmap_abort;
6624 
6625 	if (mddev->bitmap_info.max_write_behind > 0) {
6626 		bool create_pool = false;
6627 
6628 		rdev_for_each(rdev, mddev) {
6629 			if (test_bit(WriteMostly, &rdev->flags) &&
6630 			    rdev_init_serial(rdev))
6631 				create_pool = true;
6632 		}
6633 		if (create_pool && mddev->serial_info_pool == NULL) {
6634 			mddev->serial_info_pool =
6635 				mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
6636 						    sizeof(struct serial_info));
6637 			if (!mddev->serial_info_pool) {
6638 				err = -ENOMEM;
6639 				goto bitmap_abort;
6640 			}
6641 		}
6642 	}
6643 
6644 	if (pers->sync_request) {
6645 		if (mddev->kobj.sd &&
6646 		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))
6647 			pr_warn("md: cannot register extra attributes for %s\n",
6648 				mdname(mddev));
6649 		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
6650 		mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6651 		mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
6652 	} else if (mddev->ro == MD_AUTO_READ)
6653 		mddev->ro = MD_RDWR;
6654 
6655 	atomic_set(&mddev->max_corr_read_errors,
6656 		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
6657 	mddev->safemode = 0;
6658 	if (mddev_is_clustered(mddev))
6659 		mddev->safemode_delay = 0;
6660 	else
6661 		mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
6662 	mddev->in_sync = 1;
6663 	smp_wmb();
6664 	spin_lock(&mddev->lock);
6665 	mddev->pers = pers;
6666 	spin_unlock(&mddev->lock);
6667 	rdev_for_each(rdev, mddev)
6668 		if (rdev->raid_disk >= 0)
6669 			sysfs_link_rdev(mddev, rdev); /* failure here is OK */
6670 
6671 	if (mddev->degraded && md_is_rdwr(mddev))
6672 		/* This ensures that the recovering status is reported immediately
6673 		 * via sysfs - until a lack of spares is confirmed.
6674 		 */
6675 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6676 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6677 
6678 	if (mddev->sb_flags)
6679 		md_update_sb(mddev, 0);
6680 
6681 	md_new_event();
6682 	return 0;
6683 
6684 bitmap_abort:
6685 	mddev_detach(mddev);
6686 	if (mddev->private)
6687 		pers->free(mddev, mddev->private);
6688 	mddev->private = NULL;
6689 	put_pers(pers);
6690 	md_bitmap_destroy(mddev);
6691 	return err;
6692 }
6693 EXPORT_SYMBOL_GPL(md_run);
6694 
6695 int do_md_run(struct mddev *mddev)
6696 {
6697 	int err;
6698 
6699 	set_bit(MD_NOT_READY, &mddev->flags);
6700 	err = md_run(mddev);
6701 	if (err)
6702 		goto out;
6703 
6704 	if (md_bitmap_registered(mddev)) {
6705 		err = mddev->bitmap_ops->load(mddev);
6706 		if (err) {
6707 			md_bitmap_destroy(mddev);
6708 			goto out;
6709 		}
6710 	}
6711 
6712 	if (mddev_is_clustered(mddev))
6713 		md_allow_write(mddev);
6714 
6715 	/* run start up tasks that require md_thread */
6716 	md_start(mddev);
6717 
6718 	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6719 
6720 	set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
6721 	clear_bit(MD_NOT_READY, &mddev->flags);
6722 	mddev->changed = 1;
6723 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
6724 	sysfs_notify_dirent_safe(mddev->sysfs_state);
6725 	sysfs_notify_dirent_safe(mddev->sysfs_action);
6726 	sysfs_notify_dirent_safe(mddev->sysfs_degraded);
6727 out:
6728 	clear_bit(MD_NOT_READY, &mddev->flags);
6729 	return err;
6730 }
6731 
6732 int md_start(struct mddev *mddev)
6733 {
6734 	int ret = 0;
6735 
6736 	if (mddev->pers->start) {
6737 		set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6738 		ret = mddev->pers->start(mddev);
6739 		clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6740 		md_wakeup_thread(mddev->sync_thread);
6741 	}
6742 	return ret;
6743 }
6744 EXPORT_SYMBOL_GPL(md_start);
6745 
6746 static int restart_array(struct mddev *mddev)
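/*
 * Switch a read-only or auto-read array back to read-write, refusing if a
 * required journal device is missing or any member device is read-only.
 */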
6747 {
6748 	struct gendisk *disk = mddev->gendisk;
6749 	struct md_rdev *rdev;
6750 	bool has_journal = false;
6751 	bool has_readonly = false;
6752 
6753 	/* Complain if it has no devices */
6754 	if (list_empty(&mddev->disks))
6755 		return -ENXIO;
6756 	if (!mddev->pers)
6757 		return -EINVAL;
6758 	if (md_is_rdwr(mddev))
6759 		return -EBUSY;
6760 
6761 	rcu_read_lock();
6762 	rdev_for_each_rcu(rdev, mddev) {
6763 		if (test_bit(Journal, &rdev->flags) &&
6764 		    !test_bit(Faulty, &rdev->flags))
6765 			has_journal = true;
6766 		if (rdev_read_only(rdev))
6767 			has_readonly = true;
6768 	}
6769 	rcu_read_unlock();
6770 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6771 		/* Don't restart rw with journal missing/faulty */
6772 		return -EINVAL;
6773 	if (has_readonly)
6774 		return -EROFS;
6775 
6776 	mddev->safemode = 0;
6777 	mddev->ro = MD_RDWR;
6778 	set_disk_ro(disk, 0);
6779 	pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
6780 	/* Kick recovery or resync if necessary */
6781 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6782 	md_wakeup_thread(mddev->sync_thread);
6783 	sysfs_notify_dirent_safe(mddev->sysfs_state);
6784 	return 0;
6785 }
6786 
6787 static void md_clean(struct mddev *mddev)
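/* Reset the mddev to a pristine state once the array has been stopped. */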
6788 {
6789 	mddev->array_sectors = 0;
6790 	mddev->external_size = 0;
6791 	mddev->dev_sectors = 0;
6792 	mddev->raid_disks = 0;
6793 	mddev->resync_offset = 0;
6794 	mddev->resync_min = 0;
6795 	mddev->resync_max = MaxSector;
6796 	mddev->reshape_position = MaxSector;
6797 	/* we still need mddev->external in export_rdev, do not clear it yet */
6798 	mddev->persistent = 0;
6799 	mddev->level = LEVEL_NONE;
6800 	mddev->clevel[0] = 0;
6801 
6802 	/*
6803 	 * In legacy_async_del_gendisk mode the array can be stopped in the
6804 	 * middle of assembling it and must remain accessible afterwards,
6805 	 * so MD_CLOSING is cleared. Without legacy_async_del_gendisk the
6806 	 * array must not be opened again after stopping it, so MD_CLOSING
6807 	 * is kept set.
6808 	 */
6809 	if (legacy_async_del_gendisk && mddev->hold_active) {
6810 		clear_bit(MD_CLOSING, &mddev->flags);
6811 	} else {
6812 		/* if UNTIL_STOP is set, it's cleared here */
6813 		mddev->hold_active = 0;
6814 		/* Don't clear MD_CLOSING, or mddev can be opened again. */
6815 		mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
6816 	}
6817 	mddev->sb_flags = 0;
6818 	mddev->ro = MD_RDWR;
6819 	mddev->metadata_type[0] = 0;
6820 	mddev->chunk_sectors = 0;
6821 	mddev->ctime = mddev->utime = 0;
6822 	mddev->layout = 0;
6823 	mddev->logical_block_size = 0;
6824 	mddev->max_disks = 0;
6825 	mddev->events = 0;
6826 	mddev->can_decrease_events = 0;
6827 	mddev->delta_disks = 0;
6828 	mddev->reshape_backwards = 0;
6829 	mddev->new_level = LEVEL_NONE;
6830 	mddev->new_layout = 0;
6831 	mddev->new_chunk_sectors = 0;
6832 	mddev->curr_resync = MD_RESYNC_NONE;
6833 	atomic64_set(&mddev->resync_mismatches, 0);
6834 	mddev->suspend_lo = mddev->suspend_hi = 0;
6835 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
6836 	mddev->recovery = 0;
6837 	mddev->in_sync = 0;
6838 	mddev->changed = 0;
6839 	mddev->degraded = 0;
6840 	mddev->safemode = 0;
6841 	mddev->private = NULL;
6842 	mddev->cluster_info = NULL;
6843 	mddev->bitmap_info.offset = 0;
6844 	mddev->bitmap_info.default_offset = 0;
6845 	mddev->bitmap_info.default_space = 0;
6846 	mddev->bitmap_info.chunksize = 0;
6847 	mddev->bitmap_info.daemon_sleep = 0;
6848 	mddev->bitmap_info.max_write_behind = 0;
6849 	mddev->bitmap_info.nodes = 0;
6850 }
6851 
6852 static void __md_stop_writes(struct mddev *mddev)
6853 {
6854 	timer_delete_sync(&mddev->safemode_timer);
6855 
6856 	if (md_is_rdwr(mddev) || !mddev_is_dm(mddev)) {
6857 		if (mddev->pers && mddev->pers->quiesce) {
6858 			mddev->pers->quiesce(mddev, 1);
6859 			mddev->pers->quiesce(mddev, 0);
6860 		}
6861 
6862 		if (md_bitmap_enabled(mddev, true))
6863 			mddev->bitmap_ops->flush(mddev);
6864 	}
6865 
6866 	if (md_is_rdwr(mddev) &&
6867 	    ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
6868 	     mddev->sb_flags)) {
6869 		/* mark array as cleanly shut down */
6870 		if (!mddev_is_clustered(mddev))
6871 			mddev->in_sync = 1;
6872 		md_update_sb(mddev, 1);
6873 	}
6874 	/* disable policy to guarantee rdevs free resources for serialization */
6875 	clear_bit(MD_SERIALIZE_POLICY, &mddev->flags);
6876 	mddev_destroy_serial_pool(mddev, NULL);
6877 }
6878 
6879 void md_stop_writes(struct mddev *mddev)
6880 {
6881 	mddev_lock_nointr(mddev);
6882 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6883 	stop_sync_thread(mddev, true);
6884 	__md_stop_writes(mddev);
6885 	mddev_unlock(mddev);
6886 }
6887 EXPORT_SYMBOL_GPL(md_stop_writes);
6888 
6889 static void mddev_detach(struct mddev *mddev)
6890 {
6891 	if (md_bitmap_enabled(mddev, false))
6892 		mddev->bitmap_ops->wait_behind_writes(mddev);
6893 	if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) {
6894 		mddev->pers->quiesce(mddev, 1);
6895 		mddev->pers->quiesce(mddev, 0);
6896 	}
6897 	md_unregister_thread(mddev, &mddev->thread);
6898 
6899 	/* the unplug fn references 'conf' */
6900 	if (!mddev_is_dm(mddev))
6901 		blk_sync_queue(mddev->gendisk->queue);
6902 }
6903 
6904 static void __md_stop(struct mddev *mddev)
6905 {
6906 	struct md_personality *pers = mddev->pers;
6907 
6908 	md_bitmap_destroy(mddev);
6909 	mddev_detach(mddev);
6910 	spin_lock(&mddev->lock);
6911 	mddev->pers = NULL;
6912 	spin_unlock(&mddev->lock);
6913 	if (mddev->private)
6914 		pers->free(mddev, mddev->private);
6915 	mddev->private = NULL;
6916 	put_pers(pers);
6917 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6918 }
6919 
6920 void md_stop(struct mddev *mddev)
6921 {
6922 	lockdep_assert_held(&mddev->reconfig_mutex);
6923 
6924 	/* stop the array and free any attached data structures.
6925 	 * This is called from dm-raid.
6926 	 */
6927 	__md_stop_writes(mddev);
6928 	__md_stop(mddev);
6929 }
6930 
6932 EXPORT_SYMBOL_GPL(md_stop);
6933 /* ensure 'mddev->pers' exist before calling md_set_readonly() */
6934 static int md_set_readonly(struct mddev *mddev)
6935 {
6936 	int err = 0;
6937 	int did_freeze = 0;
6938 
6939 	if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
6940 		return -EBUSY;
6941 
6942 	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6943 		did_freeze = 1;
6944 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6945 	}
6946 
6947 	stop_sync_thread(mddev, false);
6948 	wait_event(mddev->sb_wait,
6949 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
6950 	mddev_lock_nointr(mddev);
6951 
6952 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6953 		pr_warn("md: %s still in use.\n", mdname(mddev));
6954 		err = -EBUSY;
6955 		goto out;
6956 	}
6957 
6958 	__md_stop_writes(mddev);
6959 
6960 	if (mddev->ro == MD_RDONLY) {
6961 		err = -ENXIO;
6962 		goto out;
6963 	}
6964 
6965 	mddev->ro = MD_RDONLY;
6966 	set_disk_ro(mddev->gendisk, 1);
6967 
6968 out:
6969 	if (!err || did_freeze) {
6970 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6971 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6972 		sysfs_notify_dirent_safe(mddev->sysfs_state);
6973 	}
6974 
6975 	return err;
6976 }
6977 
6978 /* mode:
6979  *   0 - completely stop and dis-assemble array
6980  *   0 - completely stop and disassemble array
6981  */
6982 static int do_md_stop(struct mddev *mddev, int mode)
6983 {
6984 	struct gendisk *disk = mddev->gendisk;
6985 	struct md_rdev *rdev;
6986 	int did_freeze = 0;
6987 
6988 	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6989 		did_freeze = 1;
6990 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6991 	}
6992 
6993 	stop_sync_thread(mddev, true);
6994 
6995 	if (mddev->sysfs_active ||
6996 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6997 		pr_warn("md: %s still in use.\n", mdname(mddev));
6998 		if (did_freeze) {
6999 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
7000 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7001 		}
7002 		return -EBUSY;
7003 	}
7004 	if (mddev->pers) {
7005 		if (!md_is_rdwr(mddev))
7006 			set_disk_ro(disk, 0);
7007 
7008 		if (mode == 2 && mddev->pers->sync_request &&
7009 		    mddev->to_remove == NULL)
7010 			mddev->to_remove = &md_redundancy_group;
7011 
7012 		__md_stop_writes(mddev);
7013 		__md_stop(mddev);
7014 
7015 		/* tell userspace to handle 'inactive' */
7016 		sysfs_notify_dirent_safe(mddev->sysfs_state);
7017 
7018 		rdev_for_each(rdev, mddev)
7019 			if (rdev->raid_disk >= 0)
7020 				sysfs_unlink_rdev(mddev, rdev);
7021 
7022 		set_capacity_and_notify(disk, 0);
7023 		mddev->changed = 1;
7024 
7025 		if (!md_is_rdwr(mddev))
7026 			mddev->ro = MD_RDWR;
7027 	}
7028 	/*
7029 	 * Free resources if final stop
7030 	 */
7031 	if (mode == 0) {
7032 		pr_info("md: %s stopped.\n", mdname(mddev));
7033 
7034 		if (mddev->bitmap_info.file) {
7035 			struct file *f = mddev->bitmap_info.file;
7036 			spin_lock(&mddev->lock);
7037 			mddev->bitmap_info.file = NULL;
7038 			spin_unlock(&mddev->lock);
7039 			fput(f);
7040 		}
7041 		mddev->bitmap_info.offset = 0;
7042 
7043 		export_array(mddev);
7044 		md_clean(mddev);
7045 		if (!legacy_async_del_gendisk)
7046 			set_bit(MD_DELETED, &mddev->flags);
7047 	}
7048 	md_new_event();
7049 	sysfs_notify_dirent_safe(mddev->sysfs_state);
7050 	return 0;
7051 }
7052 
7053 #ifndef MODULE
7054 static void autorun_array(struct mddev *mddev)
7055 {
7056 	struct md_rdev *rdev;
7057 	int err;
7058 
7059 	if (list_empty(&mddev->disks))
7060 		return;
7061 
7062 	pr_info("md: running: ");
7063 
7064 	rdev_for_each(rdev, mddev) {
7065 		pr_cont("<%pg>", rdev->bdev);
7066 	}
7067 	pr_cont("\n");
7068 
7069 	err = do_md_run(mddev);
7070 	if (err) {
7071 		pr_warn("md: do_md_run() returned %d\n", err);
7072 		do_md_stop(mddev, 0);
7073 	}
7074 }
7075 
7076 /*
7077  * let's try to run arrays based on all disks that have arrived
7078  * until now. (those are in pending_raid_disks)
7079  *
7080  * the method: pick the first pending disk, collect all disks with
7081  * the same UUID, remove all from the pending list and put them into
7082  * the 'same_array' list. Then order this list based on superblock
7083  * update time (freshest comes first), kick out 'old' disks and
7084  * compare superblocks. If everything's fine then run it.
7085  *
7086  * If "unit" is allocated, then bump its reference count
7087  */
7088 static void autorun_devices(int part)
7089 {
7090 	struct md_rdev *rdev0, *rdev, *tmp;
7091 	struct mddev *mddev;
7092 
7093 	pr_info("md: autorun ...\n");
7094 	while (!list_empty(&pending_raid_disks)) {
7095 		int unit;
7096 		dev_t dev;
7097 		LIST_HEAD(candidates);
7098 		rdev0 = list_entry(pending_raid_disks.next,
7099 					 struct md_rdev, same_set);
7100 
7101 		pr_debug("md: considering %pg ...\n", rdev0->bdev);
7102 		INIT_LIST_HEAD(&candidates);
7103 		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
7104 			if (super_90_load(rdev, rdev0, 0) >= 0) {
7105 				pr_debug("md:  adding %pg ...\n",
7106 					 rdev->bdev);
7107 				list_move(&rdev->same_set, &candidates);
7108 			}
7109 		/*
7110 		 * now we have a set of devices, with all of them having
7111 		 * mostly sane superblocks. It's time to allocate the
7112 		 * mddev.
7113 		 */
7114 		if (part) {
7115 			dev = MKDEV(mdp_major,
7116 				    rdev0->preferred_minor << MdpMinorShift);
7117 			unit = MINOR(dev) >> MdpMinorShift;
7118 		} else {
7119 			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
7120 			unit = MINOR(dev);
7121 		}
7122 		if (rdev0->preferred_minor != unit) {
7123 			pr_warn("md: unit number in %pg is bad: %d\n",
7124 				rdev0->bdev, rdev0->preferred_minor);
7125 			break;
7126 		}
7127 
7128 		mddev = md_alloc(dev, NULL);
7129 		if (IS_ERR(mddev))
7130 			break;
7131 
7132 		if (mddev_suspend_and_lock(mddev))
7133 			pr_warn("md: %s locked, cannot run\n", mdname(mddev));
7134 		else if (mddev->raid_disks || mddev->major_version
7135 			 || !list_empty(&mddev->disks)) {
7136 			pr_warn("md: %s already running, cannot run %pg\n",
7137 				mdname(mddev), rdev0->bdev);
7138 			mddev_unlock_and_resume(mddev);
7139 		} else {
7140 			pr_debug("md: created %s\n", mdname(mddev));
7141 			mddev->persistent = 1;
7142 			rdev_for_each_list(rdev, tmp, &candidates) {
7143 				list_del_init(&rdev->same_set);
7144 				if (bind_rdev_to_array(rdev, mddev))
7145 					export_rdev(rdev);
7146 			}
7147 			autorun_array(mddev);
7148 			mddev_unlock_and_resume(mddev);
7149 		}
7150 		/* on success, candidates will be empty; on error
7151 		 * it won't be...
7152 		 */
7153 		rdev_for_each_list(rdev, tmp, &candidates) {
7154 			list_del_init(&rdev->same_set);
7155 			export_rdev(rdev);
7156 		}
7157 		mddev_put(mddev);
7158 	}
7159 	pr_info("md: ... autorun DONE.\n");
7160 }
7161 #endif /* !MODULE */
7162 
7163 static int get_version(void __user *arg)
7164 {
7165 	mdu_version_t ver;
7166 
7167 	ver.major = MD_MAJOR_VERSION;
7168 	ver.minor = MD_MINOR_VERSION;
7169 	ver.patchlevel = MD_PATCHLEVEL_VERSION;
7170 
7171 	if (copy_to_user(arg, &ver, sizeof(ver)))
7172 		return -EFAULT;
7173 
7174 	return 0;
7175 }
7176 
7177 static int get_array_info(struct mddev *mddev, void __user *arg)
7178 {
7179 	mdu_array_info_t info;
7180 	int nr, working, insync, failed, spare;
7181 	struct md_rdev *rdev;
7182 
7183 	nr = working = insync = failed = spare = 0;
7184 	rcu_read_lock();
7185 	rdev_for_each_rcu(rdev, mddev) {
7186 		nr++;
7187 		if (test_bit(Faulty, &rdev->flags))
7188 			failed++;
7189 		else {
7190 			working++;
7191 			if (test_bit(In_sync, &rdev->flags))
7192 				insync++;
7193 			else if (test_bit(Journal, &rdev->flags))
7194 				/* TODO: add journal count to md_u.h */
7195 				;
7196 			else
7197 				spare++;
7198 		}
7199 	}
7200 	rcu_read_unlock();
7201 
7202 	info.major_version = mddev->major_version;
7203 	info.minor_version = mddev->minor_version;
7204 	info.patch_version = MD_PATCHLEVEL_VERSION;
7205 	info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
7206 	info.level         = mddev->level;
7207 	info.size          = mddev->dev_sectors / 2;
7208 	if (info.size != mddev->dev_sectors / 2) /* overflow */
7209 		info.size = -1;
7210 	info.nr_disks      = nr;
7211 	info.raid_disks    = mddev->raid_disks;
7212 	info.md_minor      = mddev->md_minor;
7213 	info.not_persistent = !mddev->persistent;
7214 
7215 	info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
7216 	info.state         = 0;
7217 	if (mddev->in_sync)
7218 		info.state = (1<<MD_SB_CLEAN);
7219 	if (mddev->bitmap && mddev->bitmap_info.offset)
7220 		info.state |= (1<<MD_SB_BITMAP_PRESENT);
7221 	if (mddev_is_clustered(mddev))
7222 		info.state |= (1<<MD_SB_CLUSTERED);
7223 	info.active_disks  = insync;
7224 	info.working_disks = working;
7225 	info.failed_disks  = failed;
7226 	info.spare_disks   = spare;
7227 
7228 	info.layout        = mddev->layout;
7229 	info.chunk_size    = mddev->chunk_sectors << 9;
7230 
7231 	if (copy_to_user(arg, &info, sizeof(info)))
7232 		return -EFAULT;
7233 
7234 	return 0;
7235 }
7236 
7237 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
7238 {
7239 	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
7240 	char *ptr;
7241 	int err;
7242 
7243 	file = kzalloc_obj(*file, GFP_NOIO);
7244 	if (!file)
7245 		return -ENOMEM;
7246 
7247 	err = 0;
7248 	spin_lock(&mddev->lock);
7249 	/* bitmap enabled */
7250 	if (mddev->bitmap_info.file) {
7251 		ptr = file_path(mddev->bitmap_info.file, file->pathname,
7252 				sizeof(file->pathname));
7253 		if (IS_ERR(ptr))
7254 			err = PTR_ERR(ptr);
7255 		else
7256 			memmove(file->pathname, ptr,
7257 				sizeof(file->pathname)-(ptr-file->pathname));
7258 	}
7259 	spin_unlock(&mddev->lock);
7260 
7261 	if (err == 0 &&
7262 	    copy_to_user(arg, file, sizeof(*file)))
7263 		err = -EFAULT;
7264 
7265 	kfree(file);
7266 	return err;
7267 }
7268 
7269 static int get_disk_info(struct mddev *mddev, void __user * arg)
7270 {
7271 	mdu_disk_info_t info;
7272 	struct md_rdev *rdev;
7273 
7274 	if (copy_from_user(&info, arg, sizeof(info)))
7275 		return -EFAULT;
7276 
7277 	rcu_read_lock();
7278 	rdev = md_find_rdev_nr_rcu(mddev, info.number);
7279 	if (rdev) {
7280 		info.major = MAJOR(rdev->bdev->bd_dev);
7281 		info.minor = MINOR(rdev->bdev->bd_dev);
7282 		info.raid_disk = rdev->raid_disk;
7283 		info.state = 0;
7284 		if (test_bit(Faulty, &rdev->flags))
7285 			info.state |= (1<<MD_DISK_FAULTY);
7286 		else if (test_bit(In_sync, &rdev->flags)) {
7287 			info.state |= (1<<MD_DISK_ACTIVE);
7288 			info.state |= (1<<MD_DISK_SYNC);
7289 		}
7290 		if (test_bit(Journal, &rdev->flags))
7291 			info.state |= (1<<MD_DISK_JOURNAL);
7292 		if (test_bit(WriteMostly, &rdev->flags))
7293 			info.state |= (1<<MD_DISK_WRITEMOSTLY);
7294 		if (test_bit(FailFast, &rdev->flags))
7295 			info.state |= (1<<MD_DISK_FAILFAST);
7296 	} else {
7297 		info.major = info.minor = 0;
7298 		info.raid_disk = -1;
7299 		info.state = (1<<MD_DISK_REMOVED);
7300 	}
7301 	rcu_read_unlock();
7302 
7303 	if (copy_to_user(arg, &info, sizeof(info)))
7304 		return -EFAULT;
7305 
7306 	return 0;
7307 }
7308 
7309 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
7310 {
7311 	struct md_rdev *rdev;
7312 	dev_t dev = MKDEV(info->major, info->minor);
7313 
7314 	if (mddev_is_clustered(mddev) &&
7315 		!(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
7316 		pr_warn("%s: Cannot add to clustered mddev.\n",
7317 			mdname(mddev));
7318 		return -EINVAL;
7319 	}
7320 
7321 	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
7322 		return -EOVERFLOW;
7323 
7324 	if (!mddev->raid_disks) {
7325 		int err;
7326 		/* expecting a device which has a superblock */
7327 		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
7328 		if (IS_ERR(rdev)) {
7329 			pr_warn("md: md_import_device returned %ld\n",
7330 				PTR_ERR(rdev));
7331 			return PTR_ERR(rdev);
7332 		}
7333 		if (!list_empty(&mddev->disks)) {
7334 			struct md_rdev *rdev0
7335 				= list_entry(mddev->disks.next,
7336 					     struct md_rdev, same_set);
7337 			err = super_types[mddev->major_version]
7338 				.load_super(rdev, rdev0, mddev->minor_version);
7339 			if (err < 0) {
7340 				pr_warn("md: %pg has different UUID to %pg\n",
7341 					rdev->bdev,
7342 					rdev0->bdev);
7343 				export_rdev(rdev);
7344 				return -EINVAL;
7345 			}
7346 		}
7347 		err = bind_rdev_to_array(rdev, mddev);
7348 		if (err)
7349 			export_rdev(rdev);
7350 		return err;
7351 	}
7352 
7353 	/*
7354 	 * md_add_new_disk can be used once the array is assembled
7355 	 * to add "hot spares".  They must already have a superblock
7356 	 * written.
7357 	 */
7358 	if (mddev->pers) {
7359 		int err;
7360 		if (!mddev->pers->hot_add_disk) {
7361 			pr_warn("%s: personality does not support diskops!\n",
7362 				mdname(mddev));
7363 			return -EINVAL;
7364 		}
7365 		if (mddev->persistent)
7366 			rdev = md_import_device(dev, mddev->major_version,
7367 						mddev->minor_version);
7368 		else
7369 			rdev = md_import_device(dev, -1, -1);
7370 		if (IS_ERR(rdev)) {
7371 			pr_warn("md: md_import_device returned %ld\n",
7372 				PTR_ERR(rdev));
7373 			return PTR_ERR(rdev);
7374 		}
7375 		/* set saved_raid_disk if appropriate */
7376 		if (!mddev->persistent) {
7377 			if (info->state & (1<<MD_DISK_SYNC)  &&
7378 			    info->raid_disk < mddev->raid_disks) {
7379 				rdev->raid_disk = info->raid_disk;
7380 				clear_bit(Bitmap_sync, &rdev->flags);
7381 			} else
7382 				rdev->raid_disk = -1;
7383 			rdev->saved_raid_disk = rdev->raid_disk;
7384 		} else
7385 			super_types[mddev->major_version].
7386 				validate_super(mddev, NULL/*freshest*/, rdev);
7387 		if ((info->state & (1<<MD_DISK_SYNC)) &&
7388 		     rdev->raid_disk != info->raid_disk) {
7389 			/* This was a hot-add request, but the events don't
7390 			 * match, so reject it.
7391 			 */
7392 			export_rdev(rdev);
7393 			return -EINVAL;
7394 		}
7395 
7396 		clear_bit(In_sync, &rdev->flags); /* just to be sure */
7397 		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
7398 			set_bit(WriteMostly, &rdev->flags);
7399 		else
7400 			clear_bit(WriteMostly, &rdev->flags);
7401 		if (info->state & (1<<MD_DISK_FAILFAST))
7402 			set_bit(FailFast, &rdev->flags);
7403 		else
7404 			clear_bit(FailFast, &rdev->flags);
7405 
7406 		if (info->state & (1<<MD_DISK_JOURNAL)) {
7407 			struct md_rdev *rdev2;
7408 			bool has_journal = false;
7409 
7410 			/* make sure there is no existing journal disk */
7411 			rdev_for_each(rdev2, mddev) {
7412 				if (test_bit(Journal, &rdev2->flags)) {
7413 					has_journal = true;
7414 					break;
7415 				}
7416 			}
7417 			if (has_journal || mddev->bitmap) {
7418 				export_rdev(rdev);
7419 				return -EBUSY;
7420 			}
7421 			set_bit(Journal, &rdev->flags);
7422 		}
7423 		/*
7424 		 * check whether the device shows up in other nodes
7425 		 */
7426 		if (mddev_is_clustered(mddev)) {
7427 			if (info->state & (1 << MD_DISK_CANDIDATE))
7428 				set_bit(Candidate, &rdev->flags);
7429 			else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
7430 				/* --add initiated by this node */
7431 				err = mddev->cluster_ops->add_new_disk(mddev, rdev);
7432 				if (err) {
7433 					export_rdev(rdev);
7434 					return err;
7435 				}
7436 			}
7437 		}
7438 
7439 		rdev->raid_disk = -1;
7440 		err = bind_rdev_to_array(rdev, mddev);
7441 
7442 		if (err)
7443 			export_rdev(rdev);
7444 
7445 		if (mddev_is_clustered(mddev)) {
7446 			if (info->state & (1 << MD_DISK_CANDIDATE)) {
7447 				if (!err) {
7448 					err = mddev->cluster_ops->new_disk_ack(
7449 							mddev, true);
7450 					if (err)
7451 						md_kick_rdev_from_array(rdev);
7452 				}
7453 			} else {
7454 				if (err)
7455 					mddev->cluster_ops->add_new_disk_cancel(mddev);
7456 				else
7457 					err = add_bound_rdev(rdev);
7458 			}
7459 
7460 		} else if (!err)
7461 			err = add_bound_rdev(rdev);
7462 
7463 		return err;
7464 	}
7465 
7466 	/* otherwise, md_add_new_disk is only allowed
7467 	 * for major_version==0 superblocks
7468 	 */
7469 	if (mddev->major_version != 0) {
7470 		pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
7471 		return -EINVAL;
7472 	}
7473 
7474 	if (!(info->state & (1<<MD_DISK_FAULTY))) {
7475 		int err;
7476 		rdev = md_import_device(dev, -1, 0);
7477 		if (IS_ERR(rdev)) {
7478 			pr_warn("md: error, md_import_device() returned %ld\n",
7479 				PTR_ERR(rdev));
7480 			return PTR_ERR(rdev);
7481 		}
7482 		rdev->desc_nr = info->number;
7483 		if (info->raid_disk < mddev->raid_disks)
7484 			rdev->raid_disk = info->raid_disk;
7485 		else
7486 			rdev->raid_disk = -1;
7487 
7488 		if (rdev->raid_disk < mddev->raid_disks)
7489 			if (info->state & (1<<MD_DISK_SYNC))
7490 				set_bit(In_sync, &rdev->flags);
7491 
7492 		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
7493 			set_bit(WriteMostly, &rdev->flags);
7494 		if (info->state & (1<<MD_DISK_FAILFAST))
7495 			set_bit(FailFast, &rdev->flags);
7496 
7497 		if (!mddev->persistent) {
7498 			pr_debug("md: nonpersistent superblock ...\n");
7499 			rdev->sb_start = bdev_nr_sectors(rdev->bdev);
7500 		} else
7501 			rdev->sb_start = calc_dev_sboffset(rdev);
7502 		rdev->sectors = rdev->sb_start;
7503 
7504 		err = bind_rdev_to_array(rdev, mddev);
7505 		if (err) {
7506 			export_rdev(rdev);
7507 			return err;
7508 		}
7509 	}
7510 
7511 	return 0;
7512 }
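
/*
 * Summary of the three md_add_new_disk() paths above:
 *  - mddev->raid_disks == 0: the array is being assembled, so the device
 *    must carry a superblock, which is checked against any devices already
 *    collected;
 *  - mddev->pers != NULL: the array is running, so the device is imported
 *    and hot-added (possibly as a journal or cluster candidate);
 *  - otherwise: a new array is being built with a 0.90 superblock, so the
 *    descriptor in *info is trusted directly.
 */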
7513 
7514 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
7515 {
7516 	struct md_rdev *rdev;
7517 
7518 	if (!mddev->pers)
7519 		return -ENODEV;
7520 
7521 	rdev = find_rdev(mddev, dev);
7522 	if (!rdev)
7523 		return -ENXIO;
7524 
7525 	if (rdev->raid_disk < 0)
7526 		goto kick_rdev;
7527 
7528 	clear_bit(Blocked, &rdev->flags);
7529 	remove_and_add_spares(mddev, rdev);
7530 
7531 	if (rdev->raid_disk >= 0)
7532 		goto busy;
7533 
7534 kick_rdev:
7535 	if (mddev_is_clustered(mddev) &&
7536 	    mddev->cluster_ops->remove_disk(mddev, rdev))
7537 		goto busy;
7538 
7539 	md_kick_rdev_from_array(rdev);
7540 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7541 	if (!mddev->thread)
7542 		md_update_sb(mddev, 1);
7543 	md_new_event();
7544 
7545 	return 0;
7546 busy:
7547 	pr_debug("md: cannot remove active disk %pg from %s ...\n",
7548 		 rdev->bdev, mdname(mddev));
7549 	return -EBUSY;
7550 }
7551 
7552 static int hot_add_disk(struct mddev *mddev, dev_t dev)
7553 {
7554 	int err;
7555 	struct md_rdev *rdev;
7556 
7557 	if (!mddev->pers)
7558 		return -ENODEV;
7559 
7560 	if (mddev->major_version != 0) {
7561 		pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
7562 			mdname(mddev));
7563 		return -EINVAL;
7564 	}
7565 	if (!mddev->pers->hot_add_disk) {
7566 		pr_warn("%s: personality does not support diskops!\n",
7567 			mdname(mddev));
7568 		return -EINVAL;
7569 	}
7570 
7571 	rdev = md_import_device(dev, -1, 0);
7572 	if (IS_ERR(rdev)) {
7573 		pr_warn("md: error, md_import_device() returned %ld\n",
7574 			PTR_ERR(rdev));
7575 		return -EINVAL;
7576 	}
7577 
7578 	if (mddev->persistent)
7579 		rdev->sb_start = calc_dev_sboffset(rdev);
7580 	else
7581 		rdev->sb_start = bdev_nr_sectors(rdev->bdev);
7582 
7583 	rdev->sectors = rdev->sb_start;
7584 
7585 	if (test_bit(Faulty, &rdev->flags)) {
7586 		pr_warn("md: can not hot-add faulty %pg disk to %s!\n",
7587 			rdev->bdev, mdname(mddev));
7588 		err = -EINVAL;
7589 		goto abort_export;
7590 	}
7591 
7592 	clear_bit(In_sync, &rdev->flags);
7593 	rdev->desc_nr = -1;
7594 	rdev->saved_raid_disk = -1;
7595 	err = bind_rdev_to_array(rdev, mddev);
7596 	if (err)
7597 		goto abort_export;
7598 
7599 	/*
7600 	 * The rest had better be atomic, since disk failures can be
7601 	 * noticed in interrupt context ...
7602 	 */
7603 
7604 	rdev->raid_disk = -1;
7605 
7606 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7607 	if (!mddev->thread)
7608 		md_update_sb(mddev, 1);
7609 	/*
7610 	 * Kick recovery, maybe this spare has to be added to the
7611 	 * array immediately.
7612 	 */
7613 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7614 	md_new_event();
7615 	return 0;
7616 
7617 abort_export:
7618 	export_rdev(rdev);
7619 	return err;
7620 }
7621 
7622 static int set_bitmap_file(struct mddev *mddev, int fd)
7623 {
7624 	int err = 0;
7625 
7626 	if (!md_bitmap_registered(mddev))
7627 		return -EINVAL;
7628 
7629 	if (mddev->pers) {
7630 		if (!mddev->pers->quiesce || !mddev->thread)
7631 			return -EBUSY;
7632 		if (mddev->recovery || mddev->sync_thread)
7633 			return -EBUSY;
7634 		/* we should be able to change the bitmap.. */
7635 	}
7636 
7637 	if (fd >= 0) {
7638 		struct inode *inode;
7639 		struct file *f;
7640 
7641 		if (mddev->bitmap || mddev->bitmap_info.file)
7642 			return -EEXIST; /* cannot add when bitmap is present */
7643 
7644 		if (!IS_ENABLED(CONFIG_MD_BITMAP_FILE)) {
7645 			pr_warn("%s: bitmap files not supported by this kernel\n",
7646 				mdname(mddev));
7647 			return -EINVAL;
7648 		}
7649 		pr_warn("%s: using deprecated bitmap file support\n",
7650 			mdname(mddev));
7651 
7652 		f = fget(fd);
7653 
7654 		if (f == NULL) {
7655 			pr_warn("%s: error: failed to get bitmap file\n",
7656 				mdname(mddev));
7657 			return -EBADF;
7658 		}
7659 
7660 		inode = f->f_mapping->host;
7661 		if (!S_ISREG(inode->i_mode)) {
7662 			pr_warn("%s: error: bitmap file must be a regular file\n",
7663 				mdname(mddev));
7664 			err = -EBADF;
7665 		} else if (!(f->f_mode & FMODE_WRITE)) {
7666 			pr_warn("%s: error: bitmap file must open for write\n",
7667 				mdname(mddev));
7668 			err = -EBADF;
7669 		} else if (atomic_read(&inode->i_writecount) != 1) {
7670 			pr_warn("%s: error: bitmap file is already in use\n",
7671 				mdname(mddev));
7672 			err = -EBUSY;
7673 		}
7674 		if (err) {
7675 			fput(f);
7676 			return err;
7677 		}
7678 		mddev->bitmap_info.file = f;
7679 		mddev->bitmap_info.offset = 0; /* file overrides offset */
7680 	} else if (mddev->bitmap == NULL)
7681 		return -ENOENT; /* cannot remove what isn't there */
7682 	err = 0;
7683 	if (mddev->pers) {
7684 		if (fd >= 0) {
7685 			err = md_bitmap_create(mddev);
7686 			if (!err)
7687 				err = mddev->bitmap_ops->load(mddev);
7688 
7689 			if (err) {
7690 				md_bitmap_destroy(mddev);
7691 				fd = -1;
7692 			}
7693 		} else {
7694 			md_bitmap_destroy(mddev);
7695 		}
7696 	}
7697 
7698 	if (fd < 0) {
7699 		struct file *f = mddev->bitmap_info.file;
7700 		if (f) {
7701 			spin_lock(&mddev->lock);
7702 			mddev->bitmap_info.file = NULL;
7703 			spin_unlock(&mddev->lock);
7704 			fput(f);
7705 		}
7706 	}
7707 
7708 	return err;
7709 }
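
/*
 * Note on the lifecycle above: for fd >= 0 the file reference taken by
 * fget() is owned by mddev->bitmap_info.file until the bitmap is removed,
 * at which point it is cleared under mddev->lock and then fput().  A failed
 * create/load reuses the removal path by overwriting fd with -1.
 */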
7710 
7711 /*
7712  * md_set_array_info is used in two different ways.
7713  * The original usage is when creating a new array.
7714  * In this usage, raid_disks is > 0 and, together with
7715  *  level, size, not_persistent, layout and chunk_size, determines the
7716  *  shape of the array.
7717  *  This will always create an array with a type-0.90.0 superblock.
7718  * The newer usage is when assembling an array.
7719  *  In this case raid_disks will be 0, and the major_version field is
7720  *  used to determine which style of superblocks are to be found on the devices.
7721  *  The minor and patch _version numbers are also kept in case the
7722  *  superblock handler wishes to interpret them.
7723  */
7724 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
7725 {
7726 	if (info->raid_disks == 0) {
7727 		/* just setting version number for superblock loading */
7728 		if (info->major_version < 0 ||
7729 		    info->major_version >= ARRAY_SIZE(super_types) ||
7730 		    super_types[info->major_version].name == NULL) {
7731 			/* maybe try to auto-load a module? */
7732 			pr_warn("md: superblock version %d not known\n",
7733 				info->major_version);
7734 			return -EINVAL;
7735 		}
7736 		mddev->major_version = info->major_version;
7737 		mddev->minor_version = info->minor_version;
7738 		mddev->patch_version = info->patch_version;
7739 		mddev->persistent = !info->not_persistent;
7740 		/* ensure mddev_put doesn't delete this now that there
7741 		 * is some minimal configuration.
7742 		 */
7743 		mddev->ctime         = ktime_get_real_seconds();
7744 		return 0;
7745 	}
7746 	mddev->major_version = MD_MAJOR_VERSION;
7747 	mddev->minor_version = MD_MINOR_VERSION;
7748 	mddev->patch_version = MD_PATCHLEVEL_VERSION;
7749 	mddev->ctime         = ktime_get_real_seconds();
7750 
7751 	mddev->level         = info->level;
7752 	mddev->clevel[0]     = 0;
7753 	mddev->dev_sectors   = 2 * (sector_t)info->size;
7754 	mddev->raid_disks    = info->raid_disks;
7755 	/* don't set md_minor, it is determined by which /dev/md* was
7756 	 * opened
7757 	 */
7758 	if (info->state & (1<<MD_SB_CLEAN))
7759 		mddev->resync_offset = MaxSector;
7760 	else
7761 		mddev->resync_offset = 0;
7762 	mddev->persistent    = !info->not_persistent;
7763 	mddev->external	     = 0;
7764 
7765 	mddev->layout        = info->layout;
7766 	if (mddev->level == 0)
7767 		/* Cannot trust RAID0 layout info here */
7768 		mddev->layout = -1;
7769 	mddev->chunk_sectors = info->chunk_size >> 9;
7770 
7771 	if (mddev->persistent) {
7772 		mddev->max_disks = MD_SB_DISKS;
7773 		mddev->flags = 0;
7774 		mddev->sb_flags = 0;
7775 	}
7776 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7777 
7778 	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
7779 	mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
7780 	mddev->bitmap_info.offset = 0;
7781 
7782 	mddev->reshape_position = MaxSector;
7783 
7784 	/*
7785 	 * Generate a 128 bit UUID
7786 	 */
7787 	get_random_bytes(mddev->uuid, 16);
7788 
7789 	mddev->new_level = mddev->level;
7790 	mddev->new_chunk_sectors = mddev->chunk_sectors;
7791 	mddev->new_layout = mddev->layout;
7792 	mddev->delta_disks = 0;
7793 	mddev->reshape_backwards = 0;
7794 
7795 	return 0;
7796 }
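
/*
 * For illustration, the two usages described above correspond to userspace
 * calls along these lines (a sketch, not part of md.c):
 *
 *	mdu_array_info_t info = { 0 };
 *
 *	// assemble: raid_disks == 0, only the superblock version matters
 *	info.major_version = 0;
 *	info.minor_version = 90;
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 *
 *	// create: raid_disks > 0; level, size, layout and chunk_size give
 *	// the shape, and a 0.90 superblock is implied
 *	info.raid_disks = 2;
 *	info.level = 1;
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 */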
7797 
7798 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
7799 {
7800 	lockdep_assert_held(&mddev->reconfig_mutex);
7801 
7802 	if (mddev->external_size)
7803 		return;
7804 
7805 	mddev->array_sectors = array_sectors;
7806 }
7807 EXPORT_SYMBOL(md_set_array_sectors);
7808 
7809 static int update_size(struct mddev *mddev, sector_t num_sectors)
7810 {
7811 	struct md_rdev *rdev;
7812 	int rv;
7813 	int fit = (num_sectors == 0);
7814 	sector_t old_dev_sectors = mddev->dev_sectors;
7815 
7816 	if (mddev->pers->resize == NULL)
7817 		return -EINVAL;
7818 	/* The "num_sectors" is the number of sectors of each device that
7819 	 * is used.  This can only make sense for arrays with redundancy.
7820 	 * linear and raid0 always use whatever space is available. We can only
7821 	 * consider changing this number if no resync or reconstruction is
7822 	 * happening, and if the new size is acceptable. It must fit before the
7823 	 * sb_start or, if that is <data_offset, it must fit before the size
7824 	 * of each device.  If num_sectors is zero, we find the largest size
7825 	 * that fits.
7826 	 */
7827 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
7828 		return -EBUSY;
7829 	if (!md_is_rdwr(mddev))
7830 		return -EROFS;
7831 
7832 	rdev_for_each(rdev, mddev) {
7833 		sector_t avail = rdev->sectors;
7834 
7835 		if (fit && (num_sectors == 0 || num_sectors > avail))
7836 			num_sectors = avail;
7837 		if (avail < num_sectors)
7838 			return -ENOSPC;
7839 	}
7840 	rv = mddev->pers->resize(mddev, num_sectors);
7841 	if (!rv) {
7842 		if (mddev_is_clustered(mddev))
7843 			mddev->cluster_ops->update_size(mddev, old_dev_sectors);
7844 		else if (!mddev_is_dm(mddev))
7845 			set_capacity_and_notify(mddev->gendisk,
7846 						mddev->array_sectors);
7847 	}
7848 	return rv;
7849 }
7850 
7851 static int update_raid_disks(struct mddev *mddev, int raid_disks)
7852 {
7853 	int rv;
7854 	struct md_rdev *rdev;
7855 	/* change the number of raid disks */
7856 	if (mddev->pers->check_reshape == NULL)
7857 		return -EINVAL;
7858 	if (!md_is_rdwr(mddev))
7859 		return -EROFS;
7860 	if (raid_disks <= 0 ||
7861 	    (mddev->max_disks && raid_disks >= mddev->max_disks))
7862 		return -EINVAL;
7863 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7864 	    test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
7865 	    mddev->reshape_position != MaxSector)
7866 		return -EBUSY;
7867 
7868 	rdev_for_each(rdev, mddev) {
7869 		if (mddev->raid_disks < raid_disks &&
7870 		    rdev->data_offset < rdev->new_data_offset)
7871 			return -EINVAL;
7872 		if (mddev->raid_disks > raid_disks &&
7873 		    rdev->data_offset > rdev->new_data_offset)
7874 			return -EINVAL;
7875 	}
7876 
7877 	mddev->delta_disks = raid_disks - mddev->raid_disks;
7878 	if (mddev->delta_disks < 0)
7879 		mddev->reshape_backwards = 1;
7880 	else if (mddev->delta_disks > 0)
7881 		mddev->reshape_backwards = 0;
7882 
7883 	rv = mddev->pers->check_reshape(mddev);
7884 	if (rv < 0) {
7885 		mddev->delta_disks = 0;
7886 		mddev->reshape_backwards = 0;
7887 	}
7888 	return rv;
7889 }
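
/*
 * The sign of delta_disks set above picks the reshape direction: growing
 * an array (delta_disks > 0) reshapes forward, while shrinking it
 * (delta_disks < 0) sets reshape_backwards so the reshape walks from the
 * end of the array toward the start.
 */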
7890 
7891 static int get_cluster_ops(struct mddev *mddev)
7892 {
7893 	xa_lock(&md_submodule);
7894 	mddev->cluster_ops = xa_load(&md_submodule, ID_CLUSTER);
7895 	if (mddev->cluster_ops &&
7896 	    !try_module_get(mddev->cluster_ops->head.owner))
7897 		mddev->cluster_ops = NULL;
7898 	xa_unlock(&md_submodule);
7899 
7900 	return mddev->cluster_ops == NULL ? -ENOENT : 0;
7901 }
7902 
7903 static void put_cluster_ops(struct mddev *mddev)
7904 {
7905 	if (!mddev->cluster_ops)
7906 		return;
7907 
7908 	mddev->cluster_ops->leave(mddev);
7909 	module_put(mddev->cluster_ops->head.owner);
7910 	mddev->cluster_ops = NULL;
7911 }
7912 
7913 /*
7914  * update_array_info is used to change the configuration of an
7915  * on-line array.
7916  * The version, ctime, level, size, raid_disks, not_persistent, layout
7917  * and chunk_size fields in the info are checked against the array.
7918  * Any differences that cannot be handled will cause an error.
7919  * Normally, only one change can be managed at a time.
7920  */
7921 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
7922 {
7923 	int rv = 0;
7924 	int cnt = 0;
7925 	int state = 0;
7926 
7927 	/* calculate expected state, ignoring low bits */
7928 	if (mddev->bitmap && mddev->bitmap_info.offset)
7929 		state |= (1 << MD_SB_BITMAP_PRESENT);
7930 
7931 	if (mddev->major_version != info->major_version ||
7932 	    mddev->minor_version != info->minor_version ||
7933 /*	    mddev->patch_version != info->patch_version || */
7934 	    mddev->ctime         != info->ctime         ||
7935 	    mddev->level         != info->level         ||
7936 /*	    mddev->layout        != info->layout        || */
7937 	    mddev->persistent	 != !info->not_persistent ||
7938 	    mddev->chunk_sectors != info->chunk_size >> 9 ||
7939 	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7940 	    ((state^info->state) & 0xfffffe00)
7941 		)
7942 		return -EINVAL;
7943 	/* Check there is only one change */
7944 	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7945 		cnt++;
7946 	if (mddev->raid_disks != info->raid_disks)
7947 		cnt++;
7948 	if (mddev->layout != info->layout)
7949 		cnt++;
7950 	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7951 		cnt++;
7952 	if (cnt == 0)
7953 		return 0;
7954 	if (cnt > 1)
7955 		return -EINVAL;
7956 
7957 	if (mddev->layout != info->layout) {
7958 		/* Change layout
7959 		 * we don't need to do anything at the md level, the
7960 		 * personality will take care of it all.
7961 		 */
7962 		if (mddev->pers->check_reshape == NULL)
7963 			return -EINVAL;
7964 		else {
7965 			mddev->new_layout = info->layout;
7966 			rv = mddev->pers->check_reshape(mddev);
7967 			if (rv)
7968 				mddev->new_layout = mddev->layout;
7969 			return rv;
7970 		}
7971 	}
7972 	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7973 		rv = update_size(mddev, (sector_t)info->size * 2);
7974 
7975 	if (mddev->raid_disks    != info->raid_disks)
7976 		rv = update_raid_disks(mddev, info->raid_disks);
7977 
7978 	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
7979 		if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7980 			rv = -EINVAL;
7981 			goto err;
7982 		}
7983 		if (mddev->recovery || mddev->sync_thread) {
7984 			rv = -EBUSY;
7985 			goto err;
7986 		}
7987 		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
7988 			/* add the bitmap */
7989 			if (mddev->bitmap) {
7990 				rv = -EEXIST;
7991 				goto err;
7992 			}
7993 			if (mddev->bitmap_info.default_offset == 0) {
7994 				rv = -EINVAL;
7995 				goto err;
7996 			}
7997 			mddev->bitmap_info.offset =
7998 				mddev->bitmap_info.default_offset;
7999 			mddev->bitmap_info.space =
8000 				mddev->bitmap_info.default_space;
8001 			rv = md_bitmap_create(mddev);
8002 			if (!rv)
8003 				rv = mddev->bitmap_ops->load(mddev);
8004 
8005 			if (rv)
8006 				md_bitmap_destroy(mddev);
8007 		} else {
8008 			struct md_bitmap_stats stats;
8009 
8010 			rv = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
8011 			if (rv)
8012 				goto err;
8013 
8014 			if (stats.file) {
8015 				rv = -EINVAL;
8016 				goto err;
8017 			}
8018 
8019 			if (mddev->bitmap_info.nodes) {
8020 				/* hold PW on all the bitmap lock */
8021 				if (mddev->cluster_ops->lock_all_bitmaps(mddev) <= 0) {
8022 					pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
8023 					rv = -EPERM;
8024 					mddev->cluster_ops->unlock_all_bitmaps(mddev);
8025 					goto err;
8026 				}
8027 
8028 				mddev->bitmap_info.nodes = 0;
8029 				put_cluster_ops(mddev);
8030 				mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
8031 			}
8032 			md_bitmap_destroy(mddev);
8033 			mddev->bitmap_info.offset = 0;
8034 		}
8035 	}
8036 	md_update_sb(mddev, 1);
8037 	return rv;
8038 err:
8039 	return rv;
8040 }
8041 
8042 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
8043 {
8044 	struct md_rdev *rdev;
8045 	int err = 0;
8046 
8047 	if (mddev->pers == NULL)
8048 		return -ENODEV;
8049 
8050 	rcu_read_lock();
8051 	rdev = md_find_rdev_rcu(mddev, dev);
8052 	if (!rdev)
8053 		err = -ENODEV;
8054 	else {
8055 		md_error(mddev, rdev);
8056 		if (test_bit(MD_BROKEN, &mddev->flags))
8057 			err = -EBUSY;
8058 	}
8059 	rcu_read_unlock();
8060 	return err;
8061 }
8062 
8063 /*
8064  * We have a problem here : there is no easy way to give a CHS
8065  * virtual geometry. We currently pretend that we have a 2 heads
8066  * 4 sectors (with a BIG number of cylinders...). This drives
8067  * dosfs just mad... ;-)
8068  */
8069 static int md_getgeo(struct gendisk *disk, struct hd_geometry *geo)
8070 {
8071 	struct mddev *mddev = disk->private_data;
8072 
8073 	geo->heads = 2;
8074 	geo->sectors = 4;
8075 	geo->cylinders = mddev->array_sectors / 8;
8076 	return 0;
8077 }
8078 
8079 static inline int md_ioctl_valid(unsigned int cmd)
8080 {
8081 	switch (cmd) {
8082 	case GET_ARRAY_INFO:
8083 	case GET_DISK_INFO:
8084 	case RAID_VERSION:
8085 		return 0;
8086 	case ADD_NEW_DISK:
8087 	case GET_BITMAP_FILE:
8088 	case HOT_ADD_DISK:
8089 	case HOT_REMOVE_DISK:
8090 	case RESTART_ARRAY_RW:
8091 	case RUN_ARRAY:
8092 	case SET_ARRAY_INFO:
8093 	case SET_BITMAP_FILE:
8094 	case SET_DISK_FAULTY:
8095 	case STOP_ARRAY:
8096 	case STOP_ARRAY_RO:
8097 	case CLUSTERED_DISK_NACK:
8098 		if (!capable(CAP_SYS_ADMIN))
8099 			return -EACCES;
8100 		return 0;
8101 	default:
8102 		return -ENOTTY;
8103 	}
8104 }
8105 
8106 static bool md_ioctl_need_suspend(unsigned int cmd)
8107 {
8108 	switch (cmd) {
8109 	case ADD_NEW_DISK:
8110 	case HOT_ADD_DISK:
8111 	case HOT_REMOVE_DISK:
8112 	case SET_BITMAP_FILE:
8113 	case SET_ARRAY_INFO:
8114 		return true;
8115 	default:
8116 		return false;
8117 	}
8118 }
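
/*
 * The commands listed above all change array membership or on-disk
 * metadata layout, so md_ioctl() runs them under mddev_suspend_and_lock()
 * to quiesce normal IO first; the remaining commands only take
 * reconfig_mutex via mddev_lock().
 */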
8119 
8120 static int __md_set_array_info(struct mddev *mddev, void __user *argp)
8121 {
8122 	mdu_array_info_t info;
8123 	int err;
8124 
8125 	if (!argp)
8126 		memset(&info, 0, sizeof(info));
8127 	else if (copy_from_user(&info, argp, sizeof(info)))
8128 		return -EFAULT;
8129 
8130 	if (mddev->pers) {
8131 		err = update_array_info(mddev, &info);
8132 		if (err)
8133 			pr_warn("md: couldn't update array info. %d\n", err);
8134 		return err;
8135 	}
8136 
8137 	if (!list_empty(&mddev->disks)) {
8138 		pr_warn("md: array %s already has disks!\n", mdname(mddev));
8139 		return -EBUSY;
8140 	}
8141 
8142 	if (mddev->raid_disks) {
8143 		pr_warn("md: array %s already initialised!\n", mdname(mddev));
8144 		return -EBUSY;
8145 	}
8146 
8147 	err = md_set_array_info(mddev, &info);
8148 	if (err)
8149 		pr_warn("md: couldn't set array info. %d\n", err);
8150 
8151 	return err;
8152 }
8153 
8154 static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
8155 			unsigned int cmd, unsigned long arg)
8156 {
8157 	int err = 0;
8158 	void __user *argp = (void __user *)arg;
8159 	struct mddev *mddev = NULL;
8160 
8161 	err = md_ioctl_valid(cmd);
8162 	if (err)
8163 		return err;
8164 
8165 	/*
8166 	 * Commands dealing with the RAID driver but not any
8167 	 * particular array:
8168 	 */
8169 	if (cmd == RAID_VERSION)
8170 		return get_version(argp);
8171 
8172 	/*
8173 	 * Commands creating/starting a new array:
8174 	 */
8175 
8176 	mddev = bdev->bd_disk->private_data;
8177 
8178 	/* Some actions do not require the mutex */
8179 	switch (cmd) {
8180 	case GET_ARRAY_INFO:
8181 		if (!mddev->raid_disks && !mddev->external)
8182 			return -ENODEV;
8183 		return get_array_info(mddev, argp);
8184 
8185 	case GET_DISK_INFO:
8186 		if (!mddev->raid_disks && !mddev->external)
8187 			return -ENODEV;
8188 		return get_disk_info(mddev, argp);
8189 
8190 	case SET_DISK_FAULTY:
8191 		return set_disk_faulty(mddev, new_decode_dev(arg));
8192 
8193 	case GET_BITMAP_FILE:
8194 		return get_bitmap_file(mddev, argp);
8195 	}
8196 
8197 	if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
8198 		/* Need to flush page cache, and ensure no-one else opens
8199 		 * and writes
8200 		 */
8201 		err = mddev_set_closing_and_sync_blockdev(mddev, 1);
8202 		if (err)
8203 			return err;
8204 	}
8205 
8206 	if (!md_is_rdwr(mddev))
8207 		flush_work(&mddev->sync_work);
8208 
8209 	err = md_ioctl_need_suspend(cmd) ? mddev_suspend_and_lock(mddev) :
8210 					   mddev_lock(mddev);
8211 	if (err) {
8212 		pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
8213 			 err, cmd);
8214 		goto out;
8215 	}
8216 
8217 	if (cmd == SET_ARRAY_INFO) {
8218 		err = __md_set_array_info(mddev, argp);
8219 		goto unlock;
8220 	}
8221 
8222 	/*
8223 	 * Commands querying/configuring an existing array:
8224 	 */
8225 	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
8226 	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
8227 	if ((!mddev->raid_disks && !mddev->external)
8228 	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
8229 	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
8230 	    && cmd != GET_BITMAP_FILE) {
8231 		err = -ENODEV;
8232 		goto unlock;
8233 	}
8234 
8235 	/*
8236 	 * Commands even a read-only array can execute:
8237 	 */
8238 	switch (cmd) {
8239 	case RESTART_ARRAY_RW:
8240 		err = restart_array(mddev);
8241 		goto unlock;
8242 
8243 	case STOP_ARRAY:
8244 		err = do_md_stop(mddev, 0);
8245 		goto unlock;
8246 
8247 	case STOP_ARRAY_RO:
8248 		if (mddev->pers)
8249 			err = md_set_readonly(mddev);
8250 		goto unlock;
8251 
8252 	case HOT_REMOVE_DISK:
8253 		err = hot_remove_disk(mddev, new_decode_dev(arg));
8254 		goto unlock;
8255 
8256 	case ADD_NEW_DISK:
8257 		/* We can support ADD_NEW_DISK on read-only arrays
8258 		 * only if we are re-adding a preexisting device.
8259 		 * So require mddev->pers and MD_DISK_SYNC.
8260 		 */
8261 		if (mddev->pers) {
8262 			mdu_disk_info_t info;
8263 			if (copy_from_user(&info, argp, sizeof(info)))
8264 				err = -EFAULT;
8265 			else if (!(info.state & (1<<MD_DISK_SYNC)))
8266 				/* Need to clear read-only for this */
8267 				break;
8268 			else
8269 				err = md_add_new_disk(mddev, &info);
8270 			goto unlock;
8271 		}
8272 		break;
8273 	}
8274 
8275 	/*
8276 	 * The remaining ioctls are changing the state of the
8277 	 * superblock, so we do not allow them on read-only arrays.
8278 	 */
8279 	if (!md_is_rdwr(mddev) && mddev->pers) {
8280 		if (mddev->ro != MD_AUTO_READ) {
8281 			err = -EROFS;
8282 			goto unlock;
8283 		}
8284 		mddev->ro = MD_RDWR;
8285 		sysfs_notify_dirent_safe(mddev->sysfs_state);
8286 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8287 		/* mddev_unlock will wake thread */
8288 		/* If a device failed while we were read-only, we
8289 		 * need to make sure the metadata is updated now.
8290 		 */
8291 		if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
8292 			mddev_unlock(mddev);
8293 			wait_event(mddev->sb_wait,
8294 				   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
8295 				   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
8296 			mddev_lock_nointr(mddev);
8297 		}
8298 	}
8299 
8300 	switch (cmd) {
8301 	case ADD_NEW_DISK:
8302 	{
8303 		mdu_disk_info_t info;
8304 		if (copy_from_user(&info, argp, sizeof(info)))
8305 			err = -EFAULT;
8306 		else
8307 			err = md_add_new_disk(mddev, &info);
8308 		goto unlock;
8309 	}
8310 
8311 	case CLUSTERED_DISK_NACK:
8312 		if (mddev_is_clustered(mddev))
8313 			mddev->cluster_ops->new_disk_ack(mddev, false);
8314 		else
8315 			err = -EINVAL;
8316 		goto unlock;
8317 
8318 	case HOT_ADD_DISK:
8319 		err = hot_add_disk(mddev, new_decode_dev(arg));
8320 		goto unlock;
8321 
8322 	case RUN_ARRAY:
8323 		err = do_md_run(mddev);
8324 		goto unlock;
8325 
8326 	case SET_BITMAP_FILE:
8327 		err = set_bitmap_file(mddev, (int)arg);
8328 		goto unlock;
8329 
8330 	default:
8331 		err = -EINVAL;
8332 		goto unlock;
8333 	}
8334 
8335 unlock:
8336 	if (mddev->hold_active == UNTIL_IOCTL &&
8337 	    err != -EINVAL)
8338 		mddev->hold_active = 0;
8339 
8340 	md_ioctl_need_suspend(cmd) ? mddev_unlock_and_resume(mddev) :
8341 				     mddev_unlock(mddev);
8342 
8343 out:
8344 	if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY))
8345 		clear_bit(MD_CLOSING, &mddev->flags);
8346 	return err;
8347 }
8348 #ifdef CONFIG_COMPAT
8349 static int md_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
8350 		    unsigned int cmd, unsigned long arg)
8351 {
8352 	switch (cmd) {
8353 	case HOT_REMOVE_DISK:
8354 	case HOT_ADD_DISK:
8355 	case SET_DISK_FAULTY:
8356 	case SET_BITMAP_FILE:
8357 		/* These take an integer arg, do not convert */
8358 		break;
8359 	default:
8360 		arg = (unsigned long)compat_ptr(arg);
8361 		break;
8362 	}
8363 
8364 	return md_ioctl(bdev, mode, cmd, arg);
8365 }
8366 #endif /* CONFIG_COMPAT */
8367 
8368 static int md_set_read_only(struct block_device *bdev, bool ro)
8369 {
8370 	struct mddev *mddev = bdev->bd_disk->private_data;
8371 	int err;
8372 
8373 	err = mddev_lock(mddev);
8374 	if (err)
8375 		return err;
8376 
8377 	if (!mddev->raid_disks && !mddev->external) {
8378 		err = -ENODEV;
8379 		goto out_unlock;
8380 	}
8381 
8382 	/*
8383 	 * Transitioning to read-auto need only happen for arrays that call
8384 	 * md_write_start and which are not ready for writes yet.
8385 	 */
8386 	if (!ro && mddev->ro == MD_RDONLY && mddev->pers) {
8387 		err = restart_array(mddev);
8388 		if (err)
8389 			goto out_unlock;
8390 		mddev->ro = MD_AUTO_READ;
8391 	}
8392 
8393 out_unlock:
8394 	mddev_unlock(mddev);
8395 	return err;
8396 }
8397 
8398 static int md_open(struct gendisk *disk, blk_mode_t mode)
8399 {
8400 	struct mddev *mddev;
8401 	int err;
8402 
8403 	spin_lock(&all_mddevs_lock);
8404 	mddev = mddev_get(disk->private_data);
8405 	spin_unlock(&all_mddevs_lock);
8406 	if (!mddev)
8407 		return -ENODEV;
8408 
8409 	err = mutex_lock_interruptible(&mddev->open_mutex);
8410 	if (err)
8411 		goto out;
8412 
8413 	err = -ENODEV;
8414 	if (test_bit(MD_CLOSING, &mddev->flags))
8415 		goto out_unlock;
8416 
8417 	atomic_inc(&mddev->openers);
8418 	mutex_unlock(&mddev->open_mutex);
8419 
8420 	disk_check_media_change(disk);
8421 	return 0;
8422 
8423 out_unlock:
8424 	mutex_unlock(&mddev->open_mutex);
8425 out:
8426 	mddev_put(mddev);
8427 	return err;
8428 }
8429 
8430 static void md_release(struct gendisk *disk)
8431 {
8432 	struct mddev *mddev = disk->private_data;
8433 
8434 	BUG_ON(!mddev);
8435 	atomic_dec(&mddev->openers);
8436 	mddev_put(mddev);
8437 }
8438 
8439 static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
8440 {
8441 	struct mddev *mddev = disk->private_data;
8442 	unsigned int ret = 0;
8443 
8444 	if (mddev->changed)
8445 		ret = DISK_EVENT_MEDIA_CHANGE;
8446 	mddev->changed = 0;
8447 	return ret;
8448 }
8449 
8450 static void md_free_disk(struct gendisk *disk)
8451 {
8452 	struct mddev *mddev = disk->private_data;
8453 
8454 	mddev_free(mddev);
8455 }
8456 
8457 const struct block_device_operations md_fops =
8458 {
8459 	.owner		= THIS_MODULE,
8460 	.submit_bio	= md_submit_bio,
8461 	.open		= md_open,
8462 	.release	= md_release,
8463 	.ioctl		= md_ioctl,
8464 #ifdef CONFIG_COMPAT
8465 	.compat_ioctl	= md_compat_ioctl,
8466 #endif
8467 	.getgeo		= md_getgeo,
8468 	.check_events	= md_check_events,
8469 	.set_read_only	= md_set_read_only,
8470 	.free_disk	= md_free_disk,
8471 };
8472 
8473 static int md_thread(void *arg)
8474 {
8475 	struct md_thread *thread = arg;
8476 
8477 	/*
8478 	 * md_thread is a 'system-thread', its priority should be very
8479 	 * high. We avoid resource deadlocks individually in each
8480 	 * raid personality. (RAID5 does preallocation) We also use RR and
8481 	 * the very same RT priority as kswapd, thus we will never get
8482 	 * into a priority inversion deadlock.
8483 	 *
8484 	 * we definitely have to have equal or higher priority than
8485 	 * bdflush, otherwise bdflush will deadlock if there are too
8486 	 * many dirty RAID5 blocks.
8487 	 */
8488 
8489 	allow_signal(SIGKILL);
8490 	while (!kthread_should_stop()) {
8491 
8492 		/* We need to wait INTERRUPTIBLE so that
8493 		 * we don't add to the load-average.
8494 		 * That means we need to be sure no signals are
8495 		 * pending
8496 		 */
8497 		if (signal_pending(current))
8498 			flush_signals(current);
8499 
8500 		wait_event_interruptible_timeout
8501 			(thread->wqueue,
8502 			 test_bit(THREAD_WAKEUP, &thread->flags)
8503 			 || kthread_should_stop() || kthread_should_park(),
8504 			 thread->timeout);
8505 
8506 		clear_bit(THREAD_WAKEUP, &thread->flags);
8507 		if (kthread_should_park())
8508 			kthread_parkme();
8509 		if (!kthread_should_stop())
8510 			thread->run(thread);
8511 	}
8512 
8513 	return 0;
8514 }
8515 
8516 static void md_wakeup_thread_directly(struct md_thread __rcu **thread)
8517 {
8518 	struct md_thread *t;
8519 
8520 	rcu_read_lock();
8521 	t = rcu_dereference(*thread);
8522 	if (t)
8523 		wake_up_process(t->tsk);
8524 	rcu_read_unlock();
8525 }
8526 
8527 void __md_wakeup_thread(struct md_thread __rcu *thread)
8528 {
8529 	struct md_thread *t;
8530 
8531 	t = rcu_dereference(thread);
8532 	if (t) {
8533 		pr_debug("md: waking up MD thread %s.\n", t->tsk->comm);
8534 		set_bit(THREAD_WAKEUP, &t->flags);
8535 		if (wq_has_sleeper(&t->wqueue))
8536 			wake_up(&t->wqueue);
8537 	}
8538 }
8539 EXPORT_SYMBOL(__md_wakeup_thread);
8540 
8541 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
8542 		struct mddev *mddev, const char *name)
8543 {
8544 	struct md_thread *thread;
8545 
8546 	thread = kzalloc_obj(*thread, GFP_KERNEL);
8547 	if (!thread)
8548 		return NULL;
8549 
8550 	init_waitqueue_head(&thread->wqueue);
8551 
8552 	thread->run = run;
8553 	thread->mddev = mddev;
8554 	thread->timeout = MAX_SCHEDULE_TIMEOUT;
8555 	thread->tsk = kthread_run(md_thread, thread,
8556 				  "%s_%s",
8557 				  mdname(thread->mddev),
8558 				  name);
8559 	if (IS_ERR(thread->tsk)) {
8560 		kfree(thread);
8561 		return NULL;
8562 	}
8563 	return thread;
8564 }
8565 EXPORT_SYMBOL(md_register_thread);
8566 
8567 void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp)
8568 {
8569 	struct md_thread *thread = rcu_dereference_protected(*threadp,
8570 					lockdep_is_held(&mddev->reconfig_mutex));
8571 
8572 	if (!thread)
8573 		return;
8574 
8575 	rcu_assign_pointer(*threadp, NULL);
8576 	synchronize_rcu();
8577 
8578 	pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
8579 	kthread_stop(thread->tsk);
8580 	kfree(thread);
8581 }
8582 EXPORT_SYMBOL(md_unregister_thread);
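
/*
 * Typical thread lifecycle in a personality, sketched for illustration
 * (raid1 is used as the example here):
 *
 *	rcu_assign_pointer(mddev->thread,
 *			   md_register_thread(raid1d, mddev, "raid1"));
 *	...
 *	md_wakeup_thread(mddev->thread);	// sets THREAD_WAKEUP
 *	...
 *	md_unregister_thread(mddev, &mddev->thread);
 */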
8583 
8584 void md_error(struct mddev *mddev, struct md_rdev *rdev)
8585 {
8586 	if (!rdev || test_bit(Faulty, &rdev->flags))
8587 		return;
8588 
8589 	if (!mddev->pers || !mddev->pers->error_handler)
8590 		return;
8591 	mddev->pers->error_handler(mddev, rdev);
8592 
8593 	if (mddev->pers->head.id == ID_RAID0 ||
8594 	    mddev->pers->head.id == ID_LINEAR)
8595 		return;
8596 
8597 	if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
8598 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8599 	sysfs_notify_dirent_safe(rdev->sysfs_state);
8600 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8601 	if (!test_bit(MD_BROKEN, &mddev->flags)) {
8602 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8603 		md_wakeup_thread(mddev->thread);
8604 	}
8605 	if (mddev->event_work.func)
8606 		queue_work(md_misc_wq, &mddev->event_work);
8607 	md_new_event();
8608 }
8609 EXPORT_SYMBOL(md_error);
8610 
8611 /* seq_file implementation /proc/mdstat */
8612 
8613 static void status_unused(struct seq_file *seq)
8614 {
8615 	int i = 0;
8616 	struct md_rdev *rdev;
8617 
8618 	seq_printf(seq, "unused devices: ");
8619 
8620 	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
8621 		i++;
8622 		seq_printf(seq, "%pg ", rdev->bdev);
8623 	}
8624 	if (!i)
8625 		seq_printf(seq, "<none>");
8626 
8627 	seq_printf(seq, "\n");
8628 }
8629 
8630 static void status_personalities(struct seq_file *seq)
8631 {
8632 	struct md_submodule_head *head;
8633 	unsigned long i;
8634 
8635 	seq_puts(seq, "Personalities : ");
8636 
8637 	xa_lock(&md_submodule);
8638 	xa_for_each(&md_submodule, i, head)
8639 		if (head->type == MD_PERSONALITY)
8640 			seq_printf(seq, "[%s] ", head->name);
8641 	xa_unlock(&md_submodule);
8642 
8643 	seq_puts(seq, "\n");
8644 }
8645 
8646 static int status_resync(struct seq_file *seq, struct mddev *mddev)
8647 {
8648 	sector_t max_sectors, resync, res;
8649 	unsigned long dt, db = 0;
8650 	sector_t rt, curr_mark_cnt, resync_mark_cnt;
8651 	int scale, recovery_active;
8652 	unsigned int per_milli;
8653 
8654 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8655 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8656 		max_sectors = mddev->resync_max_sectors;
8657 	else
8658 		max_sectors = mddev->dev_sectors;
8659 
8660 	resync = mddev->curr_resync;
8661 	if (resync < MD_RESYNC_ACTIVE) {
8662 		if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8663 			/* Still cleaning up */
8664 			resync = max_sectors;
8665 	} else if (resync > max_sectors) {
8666 		resync = max_sectors;
8667 	} else {
8668 		res = atomic_read(&mddev->recovery_active);
8669 		/*
8670 		 * Resync has started, but the subtraction has overflowed or
8671 		 * yielded one of the special values. Force it to active to
8672 		 * ensure the status reports an active resync.
8673 		 */
8674 		if (resync < res || resync - res < MD_RESYNC_ACTIVE)
8675 			resync = MD_RESYNC_ACTIVE;
8676 		else
8677 			resync -= res;
8678 	}
8679 
8680 	if (resync == MD_RESYNC_NONE) {
8681 		if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
8682 			struct md_rdev *rdev;
8683 
8684 			rdev_for_each(rdev, mddev)
8685 				if (rdev->raid_disk >= 0 &&
8686 				    !test_bit(Faulty, &rdev->flags) &&
8687 				    rdev->recovery_offset != MaxSector &&
8688 				    rdev->recovery_offset) {
8689 					seq_printf(seq, "\trecover=REMOTE");
8690 					return 1;
8691 				}
8692 			if (mddev->reshape_position != MaxSector)
8693 				seq_printf(seq, "\treshape=REMOTE");
8694 			else
8695 				seq_printf(seq, "\tresync=REMOTE");
8696 			return 1;
8697 		}
8698 		if (mddev->resync_offset < MaxSector) {
8699 			seq_printf(seq, "\tresync=PENDING");
8700 			return 1;
8701 		}
8702 		return 0;
8703 	}
8704 	if (resync < MD_RESYNC_ACTIVE) {
8705 		seq_printf(seq, "\tresync=DELAYED");
8706 		return 1;
8707 	}
8708 
8709 	WARN_ON(max_sectors == 0);
8710 	/* Pick 'scale' such that (resync>>scale)*1000 will fit
8711 	 * in a sector_t, and (max_sectors>>scale) will fit in a
8712 	 * u32, as those are the requirements for sector_div.
8713 	 * Thus 'scale' must be at least 10
8714 	 */
8715 	scale = 10;
8716 	if (sizeof(sector_t) > sizeof(unsigned long)) {
8717 		while (max_sectors/2 > (1ULL<<(scale+32)))
8718 			scale++;
8719 	}
8720 	res = (resync>>scale)*1000;
8721 	sector_div(res, (u32)((max_sectors>>scale)+1));
8722 
8723 	per_milli = res;
8724 	{
8725 		int i, x = per_milli/50, y = 20-x;
8726 		seq_printf(seq, "[");
8727 		for (i = 0; i < x; i++)
8728 			seq_printf(seq, "=");
8729 		seq_printf(seq, ">");
8730 		for (i = 0; i < y; i++)
8731 			seq_printf(seq, ".");
8732 		seq_printf(seq, "] ");
8733 	}
8734 	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
8735 		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8736 		    "reshape" :
8737 		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8738 		     "check" :
8739 		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8740 		      "resync" : "recovery"))),
8741 		   per_milli/10, per_milli % 10,
8742 		   (unsigned long long) resync/2,
8743 		   (unsigned long long) max_sectors/2);
8744 
8745 	/*
8746 	 * dt: time from mark until now
8747 	 * db: blocks written from mark until now
8748 	 * rt: remaining time
8749 	 *
8750 	 * rt is a sector_t, which is always 64bit now. We are keeping
8751 	 * the original algorithm, but it is not really necessary.
8752 	 *
8753 	 * Original algorithm:
8754 	 *   So we divide before multiply in case it is 32bit and close
8755 	 *   to the limit.
8756 	 *   We scale the divisor (db) by 32 to avoid losing precision
8757 	 *   near the end of resync when the number of remaining sectors
8758 	 *   is close to 'db'.
8759 	 *   We then divide rt by 32 after multiplying by db to compensate.
8760 	 *   The '+1' avoids division by zero if db is very small.
8761 	 */
8762 	dt = ((jiffies - mddev->resync_mark) / HZ);
8763 	if (!dt) dt++;
8764 
8765 	curr_mark_cnt = mddev->curr_mark_cnt;
8766 	recovery_active = atomic_read(&mddev->recovery_active);
8767 	resync_mark_cnt = mddev->resync_mark_cnt;
8768 
8769 	if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8770 		db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
8771 
8772 	rt = max_sectors - resync;    /* number of remaining sectors */
8773 	rt = div64_u64(rt, db/32+1);
8774 	rt *= dt;
8775 	rt >>= 5;
8776 
8777 	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
8778 		   ((unsigned long)rt % 60)/6);
8779 
8780 	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
8781 	return 1;
8782 }
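
/*
 * Worked example of the arithmetic above (illustrative numbers): with
 * max_sectors = 2000000, resync = 500000 and scale = 10, per_milli is
 * (500000>>10)*1000 / ((2000000>>10)+1) = 249, printed as "24.9%".  If
 * db = 64000 sectors were synced over dt = 16 seconds, then
 * rt = (1500000 / (64000/32 + 1)) * 16 >> 5 = 374 seconds, printed as
 * "finish=6.2min", and speed = 64000/2/16 = 2000K/sec.
 */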
8783 
8784 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
8785 	__acquires(&all_mddevs_lock)
8786 {
8787 	seq->poll_event = atomic_read(&md_event_count);
8788 	spin_lock(&all_mddevs_lock);
8789 
8790 	return seq_list_start_head(&all_mddevs, *pos);
8791 }
8792 
8793 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8794 {
8795 	return seq_list_next(v, &all_mddevs, pos);
8796 }
8797 
8798 static void md_seq_stop(struct seq_file *seq, void *v)
8799 	__releases(&all_mddevs_lock)
8800 {
8801 	spin_unlock(&all_mddevs_lock);
8802 }
8803 
8804 static void md_bitmap_status(struct seq_file *seq, struct mddev *mddev)
8805 {
8806 	struct md_bitmap_stats stats;
8807 	unsigned long used_pages;
8808 	unsigned long chunk_kb;
8809 	int err;
8810 
8811 	if (!md_bitmap_enabled(mddev, false))
8812 		return;
8813 
8814 	err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
8815 	if (err)
8816 		return;
8817 
8818 	chunk_kb = mddev->bitmap_info.chunksize >> 10;
8819 	used_pages = stats.pages - stats.missing_pages;
8820 
8821 	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], %lu%s chunk",
8822 		   used_pages, stats.pages, used_pages << (PAGE_SHIFT - 10),
8823 		   chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
8824 		   chunk_kb ? "KB" : "B");
8825 
8826 	if (stats.file) {
8827 		seq_puts(seq, ", file: ");
8828 		seq_file_path(seq, stats.file, " \t\n");
8829 	}
8830 
8831 	seq_putc(seq, '\n');
8832 }
8833 
8834 static int md_seq_show(struct seq_file *seq, void *v)
8835 {
8836 	struct mddev *mddev;
8837 	sector_t sectors;
8838 	struct md_rdev *rdev;
8839 
8840 	if (v == &all_mddevs) {
8841 		status_personalities(seq);
8842 		if (list_empty(&all_mddevs))
8843 			status_unused(seq);
8844 		return 0;
8845 	}
8846 
8847 	mddev = list_entry(v, struct mddev, all_mddevs);
8848 	if (!mddev_get(mddev))
8849 		return 0;
8850 
8851 	spin_unlock(&all_mddevs_lock);
8852 
8853 	/* prevent the bitmap from being freed while we look at it */
8854 	mutex_lock(&mddev->bitmap_info.mutex);
8855 
8856 	spin_lock(&mddev->lock);
8857 	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8858 		seq_printf(seq, "%s : ", mdname(mddev));
8859 		if (mddev->pers) {
8860 			if (test_bit(MD_BROKEN, &mddev->flags))
8861 				seq_printf(seq, "broken");
8862 			else
8863 				seq_printf(seq, "active");
8864 			if (mddev->ro == MD_RDONLY)
8865 				seq_printf(seq, " (read-only)");
8866 			if (mddev->ro == MD_AUTO_READ)
8867 				seq_printf(seq, " (auto-read-only)");
8868 			seq_printf(seq, " %s", mddev->pers->head.name);
8869 		} else {
8870 			seq_printf(seq, "inactive");
8871 		}
8872 
8873 		sectors = 0;
8874 		rcu_read_lock();
8875 		rdev_for_each_rcu(rdev, mddev) {
8876 			seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr);
8877 
8878 			if (test_bit(WriteMostly, &rdev->flags))
8879 				seq_printf(seq, "(W)");
8880 			if (test_bit(Journal, &rdev->flags))
8881 				seq_printf(seq, "(J)");
8882 			if (test_bit(Faulty, &rdev->flags)) {
8883 				seq_printf(seq, "(F)");
8884 				continue;
8885 			}
8886 			if (rdev->raid_disk < 0)
8887 				seq_printf(seq, "(S)"); /* spare */
8888 			if (test_bit(Replacement, &rdev->flags))
8889 				seq_printf(seq, "(R)");
8890 			sectors += rdev->sectors;
8891 		}
8892 		rcu_read_unlock();
8893 
8894 		if (!list_empty(&mddev->disks)) {
8895 			if (mddev->pers)
8896 				seq_printf(seq, "\n      %llu blocks",
8897 					   (unsigned long long)
8898 					   mddev->array_sectors / 2);
8899 			else
8900 				seq_printf(seq, "\n      %llu blocks",
8901 					   (unsigned long long)sectors / 2);
8902 		}
8903 		if (mddev->persistent) {
8904 			if (mddev->major_version != 0 ||
8905 			    mddev->minor_version != 90) {
8906 				seq_printf(seq," super %d.%d",
8907 					   mddev->major_version,
8908 					   mddev->minor_version);
8909 			}
8910 		} else if (mddev->external)
8911 			seq_printf(seq, " super external:%s",
8912 				   mddev->metadata_type);
8913 		else
8914 			seq_printf(seq, " super non-persistent");
8915 
8916 		if (mddev->pers) {
8917 			mddev->pers->status(seq, mddev);
8918 			seq_printf(seq, "\n      ");
8919 			if (mddev->pers->sync_request) {
8920 				if (status_resync(seq, mddev))
8921 					seq_printf(seq, "\n      ");
8922 			}
8923 		} else
8924 			seq_printf(seq, "\n       ");
8925 
8926 		md_bitmap_status(seq, mddev);
8927 
8928 		seq_printf(seq, "\n");
8929 	}
8930 	spin_unlock(&mddev->lock);
8931 	mutex_unlock(&mddev->bitmap_info.mutex);
8932 	spin_lock(&all_mddevs_lock);
8933 
8934 	if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
8935 		status_unused(seq);
8936 
8937 	mddev_put_locked(mddev);
8938 	return 0;
8939 }
8940 
8941 static const struct seq_operations md_seq_ops = {
8942 	.start  = md_seq_start,
8943 	.next   = md_seq_next,
8944 	.stop   = md_seq_stop,
8945 	.show   = md_seq_show,
8946 };
8947 
8948 static int md_seq_open(struct inode *inode, struct file *file)
8949 {
8950 	struct seq_file *seq;
8951 	int error;
8952 
8953 	error = seq_open(file, &md_seq_ops);
8954 	if (error)
8955 		return error;
8956 
8957 	seq = file->private_data;
8958 	seq->poll_event = atomic_read(&md_event_count);
8959 	return error;
8960 }
8961 
8962 static int md_unloading;
8963 static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
8964 {
8965 	struct seq_file *seq = filp->private_data;
8966 	__poll_t mask;
8967 
8968 	if (md_unloading)
8969 		return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
8970 	poll_wait(filp, &md_event_waiters, wait);
8971 
8972 	/* always allow read */
8973 	mask = EPOLLIN | EPOLLRDNORM;
8974 
8975 	if (seq->poll_event != atomic_read(&md_event_count))
8976 		mask |= EPOLLERR | EPOLLPRI;
8977 	return mask;
8978 }
8979 
8980 static const struct proc_ops mdstat_proc_ops = {
8981 	.proc_open	= md_seq_open,
8982 	.proc_read	= seq_read,
8983 	.proc_lseek	= seq_lseek,
8984 	.proc_release	= seq_release,
8985 	.proc_poll	= mdstat_poll,
8986 };
8987 
8988 int register_md_submodule(struct md_submodule_head *msh)
8989 {
8990 	return xa_insert(&md_submodule, msh->id, msh, GFP_KERNEL);
8991 }
8992 EXPORT_SYMBOL_GPL(register_md_submodule);
8993 
8994 void unregister_md_submodule(struct md_submodule_head *msh)
8995 {
8996 	xa_erase(&md_submodule, msh->id);
8997 }
8998 EXPORT_SYMBOL_GPL(unregister_md_submodule);
8999 
9000 int md_setup_cluster(struct mddev *mddev, int nodes)
9001 {
9002 	int ret = get_cluster_ops(mddev);
9003 
9004 	if (ret) {
9005 		request_module("md-cluster");
9006 		ret = get_cluster_ops(mddev);
9007 	}
9008 
9009 	/* ensure module won't be unloaded */
9010 	if (ret) {
9011 		pr_warn("can't find md-cluster module or get its reference.\n");
9012 		return ret;
9013 	}
9014 
9015 	ret = mddev->cluster_ops->join(mddev, nodes);
9016 	if (!ret)
9017 		mddev->safemode_delay = 0;
9018 	return ret;
9019 }
9020 
9021 void md_cluster_stop(struct mddev *mddev)
9022 {
9023 	put_cluster_ops(mddev);
9024 }
9025 
9026 static bool is_rdev_holder_idle(struct md_rdev *rdev, bool init)
9027 {
9028 	unsigned long last_events = rdev->last_events;
9029 
9030 	if (!bdev_is_partition(rdev->bdev))
9031 		return true;
9032 
9033 	/*
9034 	 * If rdev is a partition, the array is still not idle if the user
9035 	 * issues IO to other partitions of the same disk.
9036 	 */
9037 	rdev->last_events = part_stat_read_accum(rdev->bdev->bd_disk->part0,
9038 						 sectors) -
9039 			    part_stat_read_accum(rdev->bdev, sectors);
9040 
9041 	return init || rdev->last_events <= last_events;
9042 }
9043 
9044 /*
9045  * mddev is idle if all of the following have held since the last check:
9046  * 1) mddev has had no normal IO completed;
9047  * 2) mddev has no inflight normal IO;
9048  * 3) if any member disk is a partition, the other partitions have had no
9049  *    IO completed;
9050  *
9051  * Note that this check relies on IO accounting being enabled.
9052  */
9053 static bool is_mddev_idle(struct mddev *mddev, int init)
9054 {
9055 	unsigned long last_events = mddev->normal_io_events;
9056 	struct gendisk *disk;
9057 	struct md_rdev *rdev;
9058 	bool idle = true;
9059 
9060 	disk = mddev_is_dm(mddev) ? mddev->dm_gendisk : mddev->gendisk;
9061 	if (!disk)
9062 		return true;
9063 
9064 	mddev->normal_io_events = part_stat_read_accum(disk->part0, sectors);
9065 	if (!init && (mddev->normal_io_events > last_events ||
9066 		      bdev_count_inflight(disk->part0)))
9067 		idle = false;
9068 
9069 	rcu_read_lock();
9070 	rdev_for_each_rcu(rdev, mddev)
9071 		if (!is_rdev_holder_idle(rdev, init))
9072 			idle = false;
9073 	rcu_read_unlock();
9074 
9075 	return idle;
9076 }
9077 
9078 void md_done_sync(struct mddev *mddev, int blocks)
9079 {
9080 	/* another "blocks" (512byte) blocks have been synced */
9081 	atomic_sub(blocks, &mddev->recovery_active);
9082 	wake_up(&mddev->recovery_wait);
9083 }
9084 EXPORT_SYMBOL(md_done_sync);
9085 
9086 void md_sync_error(struct mddev *mddev)
9087 {
9088 	/* stop recovery, signal do_sync ... */
9089 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9090 	md_wakeup_thread(mddev->thread);
9091 }
9092 EXPORT_SYMBOL(md_sync_error);
9093 
9094 /* md_write_start(mddev, bi)
9095  * If we need to update some array metadata (e.g. 'active' flag
9096  * in superblock) before writing, schedule a superblock update
9097  * and wait for it to complete.
9100  */
9101 void md_write_start(struct mddev *mddev, struct bio *bi)
9102 {
9103 	int did_change = 0;
9104 
9105 	if (bio_data_dir(bi) != WRITE)
9106 		return;
9107 
9108 	BUG_ON(mddev->ro == MD_RDONLY);
9109 	if (mddev->ro == MD_AUTO_READ) {
9110 		/* need to switch to read/write */
9111 		mddev->ro = MD_RDWR;
9112 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9113 		md_wakeup_thread(mddev->thread);
9114 		md_wakeup_thread(mddev->sync_thread);
9115 		did_change = 1;
9116 	}
9117 	rcu_read_lock();
9118 	percpu_ref_get(&mddev->writes_pending);
9119 	smp_mb(); /* Match smp_mb in set_in_sync() */
9120 	if (mddev->safemode == 1)
9121 		mddev->safemode = 0;
9122 	/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
9123 	if (mddev->in_sync || mddev->sync_checkers) {
9124 		spin_lock(&mddev->lock);
9125 		if (mddev->in_sync) {
9126 			mddev->in_sync = 0;
9127 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
9128 			set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
9129 			md_wakeup_thread(mddev->thread);
9130 			did_change = 1;
9131 		}
9132 		spin_unlock(&mddev->lock);
9133 	}
9134 	rcu_read_unlock();
9135 	if (did_change)
9136 		sysfs_notify_dirent_safe(mddev->sysfs_state);
9137 	if (!test_bit(MD_HAS_SUPERBLOCK, &mddev->flags))
9138 		return;
9139 	wait_event(mddev->sb_wait,
9140 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
9141 }
9142 EXPORT_SYMBOL(md_write_start);
9143 
9144 /* md_write_inc can only be called when md_write_start() has
9145  * already been called at least once for the current request.
9146  * It increments the counter and is useful when a single request
9147  * is split into several parts.  Each part causes an increment and
9148  * so needs a matching md_write_end().
9149  * Unlike md_write_start(), it is safe to call md_write_inc() inside
9150  * a spinlocked region.
9151  */
9152 void md_write_inc(struct mddev *mddev, struct bio *bi)
9153 {
9154 	if (bio_data_dir(bi) != WRITE)
9155 		return;
9156 	WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev));
9157 	percpu_ref_get(&mddev->writes_pending);
9158 }
9159 EXPORT_SYMBOL(md_write_inc);
9160 
9161 void md_write_end(struct mddev *mddev)
9162 {
9163 	percpu_ref_put(&mddev->writes_pending);
9164 
9165 	if (mddev->safemode == 2)
9166 		md_wakeup_thread(mddev->thread);
9167 	else if (mddev->safemode_delay)
9168 		/* The roundup() ensures this only performs locking once
9169 		 * every ->safemode_delay jiffies
9170 		 */
9171 		mod_timer(&mddev->safemode_timer,
9172 			  roundup(jiffies, mddev->safemode_delay) +
9173 			  mddev->safemode_delay);
9174 }
9175 
9176 EXPORT_SYMBOL(md_write_end);
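
/*
 * Pairing of the write-accounting helpers above, sketched for a
 * personality that splits one incoming write into two parts (illustrative
 * only):
 *
 *	md_write_start(mddev, bio);	// may wait for the sb to go 'active'
 *	md_write_inc(mddev, bio);	// extra reference for the second part
 *	...
 *	md_write_end(mddev);		// drops the reference for part one
 *	md_write_end(mddev);		// drops the reference for part two
 */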
9177 
9178 /* This is used by raid0 and raid10 */
9179 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
9180 			struct bio *bio, sector_t start, sector_t size)
9181 {
9182 	struct bio *discard_bio = NULL;
9183 
9184 	__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, &discard_bio);
9185 	if (!discard_bio)
9186 		return;
9187 
9188 	bio_chain(discard_bio, bio);
9189 	bio_clone_blkg_association(discard_bio, bio);
9190 	mddev_trace_remap(mddev, discard_bio, bio->bi_iter.bi_sector);
9191 	submit_bio_noacct(discard_bio);
9192 }
9193 EXPORT_SYMBOL_GPL(md_submit_discard_bio);
9194 
9195 static void md_bitmap_start(struct mddev *mddev,
9196 			    struct md_io_clone *md_io_clone)
9197 {
9198 	md_bitmap_fn *fn = unlikely(md_io_clone->rw == STAT_DISCARD) ?
9199 			   mddev->bitmap_ops->start_discard :
9200 			   mddev->bitmap_ops->start_write;
9201 
9202 	if (mddev->pers->bitmap_sector)
9203 		mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
9204 					   &md_io_clone->sectors);
9205 
9206 	fn(mddev, md_io_clone->offset, md_io_clone->sectors);
9207 }
9208 
9209 static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
9210 {
9211 	md_bitmap_fn *fn = unlikely(md_io_clone->rw == STAT_DISCARD) ?
9212 			   mddev->bitmap_ops->end_discard :
9213 			   mddev->bitmap_ops->end_write;
9214 
9215 	fn(mddev, md_io_clone->offset, md_io_clone->sectors);
9216 }
9217 
9218 static void md_end_clone_io(struct bio *bio)
9219 {
9220 	struct md_io_clone *md_io_clone = bio->bi_private;
9221 	struct bio *orig_bio = md_io_clone->orig_bio;
9222 	struct mddev *mddev = md_io_clone->mddev;
9223 
9224 	if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false))
9225 		md_bitmap_end(mddev, md_io_clone);
9226 
9227 	if (bio->bi_status && !orig_bio->bi_status)
9228 		orig_bio->bi_status = bio->bi_status;
9229 
9230 	if (md_io_clone->start_time)
9231 		bio_end_io_acct(orig_bio, md_io_clone->start_time);
9232 
9233 	bio_put(bio);
9234 	bio_endio(orig_bio);
9235 	percpu_ref_put(&mddev->active_io);
9236 }
9237 
9238 static void md_clone_bio(struct mddev *mddev, struct bio **bio)
9239 {
9240 	struct block_device *bdev = (*bio)->bi_bdev;
9241 	struct md_io_clone *md_io_clone;
9242 	struct bio *clone =
9243 		bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_clone_set);
9244 
9245 	md_io_clone = container_of(clone, struct md_io_clone, bio_clone);
9246 	md_io_clone->orig_bio = *bio;
9247 	md_io_clone->mddev = mddev;
9248 	if (blk_queue_io_stat(bdev->bd_disk->queue))
9249 		md_io_clone->start_time = bio_start_io_acct(*bio);
9250 
9251 	if (bio_data_dir(*bio) == WRITE && md_bitmap_enabled(mddev, false)) {
9252 		md_io_clone->offset = (*bio)->bi_iter.bi_sector;
9253 		md_io_clone->sectors = bio_sectors(*bio);
9254 		md_io_clone->rw = op_stat_group(bio_op(*bio));
9255 		md_bitmap_start(mddev, md_io_clone);
9256 	}
9257 
9258 	clone->bi_end_io = md_end_clone_io;
9259 	clone->bi_private = md_io_clone;
9260 	*bio = clone;
9261 }
9262 
9263 void md_account_bio(struct mddev *mddev, struct bio **bio)
9264 {
9265 	percpu_ref_get(&mddev->active_io);
9266 	md_clone_bio(mddev, bio);
9267 }
9268 EXPORT_SYMBOL_GPL(md_account_bio);
9269 
9270 void md_free_cloned_bio(struct bio *bio)
9271 {
9272 	struct md_io_clone *md_io_clone = bio->bi_private;
9273 	struct bio *orig_bio = md_io_clone->orig_bio;
9274 	struct mddev *mddev = md_io_clone->mddev;
9275 
9276 	if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false))
9277 		md_bitmap_end(mddev, md_io_clone);
9278 
9279 	if (bio->bi_status && !orig_bio->bi_status)
9280 		orig_bio->bi_status = bio->bi_status;
9281 
9282 	if (md_io_clone->start_time)
9283 		bio_end_io_acct(orig_bio, md_io_clone->start_time);
9284 
9285 	bio_put(bio);
9286 	percpu_ref_put(&mddev->active_io);
9287 }
9288 EXPORT_SYMBOL_GPL(md_free_cloned_bio);
9289 
9290 /* md_allow_write(mddev)
9291  * Calling this ensures that the array is marked 'active' so that writes
9292  * may proceed without blocking.  It is important to call this before
9293  * attempting a GFP_KERNEL allocation while holding the mddev lock.
9294  * Must be called with mddev_lock held.
9295  */
9296 void md_allow_write(struct mddev *mddev)
9297 {
9298 	if (!mddev->pers)
9299 		return;
9300 	if (!md_is_rdwr(mddev))
9301 		return;
9302 	if (!mddev->pers->sync_request)
9303 		return;
9304 
9305 	spin_lock(&mddev->lock);
9306 	if (mddev->in_sync) {
9307 		mddev->in_sync = 0;
9308 		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
9309 		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
9310 		if (mddev->safemode_delay &&
9311 		    mddev->safemode == 0)
9312 			mddev->safemode = 1;
9313 		spin_unlock(&mddev->lock);
9314 		md_update_sb(mddev, 0);
9315 		sysfs_notify_dirent_safe(mddev->sysfs_state);
9316 		/* wait for the dirty state to be recorded in the metadata */
9317 		wait_event(mddev->sb_wait,
9318 			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
9319 	} else
9320 		spin_unlock(&mddev->lock);
9321 }
9322 EXPORT_SYMBOL_GPL(md_allow_write);
9323 
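/*
 * Number of sectors the chosen sync action must cover: resync, check, repair
 * and reshape span the whole array (resync_max_sectors), while recovery only
 * spans a single member device (dev_sectors).  The mismatch counter is reset
 * for the actions that compare copies.
 */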
9324 static sector_t md_sync_max_sectors(struct mddev *mddev,
9325 				    enum sync_action action)
9326 {
9327 	switch (action) {
9328 	case ACTION_RESYNC:
9329 	case ACTION_CHECK:
9330 	case ACTION_REPAIR:
9331 		atomic64_set(&mddev->resync_mismatches, 0);
9332 		fallthrough;
9333 	case ACTION_RESHAPE:
9334 		return mddev->resync_max_sectors;
9335 	case ACTION_RECOVER:
9336 		return mddev->dev_sectors;
9337 	default:
9338 		return 0;
9339 	}
9340 }
9341 
9342 /*
9343  * If lazy recovery is requested and all rdevs are in sync, select the rdev with
9344  * the highest index to perform recovery on, to build the initial xor data;
9345  * this matches the behaviour of the old bitmap.
9346  */
9347 static bool mddev_select_lazy_recover_rdev(struct mddev *mddev)
9348 {
9349 	struct md_rdev *recover_rdev = NULL;
9350 	struct md_rdev *rdev;
9351 	bool ret = false;
9352 
9353 	rcu_read_lock();
9354 	rdev_for_each_rcu(rdev, mddev) {
9355 		if (rdev->raid_disk < 0)
9356 			continue;
9357 
9358 		if (test_bit(Faulty, &rdev->flags) ||
9359 		    !test_bit(In_sync, &rdev->flags))
9360 			break;
9361 
9362 		if (!recover_rdev || recover_rdev->raid_disk < rdev->raid_disk)
9363 			recover_rdev = rdev;
9364 	}
9365 
9366 	if (recover_rdev) {
9367 		clear_bit(In_sync, &recover_rdev->flags);
9368 		ret = true;
9369 	}
9370 
9371 	rcu_read_unlock();
9372 	return ret;
9373 }
9374 
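/*
 * Starting sector for the chosen sync action: check/repair honour the
 * user-set resync_min; a plain resync resumes from the resync_offset
 * checkpoint when there is no bitmap, and starts from 0 with one, relying on
 * the bitmap to skip in-sync regions; recovery resumes from the lowest
 * recovery_offset of any rdev that needs it.
 */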
9375 static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
9376 {
9377 	sector_t start = 0;
9378 	struct md_rdev *rdev;
9379 
9380 	switch (action) {
9381 	case ACTION_CHECK:
9382 	case ACTION_REPAIR:
9383 		return mddev->resync_min;
9384 	case ACTION_RESYNC:
9385 		if (!mddev->bitmap)
9386 			return mddev->resync_offset;
9387 		return 0;
9388 	case ACTION_RESHAPE:
9389 		/*
9390 		 * If the original node aborts reshaping, we continue the
9391 		 * reshape, so set the position again to avoid restarting
9392 		 * the reshape from the very beginning.
9393 		 */
9394 		if (mddev_is_clustered(mddev) &&
9395 		    mddev->reshape_position != MaxSector)
9396 			return mddev->reshape_position;
9397 		return 0;
9398 	case ACTION_RECOVER:
9399 		start = MaxSector;
9400 		rcu_read_lock();
9401 		rdev_for_each_rcu(rdev, mddev)
9402 			if (rdev_needs_recovery(rdev, start))
9403 				start = rdev->recovery_offset;
9404 		rcu_read_unlock();
9405 
9406 		/*
9407 		 * If there are no spares and a raid456 lazy initial recovery
9408 		 * is requested, pick an in-sync rdev and recover from sector 0.
9409 		 */
9410 		if (test_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery) &&
9411 		    start == MaxSector && mddev_select_lazy_recover_rdev(mddev))
9412 			start = 0;
9413 
9414 		/* If there is a bitmap, we need to make sure all
9415 		 * writes that started before we added a spare
9416 		 * complete before we start doing a recovery.
9417 		 * Otherwise the write might complete and (via
9418 		 * bitmap_endwrite) set a bit in the bitmap after the
9419 		 * recovery has checked that bit and skipped that
9420 		 * region.
9421 		 */
9422 		if (mddev->bitmap) {
9423 			mddev->pers->quiesce(mddev, 1);
9424 			mddev->pers->quiesce(mddev, 0);
9425 		}
9426 		return start;
9427 	default:
9428 		return MaxSector;
9429 	}
9430 }
9431 
9432 static bool sync_io_within_limit(struct mddev *mddev)
9433 {
9434 	/*
9435 	 * For raid456, sync IO is stripe(4k) per IO, for other levels, it's
9436 	 * RESYNC_PAGES(64k) per IO.
9437 	 */
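	/*
	 * recovery_active counts sectors: 4k is 8 sectors and 64k is 128
	 * sectors, so in both cases the check below caps the number of
	 * inflight sync IOs at roughly sync_io_depth(mddev).
	 */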
9438 	return atomic_read(&mddev->recovery_active) <
9439 	       (raid_is_456(mddev) ? 8 : 128) * sync_io_depth(mddev);
9440 }
9441 
9442 /*
9443  * Update sync offset and mddev status when sync completes
9444  */
9445 static void md_finish_sync(struct mddev *mddev, enum sync_action action)
9446 {
9447 	struct md_rdev *rdev;
9448 
9449 	switch (action) {
9450 	case ACTION_RESYNC:
9451 	case ACTION_REPAIR:
9452 		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9453 			mddev->curr_resync = MaxSector;
9454 		mddev->resync_offset = mddev->curr_resync;
9455 		break;
9456 	case ACTION_RECOVER:
9457 		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9458 			mddev->curr_resync = MaxSector;
9459 		rcu_read_lock();
9460 		rdev_for_each_rcu(rdev, mddev)
9461 			if (mddev->delta_disks >= 0 &&
9462 			    rdev_needs_recovery(rdev, mddev->curr_resync))
9463 				rdev->recovery_offset = mddev->curr_resync;
9464 		rcu_read_unlock();
9465 		break;
9466 	case ACTION_RESHAPE:
9467 		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9468 		    mddev->delta_disks > 0 &&
9469 		    mddev->pers->finish_reshape &&
9470 		    mddev->pers->size &&
9471 		    !mddev_is_dm(mddev)) {
9472 			mddev_lock_nointr(mddev);
9473 			md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
9474 			mddev_unlock(mddev);
9475 			if (!mddev_is_clustered(mddev))
9476 				set_capacity_and_notify(mddev->gendisk,
9477 							mddev->array_sectors);
9478 		}
9479 		if (mddev->pers->finish_reshape)
9480 			mddev->pers->finish_reshape(mddev);
9481 		break;
9482 	/* Nothing more to update for check, or for unknown actions. */
9483 	case ACTION_CHECK:
9484 	default:
9485 		break;
9486 	}
9487 }
9488 
9489 #define SYNC_MARKS	10
9490 #define SYNC_MARK_STEP	(3*HZ)
9491 #define UPDATE_FREQUENCY (5*60*HZ)
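/*
 * Speed accounting in md_do_sync(): a sliding window of SYNC_MARKS
 * (jiffies, sectors) samples, stepped every SYNC_MARK_STEP, is used to
 * compute the current sync speed, while UPDATE_FREQUENCY bounds how often
 * the completion checkpoint is written out.
 */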
9492 void md_do_sync(struct md_thread *thread)
9493 {
9494 	struct mddev *mddev = thread->mddev;
9495 	struct mddev *mddev2;
9496 	unsigned int currspeed = 0, window;
9497 	sector_t max_sectors, j, io_sectors, recovery_done;
9498 	unsigned long mark[SYNC_MARKS];
9499 	unsigned long update_time;
9500 	sector_t mark_cnt[SYNC_MARKS];
9501 	int last_mark, m;
9502 	sector_t last_check;
9503 	int skipped = 0;
9504 	enum sync_action action;
9505 	const char *desc;
9506 	struct blk_plug plug;
9507 	int ret;
9508 
9509 	/* just in case the thread restarts... */
9510 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
9511 		return;
9512 
9513 	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9514 		goto skip;
9515 
9516 	if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
9517 	    !md_is_rdwr(mddev)) {/* never try to sync a read-only array */
9518 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9519 		goto skip;
9520 	}
9521 
9522 	if (mddev_is_clustered(mddev)) {
9523 		ret = mddev->cluster_ops->resync_start(mddev);
9524 		if (ret)
9525 			goto skip;
9526 
9527 		set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
9528 		if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
9529 			test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
9530 			test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
9531 		     && ((unsigned long long)mddev->curr_resync_completed
9532 			 < (unsigned long long)mddev->resync_max_sectors))
9533 			goto skip;
9534 	}
9535 
9536 	action = md_sync_action(mddev);
9537 	if (action == ACTION_FROZEN || action == ACTION_IDLE) {
9538 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9539 		goto skip;
9540 	}
9541 
9542 	desc = md_sync_action_name(action);
9543 	mddev->last_sync_action = action;
9544 
9545 	/*
9546 	 * Before starting a resync we must have set curr_resync to
9547 	 * MD_RESYNC_DELAYED, and then checked that every "conflicting" array
9548 	 * has curr_resync less than ours.  When we find one that is the same
9549 	 * or higher we wait on resync_wait.  To avoid deadlock, we reduce
9550 	 * curr_resync to MD_RESYNC_YIELDED if we choose to yield (decided
9551 	 * arbitrarily by comparing mddev addresses).  This means we have to
9552 	 * start checking from the beginning again.
9553 	 */
9554 	if (mddev_is_clustered(mddev))
9555 		mddev->cluster_ops->resync_start_notify(mddev);
9556 	do {
9557 		int mddev2_minor = -1;
9558 		mddev->curr_resync = MD_RESYNC_DELAYED;
9559 
9560 	try_again:
9561 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9562 			goto skip;
9563 		spin_lock(&all_mddevs_lock);
9564 		list_for_each_entry(mddev2, &all_mddevs, all_mddevs) {
9565 			if (test_bit(MD_DELETED, &mddev2->flags))
9566 				continue;
9567 			if (mddev2 == mddev)
9568 				continue;
9569 			if (!mddev->parallel_resync
9570 			&&  mddev2->curr_resync
9571 			&&  match_mddev_units(mddev, mddev2)) {
9572 				DEFINE_WAIT(wq);
9573 				if (mddev < mddev2 &&
9574 				    mddev->curr_resync == MD_RESYNC_DELAYED) {
9575 					/* arbitrarily yield */
9576 					mddev->curr_resync = MD_RESYNC_YIELDED;
9577 					wake_up(&resync_wait);
9578 				}
9579 				if (mddev > mddev2 &&
9580 				    mddev->curr_resync == MD_RESYNC_YIELDED)
9581 					/* no need to wait here, we can wait the next
9582 					 * time 'round when curr_resync == MD_RESYNC_DELAYED
9583 					 */
9584 					continue;
9585 				/* We need to wait 'interruptible' so as not to
9586 				 * contribute to the load average, and not to
9587 				 * be caught by 'softlockup'
9588 				 */
9589 				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
9590 				if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9591 				    mddev2->curr_resync >= mddev->curr_resync) {
9592 					if (mddev2_minor != mddev2->md_minor) {
9593 						mddev2_minor = mddev2->md_minor;
9594 						pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
9595 							desc, mdname(mddev),
9596 							mdname(mddev2));
9597 					}
9598 					spin_unlock(&all_mddevs_lock);
9599 
9600 					if (signal_pending(current))
9601 						flush_signals(current);
9602 					schedule();
9603 					finish_wait(&resync_wait, &wq);
9604 					goto try_again;
9605 				}
9606 				finish_wait(&resync_wait, &wq);
9607 			}
9608 		}
9609 		spin_unlock(&all_mddevs_lock);
9610 	} while (mddev->curr_resync < MD_RESYNC_DELAYED);
9611 
9612 	max_sectors = md_sync_max_sectors(mddev, action);
9613 	j = md_sync_position(mddev, action);
9614 
9615 	pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
9616 	pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
9617 	pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
9618 		 speed_max(mddev), desc);
9619 
9620 	is_mddev_idle(mddev, 1); /* this initializes IO event counters */
9621 
9622 	io_sectors = 0;
9623 	for (m = 0; m < SYNC_MARKS; m++) {
9624 		mark[m] = jiffies;
9625 		mark_cnt[m] = io_sectors;
9626 	}
9627 	last_mark = 0;
9628 	mddev->resync_mark = mark[last_mark];
9629 	mddev->resync_mark_cnt = mark_cnt[last_mark];
9630 
9631 	/*
9632 	 * Tune reconstruction:
9633 	 */
9634 	window = 32 * (PAGE_SIZE / 512);
9635 	pr_debug("md: using %dk window, over a total of %lluk.\n",
9636 		 window/2, (unsigned long long)max_sectors/2);
9637 
9638 	atomic_set(&mddev->recovery_active, 0);
9639 	last_check = 0;
9640 
9641 	if (j >= MD_RESYNC_ACTIVE) {
9642 		pr_debug("md: resuming %s of %s from checkpoint.\n",
9643 			 desc, mdname(mddev));
9644 		mddev->curr_resync = j;
9645 	} else
9646 		mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */
9647 	mddev->curr_resync_completed = j;
9648 	sysfs_notify_dirent_safe(mddev->sysfs_completed);
9649 	md_new_event();
9650 	update_time = jiffies;
9651 
9652 	blk_start_plug(&plug);
9653 	while (j < max_sectors) {
9654 		sector_t sectors;
9655 
9656 		skipped = 0;
9657 
9658 		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9659 		    ((mddev->curr_resync > mddev->curr_resync_completed &&
9660 		      (mddev->curr_resync - mddev->curr_resync_completed)
9661 		      > (max_sectors >> 4)) ||
9662 		     time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
9663 		     (j - mddev->curr_resync_completed)*2
9664 		     >= mddev->resync_max - mddev->curr_resync_completed ||
9665 		     mddev->curr_resync_completed > mddev->resync_max
9666 			    )) {
9667 			/* time to update curr_resync_completed */
9668 			wait_event(mddev->recovery_wait,
9669 				   atomic_read(&mddev->recovery_active) == 0);
9670 			mddev->curr_resync_completed = j;
9671 			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
9672 			    j > mddev->resync_offset)
9673 				mddev->resync_offset = j;
9674 			update_time = jiffies;
9675 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
9676 			sysfs_notify_dirent_safe(mddev->sysfs_completed);
9677 		}
9678 
9679 		while (j >= mddev->resync_max &&
9680 		       !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9681 			/* As this condition is controlled by user-space,
9682 			 * we can block indefinitely, so use '_interruptible'
9683 			 * to avoid triggering warnings.
9684 			 */
9685 			flush_signals(current); /* just in case */
9686 			wait_event_interruptible(mddev->recovery_wait,
9687 						 mddev->resync_max > j
9688 						 || test_bit(MD_RECOVERY_INTR,
9689 							     &mddev->recovery));
9690 		}
9691 
9692 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9693 			break;
9694 
9695 		if (mddev->bitmap_ops && mddev->bitmap_ops->skip_sync_blocks) {
9696 			sectors = mddev->bitmap_ops->skip_sync_blocks(mddev, j);
9697 			if (sectors)
9698 				goto update;
9699 		}
9700 
9701 		sectors = mddev->pers->sync_request(mddev, j, max_sectors,
9702 						    &skipped);
9703 		if (sectors == 0) {
9704 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9705 			break;
9706 		}
9707 
9708 		if (!skipped) { /* actual IO requested */
9709 			io_sectors += sectors;
9710 			atomic_add(sectors, &mddev->recovery_active);
9711 		}
9712 
9713 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9714 			break;
9715 
9716 update:
9717 		j += sectors;
9718 		if (j > max_sectors)
9719 			/* when skipping, extra large numbers can be returned. */
9720 			j = max_sectors;
9721 		if (j >= MD_RESYNC_ACTIVE)
9722 			mddev->curr_resync = j;
9723 		mddev->curr_mark_cnt = io_sectors;
9724 		if (last_check == 0)
9725 			/* this is the earliest that rebuild will be
9726 			 * visible in /proc/mdstat
9727 			 */
9728 			md_new_event();
9729 
9730 		if (last_check + window > io_sectors || j == max_sectors)
9731 			continue;
9732 
9733 		last_check = io_sectors;
9734 	repeat:
9735 		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {
9736 			/* step marks */
9737 			int next = (last_mark+1) % SYNC_MARKS;
9738 
9739 			mddev->resync_mark = mark[next];
9740 			mddev->resync_mark_cnt = mark_cnt[next];
9741 			mark[next] = jiffies;
9742 			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
9743 			last_mark = next;
9744 		}
9745 
9746 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9747 			break;
9748 
9749 		/*
9750 		 * we only exit this loop if we are slower than
9751 		 * the 'hard' speed limit, or the system was IO-idle for
9752 		 * a jiffy.
9753 		 * the system might be non-idle CPU-wise, but we only care
9754 		 * about not overloading the IO subsystem. (things like an
9755 		 * e2fsck being done on the RAID array should execute fast)
9756 		 */
9757 		cond_resched();
9758 
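		/*
		 * currspeed is in KiB/sec: sectors completed since the last
		 * mark, halved to convert to KiB, divided by the elapsed
		 * seconds (the +1 terms avoid division by zero).
		 */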
9759 		recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
9760 		currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt)) / 2
9761 			/ ((jiffies - mddev->resync_mark) / HZ + 1) + 1;
9762 
9763 		if (currspeed > speed_min(mddev)) {
9764 			if (currspeed > speed_max(mddev)) {
9765 				msleep(500);
9766 				goto repeat;
9767 			}
9768 			if (!sync_io_within_limit(mddev) &&
9769 			    !is_mddev_idle(mddev, 0)) {
9770 				/*
9771 				 * Give other IO more of a chance.
9772 				 * The faster the devices, the less we wait.
9773 				 */
9774 				wait_event(mddev->recovery_wait,
9775 					   !atomic_read(&mddev->recovery_active));
9776 			}
9777 		}
9778 	}
9779 	pr_info("md: %s: %s %s.\n", mdname(mddev), desc,
9780 		test_bit(MD_RECOVERY_INTR, &mddev->recovery)
9781 		? "interrupted" : "done");
9782 	/*
9783 	 * this also signals 'finished resyncing' to md_stop
9784 	 */
9785 	blk_finish_plug(&plug);
9786 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
9787 
9788 	if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9789 	    mddev->curr_resync >= MD_RESYNC_ACTIVE) {
9790 		/* All sync IO completes after recovery_active becomes 0 */
9791 		mddev->curr_resync_completed = mddev->curr_resync;
9792 		sysfs_notify_dirent_safe(mddev->sysfs_completed);
9793 	}
9794 	mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped);
9795 
9796 	if (mddev->curr_resync > MD_RESYNC_ACTIVE)
9797 		md_finish_sync(mddev, action);
9798  skip:
9799 	/* Set CHANGE_PENDING here since another update may be needed,
9800 	 * so other nodes are informed. It should be harmless for normal
9801 	 * (non-clustered) raid. */
9802 	set_mask_bits(&mddev->sb_flags, 0,
9803 		      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
9804 	spin_lock(&mddev->lock);
9805 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9806 		/* We completed so min/max setting can be forgotten if used. */
9807 		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9808 			mddev->resync_min = 0;
9809 		mddev->resync_max = MaxSector;
9810 	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9811 		mddev->resync_min = mddev->curr_resync_completed;
9812 	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
9813 	mddev->curr_resync = MD_RESYNC_NONE;
9814 	spin_unlock(&mddev->lock);
9815 
9816 	wake_up(&resync_wait);
9817 	md_wakeup_thread(mddev->thread);
9818 	return;
9819 }
9820 EXPORT_SYMBOL_GPL(md_do_sync);
9821 
9822 static bool rdev_removeable(struct md_rdev *rdev)
9823 {
9824 	/* rdev is not used. */
9825 	if (rdev->raid_disk < 0)
9826 		return false;
9827 
9828 	/* There are still inflight io, don't remove this rdev. */
9829 	/* There is still inflight IO, don't remove this rdev. */
9830 		return false;
9831 
9832 	/*
9833 	 * An error occurred but has not yet been acknowledged by the metadata
9834 	 * handler, don't remove this rdev.
9835 	 */
9836 	if (test_bit(Blocked, &rdev->flags))
9837 		return false;
9838 
9839 	/* A Faulty rdev is not used, so it's safe to remove it. */
9840 	if (test_bit(Faulty, &rdev->flags))
9841 		return true;
9842 
9843 	/* Journal disk can only be removed if it's faulty. */
9844 	if (test_bit(Journal, &rdev->flags))
9845 		return false;
9846 
9847 	/*
9848 	 * 'In_sync' is cleared while 'raid_disk' is valid, which means
9849 	 * replacement has just become active from pers->spare_active(), and
9850 	 * then pers->hot_remove_disk() will replace this rdev with replacement.
9851 	 */
9852 	if (!test_bit(In_sync, &rdev->flags))
9853 		return true;
9854 
9855 	return false;
9856 }
9857 
9858 static bool rdev_is_spare(struct md_rdev *rdev)
9859 {
9860 	return !test_bit(Candidate, &rdev->flags) && rdev->raid_disk >= 0 &&
9861 	       !test_bit(In_sync, &rdev->flags) &&
9862 	       !test_bit(Journal, &rdev->flags) &&
9863 	       !test_bit(Faulty, &rdev->flags);
9864 }
9865 
9866 static bool rdev_addable(struct md_rdev *rdev)
9867 {
9868 	struct mddev *mddev;
9869 
9870 	mddev = READ_ONCE(rdev->mddev);
9871 	if (!mddev)
9872 		return false;
9873 
9874 	/* rdev is already used, don't add it again. */
9875 	if (test_bit(Candidate, &rdev->flags) || rdev->raid_disk >= 0 ||
9876 	    test_bit(Faulty, &rdev->flags))
9877 		return false;
9878 
9879 	/* Allow to add journal disk. */
9880 	if (test_bit(Journal, &rdev->flags))
9881 		return true;
9882 
9883 	/* Allow to add if array is read-write. */
9884 	if (md_is_rdwr(mddev))
9885 		return true;
9886 
9887 	/*
9888 	 * For a read-only array, only allow re-adding an rdev. And if a
9889 	 * bitmap is used, don't allow re-adding an rdev that is too old.
9890 	 */
9891 	if (rdev->saved_raid_disk >= 0 && !test_bit(Bitmap_sync, &rdev->flags))
9892 		return true;
9893 
9894 	return false;
9895 }
9896 
9897 static bool md_spares_need_change(struct mddev *mddev)
9898 {
9899 	struct md_rdev *rdev;
9900 
9901 	rcu_read_lock();
9902 	rdev_for_each_rcu(rdev, mddev) {
9903 		if (rdev_removeable(rdev) || rdev_addable(rdev)) {
9904 			rcu_read_unlock();
9905 			return true;
9906 		}
9907 	}
9908 	rcu_read_unlock();
9909 	return false;
9910 }
9911 
9912 static int remove_spares(struct mddev *mddev, struct md_rdev *this)
9913 {
9914 	struct md_rdev *rdev;
9915 	int removed = 0;
9916 
9917 	rdev_for_each(rdev, mddev) {
9918 		if ((this == NULL || rdev == this) && rdev_removeable(rdev) &&
9919 		    !mddev->pers->hot_remove_disk(mddev, rdev)) {
9920 			sysfs_unlink_rdev(mddev, rdev);
9921 			rdev->saved_raid_disk = rdev->raid_disk;
9922 			rdev->raid_disk = -1;
9923 			removed++;
9924 		}
9925 	}
9926 
9927 	if (removed && mddev->kobj.sd)
9928 		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9929 
9930 	return removed;
9931 }
9932 
9933 static int remove_and_add_spares(struct mddev *mddev,
9934 				 struct md_rdev *this)
9935 {
9936 	struct md_rdev *rdev;
9937 	int spares = 0;
9938 	int removed = 0;
9939 
9940 	if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
9941 		/* Mustn't remove devices when resync thread is running */
9942 		return 0;
9943 
9944 	removed = remove_spares(mddev, this);
9945 	if (this && removed)
9946 		goto no_add;
9947 
9948 	rdev_for_each(rdev, mddev) {
9949 		if (this && this != rdev)
9950 			continue;
9951 		if (rdev_is_spare(rdev))
9952 			spares++;
9953 		if (!rdev_addable(rdev))
9954 			continue;
9955 		if (!test_bit(Journal, &rdev->flags))
9956 			rdev->recovery_offset = 0;
9957 		if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
9958 			/* failure here is OK */
9959 			sysfs_link_rdev(mddev, rdev);
9960 			if (!test_bit(Journal, &rdev->flags))
9961 				spares++;
9962 			md_new_event();
9963 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9964 		}
9965 	}
9966 no_add:
9967 	if (removed)
9968 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9969 	return spares;
9970 }
9971 
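/*
 * Pick the next sync action in strict priority order: resume an interrupted
 * reshape first, then an interrupted resync, then recovery onto spares (or a
 * lazy initial recovery); a user-requested resync/check/repair is left for
 * md_do_sync() to refine.  Returns true if a sync thread should be started.
 */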
9972 static bool md_choose_sync_action(struct mddev *mddev, int *spares)
9973 {
9974 	/* Check if reshape is in progress first. */
9975 	if (mddev->reshape_position != MaxSector) {
9976 		if (mddev->pers->check_reshape == NULL ||
9977 		    mddev->pers->check_reshape(mddev) != 0)
9978 			return false;
9979 
9980 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9981 		clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9982 		clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
9983 		return true;
9984 	}
9985 
9986 	/* Check if resync is in progress. */
9987 	if (mddev->resync_offset < MaxSector) {
9988 		remove_spares(mddev, NULL);
9989 		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9990 		clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9991 		clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
9992 		return true;
9993 	}
9994 
9995 	/*
9996 	 * Remove any failed drives, then add spares if possible. Spares are
9997 	 * also removed and re-added, to allow the personality to fail the
9998 	 * re-add.
9999 	 */
10000 	*spares = remove_and_add_spares(mddev, NULL);
10001 	if (*spares || test_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery)) {
10002 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
10003 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
10004 		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
10005 
10006 		/* Start new recovery. */
10007 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
10008 		return true;
10009 	}
10010 
10011 	/* Delay to choose resync/check/repair in md_do_sync(). */
10012 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
10013 		return true;
10014 
10015 	/* Nothing to be done */
10016 	return false;
10017 }
10018 
10019 static void md_start_sync(struct work_struct *ws)
10020 {
10021 	struct mddev *mddev = container_of(ws, struct mddev, sync_work);
10022 	int spares = 0;
10023 	bool suspend = false;
10024 	char *name;
10025 
10026 	/*
10027 	 * If reshape is still in progress, spares won't be added or removed
10028 	 * from conf until reshape is done.
10029 	 */
10030 	if (mddev->reshape_position == MaxSector &&
10031 	    md_spares_need_change(mddev)) {
10032 		suspend = true;
10033 		mddev_suspend(mddev, false);
10034 	}
10035 
10036 	mddev_lock_nointr(mddev);
10037 	if (!md_is_rdwr(mddev)) {
10038 		/*
10039 		 * On a read-only array we can:
10040 		 * - remove failed devices
10041 		 * - add already-in_sync devices if the array itself is in-sync.
10042 		 * As we only add devices that are already in-sync, we can
10043 		 * activate the spares immediately.
10044 		 */
10045 		remove_and_add_spares(mddev, NULL);
10046 		goto not_running;
10047 	}
10048 
10049 	if (!md_choose_sync_action(mddev, &spares))
10050 		goto not_running;
10051 
10052 	if (!mddev->pers->sync_request)
10053 		goto not_running;
10054 
10055 	/*
10056 	 * We are adding a device or devices to an array which has the bitmap
10057 	 * stored on all devices. So make sure all bitmap pages get written.
10058 	 */
10059 	if (spares && md_bitmap_enabled(mddev, true))
10060 		mddev->bitmap_ops->write_all(mddev);
10061 
10062 	name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
10063 			"reshape" : "resync";
10064 	rcu_assign_pointer(mddev->sync_thread,
10065 			   md_register_thread(md_do_sync, mddev, name));
10066 	if (!mddev->sync_thread) {
10067 		pr_warn("%s: could not start resync thread...\n",
10068 			mdname(mddev));
10069 		/* leave the spares where they are, it shouldn't hurt */
10070 		goto not_running;
10071 	}
10072 
10073 	mddev_unlock(mddev);
10074 	/*
10075 	 * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
10076 	 * not set it again. Otherwise, we may cause issue like this one:
10077 	 *     https://bugzilla.kernel.org/show_bug.cgi?id=218200
10078 	 * Therefore, use __mddev_resume(mddev, false).
10079 	 */
10080 	if (suspend)
10081 		__mddev_resume(mddev, false);
10082 	md_wakeup_thread(mddev->sync_thread);
10083 	sysfs_notify_dirent_safe(mddev->sysfs_action);
10084 	md_new_event();
10085 	return;
10086 
10087 not_running:
10088 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
10089 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
10090 	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
10091 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
10092 	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
10093 	mddev_unlock(mddev);
10094 	/*
10095 	 * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
10096 	 * not set it again. Otherwise, we may cause issue like this one:
10097 	 *     https://bugzilla.kernel.org/show_bug.cgi?id=218200
10098 	 * Therefore, use __mddev_resume(mddev, false).
10099 	 */
10100 	if (suspend)
10101 		__mddev_resume(mddev, false);
10102 
10103 	wake_up(&resync_wait);
10104 	if (test_and_clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
10105 	    mddev->sysfs_action)
10106 		sysfs_notify_dirent_safe(mddev->sysfs_action);
10107 }
10108 
10109 static void unregister_sync_thread(struct mddev *mddev)
10110 {
10111 	if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
10112 		/* resync/recovery still happening */
10113 		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
10114 		return;
10115 	}
10116 
10117 	if (WARN_ON_ONCE(!mddev->sync_thread))
10118 		return;
10119 
10120 	md_reap_sync_thread(mddev);
10121 }
10122 
10123 static bool md_should_do_recovery(struct mddev *mddev)
10124 {
10125 	/*
10126 	 * As long as one of the following flags is set,
10127 	 * recovery work needs to be started or cleaned up.
10128 	 */
10129 	if (test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
10130 	    test_bit(MD_RECOVERY_DONE, &mddev->recovery))
10131 		return true;
10132 
10133 	/*
10134 	 * If no flags are set and it is in read-only status,
10135 	 * there is nothing to do.
10136 	 */
10137 	if (!md_is_rdwr(mddev))
10138 		return false;
10139 
10140 	/*
10141 	 * MD_SB_CHANGE_PENDING indicates that the array is switching from clean to
10142 	 * active, and no action is needed for now.
10143 	 * All other MD_SB_* flags require a superblock update.
10144 	 */
10145 	if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING))
10146 		return true;
10147 
10148 	/*
10149 	 * If the array is not using external metadata and there has been no data
10150 	 * written for some time, then the array's status needs to be set to
10151 	 * in_sync.
10152 	 */
10153 	if (mddev->external == 0 && mddev->safemode == 1)
10154 		return true;
10155 
10156 	/*
10157 	 * When the system is about to restart or the process receives a signal,
10158 	 * the array needs to be synchronized as soon as possible.
10159 	 * Once the data synchronization has completed, the array status needs
10160 	 * to be changed to in_sync.
10161 	 */
10162 	if (mddev->safemode == 2 && !mddev->in_sync &&
10163 	    mddev->resync_offset == MaxSector)
10164 		return true;
10165 
10166 	return false;
10167 }
10168 
10169 /*
10170  * This routine is regularly called by all per-raid-array threads to
10171  * deal with generic issues like resync and super-block update.
10172  * Raid personalities that don't have a thread (linear/raid0) do not
10173  * need this as they never do any recovery or update the superblock.
10174  *
10175  * It does not do any resync itself, but rather "forks" off other threads
10176  * to do that as needed.
10177  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
10178  * "->recovery" and create a thread at ->sync_thread.
10179  * When the thread finishes it sets MD_RECOVERY_DONE
10180  * and wakeups up this thread which will reap the thread and finish up.
10181  * and wakes up this thread, which will reap the sync thread and finish up.
10182  *
10183  * The overall approach is:
10184  *  1/ if the superblock needs updating, update it.
10185  *  2/ If a recovery thread is running, don't do anything else.
10186  *  3/ If recovery has finished, clean up, possibly marking spares active.
10187  *  4/ If there are any faulty devices, remove them.
10188  *  5/ If array is degraded, try to add spares devices
10189  *  5/ If the array is degraded, try to add spare devices.
10190  */
10191 void md_check_recovery(struct mddev *mddev)
10192 {
10193 	if (md_bitmap_enabled(mddev, false) && mddev->bitmap_ops->daemon_work)
10194 		mddev->bitmap_ops->daemon_work(mddev);
10195 
10196 	if (signal_pending(current)) {
10197 		if (mddev->pers->sync_request && !mddev->external) {
10198 			pr_debug("md: %s in immediate safe mode\n",
10199 				 mdname(mddev));
10200 			mddev->safemode = 2;
10201 		}
10202 		flush_signals(current);
10203 	}
10204 
10205 	if (!md_should_do_recovery(mddev))
10206 		return;
10207 
10208 	if (mddev_trylock(mddev)) {
10209 		bool try_set_sync = mddev->safemode != 0;
10210 
10211 		if (!mddev->external && mddev->safemode == 1)
10212 			mddev->safemode = 0;
10213 
10214 		if (!md_is_rdwr(mddev)) {
10215 			struct md_rdev *rdev;
10216 
10217 			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
10218 				unregister_sync_thread(mddev);
10219 				goto unlock;
10220 			}
10221 
10222 			if (!mddev->external && mddev->in_sync)
10223 				/*
10224 				 * 'Blocked' flag not needed as failed devices
10225 				 * will be recorded if the array is switched to read/write.
10226 				 * Leaving it set will prevent the device
10227 				 * from being removed.
10228 				 */
10229 				rdev_for_each(rdev, mddev)
10230 					clear_bit(Blocked, &rdev->flags);
10231 
10232 			/*
10233 			 * There is no thread, but we need to call
10234 			 * ->spare_active and clear saved_raid_disk
10235 			 */
10236 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
10237 			md_reap_sync_thread(mddev);
10238 
10239 			/*
10240 			 * Let md_start_sync() remove and add rdevs to the
10241 			 * array.
10242 			 */
10243 			if (md_spares_need_change(mddev)) {
10244 				set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
10245 				queue_work(md_misc_wq, &mddev->sync_work);
10246 			}
10247 
10248 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
10249 			clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
10250 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
10251 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
10252 
10253 			goto unlock;
10254 		}
10255 
10256 		if (mddev_is_clustered(mddev)) {
10257 			struct md_rdev *rdev, *tmp;
10258 			/* kick the device if another node issued a
10259 			 * remove disk.
10260 			 */
10261 			rdev_for_each_safe(rdev, tmp, mddev) {
10262 				if (rdev->raid_disk < 0 &&
10263 				    test_and_clear_bit(ClusterRemove, &rdev->flags))
10264 					md_kick_rdev_from_array(rdev);
10265 			}
10266 		}
10267 
10268 		if (try_set_sync && !mddev->external && !mddev->in_sync) {
10269 			spin_lock(&mddev->lock);
10270 			set_in_sync(mddev);
10271 			spin_unlock(&mddev->lock);
10272 		}
10273 
10274 		if (mddev->sb_flags)
10275 			md_update_sb(mddev, 0);
10276 
10277 		/*
10278 		 * Never start a new sync thread if MD_RECOVERY_RUNNING is
10279 		 * still set.
10280 		 */
10281 		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
10282 			unregister_sync_thread(mddev);
10283 			goto unlock;
10284 		}
10285 
10286 		/* Set RUNNING before clearing NEEDED to avoid
10287 		 * any transients in the value of "sync_action".
10288 		 */
10289 		mddev->curr_resync_completed = 0;
10290 		spin_lock(&mddev->lock);
10291 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
10292 		spin_unlock(&mddev->lock);
10293 		/* Clear some bits that don't mean anything, but
10294 		 * might be left set
10295 		 */
10296 		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
10297 		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
10298 
10299 		if (test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) &&
10300 		    !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
10301 			queue_work(md_misc_wq, &mddev->sync_work);
10302 		} else {
10303 			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
10304 			wake_up(&resync_wait);
10305 		}
10306 
10307 	unlock:
10308 		wake_up(&mddev->sb_wait);
10309 		mddev_unlock(mddev);
10310 	}
10311 }
10312 EXPORT_SYMBOL(md_check_recovery);
10313 
10314 void md_reap_sync_thread(struct mddev *mddev)
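/*
 * Collect the result of a finished (or interrupted) sync thread: activate
 * any spares on success, persist the superblock, clear the transient
 * MD_RECOVERY_* bits and notify userspace.  Called with the reconfig mutex
 * held, e.g. from md_check_recovery().
 */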
10315 {
10316 	struct md_rdev *rdev;
10317 	sector_t old_dev_sectors = mddev->dev_sectors;
10318 	bool is_reshaped = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
10319 
10320 	/* resync has finished, collect result */
10321 	md_unregister_thread(mddev, &mddev->sync_thread);
10322 	atomic_inc(&mddev->sync_seq);
10323 
10324 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
10325 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
10326 	    mddev->degraded != mddev->raid_disks) {
10327 		/* success...*/
10328 		/* activate any spares */
10329 		if (mddev->pers->spare_active(mddev)) {
10330 			sysfs_notify_dirent_safe(mddev->sysfs_degraded);
10331 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
10332 		}
10333 	}
10334 
10335 	/* If array is no-longer degraded, then any saved_raid_disk
10336 	 * information must be scrapped.
10337 	 */
10338 	if (!mddev->degraded)
10339 		rdev_for_each(rdev, mddev)
10340 			rdev->saved_raid_disk = -1;
10341 
10342 	md_update_sb(mddev, 1);
10343 	/* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
10344 	 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
10345 	 * clustered raid */
10346 	if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
10347 		mddev->cluster_ops->resync_finish(mddev);
10348 	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
10349 	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
10350 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
10351 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
10352 	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
10353 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
10354 	clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
10355 	/*
10356 	 * We call mddev->cluster_ops->update_size here because sync_size could
10357 	 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
10358 	 * so it is time to update the size across the cluster.
10359 	 */
10360 	if (mddev_is_clustered(mddev) && is_reshaped &&
10361 	    mddev->pers->finish_reshape &&
10362 	    !test_bit(MD_CLOSING, &mddev->flags))
10363 		mddev->cluster_ops->update_size(mddev, old_dev_sectors);
10364 	/* flag recovery needed just to double check */
10365 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
10366 	sysfs_notify_dirent_safe(mddev->sysfs_completed);
10367 	sysfs_notify_dirent_safe(mddev->sysfs_action);
10368 	md_new_event();
10369 	if (mddev->event_work.func)
10370 		queue_work(md_misc_wq, &mddev->event_work);
10371 	wake_up(&resync_wait);
10372 }
10373 EXPORT_SYMBOL(md_reap_sync_thread);
10374 
10375 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
10376 {
10377 	sysfs_notify_dirent_safe(rdev->sysfs_state);
10378 	wait_event_timeout(rdev->blocked_wait, !rdev_blocked(rdev),
10379 			   msecs_to_jiffies(5000));
10380 	rdev_dec_pending(rdev, mddev);
10381 }
10382 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
10383 
10384 void md_finish_reshape(struct mddev *mddev)
10385 {
10386 	/* called by the personality module when reshape completes. */
10387 	struct md_rdev *rdev;
10388 
10389 	rdev_for_each(rdev, mddev) {
10390 		if (rdev->data_offset > rdev->new_data_offset)
10391 			rdev->sectors += rdev->data_offset - rdev->new_data_offset;
10392 		else
10393 			rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
10394 		rdev->data_offset = rdev->new_data_offset;
10395 	}
10396 }
10397 EXPORT_SYMBOL(md_finish_reshape);
10398 
10399 /* Bad block management */
10400 
10401 /* Returns true on success, false on failure */
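/* @s is relative to the rdev's data area; @is_new selects new_data_offset
 * (the layout a reshape is moving to) rather than data_offset when
 * translating to a device-absolute sector.
 */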
10402 bool rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
10403 			int is_new)
10404 {
10405 	struct mddev *mddev = rdev->mddev;
10406 
10407 	/*
10408 	 * Recording new badblocks for a faulty rdev forces an unnecessary
10409 	 * superblock update. This is fragile for externally managed arrays
10410 	 * because a userspace daemon may be trying to remove the device and
10411 	 * a deadlock may occur. This will probably be solved in mdadm, but
10412 	 * it is safer to avoid it here.
10413 	 */
10414 	if (test_bit(Faulty, &rdev->flags))
10415 		return true;
10416 
10417 	if (is_new)
10418 		s += rdev->new_data_offset;
10419 	else
10420 		s += rdev->data_offset;
10421 
10422 	if (!badblocks_set(&rdev->badblocks, s, sectors, 0)) {
10423 		/*
10424 		 * Mark the disk as Faulty when setting badblocks fails,
10425 		 * otherwise, bad sectors may be read.
10426 		 */
10427 		md_error(mddev, rdev);
10428 		return false;
10429 	}
10430 
10431 	/* Make sure they get written out promptly */
10432 	if (test_bit(ExternalBbl, &rdev->flags))
10433 		sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
10434 	sysfs_notify_dirent_safe(rdev->sysfs_state);
10435 	set_mask_bits(&mddev->sb_flags, 0,
10436 		      BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
10437 	md_wakeup_thread(rdev->mddev->thread);
10438 	return true;
10439 }
10440 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
10441 
10442 void rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
10443 			  int is_new)
10444 {
10445 	if (is_new)
10446 		s += rdev->new_data_offset;
10447 	else
10448 		s += rdev->data_offset;
10449 
10450 	if (!badblocks_clear(&rdev->badblocks, s, sectors))
10451 		return;
10452 
10453 	if (test_bit(ExternalBbl, &rdev->flags))
10454 		sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
10455 }
10456 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
10457 
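/*
 * Reboot notifier: on shutdown or restart, stop writes on every array and
 * put persistent-metadata arrays into immediate safe mode so they can be
 * marked clean before the machine goes down.
 */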
10458 static int md_notify_reboot(struct notifier_block *this,
10459 			    unsigned long code, void *x)
10460 {
10461 	struct mddev *mddev;
10462 
10463 	spin_lock(&all_mddevs_lock);
10464 	list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
10465 		if (!mddev_get(mddev))
10466 			continue;
10467 		spin_unlock(&all_mddevs_lock);
10468 		if (mddev_trylock(mddev)) {
10469 			if (mddev->pers)
10470 				__md_stop_writes(mddev);
10471 			if (mddev->persistent)
10472 				mddev->safemode = 2;
10473 			mddev_unlock(mddev);
10474 		}
10475 		spin_lock(&all_mddevs_lock);
10476 		mddev_put_locked(mddev);
10477 	}
10478 	spin_unlock(&all_mddevs_lock);
10479 
10480 	return NOTIFY_DONE;
10481 }
10482 
10483 static struct notifier_block md_notifier = {
10484 	.notifier_call	= md_notify_reboot,
10485 	.next		= NULL,
10486 	.priority	= INT_MAX, /* before any real devices */
10487 };
10488 
10489 static void md_geninit(void)
10490 {
10491 	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
10492 
10493 	proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
10494 }
10495 
10496 static int __init md_init(void)
10497 {
10498 	int ret = md_bitmap_init();
10499 
10500 	if (ret)
10501 		return ret;
10502 
10503 	ret = md_llbitmap_init();
10504 	if (ret)
10505 		goto err_bitmap;
10506 
10507 	ret = -ENOMEM;
10508 	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
10509 	if (!md_wq)
10510 		goto err_wq;
10511 
10512 	md_misc_wq = alloc_workqueue("md_misc", WQ_PERCPU, 0);
10513 	if (!md_misc_wq)
10514 		goto err_misc_wq;
10515 
10516 	ret = __register_blkdev(MD_MAJOR, "md", md_probe);
10517 	if (ret < 0)
10518 		goto err_md;
10519 
10520 	ret = __register_blkdev(0, "mdp", md_probe);
10521 	if (ret < 0)
10522 		goto err_mdp;
10523 	mdp_major = ret;
10524 
10525 	register_reboot_notifier(&md_notifier);
10526 	raid_table_header = register_sysctl("dev/raid", raid_table);
10527 
10528 	md_geninit();
10529 	return 0;
10530 
10531 err_mdp:
10532 	unregister_blkdev(MD_MAJOR, "md");
10533 err_md:
10534 	destroy_workqueue(md_misc_wq);
10535 err_misc_wq:
10536 	destroy_workqueue(md_wq);
10537 err_wq:
10538 	md_llbitmap_exit();
10539 err_bitmap:
10540 	md_bitmap_exit();
10541 	return ret;
10542 }
10543 
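/*
 * Clustered md: called after another node updated the superblock.  Apply
 * any size change, role changes (activated spares, faulted devices) and
 * reshape progress recorded in @rdev's superblock to the local mddev.
 */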
10544 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
10545 {
10546 	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
10547 	struct md_rdev *rdev2, *tmp;
10548 	int role, ret;
10549 
10550 	/*
10551 	 * If the size was changed on another node, we need to
10552 	 * resize as well.
10553 	 */
10554 	if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
10555 		ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
10556 		if (ret)
10557 			pr_info("md-cluster: resize failed\n");
10558 		else if (md_bitmap_enabled(mddev, false))
10559 			mddev->bitmap_ops->update_sb(mddev->bitmap);
10560 	}
10561 
10562 	/* Check for change of roles in the active devices */
10563 	rdev_for_each_safe(rdev2, tmp, mddev) {
10564 		if (test_bit(Faulty, &rdev2->flags)) {
10565 			if (test_bit(ClusterRemove, &rdev2->flags))
10566 				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
10567 			continue;
10568 		}
10569 
10570 		/* Check if the roles changed */
10571 		role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
10572 
10573 		if (test_bit(Candidate, &rdev2->flags)) {
10574 			if (role == MD_DISK_ROLE_FAULTY) {
10575 				pr_info("md: Removing Candidate device %pg because add failed\n",
10576 					rdev2->bdev);
10577 				md_kick_rdev_from_array(rdev2);
10578 				continue;
10579 			}
10580 			else
10581 				clear_bit(Candidate, &rdev2->flags);
10582 		}
10583 
10584 		if (role != rdev2->raid_disk) {
10585 			/*
10586 			 * The device got activated, unless a reshape is happening.
10587 			 */
10588 			if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE &&
10589 			    !(le32_to_cpu(sb->feature_map) &
10590 			      MD_FEATURE_RESHAPE_ACTIVE) &&
10591 			    !mddev->cluster_ops->resync_status_get(mddev)) {
10592 				/*
10593 				 * -1 to make raid1_add_disk() set conf->fullsync
10594 				 * to 1. This could avoid skipping sync when the
10595 				 * remote node is down during resyncing.
10596 				 */
10597 				if ((le32_to_cpu(sb->feature_map)
10598 				    & MD_FEATURE_RECOVERY_OFFSET))
10599 					rdev2->saved_raid_disk = -1;
10600 				else
10601 					rdev2->saved_raid_disk = role;
10602 				ret = remove_and_add_spares(mddev, rdev2);
10603 				pr_info("Activated spare: %pg\n",
10604 					rdev2->bdev);
10605 				/* wake up mddev->thread here so the array can
10606 				 * perform a resync with the newly activated disk */
10607 				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
10608 				md_wakeup_thread(mddev->thread);
10609 			}
10610 			/* device faulty
10611 			 * We just want to do the minimum to mark the disk
10612 			 * as faulty. The recovery is performed by the
10613 			 * one who initiated the error.
10614 			 */
10615 			if (role == MD_DISK_ROLE_FAULTY ||
10616 			    role == MD_DISK_ROLE_JOURNAL) {
10617 				md_error(mddev, rdev2);
10618 				clear_bit(Blocked, &rdev2->flags);
10619 			}
10620 		}
10621 	}
10622 
10623 	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
10624 		ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
10625 		if (ret)
10626 			pr_warn("md: updating array disks failed. %d\n", ret);
10627 	}
10628 
10629 	/*
10630 	 * Since mddev->delta_disks has already been updated in
10631 	 * update_raid_disks, it is time to check for reshape.
10632 	 */
10633 	if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
10634 	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
10635 		/*
10636 		 * reshape is happening in the remote node, we need to
10637 		 * update reshape_position and call start_reshape.
10638 		 */
10639 		mddev->reshape_position = le64_to_cpu(sb->reshape_position);
10640 		if (mddev->pers->update_reshape_pos)
10641 			mddev->pers->update_reshape_pos(mddev);
10642 		if (mddev->pers->start_reshape)
10643 			mddev->pers->start_reshape(mddev);
10644 	} else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
10645 		   mddev->reshape_position != MaxSector &&
10646 		   !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
10647 		/* reshape is just done in another node. */
10648 		mddev->reshape_position = MaxSector;
10649 		if (mddev->pers->update_reshape_pos)
10650 			mddev->pers->update_reshape_pos(mddev);
10651 	}
10652 
10653 	/* Finally set the event to be up to date */
10654 	mddev->events = le64_to_cpu(sb->events);
10655 }
10656 
10657 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
10658 {
10659 	int err;
10660 	struct page *swapout = rdev->sb_page;
10661 	struct mdp_superblock_1 *sb;
10662 
10663 	/* Store the rdev's sb page in the 'swapout' temporary
10664 	 * variable in case we fail later and need to restore it.
10665 	 */
10666 	rdev->sb_page = NULL;
10667 	err = alloc_disk_sb(rdev);
10668 	if (err == 0) {
10669 		ClearPageUptodate(rdev->sb_page);
10670 		rdev->sb_loaded = 0;
10671 		err = super_types[mddev->major_version].
10672 			load_super(rdev, NULL, mddev->minor_version);
10673 	}
10674 	if (err < 0) {
10675 		pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
10676 				__func__, __LINE__, rdev->desc_nr, err);
10677 		if (rdev->sb_page)
10678 			put_page(rdev->sb_page);
10679 		rdev->sb_page = swapout;
10680 		rdev->sb_loaded = 1;
10681 		return err;
10682 	}
10683 
10684 	sb = page_address(rdev->sb_page);
10685 	/* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET
10686 	 * is not set
10687 	 */
10688 
10689 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
10690 		rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
10691 
10692 	/* The other node finished recovery, call spare_active to set
10693 	 * the device In_sync and update mddev->degraded
10694 	 */
10695 	if (rdev->recovery_offset == MaxSector &&
10696 	    !test_bit(In_sync, &rdev->flags) &&
10697 	    mddev->pers->spare_active(mddev))
10698 		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
10699 
10700 	put_page(swapout);
10701 	return 0;
10702 }
10703 
10704 void md_reload_sb(struct mddev *mddev, int nr)
10705 {
10706 	struct md_rdev *rdev = NULL, *iter;
10707 	int err;
10708 
10709 	/* Find the rdev */
10710 	rdev_for_each_rcu(iter, mddev) {
10711 		if (iter->desc_nr == nr) {
10712 			rdev = iter;
10713 			break;
10714 		}
10715 	}
10716 
10717 	if (!rdev) {
10718 		pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
10719 		return;
10720 	}
10721 
10722 	err = read_rdev(mddev, rdev);
10723 	if (err < 0)
10724 		return;
10725 
10726 	check_sb_changes(mddev, rdev);
10727 
10728 	/* Read all rdev's to update recovery_offset */
10729 	rdev_for_each_rcu(rdev, mddev) {
10730 		if (!test_bit(Faulty, &rdev->flags))
10731 			read_rdev(mddev, rdev);
10732 	}
10733 }
10734 EXPORT_SYMBOL(md_reload_sb);
10735 
10736 #ifndef MODULE
10737 
10738 /*
10739  * Searches all registered partitions for autorun RAID arrays
10740  * at boot time.
10741  */
10742 
10743 static DEFINE_MUTEX(detected_devices_mutex);
10744 static LIST_HEAD(all_detected_devices);
10745 struct detected_devices_node {
10746 	struct list_head list;
10747 	dev_t dev;
10748 };
10749 
10750 void md_autodetect_dev(dev_t dev)
10751 {
10752 	struct detected_devices_node *node_detected_dev;
10753 
10754 	node_detected_dev = kzalloc_obj(*node_detected_dev);
10755 	if (node_detected_dev) {
10756 		node_detected_dev->dev = dev;
10757 		mutex_lock(&detected_devices_mutex);
10758 		list_add_tail(&node_detected_dev->list, &all_detected_devices);
10759 		mutex_unlock(&detected_devices_mutex);
10760 	}
10761 }
10762 
10763 void md_autostart_arrays(int part)
10764 {
10765 	struct md_rdev *rdev;
10766 	struct detected_devices_node *node_detected_dev;
10767 	dev_t dev;
10768 	int i_scanned, i_passed;
10769 
10770 	i_scanned = 0;
10771 	i_passed = 0;
10772 
10773 	pr_info("md: Autodetecting RAID arrays.\n");
10774 
10775 	mutex_lock(&detected_devices_mutex);
10776 	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
10777 		i_scanned++;
10778 		node_detected_dev = list_entry(all_detected_devices.next,
10779 					struct detected_devices_node, list);
10780 		list_del(&node_detected_dev->list);
10781 		dev = node_detected_dev->dev;
10782 		kfree(node_detected_dev);
10783 		mutex_unlock(&detected_devices_mutex);
10784 		rdev = md_import_device(dev, 0, 90);
10785 		mutex_lock(&detected_devices_mutex);
10786 		if (IS_ERR(rdev))
10787 			continue;
10788 
10789 		if (test_bit(Faulty, &rdev->flags))
10790 			continue;
10791 
10792 		set_bit(AutoDetected, &rdev->flags);
10793 		list_add(&rdev->same_set, &pending_raid_disks);
10794 		i_passed++;
10795 	}
10796 	mutex_unlock(&detected_devices_mutex);
10797 
10798 	pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
10799 
10800 	autorun_devices(part);
10801 }
10802 
10803 #endif /* !MODULE */
10804 
10805 static __exit void md_exit(void)
10806 {
10807 	struct mddev *mddev;
10808 	int delay = 1;
10809 
10810 	unregister_blkdev(MD_MAJOR, "md");
10811 	unregister_blkdev(mdp_major, "mdp");
10812 	unregister_reboot_notifier(&md_notifier);
10813 	unregister_sysctl_table(raid_table_header);
10814 
10815 	/* We cannot unload the modules while some process is
10816 	 * waiting for us in select() or poll() - wake them up
10817 	 */
10818 	md_unloading = 1;
10819 	while (waitqueue_active(&md_event_waiters)) {
10820 		/* not safe to leave yet */
10821 		wake_up(&md_event_waiters);
10822 		msleep(delay);
10823 		delay += delay;
10824 	}
10825 	remove_proc_entry("mdstat", NULL);
10826 
10827 	spin_lock(&all_mddevs_lock);
10828 	list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
10829 		if (!mddev_get(mddev))
10830 			continue;
10831 		spin_unlock(&all_mddevs_lock);
10832 		export_array(mddev);
10833 		mddev->ctime = 0;
10834 		mddev->hold_active = 0;
10835 		/*
10836 		 * As the mddev is now fully clear, mddev_put will schedule
10837 		 * the mddev for destruction by a workqueue, and the
10838 		 * destroy_workqueue() below will wait for that to complete.
10839 		 */
10840 		spin_lock(&all_mddevs_lock);
10841 		mddev_put_locked(mddev);
10842 	}
10843 	spin_unlock(&all_mddevs_lock);
10844 
10845 	destroy_workqueue(md_misc_wq);
10846 	destroy_workqueue(md_wq);
10847 	md_bitmap_exit();
10848 }
10849 
10850 subsys_initcall(md_init);
10851 module_exit(md_exit)
10852 
10853 static int get_ro(char *buffer, const struct kernel_param *kp)
10854 {
10855 	return sprintf(buffer, "%d\n", start_readonly);
10856 }
10857 static int set_ro(const char *val, const struct kernel_param *kp)
10858 {
10859 	return kstrtouint(val, 10, (unsigned int *)&start_readonly);
10860 }
10861 
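/*
 * start_ro=1 makes newly assembled arrays start in auto-read-only mode
 * (switched to read-write on the first write), so arrays are not touched
 * until they are actually used.
 */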
10862 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
10863 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
10864 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
10865 module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
10866 module_param(legacy_async_del_gendisk, bool, 0600);
10867 module_param(check_new_feature, bool, 0600);
10868 
10869 MODULE_LICENSE("GPL");
10870 MODULE_DESCRIPTION("MD RAID framework");
10871 MODULE_ALIAS("md");
10872 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
10873