/* xref: /linux/drivers/md/md-bitmap.c (revision 34dc1baba215b826e454b8d19e4f24adbeb7d00d) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"

static inline char *bmname(struct bitmap *bitmap)
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
			       unsigned long page, int create, int no_hijack)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	unsigned char *mappage;

	WARN_ON_ONCE(page >= bitmap->pages);
	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

	spin_unlock_irq(&bitmap->lock);
	/* It is possible that this is being called inside a
	 * prepare_to_wait/finish_wait loop from raid5.c:make_request().
	 * In general it is not permitted to sleep in that context as it
	 * can cause the loop to spin freely.
	 * That doesn't apply here as we can only reach this point
	 * once with any loop.
	 * When this function completes, either bp[page].map or
	 * bp[page].hijacked will be set.  In either case, this function
	 * will abort before getting to this point again.  So there is
	 * no risk of a free-spin, and so it is safe to assert
	 * that sleeping here is allowed.
	 */
	sched_annotate_sleep();
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
		/* We don't support hijack for cluster raid */
		if (no_hijack)
			return -ENOMEM;
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		kfree(mappage);
	} else {
		/* no page was in place and we have one, so install it */
		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
	return 0;
}
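
/*
 * Illustrative note (editor's sketch, based on the definitions in
 * md-bitmap.h): a hijacked bp[page].map pointer is never dereferenced.
 * Instead, md_bitmap_get_counter() below reinterprets the pointer slot
 * itself as an array of two 16-bit counters, so a hijacked page tracks
 * its whole range with two coarse counters instead of the
 * PAGE_COUNTER_RATIO (2048 with 4K pages) fine-grained ones a
 * successfully allocated page provides:
 *
 *	int hi = (pageoff > PAGE_COUNTER_MASK);
 *	counter = &((bitmap_counter_t *)&bitmap->bp[page].map)[hi];
 */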

/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get allocated next time */
/* Note: lock should be held when calling this */
static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
		kfree(ptr);
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */

/* choose a good rdev and read the page from there */
static int read_sb_page(struct mddev *mddev, loff_t offset,
		struct page *page, unsigned long index, int size)
{
	sector_t sector = mddev->bitmap_info.offset + offset +
		index * (PAGE_SIZE / SECTOR_SIZE);
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev) {
		u32 iosize = roundup(size, bdev_logical_block_size(rdev->bdev));

		if (!test_bit(In_sync, &rdev->flags) ||
		    test_bit(Faulty, &rdev->flags) ||
		    test_bit(Bitmap_sync, &rdev->flags))
			continue;

		if (sync_page_io(rdev, sector, iosize, page, REQ_OP_READ, true))
			return 0;
	}
	return -EIO;
}

static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
	 * list_for_each_entry_continue_rcu.
	 *
	 * Note that if entered with 'rdev == NULL' to start at the
	 * beginning, we temporarily assign 'rdev' to an address which
	 * isn't really an rdev, but which can be used by
	 * list_for_each_entry_continue_rcu() to find the first entry.
	 */
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
	}
	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable device */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static unsigned int optimal_io_size(struct block_device *bdev,
				    unsigned int last_page_size,
				    unsigned int io_size)
{
	if (bdev_io_opt(bdev) > bdev_logical_block_size(bdev))
		return roundup(last_page_size, bdev_io_opt(bdev));
	return io_size;
}

static unsigned int bitmap_io_size(unsigned int io_size, unsigned int opt_size,
				   loff_t start, loff_t boundary)
{
	if (io_size != opt_size &&
	    start + opt_size / SECTOR_SIZE <= boundary)
		return opt_size;
	if (start + io_size / SECTOR_SIZE <= boundary)
		return io_size;

	/* Overflows boundary */
	return 0;
}
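
/*
 * Worked example (editor's sketch, assuming 512-byte sectors): with
 * io_size = 4096, opt_size = 65536, start = 16 and boundary = 144, the
 * opt_size write would end at sector 16 + 65536/512 = 144, which still
 * fits, so 65536 is returned.  With boundary = 100 the opt_size write
 * no longer fits, but the io_size write ends at sector 16 + 4096/512 =
 * 24, so 4096 is returned.  If even that crossed the boundary, 0 would
 * tell the caller the write cannot be done safely.
 */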
223 
224 static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
225 			   unsigned long pg_index, struct page *page)
226 {
227 	struct block_device *bdev;
228 	struct mddev *mddev = bitmap->mddev;
229 	struct bitmap_storage *store = &bitmap->storage;
230 	loff_t sboff, offset = mddev->bitmap_info.offset;
231 	sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE;
232 	unsigned int size = PAGE_SIZE;
233 	unsigned int opt_size = PAGE_SIZE;
234 	sector_t doff;
235 
236 	bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
237 	if (pg_index == store->file_pages - 1) {
238 		unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);
239 
240 		if (last_page_size == 0)
241 			last_page_size = PAGE_SIZE;
242 		size = roundup(last_page_size, bdev_logical_block_size(bdev));
243 		opt_size = optimal_io_size(bdev, last_page_size, size);
244 	}
245 
246 	sboff = rdev->sb_start + offset;
247 	doff = rdev->data_offset;
248 
249 	/* Just make sure we aren't corrupting data or metadata */
250 	if (mddev->external) {
251 		/* Bitmap could be anywhere. */
252 		if (sboff + ps > doff &&
253 		    sboff < (doff + mddev->dev_sectors + PAGE_SIZE / SECTOR_SIZE))
254 			return -EINVAL;
255 	} else if (offset < 0) {
256 		/* DATA  BITMAP METADATA  */
257 		size = bitmap_io_size(size, opt_size, offset + ps, 0);
258 		if (size == 0)
259 			/* bitmap runs in to metadata */
260 			return -EINVAL;
261 
262 		if (doff + mddev->dev_sectors > sboff)
263 			/* data runs in to bitmap */
264 			return -EINVAL;
265 	} else if (rdev->sb_start < rdev->data_offset) {
266 		/* METADATA BITMAP DATA */
267 		size = bitmap_io_size(size, opt_size, sboff + ps, doff);
268 		if (size == 0)
269 			/* bitmap runs in to data */
270 			return -EINVAL;
271 	} else {
272 		/* DATA METADATA BITMAP - no problems */
273 	}
274 
275 	md_super_write(mddev, rdev, sboff + ps, (int) size, page);
276 	return 0;
277 }
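
/*
 * Layout note (editor's sketch): the branches above mirror where the MD
 * superblock sits on each member device.  A negative bitmap offset means
 * the bitmap occupies the gap before a superblock at the end of the
 * device (DATA BITMAP METADATA); sb_start < data_offset means the bitmap
 * follows a leading superblock (METADATA BITMAP DATA); with a trailing
 * superblock directly after the data (DATA METADATA BITMAP) nothing
 * lives past the bitmap, so no boundary check is needed.
 */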

static void write_sb_page(struct bitmap *bitmap, unsigned long pg_index,
			  struct page *page, bool wait)
{
	struct mddev *mddev = bitmap->mddev;

	do {
		struct md_rdev *rdev = NULL;

		while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
			if (__write_sb_page(rdev, bitmap, pg_index, page) < 0) {
				set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
				return;
			}
		}
	} while (wait && md_super_wait(mddev) < 0);
}

static void md_bitmap_file_kick(struct bitmap *bitmap);

#ifdef CONFIG_MD_BITMAP_FILE
static void write_file_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct buffer_head *bh = page_buffers(page);

	while (bh && bh->b_blocknr) {
		atomic_inc(&bitmap->pending_writes);
		set_buffer_locked(bh);
		set_buffer_mapped(bh);
		submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
		bh = bh->b_this_page;
	}

	if (wait)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes) == 0);
}

static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;

	if (!uptodate)
		set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}

static void free_buffers(struct page *page)
{
	struct buffer_head *bh;

	if (!PagePrivate(page))
		return;

	bh = page_buffers(page);
	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
	}
	detach_page_private(page);
	put_page(page);
}

/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
static int read_file_page(struct file *file, unsigned long index,
		struct bitmap *bitmap, unsigned long count, struct page *page)
{
	int ret = 0;
	struct inode *inode = file_inode(file);
	struct buffer_head *bh;
	sector_t block, blk_cur;
	unsigned long blocksize = i_blocksize(inode);

	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);

	bh = alloc_page_buffers(page, blocksize, false);
	if (!bh) {
		ret = -ENOMEM;
		goto out;
	}
	attach_page_private(page, bh);
	blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		block = blk_cur;

		if (count == 0)
			bh->b_blocknr = 0;
		else {
			ret = bmap(inode, &block);
			if (ret || !block) {
				ret = -EINVAL;
				bh->b_blocknr = 0;
				goto out;
			}

			bh->b_blocknr = block;
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < blocksize)
				count = 0;
			else
				count -= blocksize;

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_READ, bh);
		}
		blk_cur++;
		bh = bh->b_this_page;
	}

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		ret = -EIO;
out:
	if (ret)
		pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
		       (int)PAGE_SIZE,
		       (unsigned long long)index << PAGE_SHIFT,
		       ret);
	return ret;
}
#else /* CONFIG_MD_BITMAP_FILE */
static void write_file_page(struct bitmap *bitmap, struct page *page, int wait)
{
}
static int read_file_page(struct file *file, unsigned long index,
		struct bitmap *bitmap, unsigned long count, struct page *page)
{
	return -EIO;
}
static void free_buffers(struct page *page)
{
	put_page(page);
}
#endif /* CONFIG_MD_BITMAP_FILE */

/*
 * bitmap file superblock operations
 */

/*
 * write out a page to a file
 */
static void filemap_write_page(struct bitmap *bitmap, unsigned long pg_index,
			       bool wait)
{
	struct bitmap_storage *store = &bitmap->storage;
	struct page *page = store->filemap[pg_index];

	if (mddev_is_clustered(bitmap->mddev)) {
		pg_index += bitmap->cluster_slot *
			DIV_ROUND_UP(store->bytes, PAGE_SIZE);
	}

	if (store->file)
		write_file_page(bitmap, page, wait);
	else
		write_sb_page(bitmap, pg_index, page, wait);
}

/*
 * md_bitmap_wait_writes() should be called before writing any bitmap
 * blocks, to ensure previous writes, particularly from
 * md_bitmap_daemon_work(), have completed.
 */
static void md_bitmap_wait_writes(struct bitmap *bitmap)
{
	if (bitmap->storage.file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
		/* Note that we ignore the return value.  The writes
		 * might have failed, but that would just mean that
		 * some bits which should be cleared haven't been,
		 * which is safe.  The relevant bitmap blocks will
		 * probably get written again, but there is no great
		 * loss if they aren't.
		 */
		md_super_wait(bitmap->mddev);
}


/* update the event counter and sync the superblock to disk */
void md_bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return;
	if (bitmap->mddev->bitmap_info.external)
		return;
	if (!bitmap->storage.sb_page) /* no superblock */
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (bitmap->mddev->events < bitmap->events_cleared)
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	/*
	 * clear BITMAP_WRITE_ERROR bit to protect against the case that
	 * a bitmap write error occurred but the later writes succeeded.
	 */
	sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
					   bitmap_info.space);
	kunmap_atomic(sb);

	if (bitmap->storage.file)
		write_file_page(bitmap, bitmap->storage.sb_page, 1);
	else
		write_sb_page(bitmap, bitmap->storage.sb_index,
			      bitmap->storage.sb_page, 1);
}
EXPORT_SYMBOL(md_bitmap_update_sb);

/* print out the bitmap file superblock */
void md_bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->storage.sb_page)
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
	pr_debug("       version: %u\n", le32_to_cpu(sb->version));
	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+8)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+12)));
	pr_debug("        events: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events));
	pr_debug("events cleared: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events_cleared));
	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
	pr_debug("     chunksize: %u B\n", le32_to_cpu(sb->chunksize));
	pr_debug("  daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
	pr_debug("     sync size: %llu KB\n",
		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
	pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb);
}

/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;

	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bitmap->storage.sb_page == NULL)
		return -ENOMEM;
	bitmap->storage.sb_index = 0;

	sb = kmap_atomic(bitmap->storage.sb_page);

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
		kunmap_atomic(sb);
		pr_warn("bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
	if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
		pr_debug("Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

	set_bit(BITMAP_STALE, &bitmap->flags);
	sb->state = cpu_to_le32(bitmap->flags);
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
	bitmap->mddev->bitmap_info.nodes = 0;

	kunmap_atomic(sb);

	return 0;
}

/* read the superblock from the bitmap file and initialize some bitmap fields */
static int md_bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long long events;
	int nodes = 0;
	unsigned long sectors_reserved = 0;
	int err = -EINVAL;
	struct page *sb_page;
	loff_t offset = 0;

	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
		chunksize = 128 * 1024 * 1024;
		daemon_sleep = 5 * HZ;
		write_behind = 0;
		set_bit(BITMAP_STALE, &bitmap->flags);
		err = 0;
		goto out_no_sb;
	}
	/* page 0 is the superblock, read it... */
	sb_page = alloc_page(GFP_KERNEL);
	if (!sb_page)
		return -ENOMEM;
	bitmap->storage.sb_page = sb_page;

re_read:
	/* If cluster_slot is set, the cluster is setup */
	if (bitmap->cluster_slot >= 0) {
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;

		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks,
			   (bitmap->mddev->bitmap_info.chunksize >> 9));
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
		offset = bitmap->cluster_slot * (bm_blocks << 3);
		pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
			bitmap->cluster_slot, offset);
	}

	if (bitmap->storage.file) {
		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

		err = read_file_page(bitmap->storage.file, 0,
				bitmap, bytes, sb_page);
	} else {
		err = read_sb_page(bitmap->mddev, offset, sb_page, 0,
				   sizeof(bitmap_super_t));
	}
	if (err)
		return err;

	err = -EINVAL;
	sb = kmap_atomic(sb_page);

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
		reason = "unrecognized superblock version";
	else if (chunksize < 512)
		reason = "bitmap chunksize too small";
	else if (!is_power_of_2(chunksize))
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
		reason = "daemon sleep period out of range";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		pr_warn("%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/*
	 * Setup nodes/clustername only if bitmap version is
	 * cluster-compatible
	 */
	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
		nodes = le32_to_cpu(sb->nodes);
		strscpy(bitmap->mddev->bitmap_info.cluster_name,
				sb->cluster_name, 64);
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
			pr_warn("%s: bitmap superblock UUID mismatch\n",
				bmname(bitmap));
			goto out;
		}
		events = le64_to_cpu(sb->events);
		if (!nodes && (events < bitmap->mddev->events)) {
			pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
				bmname(bitmap), events,
				(unsigned long long) bitmap->mddev->events);
			set_bit(BITMAP_STALE, &bitmap->flags);
		}
	}

	/* assign fields using values from superblock */
	bitmap->flags |= le32_to_cpu(sb->state);
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	err = 0;

out:
	kunmap_atomic(sb);
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
		/* Assigning chunksize is required for "re_read" */
		bitmap->mddev->bitmap_info.chunksize = chunksize;
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
			pr_warn("%s: Could not setup cluster service (%d)\n",
				bmname(bitmap), err);
			goto out_no_sb;
		}
		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
		goto re_read;
	}

out_no_sb:
	if (err == 0) {
		if (test_bit(BITMAP_STALE, &bitmap->flags))
			bitmap->events_cleared = bitmap->mddev->events;
		bitmap->mddev->bitmap_info.chunksize = chunksize;
		bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
		bitmap->mddev->bitmap_info.max_write_behind = write_behind;
		bitmap->mddev->bitmap_info.nodes = nodes;
		if (bitmap->mddev->bitmap_info.space == 0 ||
			bitmap->mddev->bitmap_info.space > sectors_reserved)
			bitmap->mddev->bitmap_info.space = sectors_reserved;
	} else {
		md_bitmap_print_sb(bitmap);
		if (bitmap->cluster_slot < 0)
			md_cluster_stop(bitmap->mddev);
	}
	return err;
}

/*
 * general bitmap file operations
 */

/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */
/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(struct bitmap_storage *store,
					     unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
}
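
/*
 * Worked example (editor's sketch, assuming 4K pages, i.e. PAGE_BITS ==
 * 32768 and PAGE_BIT_SHIFT == 15, and a 256-byte bitmap_super_t): when
 * the superblock shares the file, every chunk is shifted past it by
 * 256 << 3 == 2048 bits.  Chunk 40000 then lands at absolute bit 42048,
 * i.e. page index 42048 >> 15 == 1 and bit offset 42048 & 32767 == 9280
 * within that page.
 */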

/*
 * return a pointer to the page in the filemap that contains the given bit
 */
static inline struct page *filemap_get_page(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (file_page_index(store, chunk) >= store->file_pages)
		return NULL;
	return store->filemap[file_page_index(store, chunk)];
}

static int md_bitmap_storage_alloc(struct bitmap_storage *store,
				   unsigned long chunks, int with_super,
				   int slot_number)
{
	int pnum, offset = 0;
	unsigned long num_pages;
	unsigned long bytes;

	bytes = DIV_ROUND_UP(chunks, 8);
	if (with_super)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
	offset = slot_number * num_pages;

	store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
				       GFP_KERNEL);
	if (!store->filemap)
		return -ENOMEM;

	if (with_super && !store->sb_page) {
		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (store->sb_page == NULL)
			return -ENOMEM;
	}

	pnum = 0;
	if (store->sb_page) {
		store->filemap[0] = store->sb_page;
		pnum = 1;
		store->sb_index = offset;
	}

	for ( ; pnum < num_pages; pnum++) {
		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!store->filemap[pnum]) {
			store->file_pages = pnum;
			return -ENOMEM;
		}
	}
	store->file_pages = pnum;

	/* We need 4 bits per page, rounded up to a multiple
	 * of sizeof(unsigned long) */
	store->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!store->filemap_attr)
		return -ENOMEM;

	store->bytes = bytes;

	return 0;
}
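
/*
 * Sizing example (editor's sketch, assuming 4K pages, a 256-byte
 * bitmap_super_t and a 64-bit build): chunks == 100000 needs
 * DIV_ROUND_UP(100000, 8) == 12500 bitmap bytes plus 256 for the
 * internal superblock, so bytes == 12756 and num_pages ==
 * DIV_ROUND_UP(12756, 4096) == 4.  filemap_attr then gets
 * roundup(DIV_ROUND_UP(4 * 4, 8), 8) == 8 bytes: one unsigned long
 * holding four attribute bits for each of the four pages.
 */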

static void md_bitmap_file_unmap(struct bitmap_storage *store)
{
	struct file *file = store->file;
	struct page *sb_page = store->sb_page;
	struct page **map = store->filemap;
	int pages = store->file_pages;

	while (pages--)
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
			free_buffers(map[pages]);
	kfree(map);
	kfree(store->filemap_attr);

	if (sb_page)
		free_buffers(sb_page);

	if (file) {
		struct inode *inode = file_inode(file);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		fput(file);
	}
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void md_bitmap_file_kick(struct bitmap *bitmap)
{
	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
		md_bitmap_update_sb(bitmap);

		if (bitmap->storage.file) {
			pr_warn("%s: kicking failed bitmap file %pD4 from array!\n",
				bmname(bitmap), bitmap->storage.file);

		} else
			pr_warn("%s: disabling internal bitmap due to errors\n",
				bmname(bitmap));
	}
}

enum bitmap_page_attr {
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};

static inline void set_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
				   enum bitmap_page_attr attr)
{
	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
					   enum bitmap_page_attr attr)
{
	return test_and_clear_bit((pnum<<2) + attr,
				  bitmap->storage.filemap_attr);
}
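
/*
 * Attribute layout (editor's sketch): filemap_attr packs four bits per
 * file page, so page pnum owns bits pnum*4 .. pnum*4+3; the three
 * attributes above use the first three of those bits and the fourth is
 * spare.  Marking page 3 dirty, for example, sets bit (3 << 2) + 0 == 12:
 *
 *	set_page_attr(bitmap, 3, BITMAP_PAGE_DIRTY);
 */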
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *kaddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long index = file_page_index(store, chunk);
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);

	/* set the bit */
	kaddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set_bit(bit, kaddr);
	else
		set_bit_le(bit, kaddr);
	kunmap_atomic(kaddr);
	pr_debug("set file bit %lu page %lu\n", bit, index);
	/* record page number so it gets flushed to disk when unplug occurs */
	set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_DIRTY);
}

static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long index = file_page_index(store, chunk);
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		clear_bit(bit, paddr);
	else
		clear_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	if (!test_page_attr(bitmap, index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
		set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_PENDING);
		bitmap->allclean = 0;
	}
}

static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	int set = 0;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return -EINVAL;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set = test_bit(bit, paddr);
	else
		set = test_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	return set;
}

/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
void md_bitmap_unplug(struct bitmap *bitmap)
{
	unsigned long i;
	int dirty, need_write;
	int writing = 0;

	if (!md_bitmap_enabled(bitmap))
		return;

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
	for (i = 0; i < bitmap->storage.file_pages; i++) {
		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		need_write = test_and_clear_page_attr(bitmap, i,
						      BITMAP_PAGE_NEEDWRITE);
		if (dirty || need_write) {
			if (!writing) {
				md_bitmap_wait_writes(bitmap);
				if (bitmap->mddev->queue)
					blk_add_trace_msg(bitmap->mddev->queue,
							  "md bitmap_unplug");
			}
			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
			filemap_write_page(bitmap, i, false);
			writing = 1;
		}
	}
	if (writing)
		md_bitmap_wait_writes(bitmap);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		md_bitmap_file_kick(bitmap);
}
EXPORT_SYMBOL(md_bitmap_unplug);

struct bitmap_unplug_work {
	struct work_struct work;
	struct bitmap *bitmap;
	struct completion *done;
};

static void md_bitmap_unplug_fn(struct work_struct *work)
{
	struct bitmap_unplug_work *unplug_work =
		container_of(work, struct bitmap_unplug_work, work);

	md_bitmap_unplug(unplug_work->bitmap);
	complete(unplug_work->done);
}

void md_bitmap_unplug_async(struct bitmap *bitmap)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bitmap_unplug_work unplug_work;

	INIT_WORK_ONSTACK(&unplug_work.work, md_bitmap_unplug_fn);
	unplug_work.bitmap = bitmap;
	unplug_work.done = &done;

	queue_work(md_bitmap_wq, &unplug_work.work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(md_bitmap_unplug_async);

static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);

/*
 * Initialize the in-memory bitmap from the on-disk bitmap and set up the memory
 * mapping of the bitmap file.
 *
 * Special case: If there's no bitmap file, or if the bitmap file had been
 * previously kicked from the array, we mark all the bits as 1's in order to
 * cause a full resync.
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap.
 */
static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
	bool outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
	struct mddev *mddev = bitmap->mddev;
	unsigned long chunks = bitmap->counts.chunks;
	struct bitmap_storage *store = &bitmap->storage;
	struct file *file = store->file;
	unsigned long node_offset = 0;
	unsigned long bit_cnt = 0;
	unsigned long i;
	int ret;

	if (!file && !mddev->bitmap_info.offset) {
		/* No permanent bitmap - fill with '1s'. */
		store->filemap = NULL;
		store->file_pages = 0;
		for (i = 0; i < chunks ; i++) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
				      >= start);
			md_bitmap_set_memory_bits(bitmap,
						  (sector_t)i << bitmap->counts.chunkshift,
						  needed);
		}
		return 0;
	}

	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
		pr_warn("%s: bitmap file too short %lu < %lu\n",
			bmname(bitmap),
			(unsigned long) i_size_read(file->f_mapping->host),
			store->bytes);
		ret = -ENOSPC;
		goto err;
	}

	if (mddev_is_clustered(mddev))
		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));

	for (i = 0; i < store->file_pages; i++) {
		struct page *page = store->filemap[i];
		int count;

		/* unmap the old page, we're done with it */
		if (i == store->file_pages - 1)
			count = store->bytes - i * PAGE_SIZE;
		else
			count = PAGE_SIZE;

		if (file)
			ret = read_file_page(file, i, bitmap, count, page);
		else
			ret = read_sb_page(mddev, 0, page, i + node_offset,
					   count);
		if (ret)
			goto err;
	}

	if (outofdate) {
		pr_warn("%s: bitmap file is out of date, doing full recovery\n",
			bmname(bitmap));

		for (i = 0; i < store->file_pages; i++) {
			struct page *page = store->filemap[i];
			unsigned long offset = 0;
			void *paddr;

			if (i == 0 && !mddev->bitmap_info.external)
				offset = sizeof(bitmap_super_t);

			/*
			 * If the bitmap is out of date, dirty the whole page
			 * and write it out
			 */
			paddr = kmap_atomic(page);
			memset(paddr + offset, 0xff, PAGE_SIZE - offset);
			kunmap_atomic(paddr);

			filemap_write_page(bitmap, i, true);
			if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) {
				ret = -EIO;
				goto err;
			}
		}
	}

	for (i = 0; i < chunks; i++) {
		struct page *page = filemap_get_page(&bitmap->storage, i);
		unsigned long bit = file_page_offset(&bitmap->storage, i);
		void *paddr;
		bool was_set;

		paddr = kmap_atomic(page);
		if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
			was_set = test_bit(bit, paddr);
		else
			was_set = test_bit_le(bit, paddr);
		kunmap_atomic(paddr);

		if (was_set) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
				      >= start);
			md_bitmap_set_memory_bits(bitmap,
						  (sector_t)i << bitmap->counts.chunkshift,
						  needed);
			bit_cnt++;
		}
	}

	pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
		 bmname(bitmap), store->file_pages,
		 bit_cnt, chunks);

	return 0;

 err:
	pr_warn("%s: bitmap initialisation failed: %d\n",
		bmname(bitmap), ret);
	return ret;
}

void md_bitmap_write_all(struct bitmap *bitmap)
{
	/* We don't actually write all bitmap blocks here,
	 * just flag them as needing to be written
	 */
	int i;

	if (!bitmap || !bitmap->storage.filemap)
		return;
	if (bitmap->storage.file)
		/* Only one copy, so nothing needed */
		return;

	for (i = 0; i < bitmap->storage.file_pages; i++)
		set_page_attr(bitmap, i,
			      BITMAP_PAGE_NEEDWRITE);
	bitmap->allclean = 0;
}

static void md_bitmap_count_page(struct bitmap_counts *bitmap,
				 sector_t offset, int inc)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;

	bitmap->bp[page].count += inc;
	md_bitmap_checkfree(bitmap, page);
}

static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	struct bitmap_page *bp = &bitmap->bp[page];

	if (!bp->pending)
		bp->pending = 1;
}

static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
					       sector_t offset, sector_t *blocks,
					       int create);

static void mddev_set_timeout(struct mddev *mddev, unsigned long timeout,
			      bool force)
{
	struct md_thread *thread;

	rcu_read_lock();
	thread = rcu_dereference(mddev->thread);

	if (!thread)
		goto out;

	if (force || thread->timeout < MAX_SCHEDULE_TIMEOUT)
		thread->timeout = timeout;

out:
	rcu_read_unlock();
}

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 *			out to disk
 */
void md_bitmap_daemon_work(struct mddev *mddev)
{
	struct bitmap *bitmap;
	unsigned long j;
	unsigned long nextpage;
	sector_t blocks;
	struct bitmap_counts *counts;

	/* Use a mutex to guard daemon_work against
	 * bitmap_destroy.
	 */
	mutex_lock(&mddev->bitmap_info.mutex);
	bitmap = mddev->bitmap;
	if (bitmap == NULL) {
		mutex_unlock(&mddev->bitmap_info.mutex);
		return;
	}
	if (time_before(jiffies, bitmap->daemon_lastrun
			+ mddev->bitmap_info.daemon_sleep))
		goto done;

	bitmap->daemon_lastrun = jiffies;
	if (bitmap->allclean) {
		mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true);
		goto done;
	}
	bitmap->allclean = 1;

	if (bitmap->mddev->queue)
		blk_add_trace_msg(bitmap->mddev->queue,
				  "md bitmap_daemon_work");

	/* Any file-page which is PENDING now needs to be written.
	 * So set NEEDWRITE now, then after we make any last-minute changes
	 * we will write it.
	 */
	for (j = 0; j < bitmap->storage.file_pages; j++)
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_PENDING))
			set_page_attr(bitmap, j,
				      BITMAP_PAGE_NEEDWRITE);

	if (bitmap->need_sync &&
	    mddev->bitmap_info.external == 0) {
		/* Arrange for superblock update as well as
		 * other changes */
		bitmap_super_t *sb;
		bitmap->need_sync = 0;
		if (bitmap->storage.filemap) {
			sb = kmap_atomic(bitmap->storage.sb_page);
			sb->events_cleared =
				cpu_to_le64(bitmap->events_cleared);
			kunmap_atomic(sb);
			set_page_attr(bitmap, 0,
				      BITMAP_PAGE_NEEDWRITE);
		}
	}
	/* Now look at the bitmap counters and if any are '2' or '1',
	 * decrement and handle accordingly.
	 */
	counts = &bitmap->counts;
	spin_lock_irq(&counts->lock);
	nextpage = 0;
	for (j = 0; j < counts->chunks; j++) {
		bitmap_counter_t *bmc;
		sector_t block = (sector_t)j << counts->chunkshift;

		if (j == nextpage) {
			nextpage += PAGE_COUNTER_RATIO;
			if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
				j |= PAGE_COUNTER_MASK;
				continue;
			}
			counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
		}

		bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
		if (!bmc) {
			j |= PAGE_COUNTER_MASK;
			continue;
		}
		if (*bmc == 1 && !bitmap->need_sync) {
			/* We can clear the bit */
			*bmc = 0;
			md_bitmap_count_page(counts, block, -1);
			md_bitmap_file_clear_bit(bitmap, block);
		} else if (*bmc && *bmc <= 2) {
			*bmc = 1;
			md_bitmap_set_pending(counts, block);
			bitmap->allclean = 0;
		}
	}
	spin_unlock_irq(&counts->lock);

	md_bitmap_wait_writes(bitmap);
	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
	 * DIRTY pages need to be written by bitmap_unplug so it can wait
	 * for them.
	 * If we find any DIRTY page we stop there and let bitmap_unplug
	 * handle all the rest.  This is important in the case where
	 * the first block holds the superblock and it has been updated.
	 * We mustn't write any other blocks before the superblock.
	 */
	for (j = 0;
	     j < bitmap->storage.file_pages
		     && !test_bit(BITMAP_STALE, &bitmap->flags);
	     j++) {
		if (test_page_attr(bitmap, j,
				   BITMAP_PAGE_DIRTY))
			/* bitmap_unplug will handle the rest */
			break;
		if (bitmap->storage.filemap &&
		    test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_NEEDWRITE))
			filemap_write_page(bitmap, j, false);
	}

 done:
	if (bitmap->allclean == 0)
		mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true);
	mutex_unlock(&mddev->bitmap_info.mutex);
}

static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
					       sector_t offset, sector_t *blocks,
					       int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	/* If 'create', we might release the lock and re-acquire it.
	 * The lock must have been taken with interrupts enabled.
	 * If !create, we don't release the lock.
	 */
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
	sector_t csize;
	int err;

	if (page >= bitmap->pages) {
		/*
		 * This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page or
		 * user set a huge number to sysfs bitmap_set_bits.
		 */
		return NULL;
	}
	err = md_bitmap_checkpage(bitmap, page, create, 0);

	if (bitmap->bp[page].hijacked ||
	    bitmap->bp[page].map == NULL)
		csize = ((sector_t)1) << (bitmap->chunkshift +
					  PAGE_COUNTER_SHIFT);
	else
		csize = ((sector_t)1) << bitmap->chunkshift;
	*blocks = csize - (offset & (csize - 1));

	if (err < 0)
		return NULL;

	/* now locked ... */

	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
		/* should we use the first or second counter field
		 * of the hijacked pointer? */
		int hi = (pageoff > PAGE_COUNTER_MASK);
		return &((bitmap_counter_t *)
			  &bitmap->bp[page].map)[hi];
	} else /* page is allocated */
		return (bitmap_counter_t *)
			&(bitmap->bp[page].map[pageoff]);
}
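
/*
 * Counter encoding (editor's sketch, following the masks in
 * md-bitmap.h): each 16-bit counter keeps its top bit for NEEDED (chunk
 * must be resynced) and the next bit for RESYNC (resync in progress),
 * leaving a 14-bit count of in-flight writes biased by 2 while the
 * chunk is dirty.  So *bmc == 2 means "dirty, no writes pending", which
 * is why md_bitmap_daemon_work() steps counters from 2 to 1 and only
 * clears the on-disk bit when a counter drops from 1 to 0.
 */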
1463 
1464 int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
1465 {
1466 	if (!bitmap)
1467 		return 0;
1468 
1469 	if (behind) {
1470 		int bw;
1471 		atomic_inc(&bitmap->behind_writes);
1472 		bw = atomic_read(&bitmap->behind_writes);
1473 		if (bw > bitmap->behind_writes_used)
1474 			bitmap->behind_writes_used = bw;
1475 
1476 		pr_debug("inc write-behind count %d/%lu\n",
1477 			 bw, bitmap->mddev->bitmap_info.max_write_behind);
1478 	}
1479 
1480 	while (sectors) {
1481 		sector_t blocks;
1482 		bitmap_counter_t *bmc;
1483 
1484 		spin_lock_irq(&bitmap->counts.lock);
1485 		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
1486 		if (!bmc) {
1487 			spin_unlock_irq(&bitmap->counts.lock);
1488 			return 0;
1489 		}
1490 
1491 		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
1492 			DEFINE_WAIT(__wait);
1493 			/* note that it is safe to do the prepare_to_wait
1494 			 * after the test as long as we do it before dropping
1495 			 * the spinlock.
1496 			 */
1497 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
1498 					TASK_UNINTERRUPTIBLE);
1499 			spin_unlock_irq(&bitmap->counts.lock);
1500 			schedule();
1501 			finish_wait(&bitmap->overflow_wait, &__wait);
1502 			continue;
1503 		}
1504 
1505 		switch (*bmc) {
1506 		case 0:
1507 			md_bitmap_file_set_bit(bitmap, offset);
1508 			md_bitmap_count_page(&bitmap->counts, offset, 1);
1509 			fallthrough;
1510 		case 1:
1511 			*bmc = 2;
1512 		}
1513 
1514 		(*bmc)++;
1515 
1516 		spin_unlock_irq(&bitmap->counts.lock);
1517 
1518 		offset += blocks;
1519 		if (sectors > blocks)
1520 			sectors -= blocks;
1521 		else
1522 			sectors = 0;
1523 	}
1524 	return 0;
1525 }
1526 EXPORT_SYMBOL(md_bitmap_startwrite);
1527 
1528 void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
1529 			unsigned long sectors, int success, int behind)
1530 {
1531 	if (!bitmap)
1532 		return;
1533 	if (behind) {
1534 		if (atomic_dec_and_test(&bitmap->behind_writes))
1535 			wake_up(&bitmap->behind_wait);
1536 		pr_debug("dec write-behind count %d/%lu\n",
1537 			 atomic_read(&bitmap->behind_writes),
1538 			 bitmap->mddev->bitmap_info.max_write_behind);
1539 	}
1540 
1541 	while (sectors) {
1542 		sector_t blocks;
1543 		unsigned long flags;
1544 		bitmap_counter_t *bmc;
1545 
1546 		spin_lock_irqsave(&bitmap->counts.lock, flags);
1547 		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
1548 		if (!bmc) {
1549 			spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1550 			return;
1551 		}
1552 
1553 		if (success && !bitmap->mddev->degraded &&
1554 		    bitmap->events_cleared < bitmap->mddev->events) {
1555 			bitmap->events_cleared = bitmap->mddev->events;
1556 			bitmap->need_sync = 1;
1557 			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
1558 		}
1559 
1560 		if (!success && !NEEDED(*bmc))
1561 			*bmc |= NEEDED_MASK;
1562 
1563 		if (COUNTER(*bmc) == COUNTER_MAX)
1564 			wake_up(&bitmap->overflow_wait);
1565 
1566 		(*bmc)--;
1567 		if (*bmc <= 2) {
1568 			md_bitmap_set_pending(&bitmap->counts, offset);
1569 			bitmap->allclean = 0;
1570 		}
1571 		spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1572 		offset += blocks;
1573 		if (sectors > blocks)
1574 			sectors -= blocks;
1575 		else
1576 			sectors = 0;
1577 	}
1578 }
1579 EXPORT_SYMBOL(md_bitmap_endwrite);
1580 
1581 static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1582 			       int degraded)
1583 {
1584 	bitmap_counter_t *bmc;
1585 	int rv;
1586 	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
1587 		*blocks = 1024;
1588 		return 1; /* always resync if no bitmap */
1589 	}
1590 	spin_lock_irq(&bitmap->counts.lock);
1591 	bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1592 	rv = 0;
1593 	if (bmc) {
1594 		/* locked */
1595 		if (RESYNC(*bmc))
1596 			rv = 1;
1597 		else if (NEEDED(*bmc)) {
1598 			rv = 1;
1599 			if (!degraded) { /* don't set/clear bits if degraded */
1600 				*bmc |= RESYNC_MASK;
1601 				*bmc &= ~NEEDED_MASK;
1602 			}
1603 		}
1604 	}
1605 	spin_unlock_irq(&bitmap->counts.lock);
1606 	return rv;
1607 }
1608 
1609 int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1610 			 int degraded)
1611 {
1612 	/* bitmap_start_sync must always report on multiples of whole
1613 	 * pages, otherwise resync (which is very PAGE_SIZE based) will
1614 	 * get confused.
1615 	 * So call __bitmap_start_sync repeatedly (if needed) until
1616 	 * At least PAGE_SIZE>>9 blocks are covered.
1617 	 * Return the 'or' of the result.
1618 	 */
1619 	int rv = 0;
1620 	sector_t blocks1;
1621 
1622 	*blocks = 0;
1623 	while (*blocks < (PAGE_SIZE>>9)) {
1624 		rv |= __bitmap_start_sync(bitmap, offset,
1625 					  &blocks1, degraded);
1626 		offset += blocks1;
1627 		*blocks += blocks1;
1628 	}
1629 	return rv;
1630 }
1631 EXPORT_SYMBOL(md_bitmap_start_sync);
1632 
1633 void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
1634 {
1635 	bitmap_counter_t *bmc;
1636 	unsigned long flags;
1637 
1638 	if (bitmap == NULL) {
1639 		*blocks = 1024;
1640 		return;
1641 	}
1642 	spin_lock_irqsave(&bitmap->counts.lock, flags);
1643 	bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1644 	if (bmc == NULL)
1645 		goto unlock;
1646 	/* locked */
1647 	if (RESYNC(*bmc)) {
1648 		*bmc &= ~RESYNC_MASK;
1649 
1650 		if (!NEEDED(*bmc) && aborted)
1651 			*bmc |= NEEDED_MASK;
1652 		else {
1653 			if (*bmc <= 2) {
1654 				md_bitmap_set_pending(&bitmap->counts, offset);
1655 				bitmap->allclean = 0;
1656 			}
1657 		}
1658 	}
1659  unlock:
1660 	spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1661 }
1662 EXPORT_SYMBOL(md_bitmap_end_sync);
1663 
1664 void md_bitmap_close_sync(struct bitmap *bitmap)
1665 {
1666 	/* Sync has finished, and any bitmap chunks that weren't synced
1667 	 * properly have been aborted.  It remains to us to clear the
1668 	 * RESYNC bit wherever it is still on
1669 	 */
1670 	sector_t sector = 0;
1671 	sector_t blocks;
1672 	if (!bitmap)
1673 		return;
1674 	while (sector < bitmap->mddev->resync_max_sectors) {
1675 		md_bitmap_end_sync(bitmap, sector, &blocks, 0);
1676 		sector += blocks;
1677 	}
1678 }
1679 EXPORT_SYMBOL(md_bitmap_close_sync);
1680 
1681 void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
1682 {
1683 	sector_t s = 0;
1684 	sector_t blocks;
1685 
1686 	if (!bitmap)
1687 		return;
1688 	if (sector == 0) {
1689 		bitmap->last_end_sync = jiffies;
1690 		return;
1691 	}
1692 	if (!force && time_before(jiffies, (bitmap->last_end_sync
1693 				  + bitmap->mddev->bitmap_info.daemon_sleep)))
1694 		return;
1695 	wait_event(bitmap->mddev->recovery_wait,
1696 		   atomic_read(&bitmap->mddev->recovery_active) == 0);
1697 
1698 	bitmap->mddev->curr_resync_completed = sector;
1699 	set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
1700 	sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
1701 	s = 0;
1702 	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
1703 		md_bitmap_end_sync(bitmap, s, &blocks, 0);
1704 		s += blocks;
1705 	}
1706 	bitmap->last_end_sync = jiffies;
1707 	sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed);
1708 }
1709 EXPORT_SYMBOL(md_bitmap_cond_end_sync);
1710 
1711 void md_bitmap_sync_with_cluster(struct mddev *mddev,
1712 			      sector_t old_lo, sector_t old_hi,
1713 			      sector_t new_lo, sector_t new_hi)
1714 {
1715 	struct bitmap *bitmap = mddev->bitmap;
1716 	sector_t sector, blocks = 0;
1717 
1718 	for (sector = old_lo; sector < new_lo; ) {
1719 		md_bitmap_end_sync(bitmap, sector, &blocks, 0);
1720 		sector += blocks;
1721 	}
1722 	WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
1723 
1724 	for (sector = old_hi; sector < new_hi; ) {
1725 		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
1726 		sector += blocks;
1727 	}
1728 	WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
1729 }
1730 EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
1731 
1732 static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
1733 {
1734 	/* For each chunk covered by any of these sectors, set the
1735 	 * counter to 2 and possibly set resync_needed.  They should all
1736 	 * be 0 at this point
1737 	 */
1738 
1739 	sector_t secs;
1740 	bitmap_counter_t *bmc;
1741 	spin_lock_irq(&bitmap->counts.lock);
1742 	bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
1743 	if (!bmc) {
1744 		spin_unlock_irq(&bitmap->counts.lock);
1745 		return;
1746 	}
1747 	if (!*bmc) {
1748 		*bmc = 2;
1749 		md_bitmap_count_page(&bitmap->counts, offset, 1);
1750 		md_bitmap_set_pending(&bitmap->counts, offset);
1751 		bitmap->allclean = 0;
1752 	}
1753 	if (needed)
1754 		*bmc |= NEEDED_MASK;
1755 	spin_unlock_irq(&bitmap->counts.lock);
1756 }
1757 
1758 /* dirty the memory and file bits for bitmap chunks "s" to "e" */
1759 void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
1760 {
1761 	unsigned long chunk;
1762 
1763 	for (chunk = s; chunk <= e; chunk++) {
1764 		sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
1765 		md_bitmap_set_memory_bits(bitmap, sec, 1);
1766 		md_bitmap_file_set_bit(bitmap, sec);
1767 		if (sec < bitmap->mddev->recovery_cp)
1768 			/* We are asserting that the array is dirty,
1769 			 * so move the recovery_cp address back so
1770 			 * that it is obvious that it is dirty
1771 			 */
1772 			bitmap->mddev->recovery_cp = sec;
1773 	}
1774 }
1775 
1776 /*
1777  * flush out any pending updates
1778  */
1779 void md_bitmap_flush(struct mddev *mddev)
1780 {
1781 	struct bitmap *bitmap = mddev->bitmap;
1782 	long sleep;
1783 
1784 	if (!bitmap) /* there was no bitmap */
1785 		return;
1786 
1787 	/* run the daemon_work three times to ensure everything
1788 	 * that can be flushed is flushed
1789 	 */
1790 	sleep = mddev->bitmap_info.daemon_sleep * 2;
1791 	bitmap->daemon_lastrun -= sleep;
1792 	md_bitmap_daemon_work(mddev);
1793 	bitmap->daemon_lastrun -= sleep;
1794 	md_bitmap_daemon_work(mddev);
1795 	bitmap->daemon_lastrun -= sleep;
1796 	md_bitmap_daemon_work(mddev);
1797 	if (mddev->bitmap_info.external)
1798 		md_super_wait(mddev);
1799 	md_bitmap_update_sb(bitmap);
1800 }
1801 
1802 /*
1803  * free memory that was allocated
1804  */
1805 void md_bitmap_free(struct bitmap *bitmap)
1806 {
1807 	unsigned long k, pages;
1808 	struct bitmap_page *bp;
1809 
1810 	if (!bitmap) /* there was no bitmap */
1811 		return;
1812 
1813 	if (bitmap->sysfs_can_clear)
1814 		sysfs_put(bitmap->sysfs_can_clear);
1815 
1816 	if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
1817 		bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
1818 		md_cluster_stop(bitmap->mddev);
1819 
1820 	/* Shouldn't be needed - but just in case.... */
1821 	wait_event(bitmap->write_wait,
1822 		   atomic_read(&bitmap->pending_writes) == 0);
1823 
1824 	/* release the bitmap file  */
1825 	md_bitmap_file_unmap(&bitmap->storage);
1826 
1827 	bp = bitmap->counts.bp;
1828 	pages = bitmap->counts.pages;
1829 
1830 	/* free all allocated memory */
1831 
1832 	if (bp) /* deallocate the page memory */
1833 		for (k = 0; k < pages; k++)
1834 			if (bp[k].map && !bp[k].hijacked)
1835 				kfree(bp[k].map);
1836 	kfree(bp);
1837 	kfree(bitmap);
1838 }
1839 EXPORT_SYMBOL(md_bitmap_free);
1840 
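/*
 * Block until every outstanding write-behind request counted in
 * bitmap->behind_writes has completed.
 */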
1841 void md_bitmap_wait_behind_writes(struct mddev *mddev)
1842 {
1843 	struct bitmap *bitmap = mddev->bitmap;
1844 
1845 	/* wait for behind writes to complete */
1846 	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
1847 		pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
1848 			 mdname(mddev));
1849 		/* need to kick something here to make sure I/O goes? */
1850 		wait_event(bitmap->behind_wait,
1851 			   atomic_read(&bitmap->behind_writes) == 0);
1852 	}
1853 }
1854 
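/*
 * Tear the bitmap down when the array is stopped: wait for behind
 * writes, detach the bitmap from the mddev under the proper locks,
 * disable the daemon timeout and free all bitmap memory.
 */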
1855 void md_bitmap_destroy(struct mddev *mddev)
1856 {
1857 	struct bitmap *bitmap = mddev->bitmap;
1858 
1859 	if (!bitmap) /* there was no bitmap */
1860 		return;
1861 
1862 	md_bitmap_wait_behind_writes(mddev);
1863 	if (!mddev->serialize_policy)
1864 		mddev_destroy_serial_pool(mddev, NULL, true);
1865 
1866 	mutex_lock(&mddev->bitmap_info.mutex);
1867 	spin_lock(&mddev->lock);
1868 	mddev->bitmap = NULL; /* disconnect from the md device */
1869 	spin_unlock(&mddev->lock);
1870 	mutex_unlock(&mddev->bitmap_info.mutex);
1871 	mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true);
1872 
1873 	md_bitmap_free(bitmap);
1874 }
1875 
1876 /*
1877  * initialize the bitmap structure
1878  * if this returns an error, bitmap_destroy must be called to clean up
1879  * once mddev->bitmap is set
1880  */
1881 struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
1882 {
1883 	struct bitmap *bitmap;
1884 	sector_t blocks = mddev->resync_max_sectors;
1885 	struct file *file = mddev->bitmap_info.file;
1886 	int err;
1887 	struct kernfs_node *bm = NULL;
1888 
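	/* the on-disk bitmap superblock is defined to be exactly 256 bytes */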
1889 	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
1890 
1891 	BUG_ON(file && mddev->bitmap_info.offset);
1892 
1893 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
1894 		pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
1895 			  mdname(mddev));
1896 		return ERR_PTR(-EBUSY);
1897 	}
1898 
1899 	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
1900 	if (!bitmap)
1901 		return ERR_PTR(-ENOMEM);
1902 
1903 	spin_lock_init(&bitmap->counts.lock);
1904 	atomic_set(&bitmap->pending_writes, 0);
1905 	init_waitqueue_head(&bitmap->write_wait);
1906 	init_waitqueue_head(&bitmap->overflow_wait);
1907 	init_waitqueue_head(&bitmap->behind_wait);
1908 
1909 	bitmap->mddev = mddev;
1910 	bitmap->cluster_slot = slot;
1911 
1912 	if (mddev->kobj.sd)
1913 		bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
1914 	if (bm) {
1915 		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
1916 		sysfs_put(bm);
1917 	} else
1918 		bitmap->sysfs_can_clear = NULL;
1919 
1920 	bitmap->storage.file = file;
1921 	if (file) {
1922 		get_file(file);
1923 		/* As future accesses to this file will use bmap,
1924 		 * and bypass the page cache, we must sync the file
1925 		 * first.
1926 		 */
1927 		vfs_fsync(file, 1);
1928 	}
1929 	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
1930 	if (!mddev->bitmap_info.external) {
1931 		/*
1932 		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
1933 		 * instructing us to create a new on-disk bitmap instance.
1934 		 */
1935 		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
1936 			err = md_bitmap_new_disk_sb(bitmap);
1937 		else
1938 			err = md_bitmap_read_sb(bitmap);
1939 	} else {
1940 		err = 0;
1941 		if (mddev->bitmap_info.chunksize == 0 ||
1942 		    mddev->bitmap_info.daemon_sleep == 0)
1943 			/* chunksize and time_base need to be
1944 			 * set first. */
1945 			err = -EINVAL;
1946 	}
1947 	if (err)
1948 		goto error;
1949 
1950 	bitmap->daemon_lastrun = jiffies;
1951 	err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
1952 	if (err)
1953 		goto error;
1954 
1955 	pr_debug("created bitmap (%lu pages) for device %s\n",
1956 		 bitmap->counts.pages, bmname(bitmap));
1957 
1958 	err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
1959 	if (err)
1960 		goto error;
1961 
1962 	return bitmap;
1963  error:
1964 	md_bitmap_free(bitmap);
1965 	return ERR_PTR(err);
1966 }
1967 
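/*
 * Load an existing bitmap into an active array: drop any stale cached
 * state, read the bits from disk (starting at recovery_cp when the
 * dirty bits are not needed), then kick recovery in case any bits
 * were set.
 */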
1968 int md_bitmap_load(struct mddev *mddev)
1969 {
1970 	int err = 0;
1971 	sector_t start = 0;
1972 	sector_t sector = 0;
1973 	struct bitmap *bitmap = mddev->bitmap;
1974 	struct md_rdev *rdev;
1975 
1976 	if (!bitmap)
1977 		goto out;
1978 
1979 	rdev_for_each(rdev, mddev)
1980 		mddev_create_serial_pool(mddev, rdev, true);
1981 
1982 	if (mddev_is_clustered(mddev))
1983 		md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
1984 
1985 	/* Clear out old bitmap info first:  Either there is none, or we
1986 	 * are resuming after someone else has possibly changed things,
1987 	 * so we should forget old cached info.
1988 	 * All chunks should be clean, but some might need_sync.
1989 	 */
1990 	while (sector < mddev->resync_max_sectors) {
1991 		sector_t blocks;
1992 		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
1993 		sector += blocks;
1994 	}
1995 	md_bitmap_close_sync(bitmap);
1996 
1997 	if (mddev->degraded == 0
1998 	    || bitmap->events_cleared == mddev->events)
1999 		/* no need to keep dirty bits to optimise a
2000 		 * re-add of a missing device */
2001 		start = mddev->recovery_cp;
2002 
2003 	mutex_lock(&mddev->bitmap_info.mutex);
2004 	err = md_bitmap_init_from_disk(bitmap, start);
2005 	mutex_unlock(&mddev->bitmap_info.mutex);
2006 
2007 	if (err)
2008 		goto out;
2009 	clear_bit(BITMAP_STALE, &bitmap->flags);
2010 
2011 	/* Kick recovery in case any bits were set */
2012 	set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
2013 
2014 	mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true);
2015 	md_wakeup_thread(mddev->thread);
2016 
2017 	md_bitmap_update_sb(bitmap);
2018 
2019 	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
2020 		err = -EIO;
2021 out:
2022 	return err;
2023 }
2024 EXPORT_SYMBOL_GPL(md_bitmap_load);
2025 
2026 /* caller needs to free the returned bitmap with md_bitmap_free() */
2027 struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
2028 {
2029 	int rv = 0;
2030 	struct bitmap *bitmap;
2031 
2032 	bitmap = md_bitmap_create(mddev, slot);
2033 	if (IS_ERR(bitmap)) {
2034 		rv = PTR_ERR(bitmap);
2035 		return ERR_PTR(rv);
2036 	}
2037 
2038 	rv = md_bitmap_init_from_disk(bitmap, 0);
2039 	if (rv) {
2040 		md_bitmap_free(bitmap);
2041 		return ERR_PTR(rv);
2042 	}
2043 
2044 	return bitmap;
2045 }
2046 EXPORT_SYMBOL(get_bitmap_from_slot);
2047 
2048 /* Loads the bitmap associated with slot and copies the resync information
2049  * to our bitmap
2050  */
2051 int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
2052 		sector_t *low, sector_t *high, bool clear_bits)
2053 {
2054 	int rv = 0, i, j;
2055 	sector_t block, lo = 0, hi = 0;
2056 	struct bitmap_counts *counts;
2057 	struct bitmap *bitmap;
2058 
2059 	bitmap = get_bitmap_from_slot(mddev, slot);
2060 	if (IS_ERR(bitmap)) {
2061 		pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
2062 		return -1;
2063 	}
2064 
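	/* Copy every dirty chunk from the slot's bitmap into ours,
	 * tracking the lowest and highest dirty block as we go.
	 */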
2065 	counts = &bitmap->counts;
2066 	for (j = 0; j < counts->chunks; j++) {
2067 		block = (sector_t)j << counts->chunkshift;
2068 		if (md_bitmap_file_test_bit(bitmap, block)) {
2069 			if (!lo)
2070 				lo = block;
2071 			hi = block;
2072 			md_bitmap_file_clear_bit(bitmap, block);
2073 			md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
2074 			md_bitmap_file_set_bit(mddev->bitmap, block);
2075 		}
2076 	}
2077 
2078 	if (clear_bits) {
2079 		md_bitmap_update_sb(bitmap);
2080 		/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
2081 		 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
2082 		for (i = 0; i < bitmap->storage.file_pages; i++)
2083 			if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
2084 				set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
2085 		md_bitmap_unplug(bitmap);
2086 	}
2087 	md_bitmap_unplug(mddev->bitmap);
2088 	*low = lo;
2089 	*high = hi;
2090 	md_bitmap_free(bitmap);
2091 
2092 	return rv;
2093 }
2094 EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
2095 
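/*
 * Report bitmap usage in /proc/mdstat: pages allocated vs total, the
 * memory they occupy, the chunk size, and the backing file if any.
 */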
2097 void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
2098 {
2099 	unsigned long chunk_kb;
2100 	struct bitmap_counts *counts;
2101 
2102 	if (!bitmap)
2103 		return;
2104 
2105 	counts = &bitmap->counts;
2106 
2107 	chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
2108 	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
2109 		   "%lu%s chunk",
2110 		   counts->pages - counts->missing_pages,
2111 		   counts->pages,
2112 		   (counts->pages - counts->missing_pages)
2113 		   << (PAGE_SHIFT - 10),
2114 		   chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
2115 		   chunk_kb ? "KB" : "B");
2116 	if (bitmap->storage.file) {
2117 		seq_printf(seq, ", file: ");
2118 		seq_file_path(seq, bitmap->storage.file, " \t\n");
2119 	}
2120 
2121 	seq_printf(seq, "\n");
2122 }
2123 
2124 int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2125 		  int chunksize, int init)
2126 {
2127 	/* If 'chunksize' is 0, choose an appropriate chunk size.
2128 	 * Then possibly allocate new storage space.
2129 	 * Then quiesce, copy bits, replace bitmap, and re-start
2130 	 *
2131 	 * This function is called both to set up the initial bitmap
2132 	 * and to resize the bitmap while the array is active.
2133 	 * If this happens as a result of the array being resized,
2134 	 * chunksize will be zero, and we need to choose a suitable
2135 	 * chunksize, otherwise we use what we are given.
2136 	 */
2137 	struct bitmap_storage store;
2138 	struct bitmap_counts old_counts;
2139 	unsigned long chunks;
2140 	sector_t block;
2141 	sector_t old_blocks, new_blocks;
2142 	int chunkshift;
2143 	int ret = 0;
2144 	long pages;
2145 	struct bitmap_page *new_bp;
2146 
2147 	if (bitmap->storage.file && !init) {
2148 		pr_info("md: cannot resize file-based bitmap\n");
2149 		return -EINVAL;
2150 	}
2151 
2152 	if (chunksize == 0) {
2153 		/* If there is enough space, leave the chunk size unchanged,
2154 		 * else increase by a factor of two until there is enough space.
2155 		 */
2156 		long bytes;
2157 		long space = bitmap->mddev->bitmap_info.space;
2158 
2159 		if (space == 0) {
2160 			/* We don't know how much space there is, so limit
2161 			 * to current size - in sectors.
2162 			 */
2163 			bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
2164 			if (!bitmap->mddev->bitmap_info.external)
2165 				bytes += sizeof(bitmap_super_t);
2166 			space = DIV_ROUND_UP(bytes, 512);
2167 			bitmap->mddev->bitmap_info.space = space;
2168 		}
2169 		chunkshift = bitmap->counts.chunkshift;
2170 		chunkshift--;
2171 		do {
2172 			/* 'chunkshift' is shift from block size to chunk size */
2173 			chunkshift++;
2174 			chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2175 			bytes = DIV_ROUND_UP(chunks, 8);
2176 			if (!bitmap->mddev->bitmap_info.external)
2177 				bytes += sizeof(bitmap_super_t);
2178 		} while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
2179 			(BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
2180 	} else
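		/* chunksize is a power of two, so ffz(~chunksize) is its
		 * log2; subtracting BITMAP_BLOCK_SHIFT gives the shift
		 * from block size to chunk size.
		 */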
2181 		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
2182 
2183 	chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2184 	memset(&store, 0, sizeof(store));
2185 	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
2186 		ret = md_bitmap_storage_alloc(&store, chunks,
2187 					      !bitmap->mddev->bitmap_info.external,
2188 					      mddev_is_clustered(bitmap->mddev)
2189 					      ? bitmap->cluster_slot : 0);
2190 	if (ret) {
2191 		md_bitmap_file_unmap(&store);
2192 		goto err;
2193 	}
2194 
2195 	pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
2196 
2197 	new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
2198 	ret = -ENOMEM;
2199 	if (!new_bp) {
2200 		md_bitmap_file_unmap(&store);
2201 		goto err;
2202 	}
2203 
2204 	if (!init)
2205 		bitmap->mddev->pers->quiesce(bitmap->mddev, 1);
2206 
2207 	store.file = bitmap->storage.file;
2208 	bitmap->storage.file = NULL;
2209 
2210 	if (store.sb_page && bitmap->storage.sb_page)
2211 		memcpy(page_address(store.sb_page),
2212 		       page_address(bitmap->storage.sb_page),
2213 		       sizeof(bitmap_super_t));
2214 	spin_lock_irq(&bitmap->counts.lock);
2215 	md_bitmap_file_unmap(&bitmap->storage);
2216 	bitmap->storage = store;
2217 
2218 	old_counts = bitmap->counts;
2219 	bitmap->counts.bp = new_bp;
2220 	bitmap->counts.pages = pages;
2221 	bitmap->counts.missing_pages = pages;
2222 	bitmap->counts.chunkshift = chunkshift;
2223 	bitmap->counts.chunks = chunks;
2224 	bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
2225 						     BITMAP_BLOCK_SHIFT);
2226 
2227 	blocks = min(old_counts.chunks << old_counts.chunkshift,
2228 		     chunks << chunkshift);
2229 
2230 	/* For cluster raid, need to pre-allocate bitmap */
2231 	if (mddev_is_clustered(bitmap->mddev)) {
2232 		unsigned long page;
2233 		for (page = 0; page < pages; page++) {
2234 			ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
2235 			if (ret) {
2236 				unsigned long k;
2237 
2238 				/* deallocate the page memory */
2239 				for (k = 0; k < page; k++) {
2240 					kfree(new_bp[k].map);
2241 				}
2242 				kfree(new_bp);
2243 
2244 				/* restore some fields from old_counts */
2245 				bitmap->counts.bp = old_counts.bp;
2246 				bitmap->counts.pages = old_counts.pages;
2247 				bitmap->counts.missing_pages = old_counts.pages;
2248 				bitmap->counts.chunkshift = old_counts.chunkshift;
2249 				bitmap->counts.chunks = old_counts.chunks;
2250 				bitmap->mddev->bitmap_info.chunksize =
2251 					1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
2252 				blocks = old_counts.chunks << old_counts.chunkshift;
2253 				pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
2254 				break;
2255 			} else
2256 				bitmap->counts.bp[page].count += 1;
2257 		}
2258 	}
2259 
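	/* Carry the NEEDED state over from the old counters, re-dirtying
	 * the on-disk bits for any chunk that is still out of sync.
	 */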
2260 	for (block = 0; block < blocks; ) {
2261 		bitmap_counter_t *bmc_old, *bmc_new;
2262 		int set;
2263 
2264 		bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
2265 		set = bmc_old && NEEDED(*bmc_old);
2266 
2267 		if (set) {
2268 			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
2269 			if (bmc_new) {
2270 				if (*bmc_new == 0) {
2271 					/* need to set on-disk bits too. */
2272 					sector_t end = block + new_blocks;
2273 					sector_t start = block >> chunkshift;
2274 
2275 					start <<= chunkshift;
2276 					while (start < end) {
2277 						md_bitmap_file_set_bit(bitmap, block);
2278 						start += 1 << chunkshift;
2279 					}
2280 					*bmc_new = 2;
2281 					md_bitmap_count_page(&bitmap->counts, block, 1);
2282 					md_bitmap_set_pending(&bitmap->counts, block);
2283 				}
2284 				*bmc_new |= NEEDED_MASK;
2285 			}
2286 			if (new_blocks < old_blocks)
2287 				old_blocks = new_blocks;
2288 		}
2289 		block += old_blocks;
2290 	}
2291 
2292 	if (bitmap->counts.bp != old_counts.bp) {
2293 		unsigned long k;
2294 		for (k = 0; k < old_counts.pages; k++)
2295 			if (!old_counts.bp[k].hijacked)
2296 				kfree(old_counts.bp[k].map);
2297 		kfree(old_counts.bp);
2298 	}
2299 
2300 	if (!init) {
2301 		int i;
2302 		while (block < (chunks << chunkshift)) {
2303 			bitmap_counter_t *bmc;
2304 			bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
2305 			if (bmc) {
2306 				/* new space.  It needs to be resynced, so
2307 				 * we set NEEDED_MASK.
2308 				 */
2309 				if (*bmc == 0) {
2310 					*bmc = NEEDED_MASK | 2;
2311 					md_bitmap_count_page(&bitmap->counts, block, 1);
2312 					md_bitmap_set_pending(&bitmap->counts, block);
2313 				}
2314 			}
2315 			block += new_blocks;
2316 		}
2317 		for (i = 0; i < bitmap->storage.file_pages; i++)
2318 			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
2319 	}
2320 	spin_unlock_irq(&bitmap->counts.lock);
2321 
2322 	if (!init) {
2323 		md_bitmap_unplug(bitmap);
2324 		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
2325 	}
2326 	ret = 0;
2327 err:
2328 	return ret;
2329 }
2330 EXPORT_SYMBOL_GPL(md_bitmap_resize);
2331 
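/*
 * 'bitmap/location' reports where the bitmap lives: "file" for a
 * file-backed bitmap, a signed sector offset for an internal one, or
 * "none".  Writing "none" removes an existing bitmap; writing an
 * offset creates one there.
 */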
2332 static ssize_t
2333 location_show(struct mddev *mddev, char *page)
2334 {
2335 	ssize_t len;
2336 	if (mddev->bitmap_info.file)
2337 		len = sprintf(page, "file");
2338 	else if (mddev->bitmap_info.offset)
2339 		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
2340 	else
2341 		len = sprintf(page, "none");
2342 	len += sprintf(page+len, "\n");
2343 	return len;
2344 }
2345 
2346 static ssize_t
2347 location_store(struct mddev *mddev, const char *buf, size_t len)
2348 {
2349 	int rv;
2350 
2351 	rv = mddev_lock(mddev);
2352 	if (rv)
2353 		return rv;
2354 	if (mddev->pers) {
2355 		if (!mddev->pers->quiesce) {
2356 			rv = -EBUSY;
2357 			goto out;
2358 		}
2359 		if (mddev->recovery || mddev->sync_thread) {
2360 			rv = -EBUSY;
2361 			goto out;
2362 		}
2363 	}
2364 
2365 	if (mddev->bitmap || mddev->bitmap_info.file ||
2366 	    mddev->bitmap_info.offset) {
2367 		/* bitmap already configured.  Only option is to clear it */
2368 		if (strncmp(buf, "none", 4) != 0) {
2369 			rv = -EBUSY;
2370 			goto out;
2371 		}
2372 		if (mddev->pers) {
2373 			mddev_suspend(mddev);
2374 			md_bitmap_destroy(mddev);
2375 			mddev_resume(mddev);
2376 		}
2377 		mddev->bitmap_info.offset = 0;
2378 		if (mddev->bitmap_info.file) {
2379 			struct file *f = mddev->bitmap_info.file;
2380 			mddev->bitmap_info.file = NULL;
2381 			fput(f);
2382 		}
2383 	} else {
2384 		/* No bitmap, OK to set a location */
2385 		long long offset;
2386 		if (strncmp(buf, "none", 4) == 0)
2387 			/* nothing to be done */;
2388 		else if (strncmp(buf, "file:", 5) == 0) {
2389 			/* Not supported yet */
2390 			rv = -EINVAL;
2391 			goto out;
2392 		} else {
2393 			if (buf[0] == '+')
2394 				rv = kstrtoll(buf+1, 10, &offset);
2395 			else
2396 				rv = kstrtoll(buf, 10, &offset);
2397 			if (rv)
2398 				goto out;
2399 			if (offset == 0) {
2400 				rv = -EINVAL;
2401 				goto out;
2402 			}
2403 			if (mddev->bitmap_info.external == 0 &&
2404 			    mddev->major_version == 0 &&
2405 			    offset != mddev->bitmap_info.default_offset) {
2406 				rv = -EINVAL;
2407 				goto out;
2408 			}
2409 			mddev->bitmap_info.offset = offset;
2410 			if (mddev->pers) {
2411 				struct bitmap *bitmap;
2412 				bitmap = md_bitmap_create(mddev, -1);
2413 				mddev_suspend(mddev);
2414 				if (IS_ERR(bitmap))
2415 					rv = PTR_ERR(bitmap);
2416 				else {
2417 					mddev->bitmap = bitmap;
2418 					rv = md_bitmap_load(mddev);
2419 					if (rv)
2420 						mddev->bitmap_info.offset = 0;
2421 				}
2422 				if (rv) {
2423 					md_bitmap_destroy(mddev);
2424 					mddev_resume(mddev);
2425 					goto out;
2426 				}
2427 				mddev_resume(mddev);
2428 			}
2429 		}
2430 	}
2431 	if (!mddev->external) {
2432 		/* Ensure new bitmap info is stored in
2433 		 * metadata promptly.
2434 		 */
2435 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2436 		md_wakeup_thread(mddev->thread);
2437 	}
2438 	rv = 0;
2439 out:
2440 	mddev_unlock(mddev);
2441 	if (rv)
2442 		return rv;
2443 	return len;
2444 }
2445 
2446 static struct md_sysfs_entry bitmap_location =
2447 __ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
2448 
2449 /* 'bitmap/space' is the space available at 'location' for the
2450  * bitmap.  This allows the kernel to know when it is safe to
2451  * resize the bitmap to match a resized array.
2452  */
2453 static ssize_t
2454 space_show(struct mddev *mddev, char *page)
2455 {
2456 	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
2457 }
2458 
2459 static ssize_t
2460 space_store(struct mddev *mddev, const char *buf, size_t len)
2461 {
2462 	unsigned long sectors;
2463 	int rv;
2464 
2465 	rv = kstrtoul(buf, 10, &sectors);
2466 	if (rv)
2467 		return rv;
2468 
2469 	if (sectors == 0)
2470 		return -EINVAL;
2471 
2472 	if (mddev->bitmap &&
2473 	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
2474 		return -EFBIG; /* Bitmap is too big for this small space */
2475 
2476 	/* could make sure it isn't too big, but that isn't really
2477 	 * needed - user-space should be careful.
2478 	 */
2479 	mddev->bitmap_info.space = sectors;
2480 	return len;
2481 }
2482 
2483 static struct md_sysfs_entry bitmap_space =
2484 __ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
2485 
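/*
 * 'bitmap/time_base' sets the bitmap daemon's wakeup interval.  It is
 * parsed as seconds with an optional decimal fraction and stored in
 * jiffies in mddev->bitmap_info.daemon_sleep.
 */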
2486 static ssize_t
2487 timeout_show(struct mddev *mddev, char *page)
2488 {
2489 	ssize_t len;
2490 	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
2491 	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
2492 
2493 	len = sprintf(page, "%lu", secs);
2494 	if (jifs)
2495 		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
2496 	len += sprintf(page+len, "\n");
2497 	return len;
2498 }
2499 
2500 static ssize_t
2501 timeout_store(struct mddev *mddev, const char *buf, size_t len)
2502 {
2503 	/* timeout can be set at any time */
2504 	unsigned long timeout;
2505 	int rv = strict_strtoul_scaled(buf, &timeout, 4);
2506 	if (rv)
2507 		return rv;
2508 
2509 	/* just to make sure we don't overflow... */
2510 	if (timeout >= LONG_MAX / HZ)
2511 		return -EINVAL;
2512 
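	/* strict_strtoul_scaled() returned the value scaled by 10^4
	 * (i.e. in units of 100us); convert it to jiffies.
	 */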
2513 	timeout = timeout * HZ / 10000;
2514 
2515 	if (timeout >= MAX_SCHEDULE_TIMEOUT)
2516 		timeout = MAX_SCHEDULE_TIMEOUT-1;
2517 	if (timeout < 1)
2518 		timeout = 1;
2519 
2520 	mddev->bitmap_info.daemon_sleep = timeout;
2521 	mddev_set_timeout(mddev, timeout, false);
2522 	md_wakeup_thread(mddev->thread);
2523 
2524 	return len;
2525 }
2526 
2527 static struct md_sysfs_entry bitmap_timeout =
2528 __ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
2529 
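/*
 * 'bitmap/backlog' is max_write_behind: the maximum number of
 * outstanding write-behind writes allowed on write-mostly devices.
 */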
2530 static ssize_t
2531 backlog_show(struct mddev *mddev, char *page)
2532 {
2533 	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
2534 }
2535 
2536 static ssize_t
2537 backlog_store(struct mddev *mddev, const char *buf, size_t len)
2538 {
2539 	unsigned long backlog;
2540 	unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
2541 	struct md_rdev *rdev;
2542 	bool has_write_mostly = false;
2543 	int rv = kstrtoul(buf, 10, &backlog);
2544 	if (rv)
2545 		return rv;
2546 	if (backlog > COUNTER_MAX)
2547 		return -EINVAL;
2548 
2549 	rv = mddev_lock(mddev);
2550 	if (rv)
2551 		return rv;
2552 
2553 	/*
2554 	 * Without a write-mostly device, it doesn't make sense to set a
2555 	 * backlog for max_write_behind.
2556 	 */
2557 	rdev_for_each(rdev, mddev) {
2558 		if (test_bit(WriteMostly, &rdev->flags)) {
2559 			has_write_mostly = true;
2560 			break;
2561 		}
2562 	}
2563 	if (!has_write_mostly) {
2564 		pr_warn_ratelimited("%s: can't set backlog, no write mostly device available\n",
2565 				    mdname(mddev));
2566 		mddev_unlock(mddev);
2567 		return -EINVAL;
2568 	}
2569 
2570 	mddev->bitmap_info.max_write_behind = backlog;
2571 	if (!backlog && mddev->serial_info_pool) {
2572 		/* serial_info_pool is not needed if backlog is zero */
2573 		if (!mddev->serialize_policy)
2574 			mddev_destroy_serial_pool(mddev, NULL, false);
2575 	} else if (backlog && !mddev->serial_info_pool) {
2576 		/* serial_info_pool is needed since backlog is not zero */
2577 		rdev_for_each(rdev, mddev)
2578 			mddev_create_serial_pool(mddev, rdev, false);
2579 	}
2580 	if (old_mwb != backlog)
2581 		md_bitmap_update_sb(mddev->bitmap);
2582 
2583 	mddev_unlock(mddev);
2584 	return len;
2585 }
2586 
2587 static struct md_sysfs_entry bitmap_backlog =
2588 __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
2589 
2590 static ssize_t
2591 chunksize_show(struct mddev *mddev, char *page)
2592 {
2593 	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
2594 }
2595 
2596 static ssize_t
2597 chunksize_store(struct mddev *mddev, const char *buf, size_t len)
2598 {
2599 	/* Can only be changed when no bitmap is active */
2600 	int rv;
2601 	unsigned long csize;
2602 	if (mddev->bitmap)
2603 		return -EBUSY;
2604 	rv = kstrtoul(buf, 10, &csize);
2605 	if (rv)
2606 		return rv;
2607 	if (csize < 512 ||
2608 	    !is_power_of_2(csize))
2609 		return -EINVAL;
2610 	if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
2611 		sizeof(((bitmap_super_t *)0)->chunksize))))
2612 		return -EOVERFLOW;
2613 	mddev->bitmap_info.chunksize = csize;
2614 	return len;
2615 }
2616 
2617 static struct md_sysfs_entry bitmap_chunksize =
2618 __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
2619 
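/*
 * 'bitmap/metadata' selects whether the bitmap superblock is managed
 * by the kernel ("internal", or "clustered" for cluster raid) or by
 * user space ("external").  It can only be changed while no bitmap is
 * configured.
 */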
2620 static ssize_t metadata_show(struct mddev *mddev, char *page)
2621 {
2622 	if (mddev_is_clustered(mddev))
2623 		return sprintf(page, "clustered\n");
2624 	return sprintf(page, "%s\n", (mddev->bitmap_info.external
2625 				      ? "external" : "internal"));
2626 }
2627 
2628 static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
2629 {
2630 	if (mddev->bitmap ||
2631 	    mddev->bitmap_info.file ||
2632 	    mddev->bitmap_info.offset)
2633 		return -EBUSY;
2634 	if (strncmp(buf, "external", 8) == 0)
2635 		mddev->bitmap_info.external = 1;
2636 	else if ((strncmp(buf, "internal", 8) == 0) ||
2637 			(strncmp(buf, "clustered", 9) == 0))
2638 		mddev->bitmap_info.external = 0;
2639 	else
2640 		return -EINVAL;
2641 	return len;
2642 }
2643 
2644 static struct md_sysfs_entry bitmap_metadata =
2645 __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
2646 
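/*
 * 'bitmap/can_clear' reports whether bits may be cleared as regions
 * come back into sync.  Writing "false" sets need_sync; writing
 * "true" clears it, which is only permitted when the array is not
 * degraded.
 */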
2647 static ssize_t can_clear_show(struct mddev *mddev, char *page)
2648 {
2649 	int len;
2650 	spin_lock(&mddev->lock);
2651 	if (mddev->bitmap)
2652 		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
2653 					     "false" : "true"));
2654 	else
2655 		len = sprintf(page, "\n");
2656 	spin_unlock(&mddev->lock);
2657 	return len;
2658 }
2659 
2660 static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
2661 {
2662 	if (mddev->bitmap == NULL)
2663 		return -ENOENT;
2664 	if (strncmp(buf, "false", 5) == 0)
2665 		mddev->bitmap->need_sync = 1;
2666 	else if (strncmp(buf, "true", 4) == 0) {
2667 		if (mddev->degraded)
2668 			return -EBUSY;
2669 		mddev->bitmap->need_sync = 0;
2670 	} else
2671 		return -EINVAL;
2672 	return len;
2673 }
2674 
2675 static struct md_sysfs_entry bitmap_can_clear =
2676 __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
2677 
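/*
 * 'bitmap/max_backlog_used' shows the peak number of concurrent
 * write-behind requests seen so far; writing anything resets the
 * counter to zero.
 */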
2678 static ssize_t
2679 behind_writes_used_show(struct mddev *mddev, char *page)
2680 {
2681 	ssize_t ret;
2682 	spin_lock(&mddev->lock);
2683 	if (mddev->bitmap == NULL)
2684 		ret = sprintf(page, "0\n");
2685 	else
2686 		ret = sprintf(page, "%lu\n",
2687 			      mddev->bitmap->behind_writes_used);
2688 	spin_unlock(&mddev->lock);
2689 	return ret;
2690 }
2691 
2692 static ssize_t
2693 behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
2694 {
2695 	if (mddev->bitmap)
2696 		mddev->bitmap->behind_writes_used = 0;
2697 	return len;
2698 }
2699 
2700 static struct md_sysfs_entry max_backlog_used =
2701 __ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
2702        behind_writes_used_show, behind_writes_used_reset);
2703 
2704 static struct attribute *md_bitmap_attrs[] = {
2705 	&bitmap_location.attr,
2706 	&bitmap_space.attr,
2707 	&bitmap_timeout.attr,
2708 	&bitmap_backlog.attr,
2709 	&bitmap_chunksize.attr,
2710 	&bitmap_metadata.attr,
2711 	&bitmap_can_clear.attr,
2712 	&max_backlog_used.attr,
2713 	NULL
2714 };
2715 const struct attribute_group md_bitmap_group = {
2716 	.name = "bitmap",
2717 	.attrs = md_bitmap_attrs,
2718 };
2719