xref: /linux/drivers/md/md-bitmap.c (revision 2f804aca48322f02a8f44cca540663845ee80fb1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
4  *
5  * bitmap_create  - sets up the bitmap structure
6  * bitmap_destroy - destroys the bitmap structure
7  *
8  * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
9  * - added disk storage for bitmap
10  * - changes to allow various bitmap chunk sizes
11  */
12 
13 /*
14  * Still to do:
15  *
16  * flush after percent set rather than just time based. (maybe both).
17  */
18 
19 #include <linux/blkdev.h>
20 #include <linux/module.h>
21 #include <linux/errno.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/timer.h>
25 #include <linux/sched.h>
26 #include <linux/list.h>
27 #include <linux/file.h>
28 #include <linux/mount.h>
29 #include <linux/buffer_head.h>
30 #include <linux/seq_file.h>
31 #include <trace/events/block.h>
32 #include "md.h"
33 #include "md-bitmap.h"
34 
35 static inline char *bmname(struct bitmap *bitmap)
36 {
37 	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
38 }
39 
40 /*
41  * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
42  *
43  * 1) check to see if this page is allocated, if it's not then try to alloc
44  * 2) if the alloc fails, set the page's hijacked flag so we'll use the
45  *    page pointer directly as a counter
46  *
47  * if we find our page, we increment the page's refcount so that it stays
48  * allocated while we're using it
49  */
50 static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
51 			       unsigned long page, int create, int no_hijack)
52 __releases(bitmap->lock)
53 __acquires(bitmap->lock)
54 {
55 	unsigned char *mappage;
56 
57 	if (page >= bitmap->pages) {
58 		/* This can happen if bitmap_start_sync goes beyond
59 		 * End-of-device while looking for a whole page.
60 		 * It is harmless.
61 		 */
62 		return -EINVAL;
63 	}
64 
65 	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
66 		return 0;
67 
68 	if (bitmap->bp[page].map) /* page is already allocated, just return */
69 		return 0;
70 
71 	if (!create)
72 		return -ENOENT;
73 
74 	/* this page has not been allocated yet */
75 
76 	spin_unlock_irq(&bitmap->lock);
77 	/* It is possible that this is being called inside a
78 	 * prepare_to_wait/finish_wait loop from raid5c:make_request().
79 	 * In general it is not permitted to sleep in that context as it
80 	 * can cause the loop to spin freely.
81 	 * That doesn't apply here as we can only reach this point
82 	 * once with any loop.
83 	 * When this function completes, either bp[page].map or
84 	 * bp[page].hijacked.  In either case, this function will
85 	 * abort before getting to this point again.  So there is
86 	 * no risk of a free-spin, and so it is safe to assert
87 	 * that sleeping here is allowed.
88 	 */
89 	sched_annotate_sleep();
90 	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
91 	spin_lock_irq(&bitmap->lock);
92 
93 	if (mappage == NULL) {
94 		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
95 		/* We don't support hijack for cluster raid */
96 		if (no_hijack)
97 			return -ENOMEM;
98 		/* failed - set the hijacked flag so that we can use the
99 		 * pointer as a counter */
100 		if (!bitmap->bp[page].map)
101 			bitmap->bp[page].hijacked = 1;
102 	} else if (bitmap->bp[page].map ||
103 		   bitmap->bp[page].hijacked) {
104 		/* somebody beat us to getting the page */
105 		kfree(mappage);
106 	} else {
107 
108 		/* no page was in place and we have one, so install it */
109 
110 		bitmap->bp[page].map = mappage;
111 		bitmap->missing_pages--;
112 	}
113 	return 0;
114 }
115 
116 /* if page is completely empty, put it back on the free list, or dealloc it */
117 /* if page was hijacked, unmark the flag so it might get alloced next time */
118 /* Note: lock should be held when calling this */
119 static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
120 {
121 	char *ptr;
122 
123 	if (bitmap->bp[page].count) /* page is still busy */
124 		return;
125 
126 	/* page is no longer in use, it can be released */
127 
128 	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
129 		bitmap->bp[page].hijacked = 0;
130 		bitmap->bp[page].map = NULL;
131 	} else {
132 		/* normal case, free the page */
133 		ptr = bitmap->bp[page].map;
134 		bitmap->bp[page].map = NULL;
135 		bitmap->missing_pages++;
136 		kfree(ptr);
137 	}
138 }
139 
140 /*
141  * bitmap file handling - read and write the bitmap file and its superblock
142  */
143 
144 /*
145  * basic page I/O operations
146  */
147 
148 /* IO operations when bitmap is stored near all superblocks */
149 static int read_sb_page(struct mddev *mddev, loff_t offset,
150 			struct page *page,
151 			unsigned long index, int size)
152 {
153 	/* choose a good rdev and read the page from there */
154 
155 	struct md_rdev *rdev;
156 	sector_t target;
157 
158 	rdev_for_each(rdev, mddev) {
159 		if (! test_bit(In_sync, &rdev->flags)
160 		    || test_bit(Faulty, &rdev->flags)
161 		    || test_bit(Bitmap_sync, &rdev->flags))
162 			continue;
163 
164 		target = offset + index * (PAGE_SIZE/512);
165 
166 		if (sync_page_io(rdev, target,
167 				 roundup(size, bdev_logical_block_size(rdev->bdev)),
168 				 page, REQ_OP_READ, true)) {
169 			page->index = index;
170 			return 0;
171 		}
172 	}
173 	return -EIO;
174 }
175 
176 static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
177 {
178 	/* Iterate the disks of an mddev, using rcu to protect access to the
179 	 * linked list, and raising the refcount of devices we return to ensure
180 	 * they don't disappear while in use.
181 	 * As devices are only added or removed when raid_disk is < 0 and
182 	 * nr_pending is 0 and In_sync is clear, the entries we return will
183 	 * still be in the same position on the list when we re-enter
184 	 * list_for_each_entry_continue_rcu.
185 	 *
186 	 * Note that if entered with 'rdev == NULL' to start at the
187 	 * beginning, we temporarily assign 'rdev' to an address which
188 	 * isn't really an rdev, but which can be used by
189 	 * list_for_each_entry_continue_rcu() to find the first entry.
190 	 */
191 	rcu_read_lock();
192 	if (rdev == NULL)
193 		/* start at the beginning */
194 		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
195 	else {
196 		/* release the previous rdev and start from there. */
197 		rdev_dec_pending(rdev, mddev);
198 	}
199 	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
200 		if (rdev->raid_disk >= 0 &&
201 		    !test_bit(Faulty, &rdev->flags)) {
202 			/* this is a usable devices */
203 			atomic_inc(&rdev->nr_pending);
204 			rcu_read_unlock();
205 			return rdev;
206 		}
207 	}
208 	rcu_read_unlock();
209 	return NULL;
210 }
211 
212 static unsigned int optimal_io_size(struct block_device *bdev,
213 				    unsigned int last_page_size,
214 				    unsigned int io_size)
215 {
216 	if (bdev_io_opt(bdev) > bdev_logical_block_size(bdev))
217 		return roundup(last_page_size, bdev_io_opt(bdev));
218 	return io_size;
219 }
220 
221 static unsigned int bitmap_io_size(unsigned int io_size, unsigned int opt_size,
222 				   loff_t start, loff_t boundary)
223 {
224 	if (io_size != opt_size &&
225 	    start + opt_size / SECTOR_SIZE <= boundary)
226 		return opt_size;
227 	if (start + io_size / SECTOR_SIZE <= boundary)
228 		return io_size;
229 
230 	/* Overflows boundary */
231 	return 0;
232 }
233 
234 static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
235 			   struct page *page)
236 {
237 	struct block_device *bdev;
238 	struct mddev *mddev = bitmap->mddev;
239 	struct bitmap_storage *store = &bitmap->storage;
240 	loff_t sboff, offset = mddev->bitmap_info.offset;
241 	sector_t ps, doff;
242 	unsigned int size = PAGE_SIZE;
243 	unsigned int opt_size = PAGE_SIZE;
244 
245 	bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
246 	if (page->index == store->file_pages - 1) {
247 		unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);
248 
249 		if (last_page_size == 0)
250 			last_page_size = PAGE_SIZE;
251 		size = roundup(last_page_size, bdev_logical_block_size(bdev));
252 		opt_size = optimal_io_size(bdev, last_page_size, size);
253 	}
254 
255 	ps = page->index * PAGE_SIZE / SECTOR_SIZE;
256 	sboff = rdev->sb_start + offset;
257 	doff = rdev->data_offset;
258 
259 	/* Just make sure we aren't corrupting data or metadata */
260 	if (mddev->external) {
261 		/* Bitmap could be anywhere. */
262 		if (sboff + ps > doff &&
263 		    sboff < (doff + mddev->dev_sectors + PAGE_SIZE / SECTOR_SIZE))
264 			return -EINVAL;
265 	} else if (offset < 0) {
266 		/* DATA  BITMAP METADATA  */
267 		size = bitmap_io_size(size, opt_size, offset + ps, 0);
268 		if (size == 0)
269 			/* bitmap runs in to metadata */
270 			return -EINVAL;
271 
272 		if (doff + mddev->dev_sectors > sboff)
273 			/* data runs in to bitmap */
274 			return -EINVAL;
275 	} else if (rdev->sb_start < rdev->data_offset) {
276 		/* METADATA BITMAP DATA */
277 		size = bitmap_io_size(size, opt_size, sboff + ps, doff);
278 		if (size == 0)
279 			/* bitmap runs in to data */
280 			return -EINVAL;
281 	} else {
282 		/* DATA METADATA BITMAP - no problems */
283 	}
284 
285 	md_super_write(mddev, rdev, sboff + ps, (int) size, page);
286 	return 0;
287 }
288 
289 static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
290 {
291 	struct md_rdev *rdev;
292 	struct mddev *mddev = bitmap->mddev;
293 	int ret;
294 
295 	do {
296 		rdev = NULL;
297 		while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
298 			ret = __write_sb_page(rdev, bitmap, page);
299 			if (ret)
300 				return ret;
301 		}
302 	} while (wait && md_super_wait(mddev) < 0);
303 
304 	return 0;
305 }
306 
307 static void md_bitmap_file_kick(struct bitmap *bitmap);
308 /*
309  * write out a page to a file
310  */
311 static void write_page(struct bitmap *bitmap, struct page *page, int wait)
312 {
313 	struct buffer_head *bh;
314 
315 	if (bitmap->storage.file == NULL) {
316 		switch (write_sb_page(bitmap, page, wait)) {
317 		case -EINVAL:
318 			set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
319 		}
320 	} else {
321 
322 		bh = page_buffers(page);
323 
324 		while (bh && bh->b_blocknr) {
325 			atomic_inc(&bitmap->pending_writes);
326 			set_buffer_locked(bh);
327 			set_buffer_mapped(bh);
328 			submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
329 			bh = bh->b_this_page;
330 		}
331 
332 		if (wait)
333 			wait_event(bitmap->write_wait,
334 				   atomic_read(&bitmap->pending_writes)==0);
335 	}
336 	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
337 		md_bitmap_file_kick(bitmap);
338 }
339 
340 static void end_bitmap_write(struct buffer_head *bh, int uptodate)
341 {
342 	struct bitmap *bitmap = bh->b_private;
343 
344 	if (!uptodate)
345 		set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
346 	if (atomic_dec_and_test(&bitmap->pending_writes))
347 		wake_up(&bitmap->write_wait);
348 }
349 
350 static void free_buffers(struct page *page)
351 {
352 	struct buffer_head *bh;
353 
354 	if (!PagePrivate(page))
355 		return;
356 
357 	bh = page_buffers(page);
358 	while (bh) {
359 		struct buffer_head *next = bh->b_this_page;
360 		free_buffer_head(bh);
361 		bh = next;
362 	}
363 	detach_page_private(page);
364 	put_page(page);
365 }
366 
367 /* read a page from a file.
368  * We both read the page, and attach buffers to the page to record the
369  * address of each block (using bmap).  These addresses will be used
370  * to write the block later, completely bypassing the filesystem.
371  * This usage is similar to how swap files are handled, and allows us
372  * to write to a file with no concerns of memory allocation failing.
373  */
374 static int read_page(struct file *file, unsigned long index,
375 		     struct bitmap *bitmap,
376 		     unsigned long count,
377 		     struct page *page)
378 {
379 	int ret = 0;
380 	struct inode *inode = file_inode(file);
381 	struct buffer_head *bh;
382 	sector_t block, blk_cur;
383 	unsigned long blocksize = i_blocksize(inode);
384 
385 	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
386 		 (unsigned long long)index << PAGE_SHIFT);
387 
388 	bh = alloc_page_buffers(page, blocksize, false);
389 	if (!bh) {
390 		ret = -ENOMEM;
391 		goto out;
392 	}
393 	attach_page_private(page, bh);
394 	blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
395 	while (bh) {
396 		block = blk_cur;
397 
398 		if (count == 0)
399 			bh->b_blocknr = 0;
400 		else {
401 			ret = bmap(inode, &block);
402 			if (ret || !block) {
403 				ret = -EINVAL;
404 				bh->b_blocknr = 0;
405 				goto out;
406 			}
407 
408 			bh->b_blocknr = block;
409 			bh->b_bdev = inode->i_sb->s_bdev;
410 			if (count < blocksize)
411 				count = 0;
412 			else
413 				count -= blocksize;
414 
415 			bh->b_end_io = end_bitmap_write;
416 			bh->b_private = bitmap;
417 			atomic_inc(&bitmap->pending_writes);
418 			set_buffer_locked(bh);
419 			set_buffer_mapped(bh);
420 			submit_bh(REQ_OP_READ, bh);
421 		}
422 		blk_cur++;
423 		bh = bh->b_this_page;
424 	}
425 	page->index = index;
426 
427 	wait_event(bitmap->write_wait,
428 		   atomic_read(&bitmap->pending_writes)==0);
429 	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
430 		ret = -EIO;
431 out:
432 	if (ret)
433 		pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
434 		       (int)PAGE_SIZE,
435 		       (unsigned long long)index << PAGE_SHIFT,
436 		       ret);
437 	return ret;
438 }
439 
440 /*
441  * bitmap file superblock operations
442  */
443 
444 /*
445  * md_bitmap_wait_writes() should be called before writing any bitmap
446  * blocks, to ensure previous writes, particularly from
447  * md_bitmap_daemon_work(), have completed.
448  */
449 static void md_bitmap_wait_writes(struct bitmap *bitmap)
450 {
451 	if (bitmap->storage.file)
452 		wait_event(bitmap->write_wait,
453 			   atomic_read(&bitmap->pending_writes)==0);
454 	else
455 		/* Note that we ignore the return value.  The writes
456 		 * might have failed, but that would just mean that
457 		 * some bits which should be cleared haven't been,
458 		 * which is safe.  The relevant bitmap blocks will
459 		 * probably get written again, but there is no great
460 		 * loss if they aren't.
461 		 */
462 		md_super_wait(bitmap->mddev);
463 }
464 
465 
466 /* update the event counter and sync the superblock to disk */
467 void md_bitmap_update_sb(struct bitmap *bitmap)
468 {
469 	bitmap_super_t *sb;
470 
471 	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
472 		return;
473 	if (bitmap->mddev->bitmap_info.external)
474 		return;
475 	if (!bitmap->storage.sb_page) /* no superblock */
476 		return;
477 	sb = kmap_atomic(bitmap->storage.sb_page);
478 	sb->events = cpu_to_le64(bitmap->mddev->events);
479 	if (bitmap->mddev->events < bitmap->events_cleared)
480 		/* rocking back to read-only */
481 		bitmap->events_cleared = bitmap->mddev->events;
482 	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
483 	/*
484 	 * clear BITMAP_WRITE_ERROR bit to protect against the case that
485 	 * a bitmap write error occurred but the later writes succeeded.
486 	 */
487 	sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
488 	/* Just in case these have been changed via sysfs: */
489 	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
490 	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
491 	/* This might have been changed by a reshape */
492 	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
493 	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
494 	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
495 	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
496 					   bitmap_info.space);
497 	kunmap_atomic(sb);
498 	write_page(bitmap, bitmap->storage.sb_page, 1);
499 }
500 EXPORT_SYMBOL(md_bitmap_update_sb);
501 
502 /* print out the bitmap file superblock */
503 void md_bitmap_print_sb(struct bitmap *bitmap)
504 {
505 	bitmap_super_t *sb;
506 
507 	if (!bitmap || !bitmap->storage.sb_page)
508 		return;
509 	sb = kmap_atomic(bitmap->storage.sb_page);
510 	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
511 	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
512 	pr_debug("       version: %u\n", le32_to_cpu(sb->version));
513 	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
514 		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
515 		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
516 		 le32_to_cpu(*(__le32 *)(sb->uuid+8)),
517 		 le32_to_cpu(*(__le32 *)(sb->uuid+12)));
518 	pr_debug("        events: %llu\n",
519 		 (unsigned long long) le64_to_cpu(sb->events));
520 	pr_debug("events cleared: %llu\n",
521 		 (unsigned long long) le64_to_cpu(sb->events_cleared));
522 	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
523 	pr_debug("     chunksize: %u B\n", le32_to_cpu(sb->chunksize));
524 	pr_debug("  daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
525 	pr_debug("     sync size: %llu KB\n",
526 		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
527 	pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
528 	kunmap_atomic(sb);
529 }
530 
531 /*
532  * bitmap_new_disk_sb
533  * @bitmap
534  *
535  * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
536  * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
537  * This function verifies 'bitmap_info' and populates the on-disk bitmap
538  * structure, which is to be written to disk.
539  *
540  * Returns: 0 on success, -Exxx on error
541  */
542 static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
543 {
544 	bitmap_super_t *sb;
545 	unsigned long chunksize, daemon_sleep, write_behind;
546 
547 	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
548 	if (bitmap->storage.sb_page == NULL)
549 		return -ENOMEM;
550 	bitmap->storage.sb_page->index = 0;
551 
552 	sb = kmap_atomic(bitmap->storage.sb_page);
553 
554 	sb->magic = cpu_to_le32(BITMAP_MAGIC);
555 	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
556 
557 	chunksize = bitmap->mddev->bitmap_info.chunksize;
558 	BUG_ON(!chunksize);
559 	if (!is_power_of_2(chunksize)) {
560 		kunmap_atomic(sb);
561 		pr_warn("bitmap chunksize not a power of 2\n");
562 		return -EINVAL;
563 	}
564 	sb->chunksize = cpu_to_le32(chunksize);
565 
566 	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
567 	if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
568 		pr_debug("Choosing daemon_sleep default (5 sec)\n");
569 		daemon_sleep = 5 * HZ;
570 	}
571 	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
572 	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
573 
574 	/*
575 	 * FIXME: write_behind for RAID1.  If not specified, what
576 	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
577 	 */
578 	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
579 	if (write_behind > COUNTER_MAX)
580 		write_behind = COUNTER_MAX / 2;
581 	sb->write_behind = cpu_to_le32(write_behind);
582 	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
583 
584 	/* keep the array size field of the bitmap superblock up to date */
585 	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
586 
587 	memcpy(sb->uuid, bitmap->mddev->uuid, 16);
588 
589 	set_bit(BITMAP_STALE, &bitmap->flags);
590 	sb->state = cpu_to_le32(bitmap->flags);
591 	bitmap->events_cleared = bitmap->mddev->events;
592 	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
593 	bitmap->mddev->bitmap_info.nodes = 0;
594 
595 	kunmap_atomic(sb);
596 
597 	return 0;
598 }
599 
600 /* read the superblock from the bitmap file and initialize some bitmap fields */
601 static int md_bitmap_read_sb(struct bitmap *bitmap)
602 {
603 	char *reason = NULL;
604 	bitmap_super_t *sb;
605 	unsigned long chunksize, daemon_sleep, write_behind;
606 	unsigned long long events;
607 	int nodes = 0;
608 	unsigned long sectors_reserved = 0;
609 	int err = -EINVAL;
610 	struct page *sb_page;
611 	loff_t offset = bitmap->mddev->bitmap_info.offset;
612 
613 	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
614 		chunksize = 128 * 1024 * 1024;
615 		daemon_sleep = 5 * HZ;
616 		write_behind = 0;
617 		set_bit(BITMAP_STALE, &bitmap->flags);
618 		err = 0;
619 		goto out_no_sb;
620 	}
621 	/* page 0 is the superblock, read it... */
622 	sb_page = alloc_page(GFP_KERNEL);
623 	if (!sb_page)
624 		return -ENOMEM;
625 	bitmap->storage.sb_page = sb_page;
626 
627 re_read:
628 	/* If cluster_slot is set, the cluster is setup */
629 	if (bitmap->cluster_slot >= 0) {
630 		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;
631 
632 		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks,
633 			   (bitmap->mddev->bitmap_info.chunksize >> 9));
634 		/* bits to bytes */
635 		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
636 		/* to 4k blocks */
637 		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
638 		offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
639 		pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
640 			bitmap->cluster_slot, offset);
641 	}
642 
643 	if (bitmap->storage.file) {
644 		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
645 		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;
646 
647 		err = read_page(bitmap->storage.file, 0,
648 				bitmap, bytes, sb_page);
649 	} else {
650 		err = read_sb_page(bitmap->mddev,
651 				   offset,
652 				   sb_page,
653 				   0, sizeof(bitmap_super_t));
654 	}
655 	if (err)
656 		return err;
657 
658 	err = -EINVAL;
659 	sb = kmap_atomic(sb_page);
660 
661 	chunksize = le32_to_cpu(sb->chunksize);
662 	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
663 	write_behind = le32_to_cpu(sb->write_behind);
664 	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
665 
666 	/* verify that the bitmap-specific fields are valid */
667 	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
668 		reason = "bad magic";
669 	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
670 		 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
671 		reason = "unrecognized superblock version";
672 	else if (chunksize < 512)
673 		reason = "bitmap chunksize too small";
674 	else if (!is_power_of_2(chunksize))
675 		reason = "bitmap chunksize not a power of 2";
676 	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
677 		reason = "daemon sleep period out of range";
678 	else if (write_behind > COUNTER_MAX)
679 		reason = "write-behind limit out of range (0 - 16383)";
680 	if (reason) {
681 		pr_warn("%s: invalid bitmap file superblock: %s\n",
682 			bmname(bitmap), reason);
683 		goto out;
684 	}
685 
686 	/*
687 	 * Setup nodes/clustername only if bitmap version is
688 	 * cluster-compatible
689 	 */
690 	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
691 		nodes = le32_to_cpu(sb->nodes);
692 		strscpy(bitmap->mddev->bitmap_info.cluster_name,
693 				sb->cluster_name, 64);
694 	}
695 
696 	/* keep the array size field of the bitmap superblock up to date */
697 	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
698 
699 	if (bitmap->mddev->persistent) {
700 		/*
701 		 * We have a persistent array superblock, so compare the
702 		 * bitmap's UUID and event counter to the mddev's
703 		 */
704 		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
705 			pr_warn("%s: bitmap superblock UUID mismatch\n",
706 				bmname(bitmap));
707 			goto out;
708 		}
709 		events = le64_to_cpu(sb->events);
710 		if (!nodes && (events < bitmap->mddev->events)) {
711 			pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
712 				bmname(bitmap), events,
713 				(unsigned long long) bitmap->mddev->events);
714 			set_bit(BITMAP_STALE, &bitmap->flags);
715 		}
716 	}
717 
718 	/* assign fields using values from superblock */
719 	bitmap->flags |= le32_to_cpu(sb->state);
720 	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
721 		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
722 	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
723 	err = 0;
724 
725 out:
726 	kunmap_atomic(sb);
727 	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
728 		/* Assigning chunksize is required for "re_read" */
729 		bitmap->mddev->bitmap_info.chunksize = chunksize;
730 		err = md_setup_cluster(bitmap->mddev, nodes);
731 		if (err) {
732 			pr_warn("%s: Could not setup cluster service (%d)\n",
733 				bmname(bitmap), err);
734 			goto out_no_sb;
735 		}
736 		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
737 		goto re_read;
738 	}
739 
740 out_no_sb:
741 	if (err == 0) {
742 		if (test_bit(BITMAP_STALE, &bitmap->flags))
743 			bitmap->events_cleared = bitmap->mddev->events;
744 		bitmap->mddev->bitmap_info.chunksize = chunksize;
745 		bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
746 		bitmap->mddev->bitmap_info.max_write_behind = write_behind;
747 		bitmap->mddev->bitmap_info.nodes = nodes;
748 		if (bitmap->mddev->bitmap_info.space == 0 ||
749 			bitmap->mddev->bitmap_info.space > sectors_reserved)
750 			bitmap->mddev->bitmap_info.space = sectors_reserved;
751 	} else {
752 		md_bitmap_print_sb(bitmap);
753 		if (bitmap->cluster_slot < 0)
754 			md_cluster_stop(bitmap->mddev);
755 	}
756 	return err;
757 }
758 
759 /*
760  * general bitmap file operations
761  */
762 
763 /*
764  * on-disk bitmap:
765  *
766  * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
767  * file a page at a time. There's a superblock at the start of the file.
768  */
769 /* calculate the index of the page that contains this bit */
770 static inline unsigned long file_page_index(struct bitmap_storage *store,
771 					    unsigned long chunk)
772 {
773 	if (store->sb_page)
774 		chunk += sizeof(bitmap_super_t) << 3;
775 	return chunk >> PAGE_BIT_SHIFT;
776 }
777 
778 /* calculate the (bit) offset of this bit within a page */
779 static inline unsigned long file_page_offset(struct bitmap_storage *store,
780 					     unsigned long chunk)
781 {
782 	if (store->sb_page)
783 		chunk += sizeof(bitmap_super_t) << 3;
784 	return chunk & (PAGE_BITS - 1);
785 }
786 
787 /*
788  * return a pointer to the page in the filemap that contains the given bit
789  *
790  */
791 static inline struct page *filemap_get_page(struct bitmap_storage *store,
792 					    unsigned long chunk)
793 {
794 	if (file_page_index(store, chunk) >= store->file_pages)
795 		return NULL;
796 	return store->filemap[file_page_index(store, chunk)];
797 }
798 
799 static int md_bitmap_storage_alloc(struct bitmap_storage *store,
800 				   unsigned long chunks, int with_super,
801 				   int slot_number)
802 {
803 	int pnum, offset = 0;
804 	unsigned long num_pages;
805 	unsigned long bytes;
806 
807 	bytes = DIV_ROUND_UP(chunks, 8);
808 	if (with_super)
809 		bytes += sizeof(bitmap_super_t);
810 
811 	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
812 	offset = slot_number * num_pages;
813 
814 	store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
815 				       GFP_KERNEL);
816 	if (!store->filemap)
817 		return -ENOMEM;
818 
819 	if (with_super && !store->sb_page) {
820 		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
821 		if (store->sb_page == NULL)
822 			return -ENOMEM;
823 	}
824 
825 	pnum = 0;
826 	if (store->sb_page) {
827 		store->filemap[0] = store->sb_page;
828 		pnum = 1;
829 		store->sb_page->index = offset;
830 	}
831 
832 	for ( ; pnum < num_pages; pnum++) {
833 		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
834 		if (!store->filemap[pnum]) {
835 			store->file_pages = pnum;
836 			return -ENOMEM;
837 		}
838 		store->filemap[pnum]->index = pnum + offset;
839 	}
840 	store->file_pages = pnum;
841 
842 	/* We need 4 bits per page, rounded up to a multiple
843 	 * of sizeof(unsigned long) */
844 	store->filemap_attr = kzalloc(
845 		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
846 		GFP_KERNEL);
847 	if (!store->filemap_attr)
848 		return -ENOMEM;
849 
850 	store->bytes = bytes;
851 
852 	return 0;
853 }
854 
855 static void md_bitmap_file_unmap(struct bitmap_storage *store)
856 {
857 	struct page **map, *sb_page;
858 	int pages;
859 	struct file *file;
860 
861 	file = store->file;
862 	map = store->filemap;
863 	pages = store->file_pages;
864 	sb_page = store->sb_page;
865 
866 	while (pages--)
867 		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
868 			free_buffers(map[pages]);
869 	kfree(map);
870 	kfree(store->filemap_attr);
871 
872 	if (sb_page)
873 		free_buffers(sb_page);
874 
875 	if (file) {
876 		struct inode *inode = file_inode(file);
877 		invalidate_mapping_pages(inode->i_mapping, 0, -1);
878 		fput(file);
879 	}
880 }
881 
882 /*
883  * bitmap_file_kick - if an error occurs while manipulating the bitmap file
884  * then it is no longer reliable, so we stop using it and we mark the file
885  * as failed in the superblock
886  */
887 static void md_bitmap_file_kick(struct bitmap *bitmap)
888 {
889 	char *path, *ptr = NULL;
890 
891 	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
892 		md_bitmap_update_sb(bitmap);
893 
894 		if (bitmap->storage.file) {
895 			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
896 			if (path)
897 				ptr = file_path(bitmap->storage.file,
898 					     path, PAGE_SIZE);
899 
900 			pr_warn("%s: kicking failed bitmap file %s from array!\n",
901 				bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
902 
903 			kfree(path);
904 		} else
905 			pr_warn("%s: disabling internal bitmap due to errors\n",
906 				bmname(bitmap));
907 	}
908 }
909 
910 enum bitmap_page_attr {
911 	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
912 	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
913 				    * i.e. counter is 1 or 2. */
914 	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
915 };
916 
917 static inline void set_page_attr(struct bitmap *bitmap, int pnum,
918 				 enum bitmap_page_attr attr)
919 {
920 	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
921 }
922 
923 static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
924 				   enum bitmap_page_attr attr)
925 {
926 	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
927 }
928 
929 static inline int test_page_attr(struct bitmap *bitmap, int pnum,
930 				 enum bitmap_page_attr attr)
931 {
932 	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
933 }
934 
935 static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
936 					   enum bitmap_page_attr attr)
937 {
938 	return test_and_clear_bit((pnum<<2) + attr,
939 				  bitmap->storage.filemap_attr);
940 }
941 /*
942  * bitmap_file_set_bit -- called before performing a write to the md device
943  * to set (and eventually sync) a particular bit in the bitmap file
944  *
945  * we set the bit immediately, then we record the page number so that
946  * when an unplug occurs, we can flush the dirty pages out to disk
947  */
948 static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
949 {
950 	unsigned long bit;
951 	struct page *page;
952 	void *kaddr;
953 	unsigned long chunk = block >> bitmap->counts.chunkshift;
954 	struct bitmap_storage *store = &bitmap->storage;
955 	unsigned long node_offset = 0;
956 
957 	if (mddev_is_clustered(bitmap->mddev))
958 		node_offset = bitmap->cluster_slot * store->file_pages;
959 
960 	page = filemap_get_page(&bitmap->storage, chunk);
961 	if (!page)
962 		return;
963 	bit = file_page_offset(&bitmap->storage, chunk);
964 
965 	/* set the bit */
966 	kaddr = kmap_atomic(page);
967 	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
968 		set_bit(bit, kaddr);
969 	else
970 		set_bit_le(bit, kaddr);
971 	kunmap_atomic(kaddr);
972 	pr_debug("set file bit %lu page %lu\n", bit, page->index);
973 	/* record page number so it gets flushed to disk when unplug occurs */
974 	set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
975 }
976 
977 static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
978 {
979 	unsigned long bit;
980 	struct page *page;
981 	void *paddr;
982 	unsigned long chunk = block >> bitmap->counts.chunkshift;
983 	struct bitmap_storage *store = &bitmap->storage;
984 	unsigned long node_offset = 0;
985 
986 	if (mddev_is_clustered(bitmap->mddev))
987 		node_offset = bitmap->cluster_slot * store->file_pages;
988 
989 	page = filemap_get_page(&bitmap->storage, chunk);
990 	if (!page)
991 		return;
992 	bit = file_page_offset(&bitmap->storage, chunk);
993 	paddr = kmap_atomic(page);
994 	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
995 		clear_bit(bit, paddr);
996 	else
997 		clear_bit_le(bit, paddr);
998 	kunmap_atomic(paddr);
999 	if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
1000 		set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
1001 		bitmap->allclean = 0;
1002 	}
1003 }
1004 
1005 static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
1006 {
1007 	unsigned long bit;
1008 	struct page *page;
1009 	void *paddr;
1010 	unsigned long chunk = block >> bitmap->counts.chunkshift;
1011 	int set = 0;
1012 
1013 	page = filemap_get_page(&bitmap->storage, chunk);
1014 	if (!page)
1015 		return -EINVAL;
1016 	bit = file_page_offset(&bitmap->storage, chunk);
1017 	paddr = kmap_atomic(page);
1018 	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
1019 		set = test_bit(bit, paddr);
1020 	else
1021 		set = test_bit_le(bit, paddr);
1022 	kunmap_atomic(paddr);
1023 	return set;
1024 }
1025 
1026 
1027 /* this gets called when the md device is ready to unplug its underlying
1028  * (slave) device queues -- before we let any writes go down, we need to
1029  * sync the dirty pages of the bitmap file to disk */
1030 void md_bitmap_unplug(struct bitmap *bitmap)
1031 {
1032 	unsigned long i;
1033 	int dirty, need_write;
1034 	int writing = 0;
1035 
1036 	if (!bitmap || !bitmap->storage.filemap ||
1037 	    test_bit(BITMAP_STALE, &bitmap->flags))
1038 		return;
1039 
1040 	/* look at each page to see if there are any set bits that need to be
1041 	 * flushed out to disk */
1042 	for (i = 0; i < bitmap->storage.file_pages; i++) {
1043 		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
1044 		need_write = test_and_clear_page_attr(bitmap, i,
1045 						      BITMAP_PAGE_NEEDWRITE);
1046 		if (dirty || need_write) {
1047 			if (!writing) {
1048 				md_bitmap_wait_writes(bitmap);
1049 				if (bitmap->mddev->queue)
1050 					blk_add_trace_msg(bitmap->mddev->queue,
1051 							  "md bitmap_unplug");
1052 			}
1053 			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
1054 			write_page(bitmap, bitmap->storage.filemap[i], 0);
1055 			writing = 1;
1056 		}
1057 	}
1058 	if (writing)
1059 		md_bitmap_wait_writes(bitmap);
1060 
1061 	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
1062 		md_bitmap_file_kick(bitmap);
1063 }
1064 EXPORT_SYMBOL(md_bitmap_unplug);
1065 
1066 static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
1067 /* * bitmap_init_from_disk -- called at bitmap_create time to initialize
1068  * the in-memory bitmap from the on-disk bitmap -- also, sets up the
1069  * memory mapping of the bitmap file
1070  * Special cases:
1071  *   if there's no bitmap file, or if the bitmap file had been
1072  *   previously kicked from the array, we mark all the bits as
1073  *   1's in order to cause a full resync.
1074  *
1075  * We ignore all bits for sectors that end earlier than 'start'.
1076  * This is used when reading an out-of-date bitmap...
1077  */
1078 static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1079 {
1080 	unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
1081 	struct page *page = NULL;
1082 	unsigned long bit_cnt = 0;
1083 	struct file *file;
1084 	unsigned long offset;
1085 	int outofdate;
1086 	int ret = -ENOSPC;
1087 	void *paddr;
1088 	struct bitmap_storage *store = &bitmap->storage;
1089 
1090 	chunks = bitmap->counts.chunks;
1091 	file = store->file;
1092 
1093 	if (!file && !bitmap->mddev->bitmap_info.offset) {
1094 		/* No permanent bitmap - fill with '1s'. */
1095 		store->filemap = NULL;
1096 		store->file_pages = 0;
1097 		for (i = 0; i < chunks ; i++) {
1098 			/* if the disk bit is set, set the memory bit */
1099 			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
1100 				      >= start);
1101 			md_bitmap_set_memory_bits(bitmap,
1102 						  (sector_t)i << bitmap->counts.chunkshift,
1103 						  needed);
1104 		}
1105 		return 0;
1106 	}
1107 
1108 	outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
1109 	if (outofdate)
1110 		pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));
1111 
1112 	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
1113 		pr_warn("%s: bitmap file too short %lu < %lu\n",
1114 			bmname(bitmap),
1115 			(unsigned long) i_size_read(file->f_mapping->host),
1116 			store->bytes);
1117 		goto err;
1118 	}
1119 
1120 	oldindex = ~0L;
1121 	offset = 0;
1122 	if (!bitmap->mddev->bitmap_info.external)
1123 		offset = sizeof(bitmap_super_t);
1124 
1125 	if (mddev_is_clustered(bitmap->mddev))
1126 		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));
1127 
1128 	for (i = 0; i < chunks; i++) {
1129 		int b;
1130 		index = file_page_index(&bitmap->storage, i);
1131 		bit = file_page_offset(&bitmap->storage, i);
1132 		if (index != oldindex) { /* this is a new page, read it in */
1133 			int count;
1134 			/* unmap the old page, we're done with it */
1135 			if (index == store->file_pages-1)
1136 				count = store->bytes - index * PAGE_SIZE;
1137 			else
1138 				count = PAGE_SIZE;
1139 			page = store->filemap[index];
1140 			if (file)
1141 				ret = read_page(file, index, bitmap,
1142 						count, page);
1143 			else
1144 				ret = read_sb_page(
1145 					bitmap->mddev,
1146 					bitmap->mddev->bitmap_info.offset,
1147 					page,
1148 					index + node_offset, count);
1149 
1150 			if (ret)
1151 				goto err;
1152 
1153 			oldindex = index;
1154 
1155 			if (outofdate) {
1156 				/*
1157 				 * if bitmap is out of date, dirty the
1158 				 * whole page and write it out
1159 				 */
1160 				paddr = kmap_atomic(page);
1161 				memset(paddr + offset, 0xff,
1162 				       PAGE_SIZE - offset);
1163 				kunmap_atomic(paddr);
1164 				write_page(bitmap, page, 1);
1165 
1166 				ret = -EIO;
1167 				if (test_bit(BITMAP_WRITE_ERROR,
1168 					     &bitmap->flags))
1169 					goto err;
1170 			}
1171 		}
1172 		paddr = kmap_atomic(page);
1173 		if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
1174 			b = test_bit(bit, paddr);
1175 		else
1176 			b = test_bit_le(bit, paddr);
1177 		kunmap_atomic(paddr);
1178 		if (b) {
1179 			/* if the disk bit is set, set the memory bit */
1180 			int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
1181 				      >= start);
1182 			md_bitmap_set_memory_bits(bitmap,
1183 						  (sector_t)i << bitmap->counts.chunkshift,
1184 						  needed);
1185 			bit_cnt++;
1186 		}
1187 		offset = 0;
1188 	}
1189 
1190 	pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
1191 		 bmname(bitmap), store->file_pages,
1192 		 bit_cnt, chunks);
1193 
1194 	return 0;
1195 
1196  err:
1197 	pr_warn("%s: bitmap initialisation failed: %d\n",
1198 		bmname(bitmap), ret);
1199 	return ret;
1200 }
1201 
1202 void md_bitmap_write_all(struct bitmap *bitmap)
1203 {
1204 	/* We don't actually write all bitmap blocks here,
1205 	 * just flag them as needing to be written
1206 	 */
1207 	int i;
1208 
1209 	if (!bitmap || !bitmap->storage.filemap)
1210 		return;
1211 	if (bitmap->storage.file)
1212 		/* Only one copy, so nothing needed */
1213 		return;
1214 
1215 	for (i = 0; i < bitmap->storage.file_pages; i++)
1216 		set_page_attr(bitmap, i,
1217 			      BITMAP_PAGE_NEEDWRITE);
1218 	bitmap->allclean = 0;
1219 }
1220 
1221 static void md_bitmap_count_page(struct bitmap_counts *bitmap,
1222 				 sector_t offset, int inc)
1223 {
1224 	sector_t chunk = offset >> bitmap->chunkshift;
1225 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1226 	bitmap->bp[page].count += inc;
1227 	md_bitmap_checkfree(bitmap, page);
1228 }
1229 
1230 static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
1231 {
1232 	sector_t chunk = offset >> bitmap->chunkshift;
1233 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1234 	struct bitmap_page *bp = &bitmap->bp[page];
1235 
1236 	if (!bp->pending)
1237 		bp->pending = 1;
1238 }
1239 
1240 static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1241 					       sector_t offset, sector_t *blocks,
1242 					       int create);
1243 
1244 /*
1245  * bitmap daemon -- periodically wakes up to clean bits and flush pages
1246  *			out to disk
1247  */
1248 
1249 void md_bitmap_daemon_work(struct mddev *mddev)
1250 {
1251 	struct bitmap *bitmap;
1252 	unsigned long j;
1253 	unsigned long nextpage;
1254 	sector_t blocks;
1255 	struct bitmap_counts *counts;
1256 
1257 	/* Use a mutex to guard daemon_work against
1258 	 * bitmap_destroy.
1259 	 */
1260 	mutex_lock(&mddev->bitmap_info.mutex);
1261 	bitmap = mddev->bitmap;
1262 	if (bitmap == NULL) {
1263 		mutex_unlock(&mddev->bitmap_info.mutex);
1264 		return;
1265 	}
1266 	if (time_before(jiffies, bitmap->daemon_lastrun
1267 			+ mddev->bitmap_info.daemon_sleep))
1268 		goto done;
1269 
1270 	bitmap->daemon_lastrun = jiffies;
1271 	if (bitmap->allclean) {
1272 		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1273 		goto done;
1274 	}
1275 	bitmap->allclean = 1;
1276 
1277 	if (bitmap->mddev->queue)
1278 		blk_add_trace_msg(bitmap->mddev->queue,
1279 				  "md bitmap_daemon_work");
1280 
1281 	/* Any file-page which is PENDING now needs to be written.
1282 	 * So set NEEDWRITE now, then after we make any last-minute changes
1283 	 * we will write it.
1284 	 */
1285 	for (j = 0; j < bitmap->storage.file_pages; j++)
1286 		if (test_and_clear_page_attr(bitmap, j,
1287 					     BITMAP_PAGE_PENDING))
1288 			set_page_attr(bitmap, j,
1289 				      BITMAP_PAGE_NEEDWRITE);
1290 
1291 	if (bitmap->need_sync &&
1292 	    mddev->bitmap_info.external == 0) {
1293 		/* Arrange for superblock update as well as
1294 		 * other changes */
1295 		bitmap_super_t *sb;
1296 		bitmap->need_sync = 0;
1297 		if (bitmap->storage.filemap) {
1298 			sb = kmap_atomic(bitmap->storage.sb_page);
1299 			sb->events_cleared =
1300 				cpu_to_le64(bitmap->events_cleared);
1301 			kunmap_atomic(sb);
1302 			set_page_attr(bitmap, 0,
1303 				      BITMAP_PAGE_NEEDWRITE);
1304 		}
1305 	}
1306 	/* Now look at the bitmap counters and if any are '2' or '1',
1307 	 * decrement and handle accordingly.
1308 	 */
1309 	counts = &bitmap->counts;
1310 	spin_lock_irq(&counts->lock);
1311 	nextpage = 0;
1312 	for (j = 0; j < counts->chunks; j++) {
1313 		bitmap_counter_t *bmc;
1314 		sector_t  block = (sector_t)j << counts->chunkshift;
1315 
1316 		if (j == nextpage) {
1317 			nextpage += PAGE_COUNTER_RATIO;
1318 			if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
1319 				j |= PAGE_COUNTER_MASK;
1320 				continue;
1321 			}
1322 			counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
1323 		}
1324 
1325 		bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
1326 		if (!bmc) {
1327 			j |= PAGE_COUNTER_MASK;
1328 			continue;
1329 		}
1330 		if (*bmc == 1 && !bitmap->need_sync) {
1331 			/* We can clear the bit */
1332 			*bmc = 0;
1333 			md_bitmap_count_page(counts, block, -1);
1334 			md_bitmap_file_clear_bit(bitmap, block);
1335 		} else if (*bmc && *bmc <= 2) {
1336 			*bmc = 1;
1337 			md_bitmap_set_pending(counts, block);
1338 			bitmap->allclean = 0;
1339 		}
1340 	}
1341 	spin_unlock_irq(&counts->lock);
1342 
1343 	md_bitmap_wait_writes(bitmap);
1344 	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
1345 	 * DIRTY pages need to be written by bitmap_unplug so it can wait
1346 	 * for them.
1347 	 * If we find any DIRTY page we stop there and let bitmap_unplug
1348 	 * handle all the rest.  This is important in the case where
1349 	 * the first blocking holds the superblock and it has been updated.
1350 	 * We mustn't write any other blocks before the superblock.
1351 	 */
1352 	for (j = 0;
1353 	     j < bitmap->storage.file_pages
1354 		     && !test_bit(BITMAP_STALE, &bitmap->flags);
1355 	     j++) {
1356 		if (test_page_attr(bitmap, j,
1357 				   BITMAP_PAGE_DIRTY))
1358 			/* bitmap_unplug will handle the rest */
1359 			break;
1360 		if (bitmap->storage.filemap &&
1361 		    test_and_clear_page_attr(bitmap, j,
1362 					     BITMAP_PAGE_NEEDWRITE)) {
1363 			write_page(bitmap, bitmap->storage.filemap[j], 0);
1364 		}
1365 	}
1366 
1367  done:
1368 	if (bitmap->allclean == 0)
1369 		mddev->thread->timeout =
1370 			mddev->bitmap_info.daemon_sleep;
1371 	mutex_unlock(&mddev->bitmap_info.mutex);
1372 }
1373 
1374 static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1375 					       sector_t offset, sector_t *blocks,
1376 					       int create)
1377 __releases(bitmap->lock)
1378 __acquires(bitmap->lock)
1379 {
1380 	/* If 'create', we might release the lock and reclaim it.
1381 	 * The lock must have been taken with interrupts enabled.
1382 	 * If !create, we don't release the lock.
1383 	 */
1384 	sector_t chunk = offset >> bitmap->chunkshift;
1385 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1386 	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
1387 	sector_t csize;
1388 	int err;
1389 
1390 	err = md_bitmap_checkpage(bitmap, page, create, 0);
1391 
1392 	if (bitmap->bp[page].hijacked ||
1393 	    bitmap->bp[page].map == NULL)
1394 		csize = ((sector_t)1) << (bitmap->chunkshift +
1395 					  PAGE_COUNTER_SHIFT);
1396 	else
1397 		csize = ((sector_t)1) << bitmap->chunkshift;
1398 	*blocks = csize - (offset & (csize - 1));
1399 
1400 	if (err < 0)
1401 		return NULL;
1402 
1403 	/* now locked ... */
1404 
1405 	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
1406 		/* should we use the first or second counter field
1407 		 * of the hijacked pointer? */
1408 		int hi = (pageoff > PAGE_COUNTER_MASK);
1409 		return  &((bitmap_counter_t *)
1410 			  &bitmap->bp[page].map)[hi];
1411 	} else /* page is allocated */
1412 		return (bitmap_counter_t *)
1413 			&(bitmap->bp[page].map[pageoff]);
1414 }
1415 
1416 int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
1417 {
1418 	if (!bitmap)
1419 		return 0;
1420 
1421 	if (behind) {
1422 		int bw;
1423 		atomic_inc(&bitmap->behind_writes);
1424 		bw = atomic_read(&bitmap->behind_writes);
1425 		if (bw > bitmap->behind_writes_used)
1426 			bitmap->behind_writes_used = bw;
1427 
1428 		pr_debug("inc write-behind count %d/%lu\n",
1429 			 bw, bitmap->mddev->bitmap_info.max_write_behind);
1430 	}
1431 
1432 	while (sectors) {
1433 		sector_t blocks;
1434 		bitmap_counter_t *bmc;
1435 
1436 		spin_lock_irq(&bitmap->counts.lock);
1437 		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
1438 		if (!bmc) {
1439 			spin_unlock_irq(&bitmap->counts.lock);
1440 			return 0;
1441 		}
1442 
1443 		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
1444 			DEFINE_WAIT(__wait);
1445 			/* note that it is safe to do the prepare_to_wait
1446 			 * after the test as long as we do it before dropping
1447 			 * the spinlock.
1448 			 */
1449 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
1450 					TASK_UNINTERRUPTIBLE);
1451 			spin_unlock_irq(&bitmap->counts.lock);
1452 			schedule();
1453 			finish_wait(&bitmap->overflow_wait, &__wait);
1454 			continue;
1455 		}
1456 
1457 		switch (*bmc) {
1458 		case 0:
1459 			md_bitmap_file_set_bit(bitmap, offset);
1460 			md_bitmap_count_page(&bitmap->counts, offset, 1);
1461 			fallthrough;
1462 		case 1:
1463 			*bmc = 2;
1464 		}
1465 
1466 		(*bmc)++;
1467 
1468 		spin_unlock_irq(&bitmap->counts.lock);
1469 
1470 		offset += blocks;
1471 		if (sectors > blocks)
1472 			sectors -= blocks;
1473 		else
1474 			sectors = 0;
1475 	}
1476 	return 0;
1477 }
1478 EXPORT_SYMBOL(md_bitmap_startwrite);
1479 
1480 void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
1481 			unsigned long sectors, int success, int behind)
1482 {
1483 	if (!bitmap)
1484 		return;
1485 	if (behind) {
1486 		if (atomic_dec_and_test(&bitmap->behind_writes))
1487 			wake_up(&bitmap->behind_wait);
1488 		pr_debug("dec write-behind count %d/%lu\n",
1489 			 atomic_read(&bitmap->behind_writes),
1490 			 bitmap->mddev->bitmap_info.max_write_behind);
1491 	}
1492 
1493 	while (sectors) {
1494 		sector_t blocks;
1495 		unsigned long flags;
1496 		bitmap_counter_t *bmc;
1497 
1498 		spin_lock_irqsave(&bitmap->counts.lock, flags);
1499 		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
1500 		if (!bmc) {
1501 			spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1502 			return;
1503 		}
1504 
1505 		if (success && !bitmap->mddev->degraded &&
1506 		    bitmap->events_cleared < bitmap->mddev->events) {
1507 			bitmap->events_cleared = bitmap->mddev->events;
1508 			bitmap->need_sync = 1;
1509 			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
1510 		}
1511 
1512 		if (!success && !NEEDED(*bmc))
1513 			*bmc |= NEEDED_MASK;
1514 
1515 		if (COUNTER(*bmc) == COUNTER_MAX)
1516 			wake_up(&bitmap->overflow_wait);
1517 
1518 		(*bmc)--;
1519 		if (*bmc <= 2) {
1520 			md_bitmap_set_pending(&bitmap->counts, offset);
1521 			bitmap->allclean = 0;
1522 		}
1523 		spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1524 		offset += blocks;
1525 		if (sectors > blocks)
1526 			sectors -= blocks;
1527 		else
1528 			sectors = 0;
1529 	}
1530 }
1531 EXPORT_SYMBOL(md_bitmap_endwrite);
1532 
1533 static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1534 			       int degraded)
1535 {
1536 	bitmap_counter_t *bmc;
1537 	int rv;
1538 	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
1539 		*blocks = 1024;
1540 		return 1; /* always resync if no bitmap */
1541 	}
1542 	spin_lock_irq(&bitmap->counts.lock);
1543 	bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1544 	rv = 0;
1545 	if (bmc) {
1546 		/* locked */
1547 		if (RESYNC(*bmc))
1548 			rv = 1;
1549 		else if (NEEDED(*bmc)) {
1550 			rv = 1;
1551 			if (!degraded) { /* don't set/clear bits if degraded */
1552 				*bmc |= RESYNC_MASK;
1553 				*bmc &= ~NEEDED_MASK;
1554 			}
1555 		}
1556 	}
1557 	spin_unlock_irq(&bitmap->counts.lock);
1558 	return rv;
1559 }
1560 
1561 int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1562 			 int degraded)
1563 {
1564 	/* bitmap_start_sync must always report on multiples of whole
1565 	 * pages, otherwise resync (which is very PAGE_SIZE based) will
1566 	 * get confused.
1567 	 * So call __bitmap_start_sync repeatedly (if needed) until
1568 	 * At least PAGE_SIZE>>9 blocks are covered.
1569 	 * Return the 'or' of the result.
1570 	 */
1571 	int rv = 0;
1572 	sector_t blocks1;
1573 
1574 	*blocks = 0;
1575 	while (*blocks < (PAGE_SIZE>>9)) {
1576 		rv |= __bitmap_start_sync(bitmap, offset,
1577 					  &blocks1, degraded);
1578 		offset += blocks1;
1579 		*blocks += blocks1;
1580 	}
1581 	return rv;
1582 }
1583 EXPORT_SYMBOL(md_bitmap_start_sync);
1584 
1585 void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
1586 {
1587 	bitmap_counter_t *bmc;
1588 	unsigned long flags;
1589 
1590 	if (bitmap == NULL) {
1591 		*blocks = 1024;
1592 		return;
1593 	}
1594 	spin_lock_irqsave(&bitmap->counts.lock, flags);
1595 	bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1596 	if (bmc == NULL)
1597 		goto unlock;
1598 	/* locked */
1599 	if (RESYNC(*bmc)) {
1600 		*bmc &= ~RESYNC_MASK;
1601 
1602 		if (!NEEDED(*bmc) && aborted)
1603 			*bmc |= NEEDED_MASK;
1604 		else {
1605 			if (*bmc <= 2) {
1606 				md_bitmap_set_pending(&bitmap->counts, offset);
1607 				bitmap->allclean = 0;
1608 			}
1609 		}
1610 	}
1611  unlock:
1612 	spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1613 }
1614 EXPORT_SYMBOL(md_bitmap_end_sync);
1615 
1616 void md_bitmap_close_sync(struct bitmap *bitmap)
1617 {
1618 	/* Sync has finished, and any bitmap chunks that weren't synced
1619 	 * properly have been aborted.  It remains to us to clear the
1620 	 * RESYNC bit wherever it is still on
1621 	 */
1622 	sector_t sector = 0;
1623 	sector_t blocks;
1624 	if (!bitmap)
1625 		return;
1626 	while (sector < bitmap->mddev->resync_max_sectors) {
1627 		md_bitmap_end_sync(bitmap, sector, &blocks, 0);
1628 		sector += blocks;
1629 	}
1630 }
1631 EXPORT_SYMBOL(md_bitmap_close_sync);
1632 
1633 void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
1634 {
1635 	sector_t s = 0;
1636 	sector_t blocks;
1637 
1638 	if (!bitmap)
1639 		return;
1640 	if (sector == 0) {
1641 		bitmap->last_end_sync = jiffies;
1642 		return;
1643 	}
1644 	if (!force && time_before(jiffies, (bitmap->last_end_sync
1645 				  + bitmap->mddev->bitmap_info.daemon_sleep)))
1646 		return;
1647 	wait_event(bitmap->mddev->recovery_wait,
1648 		   atomic_read(&bitmap->mddev->recovery_active) == 0);
1649 
1650 	bitmap->mddev->curr_resync_completed = sector;
1651 	set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
1652 	sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
1653 	s = 0;
1654 	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
1655 		md_bitmap_end_sync(bitmap, s, &blocks, 0);
1656 		s += blocks;
1657 	}
1658 	bitmap->last_end_sync = jiffies;
1659 	sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed);
1660 }
1661 EXPORT_SYMBOL(md_bitmap_cond_end_sync);
1662 
1663 void md_bitmap_sync_with_cluster(struct mddev *mddev,
1664 			      sector_t old_lo, sector_t old_hi,
1665 			      sector_t new_lo, sector_t new_hi)
1666 {
1667 	struct bitmap *bitmap = mddev->bitmap;
1668 	sector_t sector, blocks = 0;
1669 
1670 	for (sector = old_lo; sector < new_lo; ) {
1671 		md_bitmap_end_sync(bitmap, sector, &blocks, 0);
1672 		sector += blocks;
1673 	}
1674 	WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
1675 
1676 	for (sector = old_hi; sector < new_hi; ) {
1677 		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
1678 		sector += blocks;
1679 	}
1680 	WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
1681 }
1682 EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
1683 
1684 static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
1685 {
1686 	/* For each chunk covered by any of these sectors, set the
1687 	 * counter to 2 and possibly set resync_needed.  They should all
1688 	 * be 0 at this point
1689 	 */
1690 
1691 	sector_t secs;
1692 	bitmap_counter_t *bmc;
1693 	spin_lock_irq(&bitmap->counts.lock);
1694 	bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
1695 	if (!bmc) {
1696 		spin_unlock_irq(&bitmap->counts.lock);
1697 		return;
1698 	}
1699 	if (!*bmc) {
1700 		*bmc = 2;
1701 		md_bitmap_count_page(&bitmap->counts, offset, 1);
1702 		md_bitmap_set_pending(&bitmap->counts, offset);
1703 		bitmap->allclean = 0;
1704 	}
1705 	if (needed)
1706 		*bmc |= NEEDED_MASK;
1707 	spin_unlock_irq(&bitmap->counts.lock);
1708 }
1709 
1710 /* dirty the memory and file bits for bitmap chunks "s" to "e" */
1711 void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
1712 {
1713 	unsigned long chunk;
1714 
1715 	for (chunk = s; chunk <= e; chunk++) {
1716 		sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
1717 		md_bitmap_set_memory_bits(bitmap, sec, 1);
1718 		md_bitmap_file_set_bit(bitmap, sec);
1719 		if (sec < bitmap->mddev->recovery_cp)
1720 			/* We are asserting that the array is dirty,
1721 			 * so move the recovery_cp address back so
1722 			 * that it is obvious that it is dirty
1723 			 */
1724 			bitmap->mddev->recovery_cp = sec;
1725 	}
1726 }
1727 
1728 /*
1729  * flush out any pending updates
1730  */
1731 void md_bitmap_flush(struct mddev *mddev)
1732 {
1733 	struct bitmap *bitmap = mddev->bitmap;
1734 	long sleep;
1735 
1736 	if (!bitmap) /* there was no bitmap */
1737 		return;
1738 
1739 	/* run the daemon_work three time to ensure everything is flushed
1740 	 * that can be
1741 	 */
1742 	sleep = mddev->bitmap_info.daemon_sleep * 2;
1743 	bitmap->daemon_lastrun -= sleep;
1744 	md_bitmap_daemon_work(mddev);
1745 	bitmap->daemon_lastrun -= sleep;
1746 	md_bitmap_daemon_work(mddev);
1747 	bitmap->daemon_lastrun -= sleep;
1748 	md_bitmap_daemon_work(mddev);
1749 	if (mddev->bitmap_info.external)
1750 		md_super_wait(mddev);
1751 	md_bitmap_update_sb(bitmap);
1752 }
1753 
1754 /*
1755  * free memory that was allocated
1756  */
1757 void md_bitmap_free(struct bitmap *bitmap)
1758 {
1759 	unsigned long k, pages;
1760 	struct bitmap_page *bp;
1761 
1762 	if (!bitmap) /* there was no bitmap */
1763 		return;
1764 
1765 	if (bitmap->sysfs_can_clear)
1766 		sysfs_put(bitmap->sysfs_can_clear);
1767 
1768 	if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
1769 		bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
1770 		md_cluster_stop(bitmap->mddev);
1771 
1772 	/* Shouldn't be needed - but just in case.... */
1773 	wait_event(bitmap->write_wait,
1774 		   atomic_read(&bitmap->pending_writes) == 0);
1775 
1776 	/* release the bitmap file  */
1777 	md_bitmap_file_unmap(&bitmap->storage);
1778 
1779 	bp = bitmap->counts.bp;
1780 	pages = bitmap->counts.pages;
1781 
1782 	/* free all allocated memory */
1783 
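	/* A hijacked slot never had a page allocated; its ->map field
	 * holds counter bits rather than a pointer, so it must not be
	 * passed to kfree().
	 */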
1784 	if (bp) /* deallocate the page memory */
1785 		for (k = 0; k < pages; k++)
1786 			if (bp[k].map && !bp[k].hijacked)
1787 				kfree(bp[k].map);
1788 	kfree(bp);
1789 	kfree(bitmap);
1790 }
1791 EXPORT_SYMBOL(md_bitmap_free);
1792 
1793 void md_bitmap_wait_behind_writes(struct mddev *mddev)
1794 {
1795 	struct bitmap *bitmap = mddev->bitmap;
1796 
1797 	/* wait for behind writes to complete */
1798 	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
1799 		pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
1800 			 mdname(mddev));
1801 		/* need to kick something here to make sure I/O goes? */
1802 		wait_event(bitmap->behind_wait,
1803 			   atomic_read(&bitmap->behind_writes) == 0);
1804 	}
1805 }
1806 
1807 void md_bitmap_destroy(struct mddev *mddev)
1808 {
1809 	struct bitmap *bitmap = mddev->bitmap;
1810 
1811 	if (!bitmap) /* there was no bitmap */
1812 		return;
1813 
1814 	md_bitmap_wait_behind_writes(mddev);
1815 	if (!mddev->serialize_policy)
1816 		mddev_destroy_serial_pool(mddev, NULL, true);
1817 
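	/* Detach the bitmap while holding both bitmap_info.mutex and
	 * mddev->lock, so readers taking either lock can no longer see
	 * a bitmap that is about to be freed.
	 */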
1818 	mutex_lock(&mddev->bitmap_info.mutex);
1819 	spin_lock(&mddev->lock);
1820 	mddev->bitmap = NULL; /* disconnect from the md device */
1821 	spin_unlock(&mddev->lock);
1822 	mutex_unlock(&mddev->bitmap_info.mutex);
1823 	if (mddev->thread)
1824 		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1825 
1826 	md_bitmap_free(bitmap);
1827 }
1828 
1829 /*
1830  * initialize the bitmap structure
1831  * if this returns an error, bitmap_destroy must be called to clean up
1832  * once mddev->bitmap has been set
1833  */
1834 struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
1835 {
1836 	struct bitmap *bitmap;
1837 	sector_t blocks = mddev->resync_max_sectors;
1838 	struct file *file = mddev->bitmap_info.file;
1839 	int err;
1840 	struct kernfs_node *bm = NULL;
1841 
1842 	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
1843 
1844 	BUG_ON(file && mddev->bitmap_info.offset);
1845 
1846 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
1847 		pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
1848 			  mdname(mddev));
1849 		return ERR_PTR(-EBUSY);
1850 	}
1851 
1852 	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
1853 	if (!bitmap)
1854 		return ERR_PTR(-ENOMEM);
1855 
1856 	spin_lock_init(&bitmap->counts.lock);
1857 	atomic_set(&bitmap->pending_writes, 0);
1858 	init_waitqueue_head(&bitmap->write_wait);
1859 	init_waitqueue_head(&bitmap->overflow_wait);
1860 	init_waitqueue_head(&bitmap->behind_wait);
1861 
1862 	bitmap->mddev = mddev;
1863 	bitmap->cluster_slot = slot;
1864 
1865 	if (mddev->kobj.sd)
1866 		bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
1867 	if (bm) {
1868 		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
1869 		sysfs_put(bm);
1870 	} else
1871 		bitmap->sysfs_can_clear = NULL;
1872 
1873 	bitmap->storage.file = file;
1874 	if (file) {
1875 		get_file(file);
1876 		/* As future accesses to this file will use bmap,
1877 		 * and bypass the page cache, we must sync the file
1878 		 * first.
1879 		 */
1880 		vfs_fsync(file, 1);
1881 	}
1882 	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
1883 	if (!mddev->bitmap_info.external) {
1884 		/*
1885 		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
1886 		 * instructing us to create a new on-disk bitmap instance.
1887 		 */
1888 		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
1889 			err = md_bitmap_new_disk_sb(bitmap);
1890 		else
1891 			err = md_bitmap_read_sb(bitmap);
1892 	} else {
1893 		err = 0;
1894 		if (mddev->bitmap_info.chunksize == 0 ||
1895 		    mddev->bitmap_info.daemon_sleep == 0)
1896 			/* chunksize and time_base need to be
1897 			 * set first. */
1898 			err = -EINVAL;
1899 	}
1900 	if (err)
1901 		goto error;
1902 
1903 	bitmap->daemon_lastrun = jiffies;
1904 	err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
1905 	if (err)
1906 		goto error;
1907 
1908 	pr_debug("created bitmap (%lu pages) for device %s\n",
1909 		 bitmap->counts.pages, bmname(bitmap));
1910 
1911 	err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
1912 	if (err)
1913 		goto error;
1914 
1915 	return bitmap;
1916  error:
1917 	md_bitmap_free(bitmap);
1918 	return ERR_PTR(err);
1919 }
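
/*
 * A minimal usage sketch, mirroring what location_store() below does
 * for a running array (locking and error paths trimmed):
 *
 *	struct bitmap *bitmap = md_bitmap_create(mddev, -1);
 *
 *	if (IS_ERR(bitmap))
 *		return PTR_ERR(bitmap);
 *	mddev->bitmap = bitmap;
 *	if (md_bitmap_load(mddev))
 *		md_bitmap_destroy(mddev);
 */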
1920 
1921 int md_bitmap_load(struct mddev *mddev)
1922 {
1923 	int err = 0;
1924 	sector_t start = 0;
1925 	sector_t sector = 0;
1926 	struct bitmap *bitmap = mddev->bitmap;
1927 	struct md_rdev *rdev;
1928 
1929 	if (!bitmap)
1930 		goto out;
1931 
1932 	rdev_for_each(rdev, mddev)
1933 		mddev_create_serial_pool(mddev, rdev, true);
1934 
1935 	if (mddev_is_clustered(mddev))
1936 		md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
1937 
1938 	/* Clear out old bitmap info first:  Either there is none, or we
1939 	 * are resuming after someone else has possibly changed things,
1940 	 * so we should forget old cached info.
1941 	 * All chunks should be clean, but some might need_sync.
1942 	 */
1943 	while (sector < mddev->resync_max_sectors) {
1944 		sector_t blocks;
1945 		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
1946 		sector += blocks;
1947 	}
1948 	md_bitmap_close_sync(bitmap);
1949 
1950 	if (mddev->degraded == 0
1951 	    || bitmap->events_cleared == mddev->events)
1952 		/* no need to keep dirty bits to optimise a
1953 		 * re-add of a missing device */
1954 		start = mddev->recovery_cp;
1955 
1956 	mutex_lock(&mddev->bitmap_info.mutex);
1957 	err = md_bitmap_init_from_disk(bitmap, start);
1958 	mutex_unlock(&mddev->bitmap_info.mutex);
1959 
1960 	if (err)
1961 		goto out;
1962 	clear_bit(BITMAP_STALE, &bitmap->flags);
1963 
1964 	/* Kick recovery in case any bits were set */
1965 	set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
1966 
1967 	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
1968 	md_wakeup_thread(mddev->thread);
1969 
1970 	md_bitmap_update_sb(bitmap);
1971 
1972 	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
1973 		err = -EIO;
1974 out:
1975 	return err;
1976 }
1977 EXPORT_SYMBOL_GPL(md_bitmap_load);
1978 
1979 /* the caller needs to free the returned bitmap with md_bitmap_free() */
1980 struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
1981 {
1982 	int rv = 0;
1983 	struct bitmap *bitmap;
1984 
1985 	bitmap = md_bitmap_create(mddev, slot);
1986 	if (IS_ERR(bitmap)) {
1987 		rv = PTR_ERR(bitmap);
1988 		return ERR_PTR(rv);
1989 	}
1990 
1991 	rv = md_bitmap_init_from_disk(bitmap, 0);
1992 	if (rv) {
1993 		md_bitmap_free(bitmap);
1994 		return ERR_PTR(rv);
1995 	}
1996 
1997 	return bitmap;
1998 }
1999 EXPORT_SYMBOL(get_bitmap_from_slot);
2000 
2001 /* Load the bitmap associated with 'slot' and copy its resync information
2002  * into our own bitmap.
2003  */
2004 int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
2005 		sector_t *low, sector_t *high, bool clear_bits)
2006 {
2007 	int rv = 0, i, j;
2008 	sector_t block, lo = 0, hi = 0;
2009 	struct bitmap_counts *counts;
2010 	struct bitmap *bitmap;
2011 
2012 	bitmap = get_bitmap_from_slot(mddev, slot);
2013 	if (IS_ERR(bitmap)) {
2014 		pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
2015 		return -1;
2016 	}
2017 
2018 	counts = &bitmap->counts;
2019 	for (j = 0; j < counts->chunks; j++) {
2020 		block = (sector_t)j << counts->chunkshift;
2021 		if (md_bitmap_file_test_bit(bitmap, block)) {
2022 			if (!lo)
2023 				lo = block;
2024 			hi = block;
2025 			md_bitmap_file_clear_bit(bitmap, block);
2026 			md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
2027 			md_bitmap_file_set_bit(mddev->bitmap, block);
2028 		}
2029 	}
2030 
2031 	if (clear_bits) {
2032 		md_bitmap_update_sb(bitmap);
2033 		/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
2034 		 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
2035 		for (i = 0; i < bitmap->storage.file_pages; i++)
2036 			if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
2037 				set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
2038 		md_bitmap_unplug(bitmap);
2039 	}
2040 	md_bitmap_unplug(mddev->bitmap);
2041 	*low = lo;
2042 	*high = hi;
2043 	md_bitmap_free(bitmap);
2044 
2045 	return rv;
2046 }
2047 EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
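
/*
 * Hypothetical caller sketch (cluster raid picking up a departed
 * node's dirty region); the returned lo/hi bound the area that now
 * needs resync:
 *
 *	sector_t lo = 0, hi = 0;
 *
 *	if (!md_bitmap_copy_from_slot(mddev, slot, &lo, &hi, true))
 *		... resync sectors lo through hi ...
 */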
2048 
2049 
2050 void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
2051 {
2052 	unsigned long chunk_kb;
2053 	struct bitmap_counts *counts;
2054 
2055 	if (!bitmap)
2056 		return;
2057 
2058 	counts = &bitmap->counts;
2059 
2060 	chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
2061 	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
2062 		   "%lu%s chunk",
2063 		   counts->pages - counts->missing_pages,
2064 		   counts->pages,
2065 		   (counts->pages - counts->missing_pages)
2066 		   << (PAGE_SHIFT - 10),
2067 		   chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
2068 		   chunk_kb ? "KB" : "B");
2069 	if (bitmap->storage.file) {
2070 		seq_printf(seq, ", file: ");
2071 		seq_file_path(seq, bitmap->storage.file, " \t\n");
2072 	}
2073 
2074 	seq_printf(seq, "\n");
2075 }
2076 
2077 int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2078 		  int chunksize, int init)
2079 {
2080 	/* If chunksize is 0, choose an appropriate chunk size.
2081 	 * Then possibly allocate new storage space.
2082 	 * Then quiesce, copy bits, replace bitmap, and restart.
2083 	 *
2084 	 * This function is called both to set up the initial bitmap
2085 	 * and to resize the bitmap while the array is active.
2086 	 * If this happens as a result of the array being resized,
2087 	 * chunksize will be zero and we need to choose a suitable
2088 	 * chunksize; otherwise we use what we are given.
2089 	 */
2090 	struct bitmap_storage store;
2091 	struct bitmap_counts old_counts;
2092 	unsigned long chunks;
2093 	sector_t block;
2094 	sector_t old_blocks, new_blocks;
2095 	int chunkshift;
2096 	int ret = 0;
2097 	long pages;
2098 	struct bitmap_page *new_bp;
2099 
2100 	if (bitmap->storage.file && !init) {
2101 		pr_info("md: cannot resize file-based bitmap\n");
2102 		return -EINVAL;
2103 	}
2104 
2105 	if (chunksize == 0) {
2106 		/* If there is enough space, leave the chunk size unchanged;
2107 		 * else increase it by a factor of two until there is enough space.
2108 		 */
2109 		long bytes;
2110 		long space = bitmap->mddev->bitmap_info.space;
2111 
2112 		if (space == 0) {
2113 			/* We don't know how much space is available, so limit
2114 			 * ourselves to the current bitmap size, in sectors.
2115 			 */
2116 			bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
2117 			if (!bitmap->mddev->bitmap_info.external)
2118 				bytes += sizeof(bitmap_super_t);
2119 			space = DIV_ROUND_UP(bytes, 512);
2120 			bitmap->mddev->bitmap_info.space = space;
2121 		}
2122 		chunkshift = bitmap->counts.chunkshift;
2123 		chunkshift--;
2124 		do {
2125 			/* 'chunkshift' is the shift from block size to chunk size */
2126 			chunkshift++;
2127 			chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2128 			bytes = DIV_ROUND_UP(chunks, 8);
2129 			if (!bitmap->mddev->bitmap_info.external)
2130 				bytes += sizeof(bitmap_super_t);
2131 		} while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
2132 			(BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
2133 	} else
2134 		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
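	/* Worked example, assuming BITMAP_BLOCK_SHIFT is 9 (512-byte
	 * blocks): chunksize = 65536 gives ffz(~65536) = 16, so
	 * chunkshift = 7 and each chunk covers 1 << 7 = 128 sectors.
	 */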
2135 
2136 	chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2137 	memset(&store, 0, sizeof(store));
2138 	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
2139 		ret = md_bitmap_storage_alloc(&store, chunks,
2140 					      !bitmap->mddev->bitmap_info.external,
2141 					      mddev_is_clustered(bitmap->mddev)
2142 					      ? bitmap->cluster_slot : 0);
2143 	if (ret) {
2144 		md_bitmap_file_unmap(&store);
2145 		goto err;
2146 	}
2147 
2148 	pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
2149 
2150 	new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
2151 	ret = -ENOMEM;
2152 	if (!new_bp) {
2153 		md_bitmap_file_unmap(&store);
2154 		goto err;
2155 	}
2156 
2157 	if (!init)
2158 		bitmap->mddev->pers->quiesce(bitmap->mddev, 1);
2159 
2160 	store.file = bitmap->storage.file;
2161 	bitmap->storage.file = NULL;
2162 
2163 	if (store.sb_page && bitmap->storage.sb_page)
2164 		memcpy(page_address(store.sb_page),
2165 		       page_address(bitmap->storage.sb_page),
2166 		       sizeof(bitmap_super_t));
2167 	spin_lock_irq(&bitmap->counts.lock);
2168 	md_bitmap_file_unmap(&bitmap->storage);
2169 	bitmap->storage = store;
2170 
2171 	old_counts = bitmap->counts;
2172 	bitmap->counts.bp = new_bp;
2173 	bitmap->counts.pages = pages;
2174 	bitmap->counts.missing_pages = pages;
2175 	bitmap->counts.chunkshift = chunkshift;
2176 	bitmap->counts.chunks = chunks;
2177 	bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
2178 						     BITMAP_BLOCK_SHIFT);
2179 
2180 	blocks = min(old_counts.chunks << old_counts.chunkshift,
2181 		     chunks << chunkshift);
2182 
2183 	/* For cluster raid, need to pre-allocate bitmap */
2184 	if (mddev_is_clustered(bitmap->mddev)) {
2185 		unsigned long page;
2186 		for (page = 0; page < pages; page++) {
2187 			ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
2188 			if (ret) {
2189 				unsigned long k;
2190 
2191 				/* deallocate the page memory */
2192 				for (k = 0; k < page; k++) {
2193 					kfree(new_bp[k].map);
2194 				}
2195 				kfree(new_bp);
2196 
2197 				/* restore some fields from old_counts */
2198 				bitmap->counts.bp = old_counts.bp;
2199 				bitmap->counts.pages = old_counts.pages;
2200 				bitmap->counts.missing_pages = old_counts.pages;
2201 				bitmap->counts.chunkshift = old_counts.chunkshift;
2202 				bitmap->counts.chunks = old_counts.chunks;
2203 				bitmap->mddev->bitmap_info.chunksize =
2204 					1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
2205 				blocks = old_counts.chunks << old_counts.chunkshift;
2206 				pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
2207 				break;
2208 			} else
2209 				bitmap->counts.bp[page].count += 1;
2210 		}
2211 	}
2212 
2213 	for (block = 0; block < blocks; ) {
2214 		bitmap_counter_t *bmc_old, *bmc_new;
2215 		int set;
2216 
2217 		bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
2218 		set = bmc_old && NEEDED(*bmc_old);
2219 
2220 		if (set) {
2221 			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
2222 			if (bmc_new) {
2223 				if (*bmc_new == 0) {
2224 					/* need to set on-disk bits too. */
2225 					sector_t end = block + new_blocks;
2226 					sector_t start = block >> chunkshift;
2227 
2228 					start <<= chunkshift;
2229 					while (start < end) {
2230 						md_bitmap_file_set_bit(bitmap, block);
2231 						start += 1 << chunkshift;
2232 					}
2233 					*bmc_new = 2;
2234 					md_bitmap_count_page(&bitmap->counts, block, 1);
2235 					md_bitmap_set_pending(&bitmap->counts, block);
2236 				}
2237 				*bmc_new |= NEEDED_MASK;
2238 			}
2239 			if (new_blocks < old_blocks)
2240 				old_blocks = new_blocks;
2241 		}
2242 		block += old_blocks;
2243 	}
2244 
2245 	if (bitmap->counts.bp != old_counts.bp) {
2246 		unsigned long k;
2247 		for (k = 0; k < old_counts.pages; k++)
2248 			if (!old_counts.bp[k].hijacked)
2249 				kfree(old_counts.bp[k].map);
2250 		kfree(old_counts.bp);
2251 	}
2252 
2253 	if (!init) {
2254 		int i;
2255 		while (block < (chunks << chunkshift)) {
2256 			bitmap_counter_t *bmc;
2257 			bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
2258 			if (bmc) {
2259 				/* new space.  It needs to be resynced, so
2260 				 * we set NEEDED_MASK.
2261 				 */
2262 				if (*bmc == 0) {
2263 					*bmc = NEEDED_MASK | 2;
2264 					md_bitmap_count_page(&bitmap->counts, block, 1);
2265 					md_bitmap_set_pending(&bitmap->counts, block);
2266 				}
2267 			}
2268 			block += new_blocks;
2269 		}
2270 		for (i = 0; i < bitmap->storage.file_pages; i++)
2271 			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
2272 	}
2273 	spin_unlock_irq(&bitmap->counts.lock);
2274 
2275 	if (!init) {
2276 		md_bitmap_unplug(bitmap);
2277 		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
2278 	}
2279 	ret = 0;
2280 err:
2281 	return ret;
2282 }
2283 EXPORT_SYMBOL_GPL(md_bitmap_resize);
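
/*
 * Note that md_bitmap_create() above relies on this function, with
 * init=1, to size a brand-new bitmap:
 *
 *	err = md_bitmap_resize(bitmap, blocks,
 *			       mddev->bitmap_info.chunksize, 1);
 */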
2284 
2285 static ssize_t
2286 location_show(struct mddev *mddev, char *page)
2287 {
2288 	ssize_t len;
2289 	if (mddev->bitmap_info.file)
2290 		len = sprintf(page, "file");
2291 	else if (mddev->bitmap_info.offset)
2292 		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
2293 	else
2294 		len = sprintf(page, "none");
2295 	len += sprintf(page+len, "\n");
2296 	return len;
2297 }
2298 
2299 static ssize_t
2300 location_store(struct mddev *mddev, const char *buf, size_t len)
2301 {
2302 	int rv;
2303 
2304 	rv = mddev_lock(mddev);
2305 	if (rv)
2306 		return rv;
2307 	if (mddev->pers) {
2308 		if (!mddev->pers->quiesce) {
2309 			rv = -EBUSY;
2310 			goto out;
2311 		}
2312 		if (mddev->recovery || mddev->sync_thread) {
2313 			rv = -EBUSY;
2314 			goto out;
2315 		}
2316 	}
2317 
2318 	if (mddev->bitmap || mddev->bitmap_info.file ||
2319 	    mddev->bitmap_info.offset) {
2320 		/* bitmap already configured.  Only option is to clear it */
2321 		if (strncmp(buf, "none", 4) != 0) {
2322 			rv = -EBUSY;
2323 			goto out;
2324 		}
2325 		if (mddev->pers) {
2326 			mddev_suspend(mddev);
2327 			md_bitmap_destroy(mddev);
2328 			mddev_resume(mddev);
2329 		}
2330 		mddev->bitmap_info.offset = 0;
2331 		if (mddev->bitmap_info.file) {
2332 			struct file *f = mddev->bitmap_info.file;
2333 			mddev->bitmap_info.file = NULL;
2334 			fput(f);
2335 		}
2336 	} else {
2337 		/* No bitmap, OK to set a location */
2338 		long long offset;
2339 		if (strncmp(buf, "none", 4) == 0)
2340 			/* nothing to be done */;
2341 		else if (strncmp(buf, "file:", 5) == 0) {
2342 			/* Not supported yet */
2343 			rv = -EINVAL;
2344 			goto out;
2345 		} else {
2346 			if (buf[0] == '+')
2347 				rv = kstrtoll(buf+1, 10, &offset);
2348 			else
2349 				rv = kstrtoll(buf, 10, &offset);
2350 			if (rv)
2351 				goto out;
2352 			if (offset == 0) {
2353 				rv = -EINVAL;
2354 				goto out;
2355 			}
2356 			if (mddev->bitmap_info.external == 0 &&
2357 			    mddev->major_version == 0 &&
2358 			    offset != mddev->bitmap_info.default_offset) {
2359 				rv = -EINVAL;
2360 				goto out;
2361 			}
2362 			mddev->bitmap_info.offset = offset;
2363 			if (mddev->pers) {
2364 				struct bitmap *bitmap;
2365 				bitmap = md_bitmap_create(mddev, -1);
2366 				mddev_suspend(mddev);
2367 				if (IS_ERR(bitmap))
2368 					rv = PTR_ERR(bitmap);
2369 				else {
2370 					mddev->bitmap = bitmap;
2371 					rv = md_bitmap_load(mddev);
2372 					if (rv)
2373 						mddev->bitmap_info.offset = 0;
2374 				}
2375 				if (rv) {
2376 					md_bitmap_destroy(mddev);
2377 					mddev_resume(mddev);
2378 					goto out;
2379 				}
2380 				mddev_resume(mddev);
2381 			}
2382 		}
2383 	}
2384 	if (!mddev->external) {
2385 		/* Ensure new bitmap info is stored in
2386 		 * metadata promptly.
2387 		 */
2388 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2389 		md_wakeup_thread(mddev->thread);
2390 	}
2391 	rv = 0;
2392 out:
2393 	mddev_unlock(mddev);
2394 	if (rv)
2395 		return rv;
2396 	return len;
2397 }
2398 
2399 static struct md_sysfs_entry bitmap_location =
2400 __ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
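
/*
 * Usage sketch (hypothetical device name): writing "none" removes an
 * existing bitmap, while a non-zero sector offset arms an internal
 * bitmap at that offset relative to the superblock:
 *
 *	echo none > /sys/block/md0/md/bitmap/location
 *	echo +8 > /sys/block/md0/md/bitmap/location
 */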
2401 
2402 /* 'bitmap/space' is the space available at 'location' for the
2403  * bitmap.  This allows the kernel to know when it is safe to
2404  * resize the bitmap to match a resized array.
2405  */
2406 static ssize_t
2407 space_show(struct mddev *mddev, char *page)
2408 {
2409 	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
2410 }
2411 
2412 static ssize_t
2413 space_store(struct mddev *mddev, const char *buf, size_t len)
2414 {
2415 	unsigned long sectors;
2416 	int rv;
2417 
2418 	rv = kstrtoul(buf, 10, &sectors);
2419 	if (rv)
2420 		return rv;
2421 
2422 	if (sectors == 0)
2423 		return -EINVAL;
2424 
2425 	if (mddev->bitmap &&
2426 	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
2427 		return -EFBIG; /* Bitmap is too big for this small space */
2428 
2429 	/* could make sure it isn't too big, but that isn't really
2430 	 * needed - user-space should be careful.
2431 	 */
2432 	mddev->bitmap_info.space = sectors;
2433 	return len;
2434 }
2435 
2436 static struct md_sysfs_entry bitmap_space =
2437 __ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
2438 
2439 static ssize_t
2440 timeout_show(struct mddev *mddev, char *page)
2441 {
2442 	ssize_t len;
2443 	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
2444 	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
2445 
2446 	len = sprintf(page, "%lu", secs);
2447 	if (jifs)
2448 		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
2449 	len += sprintf(page+len, "\n");
2450 	return len;
2451 }
2452 
2453 static ssize_t
2454 timeout_store(struct mddev *mddev, const char *buf, size_t len)
2455 {
2456 	/* timeout can be set at any time */
2457 	unsigned long timeout;
2458 	int rv = strict_strtoul_scaled(buf, &timeout, 4);
2459 	if (rv)
2460 		return rv;
2461 
2462 	/* just to make sure we don't overflow... */
2463 	if (timeout >= LONG_MAX / HZ)
2464 		return -EINVAL;
2465 
2466 	timeout = timeout * HZ / 10000;
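	/* e.g. writing "5.5" parses to timeout = 55000 above (four
	 * scaled decimal places), and 55000 * HZ / 10000 = 5.5 * HZ
	 * jiffies.
	 */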
2467 
2468 	if (timeout >= MAX_SCHEDULE_TIMEOUT)
2469 		timeout = MAX_SCHEDULE_TIMEOUT-1;
2470 	if (timeout < 1)
2471 		timeout = 1;
2472 	mddev->bitmap_info.daemon_sleep = timeout;
2473 	if (mddev->thread) {
2474 		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
2475 		 * the bitmap is all clean and we don't need to
2476 		 * adjust the timeout right now
2477 		 */
2478 		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
2479 			mddev->thread->timeout = timeout;
2480 			md_wakeup_thread(mddev->thread);
2481 		}
2482 	}
2483 	return len;
2484 }
2485 
2486 static struct md_sysfs_entry bitmap_timeout =
2487 __ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
2488 
2489 static ssize_t
2490 backlog_show(struct mddev *mddev, char *page)
2491 {
2492 	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
2493 }
2494 
2495 static ssize_t
2496 backlog_store(struct mddev *mddev, const char *buf, size_t len)
2497 {
2498 	unsigned long backlog;
2499 	unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
2500 	struct md_rdev *rdev;
2501 	bool has_write_mostly = false;
2502 	int rv = kstrtoul(buf, 10, &backlog);
2503 	if (rv)
2504 		return rv;
2505 	if (backlog > COUNTER_MAX)
2506 		return -EINVAL;
2507 
2508 	/*
2509 	 * Without a write-mostly device, it doesn't make sense to set
2510 	 * a max_write_behind backlog.
2511 	 */
2512 	rdev_for_each(rdev, mddev) {
2513 		if (test_bit(WriteMostly, &rdev->flags)) {
2514 			has_write_mostly = true;
2515 			break;
2516 		}
2517 	}
2518 	if (!has_write_mostly) {
2519 		pr_warn_ratelimited("%s: can't set backlog, no write mostly device available\n",
2520 				    mdname(mddev));
2521 		return -EINVAL;
2522 	}
2523 
2524 	mddev->bitmap_info.max_write_behind = backlog;
2525 	if (!backlog && mddev->serial_info_pool) {
2526 		/* serial_info_pool is not needed if backlog is zero */
2527 		if (!mddev->serialize_policy)
2528 			mddev_destroy_serial_pool(mddev, NULL, false);
2529 	} else if (backlog && !mddev->serial_info_pool) {
2530 		/* serial_info_pool is needed since backlog is not zero */
2531 		struct md_rdev *rdev;
2532 
2533 		rdev_for_each(rdev, mddev)
2534 			mddev_create_serial_pool(mddev, rdev, false);
2535 	}
2536 	if (old_mwb != backlog)
2537 		md_bitmap_update_sb(mddev->bitmap);
2538 	return len;
2539 }
2540 
2541 static struct md_sysfs_entry bitmap_backlog =
2542 __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
2543 
2544 static ssize_t
2545 chunksize_show(struct mddev *mddev, char *page)
2546 {
2547 	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
2548 }
2549 
2550 static ssize_t
2551 chunksize_store(struct mddev *mddev, const char *buf, size_t len)
2552 {
2553 	/* Can only be changed when no bitmap is active */
2554 	int rv;
2555 	unsigned long csize;
2556 	if (mddev->bitmap)
2557 		return -EBUSY;
2558 	rv = kstrtoul(buf, 10, &csize);
2559 	if (rv)
2560 		return rv;
2561 	if (csize < 512 ||
2562 	    !is_power_of_2(csize))
2563 		return -EINVAL;
2564 	if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
2565 		sizeof(((bitmap_super_t *)0)->chunksize))))
2566 		return -EOVERFLOW;
2567 	mddev->bitmap_info.chunksize = csize;
2568 	return len;
2569 }
2570 
2571 static struct md_sysfs_entry bitmap_chunksize =
2572 __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
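
/*
 * Usage sketch (hypothetical device name): the chunk size must be a
 * power of two of at least 512 bytes and can only be changed while no
 * bitmap is active, e.g. for 64KiB chunks:
 *
 *	echo 65536 > /sys/block/md0/md/bitmap/chunksize
 */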
2573 
2574 static ssize_t metadata_show(struct mddev *mddev, char *page)
2575 {
2576 	if (mddev_is_clustered(mddev))
2577 		return sprintf(page, "clustered\n");
2578 	return sprintf(page, "%s\n", (mddev->bitmap_info.external
2579 				      ? "external" : "internal"));
2580 }
2581 
2582 static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
2583 {
2584 	if (mddev->bitmap ||
2585 	    mddev->bitmap_info.file ||
2586 	    mddev->bitmap_info.offset)
2587 		return -EBUSY;
2588 	if (strncmp(buf, "external", 8) == 0)
2589 		mddev->bitmap_info.external = 1;
2590 	else if ((strncmp(buf, "internal", 8) == 0) ||
2591 			(strncmp(buf, "clustered", 9) == 0))
2592 		mddev->bitmap_info.external = 0;
2593 	else
2594 		return -EINVAL;
2595 	return len;
2596 }
2597 
2598 static struct md_sysfs_entry bitmap_metadata =
2599 __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
2600 
2601 static ssize_t can_clear_show(struct mddev *mddev, char *page)
2602 {
2603 	int len;
2604 	spin_lock(&mddev->lock);
2605 	if (mddev->bitmap)
2606 		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
2607 					     "false" : "true"));
2608 	else
2609 		len = sprintf(page, "\n");
2610 	spin_unlock(&mddev->lock);
2611 	return len;
2612 }
2613 
2614 static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
2615 {
2616 	if (mddev->bitmap == NULL)
2617 		return -ENOENT;
2618 	if (strncmp(buf, "false", 5) == 0)
2619 		mddev->bitmap->need_sync = 1;
2620 	else if (strncmp(buf, "true", 4) == 0) {
2621 		if (mddev->degraded)
2622 			return -EBUSY;
2623 		mddev->bitmap->need_sync = 0;
2624 	} else
2625 		return -EINVAL;
2626 	return len;
2627 }
2628 
2629 static struct md_sysfs_entry bitmap_can_clear =
2630 __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
2631 
2632 static ssize_t
2633 behind_writes_used_show(struct mddev *mddev, char *page)
2634 {
2635 	ssize_t ret;
2636 	spin_lock(&mddev->lock);
2637 	if (mddev->bitmap == NULL)
2638 		ret = sprintf(page, "0\n");
2639 	else
2640 		ret = sprintf(page, "%lu\n",
2641 			      mddev->bitmap->behind_writes_used);
2642 	spin_unlock(&mddev->lock);
2643 	return ret;
2644 }
2645 
2646 static ssize_t
2647 behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
2648 {
2649 	if (mddev->bitmap)
2650 		mddev->bitmap->behind_writes_used = 0;
2651 	return len;
2652 }
2653 
2654 static struct md_sysfs_entry max_backlog_used =
2655 __ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
2656        behind_writes_used_show, behind_writes_used_reset);
2657 
2658 static struct attribute *md_bitmap_attrs[] = {
2659 	&bitmap_location.attr,
2660 	&bitmap_space.attr,
2661 	&bitmap_timeout.attr,
2662 	&bitmap_backlog.attr,
2663 	&bitmap_chunksize.attr,
2664 	&bitmap_metadata.attr,
2665 	&bitmap_can_clear.attr,
2666 	&max_backlog_used.attr,
2667 	NULL
2668 };
2669 const struct attribute_group md_bitmap_group = {
2670 	.name = "bitmap",
2671 	.attrs = md_bitmap_attrs,
2672 };
2673