1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
4 *
5 * bitmap_create - sets up the bitmap structure
6 * bitmap_destroy - destroys the bitmap structure
7 *
8 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
9 * - added disk storage for bitmap
10 * - changes to allow various bitmap chunk sizes
11 */
12
13 /*
14 * Still to do:
15 *
16 * flush after a given percentage of bits is set, rather than just time based (maybe both).
17 */
18
19 #include <linux/blkdev.h>
20 #include <linux/module.h>
21 #include <linux/errno.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/timer.h>
25 #include <linux/sched.h>
26 #include <linux/list.h>
27 #include <linux/file.h>
28 #include <linux/mount.h>
29 #include <linux/buffer_head.h>
30 #include <linux/seq_file.h>
31 #include <trace/events/block.h>
32 #include "md.h"
33 #include "md-bitmap.h"
34
35 #define BITMAP_MAJOR_LO 3
36 /* version 4 insists the bitmap is in little-endian order
37 * with version 3, it is host-endian, which is non-portable.
38 * Version 5 is currently set only for clustered devices
39 */
40 #define BITMAP_MAJOR_HI 4
41 #define BITMAP_MAJOR_CLUSTERED 5
42 #define BITMAP_MAJOR_HOSTENDIAN 3
43
44 /*
45 * in-memory bitmap:
46 *
47 * Use 16 bit block counters to track pending writes to each "chunk".
48 * The 2 high order bits are special-purpose: the first is a flag indicating
49 * whether a resync is needed. The second is a flag indicating whether a
50 * resync is active.
51 * This means that the counter is actually 14 bits:
52 *
53 * +--------+--------+------------------------------------------------+
54 * | resync | resync | counter |
55 * | needed | active | |
56 * | (0-1) | (0-1) | (0-16383) |
57 * +--------+--------+------------------------------------------------+
58 *
59 * The "resync needed" bit is set when:
60 * a '1' bit is read from storage at startup.
61 * a write request fails on some drives
62 * a resync is aborted on a chunk with 'resync active' set
63 * It is cleared (and resync-active set) when a resync starts across all drives
64 * of the chunk.
65 *
66 *
67 * The "resync active" bit is set when:
68 * a resync is started on all drives, and resync_needed is set.
69 * resync_needed will be cleared (as long as resync_active wasn't already set).
70 * It is cleared when a resync completes.
71 *
72 * The counter counts pending write requests, plus the on-disk bit.
73 * When the counter is '1' and the resync bits are clear, the on-disk
74 * bit can be cleared as well, thus setting the counter to 0.
75 * When we set a bit, or increment the counter (to start a write), if the field is
76 * 0, we first set the disk bit and set the counter to 1.
77 *
78 * If the counter is 0, the on-disk bit is clear and the stripe is clean
79 * Anything that dirties the stripe pushes the counter to 2 (at least)
80 * and sets the on-disk bit (lazily).
81 * If a periodic sweep finds the counter at 2, it is decremented to 1.
82 * If the sweep finds the counter at 1, the on-disk bit is cleared and the
83 * counter goes to zero.
84 *
85 * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block
86 * counters as a fallback when "page" memory cannot be allocated:
87 *
88 * Normal case (page memory allocated):
89 *
90 * page pointer (32-bit)
91 *
92 * [ ] ------+
93 * |
94 * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters)
95 * c1 c2 c2048
96 *
97 * Hijacked case (page memory allocation failed):
98 *
99 * hijacked page pointer (32-bit)
100 *
101 * [ ][ ] (no page memory allocated)
102 * counter #1 (16-bit) counter #2 (16-bit)
103 *
104 */
105
106 #define PAGE_BITS (PAGE_SIZE << 3)
107 #define PAGE_BIT_SHIFT (PAGE_SHIFT + 3)
108
109 #define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK)
110 #define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK)
111 #define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX)
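
/*
 * Illustrative example of the counter layout described above: a value of
 * 0x8002 has the "resync needed" flag set (NEEDED() is true), the "resync
 * active" flag clear (RESYNC() is false), and COUNTER() == 2.
 */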
112
113 /* how many counters per page? */
114 #define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS)
115 /* same, except a shift value for more efficient bitops */
116 #define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT)
117 /* same, except a mask value for more efficient bitops */
118 #define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1)
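
/*
 * For example, with 4096-byte pages and 16-bit counters: PAGE_BITS is
 * 32768, so PAGE_COUNTER_RATIO is 2048 counters per page (matching the
 * diagram above), PAGE_COUNTER_SHIFT is 11 and PAGE_COUNTER_MASK is 0x7ff.
 */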
119
120 #define BITMAP_BLOCK_SHIFT 9
121
122 /*
123 * bitmap structures:
124 */
125
126 /* the in-memory bitmap is represented by bitmap_pages */
127 struct bitmap_page {
128 /*
129 * map points to the actual memory page
130 */
131 char *map;
132 /*
133 * in emergencies (when map cannot be alloced), hijack the map
134 * pointer and use it as two counters itself
135 */
136 unsigned int hijacked:1;
137 /*
138 * If any counter in this page is '1' or '2' - and so could be
139 * cleared - then that page is marked as 'pending'.
140 */
141 unsigned int pending:1;
142 /*
143 * count of dirty bits on the page
144 */
145 unsigned int count:30;
146 };
147
148 /* the main bitmap structure - one per mddev */
149 struct bitmap {
150
151 struct bitmap_counts {
152 spinlock_t lock;
153 struct bitmap_page *bp;
154 /* total number of pages in the bitmap */
155 unsigned long pages;
156 /* number of pages not yet allocated */
157 unsigned long missing_pages;
158 /* chunksize = 2^chunkshift (for bitops) */
159 unsigned long chunkshift;
160 /* total number of data chunks for the array */
161 unsigned long chunks;
162 } counts;
163
164 struct mddev *mddev; /* the md device that the bitmap is for */
165
166 __u64 events_cleared;
167 int need_sync;
168
169 struct bitmap_storage {
170 /* backing disk file */
171 struct file *file;
172 /* cached copy of the bitmap file superblock */
173 struct page *sb_page;
174 unsigned long sb_index;
175 /* list of cache pages for the file */
176 struct page **filemap;
177 /* attributes associated with filemap pages */
178 unsigned long *filemap_attr;
179 /* number of pages in the file */
180 unsigned long file_pages;
181 /* total bytes in the bitmap */
182 unsigned long bytes;
183 } storage;
184
185 unsigned long flags;
186
187 int allclean;
188
189 atomic_t behind_writes;
190 /* highest actual value at runtime */
191 unsigned long behind_writes_used;
192
193 /*
194 * the bitmap daemon - periodically wakes up and sweeps the bitmap
195 * file, cleaning up bits and flushing out pages to disk as necessary
196 */
197 unsigned long daemon_lastrun; /* jiffies of last run */
198 /*
199 * when we last called end_sync to update the bitmap with resync
200 * progress.
201 */
202 unsigned long last_end_sync;
203
204 /* pending writes to the bitmap file */
205 atomic_t pending_writes;
206 wait_queue_head_t write_wait;
207 wait_queue_head_t overflow_wait;
208 wait_queue_head_t behind_wait;
209
210 struct kernfs_node *sysfs_can_clear;
211 /* slot offset for clustered env */
212 int cluster_slot;
213 };
214
215 static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks,
216 int chunksize, bool init);
217
218 static inline char *bmname(struct bitmap *bitmap)
219 {
220 return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
221 }
222
223 static bool __bitmap_enabled(struct bitmap *bitmap)
224 {
225 return bitmap->storage.filemap &&
226 !test_bit(BITMAP_STALE, &bitmap->flags);
227 }
228
229 static bool bitmap_enabled(struct mddev *mddev)
230 {
231 struct bitmap *bitmap = mddev->bitmap;
232
233 if (!bitmap)
234 return false;
235
236 return __bitmap_enabled(bitmap);
237 }
238
239 /*
240 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
241 *
242 * 1) check to see if this page is allocated, if it's not then try to alloc
243 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
244 * page pointer directly as a counter
245 *
246 * if we find our page, we increment the page's refcount so that it stays
247 * allocated while we're using it
248 */
249 static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
250 unsigned long page, int create, int no_hijack)
251 __releases(bitmap->lock)
252 __acquires(bitmap->lock)
253 {
254 unsigned char *mappage;
255
256 WARN_ON_ONCE(page >= bitmap->pages);
257 if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
258 return 0;
259
260 if (bitmap->bp[page].map) /* page is already allocated, just return */
261 return 0;
262
263 if (!create)
264 return -ENOENT;
265
266 /* this page has not been allocated yet */
267
268 spin_unlock_irq(&bitmap->lock);
269 /* It is possible that this is being called inside a
270 * prepare_to_wait/finish_wait loop from raid5.c:make_request().
271 * In general it is not permitted to sleep in that context as it
272 * can cause the loop to spin freely.
273 * That doesn't apply here as we can only reach this point
274 * once with any loop.
275 * When this function completes, either bp[page].map or
276 * bp[page].hijacked will be set. In either case, this function will
277 * abort before getting to this point again. So there is
278 * no risk of a free-spin, and so it is safe to assert
279 * that sleeping here is allowed.
280 */
281 sched_annotate_sleep();
282 mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
283 spin_lock_irq(&bitmap->lock);
284
285 if (mappage == NULL) {
286 pr_debug("md/bitmap: map page allocation failed, hijacking\n");
287 /* We don't support hijack for cluster raid */
288 if (no_hijack)
289 return -ENOMEM;
290 /* failed - set the hijacked flag so that we can use the
291 * pointer as a counter */
292 if (!bitmap->bp[page].map)
293 bitmap->bp[page].hijacked = 1;
294 } else if (bitmap->bp[page].map ||
295 bitmap->bp[page].hijacked) {
296 /* somebody beat us to getting the page */
297 kfree(mappage);
298 } else {
299
300 /* no page was in place and we have one, so install it */
301
302 bitmap->bp[page].map = mappage;
303 bitmap->missing_pages--;
304 }
305 return 0;
306 }
307
308 /* if page is completely empty, put it back on the free list, or dealloc it */
309 /* if page was hijacked, unmark the flag so it might get alloced next time */
310 /* Note: lock should be held when calling this */
311 static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
312 {
313 char *ptr;
314
315 if (bitmap->bp[page].count) /* page is still busy */
316 return;
317
318 /* page is no longer in use, it can be released */
319
320 if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
321 bitmap->bp[page].hijacked = 0;
322 bitmap->bp[page].map = NULL;
323 } else {
324 /* normal case, free the page */
325 ptr = bitmap->bp[page].map;
326 bitmap->bp[page].map = NULL;
327 bitmap->missing_pages++;
328 kfree(ptr);
329 }
330 }
331
332 /*
333 * bitmap file handling - read and write the bitmap file and its superblock
334 */
335
336 /*
337 * basic page I/O operations
338 */
339
340 /* IO operations when bitmap is stored near all superblocks */
341
342 /* choose a good rdev and read the page from there */
343 static int read_sb_page(struct mddev *mddev, loff_t offset,
344 struct page *page, unsigned long index, int size)
345 {
346
347 sector_t sector = mddev->bitmap_info.offset + offset +
348 index * (PAGE_SIZE / SECTOR_SIZE);
349 struct md_rdev *rdev;
350
351 rdev_for_each(rdev, mddev) {
352 u32 iosize = roundup(size, bdev_logical_block_size(rdev->bdev));
353
354 if (!test_bit(In_sync, &rdev->flags) ||
355 test_bit(Faulty, &rdev->flags) ||
356 test_bit(Bitmap_sync, &rdev->flags))
357 continue;
358
359 if (sync_page_io(rdev, sector, iosize, page, REQ_OP_READ, true))
360 return 0;
361 }
362 return -EIO;
363 }
364
365 static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
366 {
367 /* Iterate the disks of an mddev, using rcu to protect access to the
368 * linked list, and raising the refcount of devices we return to ensure
369 * they don't disappear while in use.
370 * As devices are only added or removed when raid_disk is < 0 and
371 * nr_pending is 0 and In_sync is clear, the entries we return will
372 * still be in the same position on the list when we re-enter
373 * list_for_each_entry_continue_rcu.
374 *
375 * Note that if entered with 'rdev == NULL' to start at the
376 * beginning, we temporarily assign 'rdev' to an address which
377 * isn't really an rdev, but which can be used by
378 * list_for_each_entry_continue_rcu() to find the first entry.
379 */
380 rcu_read_lock();
381 if (rdev == NULL)
382 /* start at the beginning */
383 rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
384 else {
385 /* release the previous rdev and start from there. */
386 rdev_dec_pending(rdev, mddev);
387 }
388 list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
389 if (rdev->raid_disk >= 0 &&
390 !test_bit(Faulty, &rdev->flags)) {
391 /* this is a usable device */
392 atomic_inc(&rdev->nr_pending);
393 rcu_read_unlock();
394 return rdev;
395 }
396 }
397 rcu_read_unlock();
398 return NULL;
399 }
400
401 static unsigned int optimal_io_size(struct block_device *bdev,
402 unsigned int last_page_size,
403 unsigned int io_size)
404 {
405 if (bdev_io_opt(bdev) > bdev_logical_block_size(bdev))
406 return roundup(last_page_size, bdev_io_opt(bdev));
407 return io_size;
408 }
409
410 static unsigned int bitmap_io_size(unsigned int io_size, unsigned int opt_size,
411 loff_t start, loff_t boundary)
412 {
413 if (io_size != opt_size &&
414 start + opt_size / SECTOR_SIZE <= boundary)
415 return opt_size;
416 if (start + io_size / SECTOR_SIZE <= boundary)
417 return io_size;
418
419 /* Overflows boundary */
420 return 0;
421 }
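
/*
 * Illustrative example with assumed sizes: for io_size = 4096 and
 * opt_size = 65536, a write starting 200 sectors before 'boundary' fits
 * the 128-sector optimal size and 65536 is returned; with only 100
 * sectors left it falls back to the 8-sector io_size; if even that would
 * cross the boundary, 0 is returned and the caller treats it as an error.
 */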
422
423 static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
424 unsigned long pg_index, struct page *page)
425 {
426 struct block_device *bdev;
427 struct mddev *mddev = bitmap->mddev;
428 struct bitmap_storage *store = &bitmap->storage;
429 unsigned int bitmap_limit = (bitmap->storage.file_pages - pg_index) <<
430 PAGE_SHIFT;
431 loff_t sboff, offset = mddev->bitmap_info.offset;
432 sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE;
433 unsigned int size = PAGE_SIZE;
434 unsigned int opt_size = PAGE_SIZE;
435 sector_t doff;
436
437 bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
438 /* we compare length (page numbers), not page offset. */
439 if ((pg_index - store->sb_index) == store->file_pages - 1) {
440 unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);
441
442 if (last_page_size == 0)
443 last_page_size = PAGE_SIZE;
444 size = roundup(last_page_size, bdev_logical_block_size(bdev));
445 opt_size = optimal_io_size(bdev, last_page_size, size);
446 }
447
448 sboff = rdev->sb_start + offset;
449 doff = rdev->data_offset;
450
451 /* Just make sure we aren't corrupting data or metadata */
452 if (mddev->external) {
453 /* Bitmap could be anywhere. */
454 if (sboff + ps > doff &&
455 sboff < (doff + mddev->dev_sectors + PAGE_SIZE / SECTOR_SIZE))
456 return -EINVAL;
457 } else if (offset < 0) {
458 /* DATA BITMAP METADATA */
459 size = bitmap_io_size(size, opt_size, offset + ps, 0);
460 if (size == 0)
461 /* bitmap runs into metadata */
462 return -EINVAL;
463
464 if (doff + mddev->dev_sectors > sboff)
465 /* data runs into bitmap */
466 return -EINVAL;
467 } else if (rdev->sb_start < rdev->data_offset) {
468 /* METADATA BITMAP DATA */
469 size = bitmap_io_size(size, opt_size, sboff + ps, doff);
470 if (size == 0)
471 /* bitmap runs into data */
472 return -EINVAL;
473 }
474
475 md_super_write(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit), page);
476 return 0;
477 }
478
479 static void write_sb_page(struct bitmap *bitmap, unsigned long pg_index,
480 struct page *page, bool wait)
481 {
482 struct mddev *mddev = bitmap->mddev;
483
484 do {
485 struct md_rdev *rdev = NULL;
486
487 while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
488 if (__write_sb_page(rdev, bitmap, pg_index, page) < 0) {
489 set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
490 return;
491 }
492 }
493 } while (wait && md_super_wait(mddev) < 0);
494 }
495
496 static void md_bitmap_file_kick(struct bitmap *bitmap);
497
498 #ifdef CONFIG_MD_BITMAP_FILE
499 static void write_file_page(struct bitmap *bitmap, struct page *page, int wait)
500 {
501 struct buffer_head *bh = page_buffers(page);
502
503 while (bh && bh->b_blocknr) {
504 atomic_inc(&bitmap->pending_writes);
505 set_buffer_locked(bh);
506 set_buffer_mapped(bh);
507 submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
508 bh = bh->b_this_page;
509 }
510
511 if (wait)
512 wait_event(bitmap->write_wait,
513 atomic_read(&bitmap->pending_writes) == 0);
514 }
515
516 static void end_bitmap_write(struct buffer_head *bh, int uptodate)
517 {
518 struct bitmap *bitmap = bh->b_private;
519
520 if (!uptodate)
521 set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
522 if (atomic_dec_and_test(&bitmap->pending_writes))
523 wake_up(&bitmap->write_wait);
524 }
525
526 static void free_buffers(struct page *page)
527 {
528 struct buffer_head *bh;
529
530 if (!PagePrivate(page))
531 return;
532
533 bh = page_buffers(page);
534 while (bh) {
535 struct buffer_head *next = bh->b_this_page;
536 free_buffer_head(bh);
537 bh = next;
538 }
539 detach_page_private(page);
540 put_page(page);
541 }
542
543 /* read a page from a file.
544 * We both read the page, and attach buffers to the page to record the
545 * address of each block (using bmap). These addresses will be used
546 * to write the block later, completely bypassing the filesystem.
547 * This usage is similar to how swap files are handled, and allows us
548 * to write to a file with no concerns of memory allocation failing.
549 */
550 static int read_file_page(struct file *file, unsigned long index,
551 struct bitmap *bitmap, unsigned long count, struct page *page)
552 {
553 int ret = 0;
554 struct inode *inode = file_inode(file);
555 struct buffer_head *bh;
556 sector_t block, blk_cur;
557 unsigned long blocksize = i_blocksize(inode);
558
559 pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
560 (unsigned long long)index << PAGE_SHIFT);
561
562 bh = alloc_page_buffers(page, blocksize);
563 if (!bh) {
564 ret = -ENOMEM;
565 goto out;
566 }
567 attach_page_private(page, bh);
568 blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
569 while (bh) {
570 block = blk_cur;
571
572 if (count == 0)
573 bh->b_blocknr = 0;
574 else {
575 ret = bmap(inode, &block);
576 if (ret || !block) {
577 ret = -EINVAL;
578 bh->b_blocknr = 0;
579 goto out;
580 }
581
582 bh->b_blocknr = block;
583 bh->b_bdev = inode->i_sb->s_bdev;
584 if (count < blocksize)
585 count = 0;
586 else
587 count -= blocksize;
588
589 bh->b_end_io = end_bitmap_write;
590 bh->b_private = bitmap;
591 atomic_inc(&bitmap->pending_writes);
592 set_buffer_locked(bh);
593 set_buffer_mapped(bh);
594 submit_bh(REQ_OP_READ, bh);
595 }
596 blk_cur++;
597 bh = bh->b_this_page;
598 }
599
600 wait_event(bitmap->write_wait,
601 atomic_read(&bitmap->pending_writes)==0);
602 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
603 ret = -EIO;
604 out:
605 if (ret)
606 pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
607 (int)PAGE_SIZE,
608 (unsigned long long)index << PAGE_SHIFT,
609 ret);
610 return ret;
611 }
612 #else /* CONFIG_MD_BITMAP_FILE */
613 static void write_file_page(struct bitmap *bitmap, struct page *page, int wait)
614 {
615 }
616 static int read_file_page(struct file *file, unsigned long index,
617 struct bitmap *bitmap, unsigned long count, struct page *page)
618 {
619 return -EIO;
620 }
621 static void free_buffers(struct page *page)
622 {
623 put_page(page);
624 }
625 #endif /* CONFIG_MD_BITMAP_FILE */
626
627 /*
628 * bitmap file superblock operations
629 */
630
631 /*
632 * write out a page to a file
633 */
634 static void filemap_write_page(struct bitmap *bitmap, unsigned long pg_index,
635 bool wait)
636 {
637 struct bitmap_storage *store = &bitmap->storage;
638 struct page *page = store->filemap[pg_index];
639
640 if (mddev_is_clustered(bitmap->mddev)) {
641 /* go to node bitmap area starting point */
642 pg_index += store->sb_index;
643 }
644
645 if (store->file)
646 write_file_page(bitmap, page, wait);
647 else
648 write_sb_page(bitmap, pg_index, page, wait);
649 }
650
651 /*
652 * md_bitmap_wait_writes() should be called before writing any bitmap
653 * blocks, to ensure previous writes, particularly from
654 * md_bitmap_daemon_work(), have completed.
655 */
656 static void md_bitmap_wait_writes(struct bitmap *bitmap)
657 {
658 if (bitmap->storage.file)
659 wait_event(bitmap->write_wait,
660 atomic_read(&bitmap->pending_writes)==0);
661 else
662 /* Note that we ignore the return value. The writes
663 * might have failed, but that would just mean that
664 * some bits which should be cleared haven't been,
665 * which is safe. The relevant bitmap blocks will
666 * probably get written again, but there is no great
667 * loss if they aren't.
668 */
669 md_super_wait(bitmap->mddev);
670 }
671
672
673 /* update the event counter and sync the superblock to disk */
674 static void bitmap_update_sb(void *data)
675 {
676 bitmap_super_t *sb;
677 struct bitmap *bitmap = data;
678
679 if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
680 return;
681 if (bitmap->mddev->bitmap_info.external)
682 return;
683 if (!bitmap->storage.sb_page) /* no superblock */
684 return;
685 sb = kmap_atomic(bitmap->storage.sb_page);
686 sb->events = cpu_to_le64(bitmap->mddev->events);
687 if (bitmap->mddev->events < bitmap->events_cleared)
688 /* rocking back to read-only */
689 bitmap->events_cleared = bitmap->mddev->events;
690 sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
691 /*
692 * clear BITMAP_WRITE_ERROR bit to protect against the case that
693 * a bitmap write error occurred but the later writes succeeded.
694 */
695 sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
696 /* Just in case these have been changed via sysfs: */
697 sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
698 sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
699 /* This might have been changed by a reshape */
700 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
701 sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
702 sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
703 sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
704 bitmap_info.space);
705 kunmap_atomic(sb);
706
707 if (bitmap->storage.file)
708 write_file_page(bitmap, bitmap->storage.sb_page, 1);
709 else
710 write_sb_page(bitmap, bitmap->storage.sb_index,
711 bitmap->storage.sb_page, 1);
712 }
713
714 static void bitmap_print_sb(struct bitmap *bitmap)
715 {
716 bitmap_super_t *sb;
717
718 if (!bitmap || !bitmap->storage.sb_page)
719 return;
720 sb = kmap_atomic(bitmap->storage.sb_page);
721 pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
722 pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
723 pr_debug(" version: %u\n", le32_to_cpu(sb->version));
724 pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
725 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
726 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
727 le32_to_cpu(*(__le32 *)(sb->uuid+8)),
728 le32_to_cpu(*(__le32 *)(sb->uuid+12)));
729 pr_debug(" events: %llu\n",
730 (unsigned long long) le64_to_cpu(sb->events));
731 pr_debug("events cleared: %llu\n",
732 (unsigned long long) le64_to_cpu(sb->events_cleared));
733 pr_debug(" state: %08x\n", le32_to_cpu(sb->state));
734 pr_debug(" chunksize: %u B\n", le32_to_cpu(sb->chunksize));
735 pr_debug(" daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
736 pr_debug(" sync size: %llu KB\n",
737 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
738 pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
739 kunmap_atomic(sb);
740 }
741
742 /*
743 * bitmap_new_disk_sb
744 * @bitmap
745 *
746 * This function is somewhat the reverse of bitmap_read_sb. bitmap_read_sb
747 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
748 * This function verifies 'bitmap_info' and populates the on-disk bitmap
749 * structure, which is to be written to disk.
750 *
751 * Returns: 0 on success, -Exxx on error
752 */
753 static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
754 {
755 bitmap_super_t *sb;
756 unsigned long chunksize, daemon_sleep, write_behind;
757
758 bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
759 if (bitmap->storage.sb_page == NULL)
760 return -ENOMEM;
761 bitmap->storage.sb_index = 0;
762
763 sb = kmap_atomic(bitmap->storage.sb_page);
764
765 sb->magic = cpu_to_le32(BITMAP_MAGIC);
766 sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
767
768 chunksize = bitmap->mddev->bitmap_info.chunksize;
769 BUG_ON(!chunksize);
770 if (!is_power_of_2(chunksize)) {
771 kunmap_atomic(sb);
772 pr_warn("bitmap chunksize not a power of 2\n");
773 return -EINVAL;
774 }
775 sb->chunksize = cpu_to_le32(chunksize);
776
777 daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
778 if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
779 pr_debug("Choosing daemon_sleep default (5 sec)\n");
780 daemon_sleep = 5 * HZ;
781 }
782 sb->daemon_sleep = cpu_to_le32(daemon_sleep);
783 bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
784
785 /*
786 * FIXME: write_behind for RAID1. If not specified, what
787 * is a good choice? We choose COUNTER_MAX / 2 arbitrarily.
788 */
789 write_behind = bitmap->mddev->bitmap_info.max_write_behind;
790 if (write_behind > COUNTER_MAX)
791 write_behind = COUNTER_MAX / 2;
792 sb->write_behind = cpu_to_le32(write_behind);
793 bitmap->mddev->bitmap_info.max_write_behind = write_behind;
794
795 /* keep the array size field of the bitmap superblock up to date */
796 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
797
798 memcpy(sb->uuid, bitmap->mddev->uuid, 16);
799
800 set_bit(BITMAP_STALE, &bitmap->flags);
801 sb->state = cpu_to_le32(bitmap->flags);
802 bitmap->events_cleared = bitmap->mddev->events;
803 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
804 bitmap->mddev->bitmap_info.nodes = 0;
805
806 kunmap_atomic(sb);
807
808 return 0;
809 }
810
811 /* read the superblock from the bitmap file and initialize some bitmap fields */
812 static int md_bitmap_read_sb(struct bitmap *bitmap)
813 {
814 char *reason = NULL;
815 bitmap_super_t *sb;
816 unsigned long chunksize, daemon_sleep, write_behind;
817 unsigned long long events;
818 int nodes = 0;
819 unsigned long sectors_reserved = 0;
820 int err = -EINVAL;
821 struct page *sb_page;
822 loff_t offset = 0;
823
824 if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
825 chunksize = 128 * 1024 * 1024;
826 daemon_sleep = 5 * HZ;
827 write_behind = 0;
828 set_bit(BITMAP_STALE, &bitmap->flags);
829 err = 0;
830 goto out_no_sb;
831 }
832 /* page 0 is the superblock, read it... */
833 sb_page = alloc_page(GFP_KERNEL);
834 if (!sb_page)
835 return -ENOMEM;
836 bitmap->storage.sb_page = sb_page;
837
838 re_read:
839 /* If cluster_slot is set, the cluster is setup */
840 if (bitmap->cluster_slot >= 0) {
841 sector_t bm_blocks = bitmap->mddev->resync_max_sectors;
842
843 bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks,
844 (bitmap->mddev->bitmap_info.chunksize >> 9));
845 /* bits to bytes */
846 bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
847 /* to 4k blocks */
848 bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
849 offset = bitmap->cluster_slot * (bm_blocks << 3);
850 pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
851 bitmap->cluster_slot, offset);
852 }
853
854 if (bitmap->storage.file) {
855 loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
856 int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;
857
858 err = read_file_page(bitmap->storage.file, 0,
859 bitmap, bytes, sb_page);
860 } else {
861 err = read_sb_page(bitmap->mddev, offset, sb_page, 0,
862 sizeof(bitmap_super_t));
863 }
864 if (err)
865 return err;
866
867 err = -EINVAL;
868 sb = kmap_atomic(sb_page);
869
870 chunksize = le32_to_cpu(sb->chunksize);
871 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
872 write_behind = le32_to_cpu(sb->write_behind);
873 sectors_reserved = le32_to_cpu(sb->sectors_reserved);
874
875 /* verify that the bitmap-specific fields are valid */
876 if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
877 reason = "bad magic";
878 else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
879 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
880 reason = "unrecognized superblock version";
881 else if (chunksize < 512)
882 reason = "bitmap chunksize too small";
883 else if (!is_power_of_2(chunksize))
884 reason = "bitmap chunksize not a power of 2";
885 else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
886 reason = "daemon sleep period out of range";
887 else if (write_behind > COUNTER_MAX)
888 reason = "write-behind limit out of range (0 - 16383)";
889 if (reason) {
890 pr_warn("%s: invalid bitmap file superblock: %s\n",
891 bmname(bitmap), reason);
892 goto out;
893 }
894
895 /*
896 * Setup nodes/clustername only if bitmap version is
897 * cluster-compatible
898 */
899 if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
900 nodes = le32_to_cpu(sb->nodes);
901 strscpy(bitmap->mddev->bitmap_info.cluster_name,
902 sb->cluster_name, 64);
903 }
904
905 /* keep the array size field of the bitmap superblock up to date */
906 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
907
908 if (bitmap->mddev->persistent) {
909 /*
910 * We have a persistent array superblock, so compare the
911 * bitmap's UUID and event counter to the mddev's
912 */
913 if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
914 pr_warn("%s: bitmap superblock UUID mismatch\n",
915 bmname(bitmap));
916 goto out;
917 }
918 events = le64_to_cpu(sb->events);
919 if (!nodes && (events < bitmap->mddev->events)) {
920 pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
921 bmname(bitmap), events,
922 (unsigned long long) bitmap->mddev->events);
923 set_bit(BITMAP_STALE, &bitmap->flags);
924 }
925 }
926
927 /* assign fields using values from superblock */
928 bitmap->flags |= le32_to_cpu(sb->state);
929 if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
930 set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
931 bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
932 err = 0;
933
934 out:
935 kunmap_atomic(sb);
936 if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
937 /* Assigning chunksize is required for "re_read" */
938 bitmap->mddev->bitmap_info.chunksize = chunksize;
939 err = md_setup_cluster(bitmap->mddev, nodes);
940 if (err) {
941 pr_warn("%s: Could not setup cluster service (%d)\n",
942 bmname(bitmap), err);
943 goto out_no_sb;
944 }
945 bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
946 goto re_read;
947 }
948
949 out_no_sb:
950 if (err == 0) {
951 if (test_bit(BITMAP_STALE, &bitmap->flags))
952 bitmap->events_cleared = bitmap->mddev->events;
953 bitmap->mddev->bitmap_info.chunksize = chunksize;
954 bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
955 bitmap->mddev->bitmap_info.max_write_behind = write_behind;
956 bitmap->mddev->bitmap_info.nodes = nodes;
957 if (bitmap->mddev->bitmap_info.space == 0 ||
958 bitmap->mddev->bitmap_info.space > sectors_reserved)
959 bitmap->mddev->bitmap_info.space = sectors_reserved;
960 } else {
961 bitmap_print_sb(bitmap);
962 if (bitmap->cluster_slot < 0)
963 md_cluster_stop(bitmap->mddev);
964 }
965 return err;
966 }
967
968 /*
969 * general bitmap file operations
970 */
971
972 /*
973 * on-disk bitmap:
974 *
975 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
976 * file a page at a time. There's a superblock at the start of the file.
977 */
978 /* calculate the index of the page that contains this bit */
979 static inline unsigned long file_page_index(struct bitmap_storage *store,
980 unsigned long chunk)
981 {
982 if (store->sb_page)
983 chunk += sizeof(bitmap_super_t) << 3;
984 return chunk >> PAGE_BIT_SHIFT;
985 }
986
987 /* calculate the (bit) offset of this bit within a page */
988 static inline unsigned long file_page_offset(struct bitmap_storage *store,
989 unsigned long chunk)
990 {
991 if (store->sb_page)
992 chunk += sizeof(bitmap_super_t) << 3;
993 return chunk & (PAGE_BITS - 1);
994 }
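
/*
 * Worked example: with an internal superblock (256-byte bitmap_super_t,
 * i.e. 2048 bits) the bit for chunk N lives at bit offset
 * (N + 2048) & (PAGE_BITS - 1) of page (N + 2048) >> PAGE_BIT_SHIFT;
 * without a superblock page it is simply bit N & (PAGE_BITS - 1) of
 * page N >> PAGE_BIT_SHIFT.
 */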
995
996 /*
997 * return a pointer to the page in the filemap that contains the given bit
998 *
999 */
1000 static inline struct page *filemap_get_page(struct bitmap_storage *store,
1001 unsigned long chunk)
1002 {
1003 if (file_page_index(store, chunk) >= store->file_pages)
1004 return NULL;
1005 return store->filemap[file_page_index(store, chunk)];
1006 }
1007
1008 static int md_bitmap_storage_alloc(struct bitmap_storage *store,
1009 unsigned long chunks, int with_super,
1010 int slot_number)
1011 {
1012 int pnum, offset = 0;
1013 unsigned long num_pages;
1014 unsigned long bytes;
1015
1016 bytes = DIV_ROUND_UP(chunks, 8);
1017 if (with_super)
1018 bytes += sizeof(bitmap_super_t);
1019
1020 num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
1021 offset = slot_number * num_pages;
1022
1023 store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
1024 GFP_KERNEL);
1025 if (!store->filemap)
1026 return -ENOMEM;
1027
1028 if (with_super && !store->sb_page) {
1029 store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
1030 if (store->sb_page == NULL)
1031 return -ENOMEM;
1032 }
1033
1034 pnum = 0;
1035 if (store->sb_page) {
1036 store->filemap[0] = store->sb_page;
1037 pnum = 1;
1038 store->sb_index = offset;
1039 }
1040
1041 for ( ; pnum < num_pages; pnum++) {
1042 store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
1043 if (!store->filemap[pnum]) {
1044 store->file_pages = pnum;
1045 return -ENOMEM;
1046 }
1047 }
1048 store->file_pages = pnum;
1049
1050 /* We need 4 bits per page, rounded up to a multiple
1051 * of sizeof(unsigned long) */
1052 store->filemap_attr = kzalloc(
1053 roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
1054 GFP_KERNEL);
1055 if (!store->filemap_attr)
1056 return -ENOMEM;
1057
1058 store->bytes = bytes;
1059
1060 return 0;
1061 }
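
/*
 * Sizing example (illustrative numbers): an array with 100000 chunks and
 * an internal superblock needs DIV_ROUND_UP(100000, 8) + 256 = 12756
 * bytes, i.e. 4 pages of 4096 bytes, plus 4 attribute bits per page in
 * filemap_attr.
 */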
1062
1063 static void md_bitmap_file_unmap(struct bitmap_storage *store)
1064 {
1065 struct file *file = store->file;
1066 struct page *sb_page = store->sb_page;
1067 struct page **map = store->filemap;
1068 int pages = store->file_pages;
1069
1070 while (pages--)
1071 if (map[pages] != sb_page) /* 0 is sb_page, release it below */
1072 free_buffers(map[pages]);
1073 kfree(map);
1074 kfree(store->filemap_attr);
1075
1076 if (sb_page)
1077 free_buffers(sb_page);
1078
1079 if (file) {
1080 struct inode *inode = file_inode(file);
1081 invalidate_mapping_pages(inode->i_mapping, 0, -1);
1082 fput(file);
1083 }
1084 }
1085
1086 /*
1087 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
1088 * then it is no longer reliable, so we stop using it and we mark the file
1089 * as failed in the superblock
1090 */
1091 static void md_bitmap_file_kick(struct bitmap *bitmap)
1092 {
1093 if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
1094 bitmap_update_sb(bitmap);
1095
1096 if (bitmap->storage.file) {
1097 pr_warn("%s: kicking failed bitmap file %pD4 from array!\n",
1098 bmname(bitmap), bitmap->storage.file);
1099
1100 } else
1101 pr_warn("%s: disabling internal bitmap due to errors\n",
1102 bmname(bitmap));
1103 }
1104 }
1105
1106 enum bitmap_page_attr {
1107 BITMAP_PAGE_DIRTY = 0, /* there are set bits that need to be synced */
1108 BITMAP_PAGE_PENDING = 1, /* there are bits that are being cleaned.
1109 * i.e. counter is 1 or 2. */
1110 BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
1111 };
1112
1113 static inline void set_page_attr(struct bitmap *bitmap, int pnum,
1114 enum bitmap_page_attr attr)
1115 {
1116 set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
1117 }
1118
1119 static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
1120 enum bitmap_page_attr attr)
1121 {
1122 clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
1123 }
1124
1125 static inline int test_page_attr(struct bitmap *bitmap, int pnum,
1126 enum bitmap_page_attr attr)
1127 {
1128 return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
1129 }
1130
1131 static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
1132 enum bitmap_page_attr attr)
1133 {
1134 return test_and_clear_bit((pnum<<2) + attr,
1135 bitmap->storage.filemap_attr);
1136 }
1137 /*
1138 * bitmap_file_set_bit -- called before performing a write to the md device
1139 * to set (and eventually sync) a particular bit in the bitmap file
1140 *
1141 * we set the bit immediately, then we record the page number so that
1142 * when an unplug occurs, we can flush the dirty pages out to disk
1143 */
1144 static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
1145 {
1146 unsigned long bit;
1147 struct page *page;
1148 void *kaddr;
1149 unsigned long chunk = block >> bitmap->counts.chunkshift;
1150 struct bitmap_storage *store = &bitmap->storage;
1151 unsigned long index = file_page_index(store, chunk);
1152 unsigned long node_offset = 0;
1153
1154 index += store->sb_index;
1155 if (mddev_is_clustered(bitmap->mddev))
1156 node_offset = bitmap->cluster_slot * store->file_pages;
1157
1158 page = filemap_get_page(&bitmap->storage, chunk);
1159 if (!page)
1160 return;
1161 bit = file_page_offset(&bitmap->storage, chunk);
1162
1163 /* set the bit */
1164 kaddr = kmap_atomic(page);
1165 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
1166 set_bit(bit, kaddr);
1167 else
1168 set_bit_le(bit, kaddr);
1169 kunmap_atomic(kaddr);
1170 pr_debug("set file bit %lu page %lu\n", bit, index);
1171 /* record page number so it gets flushed to disk when unplug occurs */
1172 set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_DIRTY);
1173 }
1174
1175 static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
1176 {
1177 unsigned long bit;
1178 struct page *page;
1179 void *paddr;
1180 unsigned long chunk = block >> bitmap->counts.chunkshift;
1181 struct bitmap_storage *store = &bitmap->storage;
1182 unsigned long index = file_page_index(store, chunk);
1183 unsigned long node_offset = 0;
1184
1185 index += store->sb_index;
1186 if (mddev_is_clustered(bitmap->mddev))
1187 node_offset = bitmap->cluster_slot * store->file_pages;
1188
1189 page = filemap_get_page(&bitmap->storage, chunk);
1190 if (!page)
1191 return;
1192 bit = file_page_offset(&bitmap->storage, chunk);
1193 paddr = kmap_atomic(page);
1194 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
1195 clear_bit(bit, paddr);
1196 else
1197 clear_bit_le(bit, paddr);
1198 kunmap_atomic(paddr);
1199 if (!test_page_attr(bitmap, index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
1200 set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_PENDING);
1201 bitmap->allclean = 0;
1202 }
1203 }
1204
1205 static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
1206 {
1207 unsigned long bit;
1208 struct page *page;
1209 void *paddr;
1210 unsigned long chunk = block >> bitmap->counts.chunkshift;
1211 int set = 0;
1212
1213 page = filemap_get_page(&bitmap->storage, chunk);
1214 if (!page)
1215 return -EINVAL;
1216 bit = file_page_offset(&bitmap->storage, chunk);
1217 paddr = kmap_atomic(page);
1218 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
1219 set = test_bit(bit, paddr);
1220 else
1221 set = test_bit_le(bit, paddr);
1222 kunmap_atomic(paddr);
1223 return set;
1224 }
1225
1226 /* this gets called when the md device is ready to unplug its underlying
1227 * (slave) device queues -- before we let any writes go down, we need to
1228 * sync the dirty pages of the bitmap file to disk */
1229 static void __bitmap_unplug(struct bitmap *bitmap)
1230 {
1231 unsigned long i;
1232 int dirty, need_write;
1233 int writing = 0;
1234
1235 if (!__bitmap_enabled(bitmap))
1236 return;
1237
1238 /* look at each page to see if there are any set bits that need to be
1239 * flushed out to disk */
1240 for (i = 0; i < bitmap->storage.file_pages; i++) {
1241 dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
1242 need_write = test_and_clear_page_attr(bitmap, i,
1243 BITMAP_PAGE_NEEDWRITE);
1244 if (dirty || need_write) {
1245 if (!writing) {
1246 md_bitmap_wait_writes(bitmap);
1247 mddev_add_trace_msg(bitmap->mddev,
1248 "md bitmap_unplug");
1249 }
1250 clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
1251 filemap_write_page(bitmap, i, false);
1252 writing = 1;
1253 }
1254 }
1255 if (writing)
1256 md_bitmap_wait_writes(bitmap);
1257
1258 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
1259 md_bitmap_file_kick(bitmap);
1260 }
1261
1262 struct bitmap_unplug_work {
1263 struct work_struct work;
1264 struct bitmap *bitmap;
1265 struct completion *done;
1266 };
1267
1268 static void md_bitmap_unplug_fn(struct work_struct *work)
1269 {
1270 struct bitmap_unplug_work *unplug_work =
1271 container_of(work, struct bitmap_unplug_work, work);
1272
1273 __bitmap_unplug(unplug_work->bitmap);
1274 complete(unplug_work->done);
1275 }
1276
1277 static void bitmap_unplug_async(struct bitmap *bitmap)
1278 {
1279 DECLARE_COMPLETION_ONSTACK(done);
1280 struct bitmap_unplug_work unplug_work;
1281
1282 INIT_WORK_ONSTACK(&unplug_work.work, md_bitmap_unplug_fn);
1283 unplug_work.bitmap = bitmap;
1284 unplug_work.done = &done;
1285
1286 queue_work(md_bitmap_wq, &unplug_work.work);
1287 wait_for_completion(&done);
1288 }
1289
1290 static void bitmap_unplug(struct mddev *mddev, bool sync)
1291 {
1292 struct bitmap *bitmap = mddev->bitmap;
1293
1294 if (!bitmap)
1295 return;
1296
1297 if (sync)
1298 __bitmap_unplug(bitmap);
1299 else
1300 bitmap_unplug_async(bitmap);
1301 }
1302
1303 static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
1304
1305 /*
1306 * Initialize the in-memory bitmap from the on-disk bitmap and set up the memory
1307 * mapping of the bitmap file.
1308 *
1309 * Special case: If there's no bitmap file, or if the bitmap file had been
1310 * previously kicked from the array, we mark all the bits as 1's in order to
1311 * cause a full resync.
1312 *
1313 * We ignore all bits for sectors that end earlier than 'start'.
1314 * This is used when reading an out-of-date bitmap.
1315 */
1316 static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1317 {
1318 bool outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
1319 struct mddev *mddev = bitmap->mddev;
1320 unsigned long chunks = bitmap->counts.chunks;
1321 struct bitmap_storage *store = &bitmap->storage;
1322 struct file *file = store->file;
1323 unsigned long node_offset = 0;
1324 unsigned long bit_cnt = 0;
1325 unsigned long i;
1326 int ret;
1327
1328 if (!file && !mddev->bitmap_info.offset) {
1329 /* No permanent bitmap - fill with '1s'. */
1330 store->filemap = NULL;
1331 store->file_pages = 0;
1332 for (i = 0; i < chunks ; i++) {
1333 /* if the disk bit is set, set the memory bit */
1334 int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
1335 >= start);
1336 md_bitmap_set_memory_bits(bitmap,
1337 (sector_t)i << bitmap->counts.chunkshift,
1338 needed);
1339 }
1340 return 0;
1341 }
1342
1343 if (file && i_size_read(file->f_mapping->host) < store->bytes) {
1344 pr_warn("%s: bitmap file too short %lu < %lu\n",
1345 bmname(bitmap),
1346 (unsigned long) i_size_read(file->f_mapping->host),
1347 store->bytes);
1348 ret = -ENOSPC;
1349 goto err;
1350 }
1351
1352 if (mddev_is_clustered(mddev))
1353 node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));
1354
1355 for (i = 0; i < store->file_pages; i++) {
1356 struct page *page = store->filemap[i];
1357 int count;
1358
1359 /* unmap the old page, we're done with it */
1360 if (i == store->file_pages - 1)
1361 count = store->bytes - i * PAGE_SIZE;
1362 else
1363 count = PAGE_SIZE;
1364
1365 if (file)
1366 ret = read_file_page(file, i, bitmap, count, page);
1367 else
1368 ret = read_sb_page(mddev, 0, page, i + node_offset,
1369 count);
1370 if (ret)
1371 goto err;
1372 }
1373
1374 if (outofdate) {
1375 pr_warn("%s: bitmap file is out of date, doing full recovery\n",
1376 bmname(bitmap));
1377
1378 for (i = 0; i < store->file_pages; i++) {
1379 struct page *page = store->filemap[i];
1380 unsigned long offset = 0;
1381 void *paddr;
1382
1383 if (i == 0 && !mddev->bitmap_info.external)
1384 offset = sizeof(bitmap_super_t);
1385
1386 /*
1387 * If the bitmap is out of date, dirty the whole page
1388 * and write it out
1389 */
1390 paddr = kmap_atomic(page);
1391 memset(paddr + offset, 0xff, PAGE_SIZE - offset);
1392 kunmap_atomic(paddr);
1393
1394 filemap_write_page(bitmap, i, true);
1395 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) {
1396 ret = -EIO;
1397 goto err;
1398 }
1399 }
1400 }
1401
1402 for (i = 0; i < chunks; i++) {
1403 struct page *page = filemap_get_page(&bitmap->storage, i);
1404 unsigned long bit = file_page_offset(&bitmap->storage, i);
1405 void *paddr;
1406 bool was_set;
1407
1408 paddr = kmap_atomic(page);
1409 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
1410 was_set = test_bit(bit, paddr);
1411 else
1412 was_set = test_bit_le(bit, paddr);
1413 kunmap_atomic(paddr);
1414
1415 if (was_set) {
1416 /* if the disk bit is set, set the memory bit */
1417 int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
1418 >= start);
1419 md_bitmap_set_memory_bits(bitmap,
1420 (sector_t)i << bitmap->counts.chunkshift,
1421 needed);
1422 bit_cnt++;
1423 }
1424 }
1425
1426 pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
1427 bmname(bitmap), store->file_pages,
1428 bit_cnt, chunks);
1429
1430 return 0;
1431
1432 err:
1433 pr_warn("%s: bitmap initialisation failed: %d\n",
1434 bmname(bitmap), ret);
1435 return ret;
1436 }
1437
1438 /* just flag bitmap pages as needing to be written. */
1439 static void bitmap_write_all(struct mddev *mddev)
1440 {
1441 int i;
1442 struct bitmap *bitmap = mddev->bitmap;
1443
1444 if (!bitmap || !bitmap->storage.filemap)
1445 return;
1446
1447 /* Only one copy, so nothing needed */
1448 if (bitmap->storage.file)
1449 return;
1450
1451 for (i = 0; i < bitmap->storage.file_pages; i++)
1452 set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
1453 bitmap->allclean = 0;
1454 }
1455
1456 static void md_bitmap_count_page(struct bitmap_counts *bitmap,
1457 sector_t offset, int inc)
1458 {
1459 sector_t chunk = offset >> bitmap->chunkshift;
1460 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1461 bitmap->bp[page].count += inc;
1462 md_bitmap_checkfree(bitmap, page);
1463 }
1464
1465 static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
1466 {
1467 sector_t chunk = offset >> bitmap->chunkshift;
1468 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1469 struct bitmap_page *bp = &bitmap->bp[page];
1470
1471 if (!bp->pending)
1472 bp->pending = 1;
1473 }
1474
1475 static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1476 sector_t offset, sector_t *blocks,
1477 int create);
1478
1479 static void mddev_set_timeout(struct mddev *mddev, unsigned long timeout,
1480 bool force)
1481 {
1482 struct md_thread *thread;
1483
1484 rcu_read_lock();
1485 thread = rcu_dereference(mddev->thread);
1486
1487 if (!thread)
1488 goto out;
1489
1490 if (force || thread->timeout < MAX_SCHEDULE_TIMEOUT)
1491 thread->timeout = timeout;
1492
1493 out:
1494 rcu_read_unlock();
1495 }
1496
1497 /*
1498 * bitmap daemon -- periodically wakes up to clean bits and flush pages
1499 * out to disk
1500 */
1501 static void bitmap_daemon_work(struct mddev *mddev)
1502 {
1503 struct bitmap *bitmap;
1504 unsigned long j;
1505 unsigned long nextpage;
1506 sector_t blocks;
1507 struct bitmap_counts *counts;
1508
1509 /* Use a mutex to guard daemon_work against
1510 * bitmap_destroy.
1511 */
1512 mutex_lock(&mddev->bitmap_info.mutex);
1513 bitmap = mddev->bitmap;
1514 if (bitmap == NULL) {
1515 mutex_unlock(&mddev->bitmap_info.mutex);
1516 return;
1517 }
1518 if (time_before(jiffies, bitmap->daemon_lastrun
1519 + mddev->bitmap_info.daemon_sleep))
1520 goto done;
1521
1522 bitmap->daemon_lastrun = jiffies;
1523 if (bitmap->allclean) {
1524 mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true);
1525 goto done;
1526 }
1527 bitmap->allclean = 1;
1528
1529 mddev_add_trace_msg(bitmap->mddev, "md bitmap_daemon_work");
1530
1531 /* Any file-page which is PENDING now needs to be written.
1532 * So set NEEDWRITE now, then after we make any last-minute changes
1533 * we will write it.
1534 */
1535 for (j = 0; j < bitmap->storage.file_pages; j++)
1536 if (test_and_clear_page_attr(bitmap, j,
1537 BITMAP_PAGE_PENDING))
1538 set_page_attr(bitmap, j,
1539 BITMAP_PAGE_NEEDWRITE);
1540
1541 if (bitmap->need_sync &&
1542 mddev->bitmap_info.external == 0) {
1543 /* Arrange for superblock update as well as
1544 * other changes */
1545 bitmap_super_t *sb;
1546 bitmap->need_sync = 0;
1547 if (bitmap->storage.filemap) {
1548 sb = kmap_atomic(bitmap->storage.sb_page);
1549 sb->events_cleared =
1550 cpu_to_le64(bitmap->events_cleared);
1551 kunmap_atomic(sb);
1552 set_page_attr(bitmap, 0,
1553 BITMAP_PAGE_NEEDWRITE);
1554 }
1555 }
1556 /* Now look at the bitmap counters and if any are '2' or '1',
1557 * decrement and handle accordingly.
1558 */
1559 counts = &bitmap->counts;
1560 spin_lock_irq(&counts->lock);
1561 nextpage = 0;
1562 for (j = 0; j < counts->chunks; j++) {
1563 bitmap_counter_t *bmc;
1564 sector_t block = (sector_t)j << counts->chunkshift;
1565
1566 if (j == nextpage) {
1567 nextpage += PAGE_COUNTER_RATIO;
1568 if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
1569 j |= PAGE_COUNTER_MASK;
1570 continue;
1571 }
1572 counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
1573 }
1574
1575 bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
1576 if (!bmc) {
1577 j |= PAGE_COUNTER_MASK;
1578 continue;
1579 }
1580 if (*bmc == 1 && !bitmap->need_sync) {
1581 /* We can clear the bit */
1582 *bmc = 0;
1583 md_bitmap_count_page(counts, block, -1);
1584 md_bitmap_file_clear_bit(bitmap, block);
1585 } else if (*bmc && *bmc <= 2) {
1586 *bmc = 1;
1587 md_bitmap_set_pending(counts, block);
1588 bitmap->allclean = 0;
1589 }
1590 }
1591 spin_unlock_irq(&counts->lock);
1592
1593 md_bitmap_wait_writes(bitmap);
1594 /* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
1595 * DIRTY pages need to be written by bitmap_unplug so it can wait
1596 * for them.
1597 * If we find any DIRTY page we stop there and let bitmap_unplug
1598 * handle all the rest. This is important in the case where
1599 * the first block holds the superblock and it has been updated.
1600 * We mustn't write any other blocks before the superblock.
1601 */
1602 for (j = 0;
1603 j < bitmap->storage.file_pages
1604 && !test_bit(BITMAP_STALE, &bitmap->flags);
1605 j++) {
1606 if (test_page_attr(bitmap, j,
1607 BITMAP_PAGE_DIRTY))
1608 /* bitmap_unplug will handle the rest */
1609 break;
1610 if (bitmap->storage.filemap &&
1611 test_and_clear_page_attr(bitmap, j,
1612 BITMAP_PAGE_NEEDWRITE))
1613 filemap_write_page(bitmap, j, false);
1614 }
1615
1616 done:
1617 if (bitmap->allclean == 0)
1618 mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true);
1619 mutex_unlock(&mddev->bitmap_info.mutex);
1620 }
1621
1622 static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1623 sector_t offset, sector_t *blocks,
1624 int create)
1625 __releases(bitmap->lock)
1626 __acquires(bitmap->lock)
1627 {
1628 /* If 'create', we might release the lock and reclaim it.
1629 * The lock must have been taken with interrupts enabled.
1630 * If !create, we don't release the lock.
1631 */
1632 sector_t chunk = offset >> bitmap->chunkshift;
1633 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1634 unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
1635 sector_t csize = ((sector_t)1) << bitmap->chunkshift;
1636 int err;
1637
1638 if (page >= bitmap->pages) {
1639 /*
1640 * This can happen if bitmap_start_sync goes beyond
1641 * End-of-device while looking for a whole page, or the
1642 * user set a huge number via sysfs bitmap_set_bits.
1643 */
1644 *blocks = csize - (offset & (csize - 1));
1645 return NULL;
1646 }
1647 err = md_bitmap_checkpage(bitmap, page, create, 0);
1648
1649 if (bitmap->bp[page].hijacked ||
1650 bitmap->bp[page].map == NULL)
1651 csize = ((sector_t)1) << (bitmap->chunkshift +
1652 PAGE_COUNTER_SHIFT);
1653
1654 *blocks = csize - (offset & (csize - 1));
1655
1656 if (err < 0)
1657 return NULL;
1658
1659 /* now locked ... */
1660
1661 if (bitmap->bp[page].hijacked) { /* hijacked pointer */
1662 /* should we use the first or second counter field
1663 * of the hijacked pointer? */
1664 int hi = (pageoff > PAGE_COUNTER_MASK);
1665 return &((bitmap_counter_t *)
1666 &bitmap->bp[page].map)[hi];
1667 } else /* page is allocated */
1668 return (bitmap_counter_t *)
1669 &(bitmap->bp[page].map[pageoff]);
1670 }
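
/*
 * On return, *blocks is the number of 512-byte sectors from 'offset' to
 * the end of the region the counter covers - normally one chunk, or a
 * whole page worth of chunks when the page is hijacked or unallocated.
 * E.g. with 64MiB chunks (chunkshift == 17) and an offset 10 sectors into
 * a chunk, *blocks becomes 131072 - 10 = 131062.
 */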
1671
1672 static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
1673 unsigned long sectors, bool behind)
1674 {
1675 struct bitmap *bitmap = mddev->bitmap;
1676
1677 if (!bitmap)
1678 return 0;
1679
1680 if (behind) {
1681 int bw;
1682 atomic_inc(&bitmap->behind_writes);
1683 bw = atomic_read(&bitmap->behind_writes);
1684 if (bw > bitmap->behind_writes_used)
1685 bitmap->behind_writes_used = bw;
1686
1687 pr_debug("inc write-behind count %d/%lu\n",
1688 bw, bitmap->mddev->bitmap_info.max_write_behind);
1689 }
1690
1691 while (sectors) {
1692 sector_t blocks;
1693 bitmap_counter_t *bmc;
1694
1695 spin_lock_irq(&bitmap->counts.lock);
1696 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
1697 if (!bmc) {
1698 spin_unlock_irq(&bitmap->counts.lock);
1699 return 0;
1700 }
1701
1702 if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
1703 DEFINE_WAIT(__wait);
1704 /* note that it is safe to do the prepare_to_wait
1705 * after the test as long as we do it before dropping
1706 * the spinlock.
1707 */
1708 prepare_to_wait(&bitmap->overflow_wait, &__wait,
1709 TASK_UNINTERRUPTIBLE);
1710 spin_unlock_irq(&bitmap->counts.lock);
1711 schedule();
1712 finish_wait(&bitmap->overflow_wait, &__wait);
1713 continue;
1714 }
1715
1716 switch (*bmc) {
1717 case 0:
1718 md_bitmap_file_set_bit(bitmap, offset);
1719 md_bitmap_count_page(&bitmap->counts, offset, 1);
1720 fallthrough;
1721 case 1:
1722 *bmc = 2;
1723 }
1724
1725 (*bmc)++;
1726
1727 spin_unlock_irq(&bitmap->counts.lock);
1728
1729 offset += blocks;
1730 if (sectors > blocks)
1731 sectors -= blocks;
1732 else
1733 sectors = 0;
1734 }
1735 return 0;
1736 }
1737
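/*
 * bitmap_endwrite - account for the completion of a write request.
 *
 * Drop the pending-write counter for every chunk the request covered.
 * A failed write marks its chunks as needing resync; counters that fall
 * to 2 or below are flagged as pending so the daemon can later clear the
 * on-disk bit.  Any waiter blocked on a full counter in
 * bitmap_startwrite() is woken, and write-behind accounting is reversed.
 */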
1738 static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
1739 unsigned long sectors, bool success, bool behind)
1740 {
1741 struct bitmap *bitmap = mddev->bitmap;
1742
1743 if (!bitmap)
1744 return;
1745
1746 if (behind) {
1747 if (atomic_dec_and_test(&bitmap->behind_writes))
1748 wake_up(&bitmap->behind_wait);
1749 pr_debug("dec write-behind count %d/%lu\n",
1750 atomic_read(&bitmap->behind_writes),
1751 bitmap->mddev->bitmap_info.max_write_behind);
1752 }
1753
1754 while (sectors) {
1755 sector_t blocks;
1756 unsigned long flags;
1757 bitmap_counter_t *bmc;
1758
1759 spin_lock_irqsave(&bitmap->counts.lock, flags);
1760 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
1761 if (!bmc) {
1762 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1763 return;
1764 }
1765
1766 if (success && !bitmap->mddev->degraded &&
1767 bitmap->events_cleared < bitmap->mddev->events) {
1768 bitmap->events_cleared = bitmap->mddev->events;
1769 bitmap->need_sync = 1;
1770 sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
1771 }
1772
1773 if (!success && !NEEDED(*bmc))
1774 *bmc |= NEEDED_MASK;
1775
1776 if (COUNTER(*bmc) == COUNTER_MAX)
1777 wake_up(&bitmap->overflow_wait);
1778
1779 (*bmc)--;
1780 if (*bmc <= 2) {
1781 md_bitmap_set_pending(&bitmap->counts, offset);
1782 bitmap->allclean = 0;
1783 }
1784 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1785 offset += blocks;
1786 if (sectors > blocks)
1787 sectors -= blocks;
1788 else
1789 sectors = 0;
1790 }
1791 }
1792
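/*
 * __bitmap_start_sync - decide whether one chunk needs to be resynced.
 *
 * Returns true if the chunk containing 'offset' has its "resync needed"
 * or "resync active" bit set.  Unless the array is degraded, "needed" is
 * converted to "active" here, marking the start of resync for that chunk.
 * *blocks is set to the number of sectors the decision covers.
 */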
1793 static bool __bitmap_start_sync(struct bitmap *bitmap, sector_t offset,
1794 sector_t *blocks, bool degraded)
1795 {
1796 bitmap_counter_t *bmc;
1797 bool rv;
1798
1799 if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
1800 *blocks = 1024;
1801 return true; /* always resync if no bitmap */
1802 }
1803 spin_lock_irq(&bitmap->counts.lock);
1804
1805 rv = false;
1806 bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1807 if (bmc) {
1808 /* locked */
1809 if (RESYNC(*bmc)) {
1810 rv = true;
1811 } else if (NEEDED(*bmc)) {
1812 rv = true;
1813 if (!degraded) { /* don't set/clear bits if degraded */
1814 *bmc |= RESYNC_MASK;
1815 *bmc &= ~NEEDED_MASK;
1816 }
1817 }
1818 }
1819 spin_unlock_irq(&bitmap->counts.lock);
1820
1821 return rv;
1822 }
1823
1824 static bool bitmap_start_sync(struct mddev *mddev, sector_t offset,
1825 sector_t *blocks, bool degraded)
1826 {
1827 /* bitmap_start_sync must always report on multiples of whole
1828 * pages, otherwise resync (which is very PAGE_SIZE based) will
1829 * get confused.
1830 * So call __bitmap_start_sync repeatedly (if needed) until
1831 * at least PAGE_SIZE>>9 blocks are covered.
1832 * Return the 'or' of the result.
1833 */
1834 bool rv = false;
1835 sector_t blocks1;
1836
1837 *blocks = 0;
1838 while (*blocks < (PAGE_SIZE>>9)) {
1839 rv |= __bitmap_start_sync(mddev->bitmap, offset,
1840 &blocks1, degraded);
1841 offset += blocks1;
1842 *blocks += blocks1;
1843 }
1844
1845 return rv;
1846 }
1847
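/*
 * __bitmap_end_sync - record that resync of one chunk has finished.
 *
 * Clears the "resync active" bit.  An aborted resync leaves the chunk
 * marked as still needing resync; otherwise a counter at or below 2 is
 * flagged as pending so the daemon can clear the on-disk bit later.
 */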
1848 static void __bitmap_end_sync(struct bitmap *bitmap, sector_t offset,
1849 sector_t *blocks, bool aborted)
1850 {
1851 bitmap_counter_t *bmc;
1852 unsigned long flags;
1853
1854 if (bitmap == NULL) {
1855 *blocks = 1024;
1856 return;
1857 }
1858 spin_lock_irqsave(&bitmap->counts.lock, flags);
1859 bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1860 if (bmc == NULL)
1861 goto unlock;
1862 /* locked */
1863 if (RESYNC(*bmc)) {
1864 *bmc &= ~RESYNC_MASK;
1865
1866 if (!NEEDED(*bmc) && aborted)
1867 *bmc |= NEEDED_MASK;
1868 else {
1869 if (*bmc <= 2) {
1870 md_bitmap_set_pending(&bitmap->counts, offset);
1871 bitmap->allclean = 0;
1872 }
1873 }
1874 }
1875 unlock:
1876 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1877 }
1878
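/*
 * bitmap_end_sync - abort resync of a region: any chunk that was being
 * resynced is left marked as still needing resync.
 */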
1879 static void bitmap_end_sync(struct mddev *mddev, sector_t offset,
1880 sector_t *blocks)
1881 {
1882 __bitmap_end_sync(mddev->bitmap, offset, blocks, true);
1883 }
1884
1885 static void bitmap_close_sync(struct mddev *mddev)
1886 {
1887 /* Sync has finished, and any bitmap chunks that weren't synced
1888 * properly have been aborted. It remains to us to clear the
1889 * RESYNC bit wherever it is still on
1890 */
1891 sector_t sector = 0;
1892 sector_t blocks;
1893 struct bitmap *bitmap = mddev->bitmap;
1894
1895 if (!bitmap)
1896 return;
1897
1898 while (sector < bitmap->mddev->resync_max_sectors) {
1899 __bitmap_end_sync(bitmap, sector, &blocks, false);
1900 sector += blocks;
1901 }
1902 }
1903
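/*
 * bitmap_cond_end_sync - periodically commit resync progress.
 *
 * Called as a resync proceeds.  At most once per daemon_sleep interval
 * (unless 'force'), wait for in-flight resync I/O to drain, record the
 * progress in curr_resync_completed and clear the "resync active" bits
 * for all chunks that lie entirely below 'sector'.
 */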
1904 static void bitmap_cond_end_sync(struct mddev *mddev, sector_t sector,
1905 bool force)
1906 {
1907 sector_t s = 0;
1908 sector_t blocks;
1909 struct bitmap *bitmap = mddev->bitmap;
1910
1911 if (!bitmap)
1912 return;
1913 if (sector == 0) {
1914 bitmap->last_end_sync = jiffies;
1915 return;
1916 }
1917 if (!force && time_before(jiffies, (bitmap->last_end_sync
1918 + bitmap->mddev->bitmap_info.daemon_sleep)))
1919 return;
1920 wait_event(bitmap->mddev->recovery_wait,
1921 atomic_read(&bitmap->mddev->recovery_active) == 0);
1922
1923 bitmap->mddev->curr_resync_completed = sector;
1924 set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
1925 sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
1926 s = 0;
1927 while (s < sector && s < bitmap->mddev->resync_max_sectors) {
1928 __bitmap_end_sync(bitmap, s, &blocks, false);
1929 s += blocks;
1930 }
1931 bitmap->last_end_sync = jiffies;
1932 sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed);
1933 }
1934
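/*
 * bitmap_sync_with_cluster - fold a cluster peer's resync window into our
 * bitmap: end resync for the range the peer has completed (old_lo..new_lo)
 * and start resync for the newly announced range (old_hi..new_hi).
 */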
1935 static void bitmap_sync_with_cluster(struct mddev *mddev,
1936 sector_t old_lo, sector_t old_hi,
1937 sector_t new_lo, sector_t new_hi)
1938 {
1939 struct bitmap *bitmap = mddev->bitmap;
1940 sector_t sector, blocks = 0;
1941
1942 for (sector = old_lo; sector < new_lo; ) {
1943 __bitmap_end_sync(bitmap, sector, &blocks, false);
1944 sector += blocks;
1945 }
1946 WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
1947
1948 for (sector = old_hi; sector < new_hi; ) {
1949 bitmap_start_sync(mddev, sector, &blocks, false);
1950 sector += blocks;
1951 }
1952 WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
1953 }
1954
1955 static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
1956 {
1957 /* For each chunk covered by any of these sectors, set the
1958 * counter to 2 and possibly set resync_needed. They should all
1959 * be 0 at this point
1960 */
1961
1962 sector_t secs;
1963 bitmap_counter_t *bmc;
1964 spin_lock_irq(&bitmap->counts.lock);
1965 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
1966 if (!bmc) {
1967 spin_unlock_irq(&bitmap->counts.lock);
1968 return;
1969 }
1970 if (!*bmc) {
1971 *bmc = 2;
1972 md_bitmap_count_page(&bitmap->counts, offset, 1);
1973 md_bitmap_set_pending(&bitmap->counts, offset);
1974 bitmap->allclean = 0;
1975 }
1976 if (needed)
1977 *bmc |= NEEDED_MASK;
1978 spin_unlock_irq(&bitmap->counts.lock);
1979 }
1980
1981 /* dirty the memory and file bits for bitmap chunks "s" to "e" */
1982 static void bitmap_dirty_bits(struct mddev *mddev, unsigned long s,
1983 unsigned long e)
1984 {
1985 unsigned long chunk;
1986 struct bitmap *bitmap = mddev->bitmap;
1987
1988 if (!bitmap)
1989 return;
1990
1991 for (chunk = s; chunk <= e; chunk++) {
1992 sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
1993
1994 md_bitmap_set_memory_bits(bitmap, sec, 1);
1995 md_bitmap_file_set_bit(bitmap, sec);
1996 if (sec < bitmap->mddev->recovery_cp)
1997 /* We are asserting that the array is dirty,
1998 * so move the recovery_cp address back so
1999 * that it is obvious that it is dirty
2000 */
2001 bitmap->mddev->recovery_cp = sec;
2002 }
2003 }
2004
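/*
 * bitmap_flush - push all pending bitmap updates out to storage.
 *
 * Runs the daemon work several times with a back-dated timestamp so that
 * pending and dirty pages get written, then rewrites the bitmap
 * superblock.  Typically used while an array is being stopped.
 */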
2005 static void bitmap_flush(struct mddev *mddev)
2006 {
2007 struct bitmap *bitmap = mddev->bitmap;
2008 long sleep;
2009
2010 if (!bitmap) /* there was no bitmap */
2011 return;
2012
2013 /* run the daemon_work three times to ensure that everything
2014 * which can be flushed gets flushed
2015 */
2016 sleep = mddev->bitmap_info.daemon_sleep * 2;
2017 bitmap->daemon_lastrun -= sleep;
2018 bitmap_daemon_work(mddev);
2019 bitmap->daemon_lastrun -= sleep;
2020 bitmap_daemon_work(mddev);
2021 bitmap->daemon_lastrun -= sleep;
2022 bitmap_daemon_work(mddev);
2023 if (mddev->bitmap_info.external)
2024 md_super_wait(mddev);
2025 bitmap_update_sb(bitmap);
2026 }
2027
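/*
 * md_bitmap_free - release all memory held by a bitmap.
 *
 * Waits for outstanding page writes, unmaps the bitmap file and frees the
 * counter pages and the bitmap structure itself.  The on-disk bitmap is
 * left untouched.
 */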
2028 static void md_bitmap_free(void *data)
2029 {
2030 unsigned long k, pages;
2031 struct bitmap_page *bp;
2032 struct bitmap *bitmap = data;
2033
2034 if (!bitmap) /* there was no bitmap */
2035 return;
2036
2037 if (bitmap->sysfs_can_clear)
2038 sysfs_put(bitmap->sysfs_can_clear);
2039
2040 if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
2041 bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
2042 md_cluster_stop(bitmap->mddev);
2043
2044 /* Shouldn't be needed - but just in case.... */
2045 wait_event(bitmap->write_wait,
2046 atomic_read(&bitmap->pending_writes) == 0);
2047
2048 /* release the bitmap file */
2049 md_bitmap_file_unmap(&bitmap->storage);
2050
2051 bp = bitmap->counts.bp;
2052 pages = bitmap->counts.pages;
2053
2054 /* free all allocated memory */
2055
2056 if (bp) /* deallocate the page memory */
2057 for (k = 0; k < pages; k++)
2058 if (bp[k].map && !bp[k].hijacked)
2059 kfree(bp[k].map);
2060 kfree(bp);
2061 kfree(bitmap);
2062 }
2063
2064 static void bitmap_wait_behind_writes(struct mddev *mddev)
2065 {
2066 struct bitmap *bitmap = mddev->bitmap;
2067
2068 /* wait for behind writes to complete */
2069 if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
2070 pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
2071 mdname(mddev));
2072 /* need to kick something here to make sure I/O goes? */
2073 wait_event(bitmap->behind_wait,
2074 atomic_read(&bitmap->behind_writes) == 0);
2075 }
2076 }
2077
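/*
 * bitmap_destroy - detach the bitmap from an array and free it.
 *
 * Waits for any write-behind I/O, disconnects mddev->bitmap, disables the
 * periodic bitmap daemon and releases all bitmap memory.
 */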
2078 static void bitmap_destroy(struct mddev *mddev)
2079 {
2080 struct bitmap *bitmap = mddev->bitmap;
2081
2082 if (!bitmap) /* there was no bitmap */
2083 return;
2084
2085 bitmap_wait_behind_writes(mddev);
2086 if (!mddev->serialize_policy)
2087 mddev_destroy_serial_pool(mddev, NULL);
2088
2089 mutex_lock(&mddev->bitmap_info.mutex);
2090 spin_lock(&mddev->lock);
2091 mddev->bitmap = NULL; /* disconnect from the md device */
2092 spin_unlock(&mddev->lock);
2093 mutex_unlock(&mddev->bitmap_info.mutex);
2094 mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true);
2095
2096 md_bitmap_free(bitmap);
2097 }
2098
2099 /*
2100 * initialize the bitmap structure
2101 * if this returns an error, bitmap_destroy must be called to do clean up
2102 * once mddev->bitmap is set
2103 */
2104 static struct bitmap *__bitmap_create(struct mddev *mddev, int slot)
2105 {
2106 struct bitmap *bitmap;
2107 sector_t blocks = mddev->resync_max_sectors;
2108 struct file *file = mddev->bitmap_info.file;
2109 int err;
2110 struct kernfs_node *bm = NULL;
2111
2112 BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
2113
2114 BUG_ON(file && mddev->bitmap_info.offset);
2115
2116 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
2117 pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
2118 mdname(mddev));
2119 return ERR_PTR(-EBUSY);
2120 }
2121
2122 bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
2123 if (!bitmap)
2124 return ERR_PTR(-ENOMEM);
2125
2126 spin_lock_init(&bitmap->counts.lock);
2127 atomic_set(&bitmap->pending_writes, 0);
2128 init_waitqueue_head(&bitmap->write_wait);
2129 init_waitqueue_head(&bitmap->overflow_wait);
2130 init_waitqueue_head(&bitmap->behind_wait);
2131
2132 bitmap->mddev = mddev;
2133 bitmap->cluster_slot = slot;
2134
2135 if (mddev->kobj.sd)
2136 bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
2137 if (bm) {
2138 bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
2139 sysfs_put(bm);
2140 } else
2141 bitmap->sysfs_can_clear = NULL;
2142
2143 bitmap->storage.file = file;
2144 if (file) {
2145 get_file(file);
2146 /* As future accesses to this file will use bmap,
2147 * and bypass the page cache, we must sync the file
2148 * first.
2149 */
2150 vfs_fsync(file, 1);
2151 }
2152 /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
2153 if (!mddev->bitmap_info.external) {
2154 /*
2155 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
2156 * instructing us to create a new on-disk bitmap instance.
2157 */
2158 if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
2159 err = md_bitmap_new_disk_sb(bitmap);
2160 else
2161 err = md_bitmap_read_sb(bitmap);
2162 } else {
2163 err = 0;
2164 if (mddev->bitmap_info.chunksize == 0 ||
2165 mddev->bitmap_info.daemon_sleep == 0)
2166 /* chunksize and time_base need to be
2167 * set first. */
2168 err = -EINVAL;
2169 }
2170 if (err)
2171 goto error;
2172
2173 bitmap->daemon_lastrun = jiffies;
2174 err = __bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize,
2175 true);
2176 if (err)
2177 goto error;
2178
2179 pr_debug("created bitmap (%lu pages) for device %s\n",
2180 bitmap->counts.pages, bmname(bitmap));
2181
2182 err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
2183 if (err)
2184 goto error;
2185
2186 return bitmap;
2187 error:
2188 md_bitmap_free(bitmap);
2189 return ERR_PTR(err);
2190 }
2191
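/*
 * bitmap_create - create the in-memory bitmap for an array and attach it
 * as mddev->bitmap.  On failure nothing is attached and an error is
 * returned.
 */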
2192 static int bitmap_create(struct mddev *mddev, int slot)
2193 {
2194 struct bitmap *bitmap = __bitmap_create(mddev, slot);
2195
2196 if (IS_ERR(bitmap))
2197 return PTR_ERR(bitmap);
2198
2199 mddev->bitmap = bitmap;
2200 return 0;
2201 }
2202
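/*
 * bitmap_load - bring a newly created bitmap into service.
 *
 * Forget any stale in-memory state, read the bits back from storage,
 * kick recovery in case any were set and arm the periodic daemon.
 * Returns -EIO if a write error was seen along the way.
 */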
2203 static int bitmap_load(struct mddev *mddev)
2204 {
2205 int err = 0;
2206 sector_t start = 0;
2207 sector_t sector = 0;
2208 struct bitmap *bitmap = mddev->bitmap;
2209 struct md_rdev *rdev;
2210
2211 if (!bitmap)
2212 goto out;
2213
2214 rdev_for_each(rdev, mddev)
2215 mddev_create_serial_pool(mddev, rdev);
2216
2217 if (mddev_is_clustered(mddev))
2218 md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
2219
2220 /* Clear out old bitmap info first: Either there is none, or we
2221 * are resuming after someone else has possibly changed things,
2222 * so we should forget old cached info.
2223 * All chunks should be clean, but some might need_sync.
2224 */
2225 while (sector < mddev->resync_max_sectors) {
2226 sector_t blocks;
2227 bitmap_start_sync(mddev, sector, &blocks, false);
2228 sector += blocks;
2229 }
2230 bitmap_close_sync(mddev);
2231
2232 if (mddev->degraded == 0
2233 || bitmap->events_cleared == mddev->events)
2234 /* no need to keep dirty bits to optimise a
2235 * re-add of a missing device */
2236 start = mddev->recovery_cp;
2237
2238 mutex_lock(&mddev->bitmap_info.mutex);
2239 err = md_bitmap_init_from_disk(bitmap, start);
2240 mutex_unlock(&mddev->bitmap_info.mutex);
2241
2242 if (err)
2243 goto out;
2244 clear_bit(BITMAP_STALE, &bitmap->flags);
2245
2246 /* Kick recovery in case any bits were set */
2247 set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
2248
2249 mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true);
2250 md_wakeup_thread(mddev->thread);
2251
2252 bitmap_update_sb(bitmap);
2253
2254 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
2255 err = -EIO;
2256 out:
2257 return err;
2258 }
2259
2260 /* the caller needs to free the returned bitmap with md_bitmap_free() */
2261 static void *bitmap_get_from_slot(struct mddev *mddev, int slot)
2262 {
2263 int rv = 0;
2264 struct bitmap *bitmap;
2265
2266 bitmap = __bitmap_create(mddev, slot);
2267 if (IS_ERR(bitmap)) {
2268 rv = PTR_ERR(bitmap);
2269 return ERR_PTR(rv);
2270 }
2271
2272 rv = md_bitmap_init_from_disk(bitmap, 0);
2273 if (rv) {
2274 md_bitmap_free(bitmap);
2275 return ERR_PTR(rv);
2276 }
2277
2278 return bitmap;
2279 }
2280
2281 /* Loads the bitmap associated with slot and copies the resync information
2282 * to our bitmap
2283 */
2284 static int bitmap_copy_from_slot(struct mddev *mddev, int slot, sector_t *low,
2285 sector_t *high, bool clear_bits)
2286 {
2287 int rv = 0, i, j;
2288 sector_t block, lo = 0, hi = 0;
2289 struct bitmap_counts *counts;
2290 struct bitmap *bitmap;
2291
2292 bitmap = bitmap_get_from_slot(mddev, slot);
2293 if (IS_ERR(bitmap)) {
2294 pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
2295 return -1;
2296 }
2297
2298 counts = &bitmap->counts;
2299 for (j = 0; j < counts->chunks; j++) {
2300 block = (sector_t)j << counts->chunkshift;
2301 if (md_bitmap_file_test_bit(bitmap, block)) {
2302 if (!lo)
2303 lo = block;
2304 hi = block;
2305 md_bitmap_file_clear_bit(bitmap, block);
2306 md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
2307 md_bitmap_file_set_bit(mddev->bitmap, block);
2308 }
2309 }
2310
2311 if (clear_bits) {
2312 bitmap_update_sb(bitmap);
2313 /* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
2314 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
2315 for (i = 0; i < bitmap->storage.file_pages; i++)
2316 if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
2317 set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
2318 __bitmap_unplug(bitmap);
2319 }
2320 __bitmap_unplug(mddev->bitmap);
2321 *low = lo;
2322 *high = hi;
2323 md_bitmap_free(bitmap);
2324
2325 return rv;
2326 }
2327
2328 static void bitmap_set_pages(void *data, unsigned long pages)
2329 {
2330 struct bitmap *bitmap = data;
2331
2332 bitmap->counts.pages = pages;
2333 }
2334
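/*
 * bitmap_get_stats - report the current state of the bitmap: counter-page
 * usage, sync size from the superblock, bitmap-file details and
 * write-behind activity.  Returns -ENOENT if the array has no bitmap.
 */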
2335 static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats)
2336 {
2337 struct bitmap_storage *storage;
2338 struct bitmap_counts *counts;
2339 struct bitmap *bitmap = data;
2340 bitmap_super_t *sb;
2341
2342 if (!bitmap)
2343 return -ENOENT;
2344
2345 sb = kmap_local_page(bitmap->storage.sb_page);
2346 stats->sync_size = le64_to_cpu(sb->sync_size);
2347 kunmap_local(sb);
2348
2349 counts = &bitmap->counts;
2350 stats->missing_pages = counts->missing_pages;
2351 stats->pages = counts->pages;
2352
2353 storage = &bitmap->storage;
2354 stats->file_pages = storage->file_pages;
2355 stats->file = storage->file;
2356
2357 stats->behind_writes = atomic_read(&bitmap->behind_writes);
2358 stats->behind_wait = wq_has_sleeper(&bitmap->behind_wait);
2359 stats->events_cleared = bitmap->events_cleared;
2360 return 0;
2361 }
2362
2363 static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2364 int chunksize, bool init)
2365 {
2366 /* If chunk_size is 0, choose an appropriate chunk size.
2367 * Then possibly allocate new storage space.
2368 * Then quiesce, copy bits, replace bitmap, and re-start
2369 *
2370 * This function is called both to set up the initial bitmap
2371 * and to resize the bitmap while the array is active.
2372 * If this happens as a result of the array being resized,
2373 * chunksize will be zero, and we need to choose a suitable
2374 * chunksize, otherwise we use what we are given.
2375 */
2376 struct bitmap_storage store;
2377 struct bitmap_counts old_counts;
2378 unsigned long chunks;
2379 sector_t block;
2380 sector_t old_blocks, new_blocks;
2381 int chunkshift;
2382 int ret = 0;
2383 long pages;
2384 struct bitmap_page *new_bp;
2385
2386 if (bitmap->storage.file && !init) {
2387 pr_info("md: cannot resize file-based bitmap\n");
2388 return -EINVAL;
2389 }
2390
2391 if (chunksize == 0) {
2392 /* If there is enough space, leave the chunk size unchanged,
2393 * else increase by factor of two until there is enough space.
2394 */
2395 long bytes;
2396 long space = bitmap->mddev->bitmap_info.space;
2397
2398 if (space == 0) {
2399 /* We don't know how much space there is, so limit
2400 * to current size - in sectors.
2401 */
2402 bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
2403 if (!bitmap->mddev->bitmap_info.external)
2404 bytes += sizeof(bitmap_super_t);
2405 space = DIV_ROUND_UP(bytes, 512);
2406 bitmap->mddev->bitmap_info.space = space;
2407 }
2408 chunkshift = bitmap->counts.chunkshift;
2409 chunkshift--;
2410 do {
2411 /* 'chunkshift' is shift from block size to chunk size */
2412 chunkshift++;
2413 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2414 bytes = DIV_ROUND_UP(chunks, 8);
2415 if (!bitmap->mddev->bitmap_info.external)
2416 bytes += sizeof(bitmap_super_t);
2417 } while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
2418 (BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
2419 } else
2420 chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
2421
2422 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2423 memset(&store, 0, sizeof(store));
2424 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
2425 ret = md_bitmap_storage_alloc(&store, chunks,
2426 !bitmap->mddev->bitmap_info.external,
2427 mddev_is_clustered(bitmap->mddev)
2428 ? bitmap->cluster_slot : 0);
2429 if (ret) {
2430 md_bitmap_file_unmap(&store);
2431 goto err;
2432 }
2433
2434 pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
2435
2436 new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
2437 ret = -ENOMEM;
2438 if (!new_bp) {
2439 md_bitmap_file_unmap(&store);
2440 goto err;
2441 }
2442
2443 if (!init)
2444 bitmap->mddev->pers->quiesce(bitmap->mddev, 1);
2445
2446 store.file = bitmap->storage.file;
2447 bitmap->storage.file = NULL;
2448
2449 if (store.sb_page && bitmap->storage.sb_page)
2450 memcpy(page_address(store.sb_page),
2451 page_address(bitmap->storage.sb_page),
2452 sizeof(bitmap_super_t));
2453 spin_lock_irq(&bitmap->counts.lock);
2454 md_bitmap_file_unmap(&bitmap->storage);
2455 bitmap->storage = store;
2456
2457 old_counts = bitmap->counts;
2458 bitmap->counts.bp = new_bp;
2459 bitmap->counts.pages = pages;
2460 bitmap->counts.missing_pages = pages;
2461 bitmap->counts.chunkshift = chunkshift;
2462 bitmap->counts.chunks = chunks;
2463 bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
2464 BITMAP_BLOCK_SHIFT);
2465
2466 blocks = min(old_counts.chunks << old_counts.chunkshift,
2467 chunks << chunkshift);
2468
2469 /* For cluster raid, need to pre-allocate bitmap */
2470 if (mddev_is_clustered(bitmap->mddev)) {
2471 unsigned long page;
2472 for (page = 0; page < pages; page++) {
2473 ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
2474 if (ret) {
2475 unsigned long k;
2476
2477 /* deallocate the page memory */
2478 for (k = 0; k < page; k++) {
2479 kfree(new_bp[k].map);
2480 }
2481 kfree(new_bp);
2482
2483 /* restore some fields from old_counts */
2484 bitmap->counts.bp = old_counts.bp;
2485 bitmap->counts.pages = old_counts.pages;
2486 bitmap->counts.missing_pages = old_counts.pages;
2487 bitmap->counts.chunkshift = old_counts.chunkshift;
2488 bitmap->counts.chunks = old_counts.chunks;
2489 bitmap->mddev->bitmap_info.chunksize =
2490 1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
2491 blocks = old_counts.chunks << old_counts.chunkshift;
2492 pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
2493 break;
2494 } else
2495 bitmap->counts.bp[page].count += 1;
2496 }
2497 }
2498
2499 for (block = 0; block < blocks; ) {
2500 bitmap_counter_t *bmc_old, *bmc_new;
2501 int set;
2502
2503 bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
2504 set = bmc_old && NEEDED(*bmc_old);
2505
2506 if (set) {
2507 bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
2508 if (bmc_new) {
2509 if (*bmc_new == 0) {
2510 /* need to set on-disk bits too. */
2511 sector_t end = block + new_blocks;
2512 sector_t start = block >> chunkshift;
2513
2514 start <<= chunkshift;
2515 while (start < end) {
2516 md_bitmap_file_set_bit(bitmap, block);
2517 start += 1 << chunkshift;
2518 }
2519 *bmc_new = 2;
2520 md_bitmap_count_page(&bitmap->counts, block, 1);
2521 md_bitmap_set_pending(&bitmap->counts, block);
2522 }
2523 *bmc_new |= NEEDED_MASK;
2524 }
2525 if (new_blocks < old_blocks)
2526 old_blocks = new_blocks;
2527 }
2528 block += old_blocks;
2529 }
2530
2531 if (bitmap->counts.bp != old_counts.bp) {
2532 unsigned long k;
2533 for (k = 0; k < old_counts.pages; k++)
2534 if (!old_counts.bp[k].hijacked)
2535 kfree(old_counts.bp[k].map);
2536 kfree(old_counts.bp);
2537 }
2538
2539 if (!init) {
2540 int i;
2541 while (block < (chunks << chunkshift)) {
2542 bitmap_counter_t *bmc;
2543 bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
2544 if (bmc) {
2545 /* new space. It needs to be resynced, so
2546 * we set NEEDED_MASK.
2547 */
2548 if (*bmc == 0) {
2549 *bmc = NEEDED_MASK | 2;
2550 md_bitmap_count_page(&bitmap->counts, block, 1);
2551 md_bitmap_set_pending(&bitmap->counts, block);
2552 }
2553 }
2554 block += new_blocks;
2555 }
2556 for (i = 0; i < bitmap->storage.file_pages; i++)
2557 set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
2558 }
2559 spin_unlock_irq(&bitmap->counts.lock);
2560
2561 if (!init) {
2562 __bitmap_unplug(bitmap);
2563 bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
2564 }
2565 ret = 0;
2566 err:
2567 return ret;
2568 }
2569
2570 static int bitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize,
2571 bool init)
2572 {
2573 struct bitmap *bitmap = mddev->bitmap;
2574
2575 if (!bitmap)
2576 return 0;
2577
2578 return __bitmap_resize(bitmap, blocks, chunksize, init);
2579 }
2580
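/* 'bitmap/location' reports where the bitmap is stored: "file", "none",
 * or an offset in sectors for an internal bitmap.  Writing "none" removes
 * an existing bitmap; writing an offset creates and loads a new internal
 * bitmap, which is only permitted while no bitmap is configured.
 * Illustrative example (hypothetical device name):
 *   echo +8 > /sys/block/md0/md/bitmap/location
 */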
2581 static ssize_t
2582 location_show(struct mddev *mddev, char *page)
2583 {
2584 ssize_t len;
2585 if (mddev->bitmap_info.file)
2586 len = sprintf(page, "file");
2587 else if (mddev->bitmap_info.offset)
2588 len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
2589 else
2590 len = sprintf(page, "none");
2591 len += sprintf(page+len, "\n");
2592 return len;
2593 }
2594
2595 static ssize_t
2596 location_store(struct mddev *mddev, const char *buf, size_t len)
2597 {
2598 int rv;
2599
2600 rv = mddev_suspend_and_lock(mddev);
2601 if (rv)
2602 return rv;
2603
2604 if (mddev->pers) {
2605 if (mddev->recovery || mddev->sync_thread) {
2606 rv = -EBUSY;
2607 goto out;
2608 }
2609 }
2610
2611 if (mddev->bitmap || mddev->bitmap_info.file ||
2612 mddev->bitmap_info.offset) {
2613 /* bitmap already configured. Only option is to clear it */
2614 if (strncmp(buf, "none", 4) != 0) {
2615 rv = -EBUSY;
2616 goto out;
2617 }
2618
2619 bitmap_destroy(mddev);
2620 mddev->bitmap_info.offset = 0;
2621 if (mddev->bitmap_info.file) {
2622 struct file *f = mddev->bitmap_info.file;
2623 mddev->bitmap_info.file = NULL;
2624 fput(f);
2625 }
2626 } else {
2627 /* No bitmap, OK to set a location */
2628 long long offset;
2629
2630 if (strncmp(buf, "none", 4) == 0)
2631 /* nothing to be done */;
2632 else if (strncmp(buf, "file:", 5) == 0) {
2633 /* Not supported yet */
2634 rv = -EINVAL;
2635 goto out;
2636 } else {
2637 if (buf[0] == '+')
2638 rv = kstrtoll(buf+1, 10, &offset);
2639 else
2640 rv = kstrtoll(buf, 10, &offset);
2641 if (rv)
2642 goto out;
2643 if (offset == 0) {
2644 rv = -EINVAL;
2645 goto out;
2646 }
2647 if (mddev->bitmap_info.external == 0 &&
2648 mddev->major_version == 0 &&
2649 offset != mddev->bitmap_info.default_offset) {
2650 rv = -EINVAL;
2651 goto out;
2652 }
2653
2654 mddev->bitmap_info.offset = offset;
2655 rv = bitmap_create(mddev, -1);
2656 if (rv)
2657 goto out;
2658
2659 rv = bitmap_load(mddev);
2660 if (rv) {
2661 mddev->bitmap_info.offset = 0;
2662 bitmap_destroy(mddev);
2663 goto out;
2664 }
2665 }
2666 }
2667 if (!mddev->external) {
2668 /* Ensure new bitmap info is stored in
2669 * metadata promptly.
2670 */
2671 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2672 md_wakeup_thread(mddev->thread);
2673 }
2674 rv = 0;
2675 out:
2676 mddev_unlock_and_resume(mddev);
2677 if (rv)
2678 return rv;
2679 return len;
2680 }
2681
2682 static struct md_sysfs_entry bitmap_location =
2683 __ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
2684
2685 /* 'bitmap/space' is the space available at 'location' for the
2686 * bitmap. This allows the kernel to know when it is safe to
2687 * resize the bitmap to match a resized array.
2688 */
2689 static ssize_t
2690 space_show(struct mddev *mddev, char *page)
2691 {
2692 return sprintf(page, "%lu\n", mddev->bitmap_info.space);
2693 }
2694
2695 static ssize_t
2696 space_store(struct mddev *mddev, const char *buf, size_t len)
2697 {
2698 struct bitmap *bitmap;
2699 unsigned long sectors;
2700 int rv;
2701
2702 rv = kstrtoul(buf, 10, &sectors);
2703 if (rv)
2704 return rv;
2705
2706 if (sectors == 0)
2707 return -EINVAL;
2708
2709 bitmap = mddev->bitmap;
2710 if (bitmap && sectors < (bitmap->storage.bytes + 511) >> 9)
2711 return -EFBIG; /* Bitmap is too big for this small space */
2712
2713 /* could make sure it isn't too big, but that isn't really
2714 * needed - user-space should be careful.
2715 */
2716 mddev->bitmap_info.space = sectors;
2717 return len;
2718 }
2719
2720 static struct md_sysfs_entry bitmap_space =
2721 __ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
2722
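/* 'bitmap/time_base' is the delay between runs of the bitmap daemon that
 * writes out dirty pages and clears bits for idle chunks.  It is given in
 * seconds, optionally with a decimal fraction, and takes effect
 * immediately.  Illustrative example (hypothetical device name):
 *   echo 5 > /sys/block/md0/md/bitmap/time_base
 */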
2723 static ssize_t
2724 timeout_show(struct mddev *mddev, char *page)
2725 {
2726 ssize_t len;
2727 unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
2728 unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
2729
2730 len = sprintf(page, "%lu", secs);
2731 if (jifs)
2732 len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
2733 len += sprintf(page+len, "\n");
2734 return len;
2735 }
2736
2737 static ssize_t
2738 timeout_store(struct mddev *mddev, const char *buf, size_t len)
2739 {
2740 /* timeout can be set at any time */
2741 unsigned long timeout;
2742 int rv = strict_strtoul_scaled(buf, &timeout, 4);
2743 if (rv)
2744 return rv;
2745
2746 /* just to make sure we don't overflow... */
2747 if (timeout >= LONG_MAX / HZ)
2748 return -EINVAL;
2749
2750 timeout = timeout * HZ / 10000;
2751
2752 if (timeout >= MAX_SCHEDULE_TIMEOUT)
2753 timeout = MAX_SCHEDULE_TIMEOUT-1;
2754 if (timeout < 1)
2755 timeout = 1;
2756
2757 mddev->bitmap_info.daemon_sleep = timeout;
2758 mddev_set_timeout(mddev, timeout, false);
2759 md_wakeup_thread(mddev->thread);
2760
2761 return len;
2762 }
2763
2764 static struct md_sysfs_entry bitmap_timeout =
2765 __ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
2766
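/* 'bitmap/backlog' is the maximum number of write-behind requests that may
 * be outstanding to write-mostly devices at once; 0 effectively disables
 * write-behind.  Changing it also creates or drops the mddev's
 * serial_info_pool as needed.
 */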
2767 static ssize_t
2768 backlog_show(struct mddev *mddev, char *page)
2769 {
2770 return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
2771 }
2772
2773 static ssize_t
2774 backlog_store(struct mddev *mddev, const char *buf, size_t len)
2775 {
2776 unsigned long backlog;
2777 unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
2778 struct md_rdev *rdev;
2779 bool has_write_mostly = false;
2780 int rv = kstrtoul(buf, 10, &backlog);
2781 if (rv)
2782 return rv;
2783 if (backlog > COUNTER_MAX)
2784 return -EINVAL;
2785
2786 rv = mddev_suspend_and_lock(mddev);
2787 if (rv)
2788 return rv;
2789
2790 /*
2791 * Without a write-mostly device, it doesn't make sense to set
2792 * a backlog for max_write_behind.
2793 */
2794 rdev_for_each(rdev, mddev) {
2795 if (test_bit(WriteMostly, &rdev->flags)) {
2796 has_write_mostly = true;
2797 break;
2798 }
2799 }
2800 if (!has_write_mostly) {
2801 pr_warn_ratelimited("%s: can't set backlog, no write mostly device available\n",
2802 mdname(mddev));
2803 mddev_unlock(mddev);
2804 return -EINVAL;
2805 }
2806
2807 mddev->bitmap_info.max_write_behind = backlog;
2808 if (!backlog && mddev->serial_info_pool) {
2809 /* serial_info_pool is not needed if backlog is zero */
2810 if (!mddev->serialize_policy)
2811 mddev_destroy_serial_pool(mddev, NULL);
2812 } else if (backlog && !mddev->serial_info_pool) {
2813 /* serial_info_pool is needed since backlog is not zero */
2814 rdev_for_each(rdev, mddev)
2815 mddev_create_serial_pool(mddev, rdev);
2816 }
2817 if (old_mwb != backlog)
2818 bitmap_update_sb(mddev->bitmap);
2819
2820 mddev_unlock_and_resume(mddev);
2821 return len;
2822 }
2823
2824 static struct md_sysfs_entry bitmap_backlog =
2825 __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
2826
2827 static ssize_t
2828 chunksize_show(struct mddev *mddev, char *page)
2829 {
2830 return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
2831 }
2832
2833 static ssize_t
2834 chunksize_store(struct mddev *mddev, const char *buf, size_t len)
2835 {
2836 /* Can only be changed when no bitmap is active */
2837 int rv;
2838 unsigned long csize;
2839 if (mddev->bitmap)
2840 return -EBUSY;
2841 rv = kstrtoul(buf, 10, &csize);
2842 if (rv)
2843 return rv;
2844 if (csize < 512 ||
2845 !is_power_of_2(csize))
2846 return -EINVAL;
2847 if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
2848 sizeof(((bitmap_super_t *)0)->chunksize))))
2849 return -EOVERFLOW;
2850 mddev->bitmap_info.chunksize = csize;
2851 return len;
2852 }
2853
2854 static struct md_sysfs_entry bitmap_chunksize =
2855 __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
2856
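/* 'bitmap/metadata' reports whether the bitmap superblock is managed by the
 * kernel ("internal"), by user space ("external") or shared in a cluster
 * ("clustered").  It can only be changed while no bitmap is configured.
 */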
2857 static ssize_t metadata_show(struct mddev *mddev, char *page)
2858 {
2859 if (mddev_is_clustered(mddev))
2860 return sprintf(page, "clustered\n");
2861 return sprintf(page, "%s\n", (mddev->bitmap_info.external
2862 ? "external" : "internal"));
2863 }
2864
2865 static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
2866 {
2867 if (mddev->bitmap ||
2868 mddev->bitmap_info.file ||
2869 mddev->bitmap_info.offset)
2870 return -EBUSY;
2871 if (strncmp(buf, "external", 8) == 0)
2872 mddev->bitmap_info.external = 1;
2873 else if ((strncmp(buf, "internal", 8) == 0) ||
2874 (strncmp(buf, "clustered", 9) == 0))
2875 mddev->bitmap_info.external = 0;
2876 else
2877 return -EINVAL;
2878 return len;
2879 }
2880
2881 static struct md_sysfs_entry bitmap_metadata =
2882 __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
2883
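/* 'bitmap/can_clear' reflects bitmap->need_sync: it reads "false" while
 * bits should not be cleared because user space (managing external
 * metadata) has not yet recorded the new events_cleared value.  Writing
 * "true" (only allowed when the array is not degraded) re-enables
 * clearing; writing "false" suspends it.
 */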
2884 static ssize_t can_clear_show(struct mddev *mddev, char *page)
2885 {
2886 int len;
2887 struct bitmap *bitmap;
2888
2889 spin_lock(&mddev->lock);
2890 bitmap = mddev->bitmap;
2891 if (bitmap)
2892 len = sprintf(page, "%s\n", (bitmap->need_sync ? "false" :
2893 "true"));
2894 else
2895 len = sprintf(page, "\n");
2896 spin_unlock(&mddev->lock);
2897 return len;
2898 }
2899
2900 static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
2901 {
2902 struct bitmap *bitmap = mddev->bitmap;
2903
2904 if (!bitmap)
2905 return -ENOENT;
2906
2907 if (strncmp(buf, "false", 5) == 0) {
2908 bitmap->need_sync = 1;
2909 return len;
2910 }
2911
2912 if (strncmp(buf, "true", 4) == 0) {
2913 if (mddev->degraded)
2914 return -EBUSY;
2915 bitmap->need_sync = 0;
2916 return len;
2917 }
2918
2919 return -EINVAL;
2920 }
2921
2922 static struct md_sysfs_entry bitmap_can_clear =
2923 __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
2924
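/* 'bitmap/max_backlog_used' reports the highest number of simultaneous
 * write-behind requests seen since the counter was last reset; writing
 * anything resets it to zero.
 */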
2925 static ssize_t
2926 behind_writes_used_show(struct mddev *mddev, char *page)
2927 {
2928 ssize_t ret;
2929 struct bitmap *bitmap;
2930
2931 spin_lock(&mddev->lock);
2932 bitmap = mddev->bitmap;
2933 if (!bitmap)
2934 ret = sprintf(page, "0\n");
2935 else
2936 ret = sprintf(page, "%lu\n", bitmap->behind_writes_used);
2937 spin_unlock(&mddev->lock);
2938
2939 return ret;
2940 }
2941
2942 static ssize_t
2943 behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
2944 {
2945 struct bitmap *bitmap = mddev->bitmap;
2946
2947 if (bitmap)
2948 bitmap->behind_writes_used = 0;
2949 return len;
2950 }
2951
2952 static struct md_sysfs_entry max_backlog_used =
2953 __ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
2954 behind_writes_used_show, behind_writes_used_reset);
2955
2956 static struct attribute *md_bitmap_attrs[] = {
2957 &bitmap_location.attr,
2958 &bitmap_space.attr,
2959 &bitmap_timeout.attr,
2960 &bitmap_backlog.attr,
2961 &bitmap_chunksize.attr,
2962 &bitmap_metadata.attr,
2963 &bitmap_can_clear.attr,
2964 &max_backlog_used.attr,
2965 NULL
2966 };
2967 const struct attribute_group md_bitmap_group = {
2968 .name = "bitmap",
2969 .attrs = md_bitmap_attrs,
2970 };
2971
2972 static struct bitmap_operations bitmap_ops = {
2973 .enabled = bitmap_enabled,
2974 .create = bitmap_create,
2975 .resize = bitmap_resize,
2976 .load = bitmap_load,
2977 .destroy = bitmap_destroy,
2978 .flush = bitmap_flush,
2979 .write_all = bitmap_write_all,
2980 .dirty_bits = bitmap_dirty_bits,
2981 .unplug = bitmap_unplug,
2982 .daemon_work = bitmap_daemon_work,
2983 .wait_behind_writes = bitmap_wait_behind_writes,
2984
2985 .startwrite = bitmap_startwrite,
2986 .endwrite = bitmap_endwrite,
2987 .start_sync = bitmap_start_sync,
2988 .end_sync = bitmap_end_sync,
2989 .cond_end_sync = bitmap_cond_end_sync,
2990 .close_sync = bitmap_close_sync,
2991
2992 .update_sb = bitmap_update_sb,
2993 .get_stats = bitmap_get_stats,
2994
2995 .sync_with_cluster = bitmap_sync_with_cluster,
2996 .get_from_slot = bitmap_get_from_slot,
2997 .copy_from_slot = bitmap_copy_from_slot,
2998 .set_pages = bitmap_set_pages,
2999 .free = md_bitmap_free,
3000 };
3001
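/* Attach this file's bitmap implementation to an mddev. */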
3002 void mddev_set_bitmap_ops(struct mddev *mddev)
3003 {
3004 mddev->bitmap_ops = &bitmap_ops;
3005 }
3006