Lines matching "touch", "hold" and "ms" in drivers/block/drbd/drbd_bitmap.c

1 // SPDX-License-Identifier: GPL-2.0-only
7 Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
8 Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
9 Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
39 * 1 << (50 - 12) bits needed
40 * 38 --> we need u64 to index and count bits
41 * 1 << (38 - 3) bitmap bytes needed
42 * 35 --> we still need u64 to index and count bytes
44 * 1 << (35 - 2) 32bit longs needed
45 * 33 --> we'd even need u64 to index and count 32bit long words.
46 * 1 << (35 - 3) 64bit longs needed
47 * 32 --> we could get away with a 32bit unsigned int to index and count
51 * 1 << (35 - 12)
52 * 23 --> we need that many 4KiB pages of bitmap.
53 * 1 << (23 + 3) --> on a 64bit arch,
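/* [Editor's addition, not part of the file: a minimal userspace sketch that
 * re-derives the shift arithmetic above, assuming 4KiB pages and 64bit
 * pointers/longs.] */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t storage  = 1ULL << 50;		/* 1 PiB of backend storage */
	uint64_t bits     = storage >> 12;	/* one bit per 4KiB: 1 << 38 */
	uint64_t bm_bytes = bits >> 3;		/* 8 bits per byte: 1 << 35 */
	uint64_t bm_pages = bm_bytes >> 12;	/* 4KiB pages of bitmap: 1 << 23 */
	uint64_t ptrs     = bm_pages << 3;	/* 8 byte page pointers: 1 << 26 */

	printf("bitmap: %llu GiB, page pointer array: %llu MiB\n",
	       (unsigned long long)(bm_bytes >> 30),	/* 32 GiB */
	       (unsigned long long)(ptrs >> 20));	/* 64 MiB */
	return 0;
}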
63 * core memory. Currently we still hold the full bitmap in core as long
67 * We plan to reduce the amount of in-core bitmap pages by paging them in
68 * and out against their on-disk location as necessary, but need to make
89 * and drbd_bm_write_hinted() -> bm_rw() called from there.
115 struct drbd_bitmap *b = device->bitmap; in __bm_print_lock_info()
119 current->comm, task_pid_nr(current), in __bm_print_lock_info()
120 func, b->bm_why ?: "?", in __bm_print_lock_info()
121 b->bm_task->comm, task_pid_nr(b->bm_task)); in __bm_print_lock_info()
126 struct drbd_bitmap *b = device->bitmap; in drbd_bm_lock()
134 trylock_failed = !mutex_trylock(&b->bm_change); in drbd_bm_lock()
138 current->comm, task_pid_nr(current), in drbd_bm_lock()
139 why, b->bm_why ?: "?", in drbd_bm_lock()
140 b->bm_task->comm, task_pid_nr(b->bm_task)); in drbd_bm_lock()
141 mutex_lock(&b->bm_change); in drbd_bm_lock()
143 if (BM_LOCKED_MASK & b->bm_flags) in drbd_bm_lock()
145 b->bm_flags |= flags & BM_LOCKED_MASK; in drbd_bm_lock()
147 b->bm_why = why; in drbd_bm_lock()
148 b->bm_task = current; in drbd_bm_lock()
153 struct drbd_bitmap *b = device->bitmap; in drbd_bm_unlock()
159 if (!(BM_LOCKED_MASK & device->bitmap->bm_flags)) in drbd_bm_unlock()
162 b->bm_flags &= ~BM_LOCKED_MASK; in drbd_bm_unlock()
163 b->bm_why = NULL; in drbd_bm_unlock()
164 b->bm_task = NULL; in drbd_bm_unlock()
165 mutex_unlock(&b->bm_change); in drbd_bm_unlock()
168 /* we store some "meta" info about our pages in page->private */
177 #define BM_PAGE_IDX_MASK ((1UL<<24)-1)
192 /* store_page_idx uses non-atomic assignment. It is only used directly after
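/* [Editor's sketch, helper names illustrative: the low 24 bits of
 * page->private hold the page index, leaving the high bits free for flag
 * bits such as the BM_PAGE_IO_LOCK used below.] */
#include <linux/mm.h>

static void example_store_page_idx(struct page *page, unsigned long page_nr)
{
	/* non-atomic plain store, as the comment above notes: only safe
	 * while no one else can see the page yet */
	set_page_private(page, page_nr & BM_PAGE_IDX_MASK);
}

static unsigned long example_page_to_idx(struct page *page)
{
	return page_private(page) & BM_PAGE_IDX_MASK;
}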
213 struct drbd_bitmap *b = device->bitmap; in bm_page_lock_io()
214 void *addr = &page_private(b->bm_pages[page_nr]); in bm_page_lock_io()
215 wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); in bm_page_lock_io()
220 struct drbd_bitmap *b = device->bitmap; in bm_page_unlock_io()
221 void *addr = &page_private(b->bm_pages[page_nr]); in bm_page_unlock_io()
223 wake_up(&device->bitmap->bm_io_wait); in bm_page_unlock_io()
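/* [Editor's note] bm_page_lock_io()/bm_page_unlock_io() form a bit lock in
 * page->private with a shared waitqueue: lockers sleep in wait_event()
 * until their test_and_set_bit(BM_PAGE_IO_LOCK, ...) succeeds; unlock
 * clears the bit (the clear itself is elided by this listing) and wakes
 * b->bm_io_wait so the next waiter can retry. */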
242 device->bitmap->n_bitmap_hints = 0; in drbd_bm_reset_al_hints()
246 * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
256 struct drbd_bitmap *b = device->bitmap; in drbd_bm_mark_for_writeout()
258 if (page_nr >= device->bitmap->bm_number_of_pages) { in drbd_bm_mark_for_writeout()
260 page_nr, (int)device->bitmap->bm_number_of_pages); in drbd_bm_mark_for_writeout()
263 page = device->bitmap->bm_pages[page_nr]; in drbd_bm_mark_for_writeout()
264 BUG_ON(b->n_bitmap_hints >= ARRAY_SIZE(b->al_bitmap_hints)); in drbd_bm_mark_for_writeout()
266 b->al_bitmap_hints[b->n_bitmap_hints++] = page_nr; in drbd_bm_mark_for_writeout()
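/* [Editor's usage sketch, hypothetical caller: collect hints for the bitmap
 * pages an activity-log transaction dirtied, then write back only those
 * pages instead of the whole bitmap.] */
static void example_flush_al_pages(struct drbd_device *device,
				   const unsigned int *page_nrs, unsigned int n)
{
	unsigned int i;

	drbd_bm_reset_al_hints(device);
	for (i = 0; i < n; i++)
		drbd_bm_mark_for_writeout(device, page_nrs[i]);
	drbd_bm_write_hinted(device);	/* -> bm_rw(), hinted pages only */
}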
299 unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3); in bm_word_to_page_idx()
300 BUG_ON(page_nr >= b->bm_number_of_pages); in bm_word_to_page_idx()
308 BUG_ON(page_nr >= b->bm_number_of_pages); in bm_bit_to_page_idx()
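/* [Editor's worked example] With PAGE_SHIFT == 12 and LN2_BPL == 6 (64bit
 * longs) the shift in bm_word_to_page_idx() is 12 - 6 + 3 == 9:
 * 2^9 == 512 longs per 4KiB page, so long_nr 0..511 map to page 0,
 * 512..1023 to page 1, and so on. bm_bit_to_page_idx() presumably shifts
 * by PAGE_SHIFT + 3 == 15, i.e. 2^15 == 32768 bits per page. */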
314 struct page *page = b->bm_pages[idx]; in __bm_map_pidx()
334 #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
338 hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
341 #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
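/* [Editor's worked example, assuming BM_EXT_SHIFT == 24, BM_BLOCK_SHIFT == 12
 * and 64bit longs] S2W(s) == s << (24 - 12 - 6) == s * 64: one bm-extent is
 * one 512 byte sector of bitmap, i.e. 64 longs, describing
 * 4096 bits * 4KiB == 16 MiB of storage. MLPP(X) == X & (PAGE_SIZE /
 * sizeof(long) - 1) == X & 511 keeps an offset within one page of longs. */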
380 struct page **old_pages = b->bm_pages; in bm_realloc_pages()
383 unsigned long have = b->bm_number_of_pages; in bm_realloc_pages()
410 bm_free_pages(new_pages + have, i - have); in bm_realloc_pages()
423 bm_free_pages(old_pages + want, have - want); in bm_realloc_pages()
431 * allocates the drbd_bitmap and stores it in device->bitmap.
435 struct drbd_bitmap *b = device->bitmap; in drbd_bm_init()
439 return -ENOMEM; in drbd_bm_init()
440 spin_lock_init(&b->bm_lock); in drbd_bm_init()
441 mutex_init(&b->bm_change); in drbd_bm_init()
442 init_waitqueue_head(&b->bm_io_wait); in drbd_bm_init()
444 device->bitmap = b; in drbd_bm_init()
451 if (!expect(device, device->bitmap)) in drbd_bm_capacity()
453 return device->bitmap->bm_dev_capacity; in drbd_bm_capacity()
460 if (!expect(device, device->bitmap)) in drbd_bm_cleanup()
462 bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages); in drbd_bm_cleanup()
463 bm_vk_free(device->bitmap->bm_pages); in drbd_bm_cleanup()
464 kfree(device->bitmap); in drbd_bm_cleanup()
465 device->bitmap = NULL; in drbd_bm_cleanup()
469 * since (b->bm_bits % BITS_PER_LONG) != 0,
475 #define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1)
481 #define BITS_PER_LONG_MASK (BITS_PER_LONG - 1)
490 tmp = (b->bm_bits & BITS_PER_PAGE_MASK); in bm_clear_surplus()
492 mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; in bm_clear_surplus()
497 p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); in bm_clear_surplus()
509 if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { in bm_clear_surplus()
526 tmp = (b->bm_bits & BITS_PER_PAGE_MASK); in bm_set_surplus()
528 mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; in bm_set_surplus()
533 p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); in bm_set_surplus()
544 if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { in bm_set_surplus()
558 unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1; in bm_count_bits()
562 for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) { in bm_count_bits()
569 last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL; in bm_count_bits()
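/* [Editor's worked example, assuming 64bit longs] For bm_bits == 100:
 * tmp = 100, mask = (1UL << (100 & 63)) - 1 = (1UL << 36) - 1.
 * Bits 64..99 live in the last used word, so its low 36 bits are valid
 * payload; bm_clear_surplus() clears everything above, bm_set_surplus()
 * sets it, and bm_count_bits() applies the same mask to the last word so
 * the surplus never leaks into b->bm_set. */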
590 if (end > b->bm_words) { in bm_memset()
596 do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset; in bm_memset()
606 bm_set_page_need_writeout(b->bm_pages[idx]); in bm_memset()
615 if (ldev->md.al_offset == 8) in drbd_md_on_disk_bits()
616 bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset; in drbd_md_on_disk_bits()
618 bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset; in drbd_md_on_disk_bits()
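/* [Editor's note] The branch picks whichever region the bitmap occupies:
 * with the activity log at sector offset 8, right behind the superblock,
 * the bitmap runs to the end of the meta-data area; otherwise it ends
 * where the activity log begins. The sector count presumably becomes a
 * bit count via << (9 + 3): 512 bytes per sector times 8 bits per byte. */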
626 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
632 struct drbd_bitmap *b = device->bitmap; in drbd_bm_resize()
640 return -ENOMEM; in drbd_bm_resize()
647 if (capacity == b->bm_dev_capacity) in drbd_bm_resize()
651 spin_lock_irq(&b->bm_lock); in drbd_bm_resize()
652 opages = b->bm_pages; in drbd_bm_resize()
653 onpages = b->bm_number_of_pages; in drbd_bm_resize()
654 owords = b->bm_words; in drbd_bm_resize()
655 b->bm_pages = NULL; in drbd_bm_resize()
656 b->bm_number_of_pages = in drbd_bm_resize()
657 b->bm_set = in drbd_bm_resize()
658 b->bm_bits = in drbd_bm_resize()
659 b->bm_words = in drbd_bm_resize()
660 b->bm_dev_capacity = 0; in drbd_bm_resize()
661 spin_unlock_irq(&b->bm_lock); in drbd_bm_resize()
676 u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev); in drbd_bm_resize()
681 err = -ENOSPC; in drbd_bm_resize()
687 have = b->bm_number_of_pages; in drbd_bm_resize()
689 D_ASSERT(device, b->bm_pages != NULL); in drbd_bm_resize()
690 npages = b->bm_pages; in drbd_bm_resize()
699 err = -ENOMEM; in drbd_bm_resize()
703 spin_lock_irq(&b->bm_lock); in drbd_bm_resize()
704 opages = b->bm_pages; in drbd_bm_resize()
705 owords = b->bm_words; in drbd_bm_resize()
706 obits = b->bm_bits; in drbd_bm_resize()
712 b->bm_pages = npages; in drbd_bm_resize()
713 b->bm_number_of_pages = want; in drbd_bm_resize()
714 b->bm_bits = bits; in drbd_bm_resize()
715 b->bm_words = words; in drbd_bm_resize()
716 b->bm_dev_capacity = capacity; in drbd_bm_resize()
720 bm_memset(b, owords, 0xff, words-owords); in drbd_bm_resize()
721 b->bm_set += bits - obits; in drbd_bm_resize()
723 bm_memset(b, owords, 0x00, words-owords); in drbd_bm_resize()
729 bm_free_pages(opages + want, have - want); in drbd_bm_resize()
734 spin_unlock_irq(&b->bm_lock); in drbd_bm_resize()
738 b->bm_set = bm_count_bits(b); in drbd_bm_resize()
756 struct drbd_bitmap *b = device->bitmap; in _drbd_bm_total_weight()
762 if (!expect(device, b->bm_pages)) in _drbd_bm_total_weight()
765 spin_lock_irqsave(&b->bm_lock, flags); in _drbd_bm_total_weight()
766 s = b->bm_set; in _drbd_bm_total_weight()
767 spin_unlock_irqrestore(&b->bm_lock, flags); in _drbd_bm_total_weight()
775 /* if I don't have a disk, I don't know about out-of-sync status */ in drbd_bm_total_weight()
785 struct drbd_bitmap *b = device->bitmap; in drbd_bm_words()
788 if (!expect(device, b->bm_pages)) in drbd_bm_words()
791 return b->bm_words; in drbd_bm_words()
796 struct drbd_bitmap *b = device->bitmap; in drbd_bm_bits()
800 return b->bm_bits; in drbd_bm_bits()
811 struct drbd_bitmap *b = device->bitmap; in drbd_bm_merge_lel()
821 if (!expect(device, b->bm_pages)) in drbd_bm_merge_lel()
825 WARN_ON(offset >= b->bm_words); in drbd_bm_merge_lel()
826 WARN_ON(end > b->bm_words); in drbd_bm_merge_lel()
828 spin_lock_irq(&b->bm_lock); in drbd_bm_merge_lel()
830 do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; in drbd_bm_merge_lel()
835 while (do_now--) { in drbd_bm_merge_lel()
839 b->bm_set += hweight_long(word) - bits; in drbd_bm_merge_lel()
842 bm_set_page_need_writeout(b->bm_pages[idx]); in drbd_bm_merge_lel()
844 /* with 32bit <-> 64bit cross-platform connect in drbd_bm_merge_lel()
849 if (end == b->bm_words) in drbd_bm_merge_lel()
850 b->bm_set -= bm_clear_surplus(b); in drbd_bm_merge_lel()
851 spin_unlock_irq(&b->bm_lock); in drbd_bm_merge_lel()
860 struct drbd_bitmap *b = device->bitmap; in drbd_bm_get_lel()
868 if (!expect(device, b->bm_pages)) in drbd_bm_get_lel()
871 spin_lock_irq(&b->bm_lock); in drbd_bm_get_lel()
872 if ((offset >= b->bm_words) || in drbd_bm_get_lel()
873 (end > b->bm_words) || in drbd_bm_get_lel()
878 (unsigned long) b->bm_words); in drbd_bm_get_lel()
881 do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; in drbd_bm_get_lel()
885 while (do_now--) in drbd_bm_get_lel()
890 spin_unlock_irq(&b->bm_lock); in drbd_bm_get_lel()
896 struct drbd_bitmap *b = device->bitmap; in drbd_bm_set_all()
899 if (!expect(device, b->bm_pages)) in drbd_bm_set_all()
902 spin_lock_irq(&b->bm_lock); in drbd_bm_set_all()
903 bm_memset(b, 0, 0xff, b->bm_words); in drbd_bm_set_all()
905 b->bm_set = b->bm_bits; in drbd_bm_set_all()
906 spin_unlock_irq(&b->bm_lock); in drbd_bm_set_all()
912 struct drbd_bitmap *b = device->bitmap; in drbd_bm_clear_all()
915 if (!expect(device, b->bm_pages)) in drbd_bm_clear_all()
918 spin_lock_irq(&b->bm_lock); in drbd_bm_clear_all()
919 bm_memset(b, 0, 0, b->bm_words); in drbd_bm_clear_all()
920 b->bm_set = 0; in drbd_bm_clear_all()
921 spin_unlock_irq(&b->bm_lock); in drbd_bm_clear_all()
929 spin_lock_irqsave(&ctx->device->resource->req_lock, flags); in drbd_bm_aio_ctx_destroy()
930 list_del(&ctx->list); in drbd_bm_aio_ctx_destroy()
931 spin_unlock_irqrestore(&ctx->device->resource->req_lock, flags); in drbd_bm_aio_ctx_destroy()
932 put_ldev(ctx->device); in drbd_bm_aio_ctx_destroy()
939 struct drbd_bm_aio_ctx *ctx = bio->bi_private; in drbd_bm_endio()
940 struct drbd_device *device = ctx->device; in drbd_bm_endio()
941 struct drbd_bitmap *b = device->bitmap; in drbd_bm_endio()
944 if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 && in drbd_bm_endio()
945 !bm_test_page_unchanged(b->bm_pages[idx])) in drbd_bm_endio()
948 if (bio->bi_status) { in drbd_bm_endio()
949 /* ctx error will hold the completed-last non-zero error code, in drbd_bm_endio()
951 ctx->error = blk_status_to_errno(bio->bi_status); in drbd_bm_endio()
952 bm_set_page_io_err(b->bm_pages[idx]); in drbd_bm_endio()
957 bio->bi_status, idx); in drbd_bm_endio()
959 bm_clear_page_io_err(b->bm_pages[idx]); in drbd_bm_endio()
965 if (ctx->flags & BM_AIO_COPY_PAGES) in drbd_bm_endio()
966 mempool_free(bio->bi_io_vec[0].bv_page, &drbd_md_io_page_pool); in drbd_bm_endio()
970 if (atomic_dec_and_test(&ctx->in_flight)) { in drbd_bm_endio()
971 ctx->done = 1; in drbd_bm_endio()
972 wake_up(&device->misc_wait); in drbd_bm_endio()
973 kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy); in drbd_bm_endio()
980 switch (bdev->md.meta_dev_idx) { in drbd_md_last_bitmap_sector()
983 return bdev->md.md_offset + bdev->md.al_offset -1; in drbd_md_last_bitmap_sector()
986 return bdev->md.md_offset + bdev->md.md_size_sect -1; in drbd_md_last_bitmap_sector()
992 struct drbd_device *device = ctx->device; in bm_page_io_async()
993 enum req_op op = ctx->flags & BM_AIO_READ ? REQ_OP_READ : REQ_OP_WRITE; in bm_page_io_async()
994 struct drbd_bitmap *b = device->bitmap; in bm_page_io_async()
1002 first_bm_sect = device->ldev->md.md_offset + device->ldev->md.bm_offset; in bm_page_io_async()
1003 on_disk_sector = first_bm_sect + (((sector_t)page_nr) << (PAGE_SHIFT-SECTOR_SHIFT)); in bm_page_io_async()
1008 last_bm_sect = drbd_md_last_bitmap_sector(device->ldev); in bm_page_io_async()
1010 sector_t len_sect = last_bm_sect - on_disk_sector + 1; in bm_page_io_async()
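/* [Editor's worked example] With 4KiB pages and 512 byte sectors the shift
 * above is 12 - 9 == 3, i.e. 8 sectors per bitmap page: page_nr 0 starts
 * at first_bm_sect, page_nr 1 at first_bm_sect + 8, and so on; len_sect
 * clamps the last, possibly partial, page so the IO never runs past
 * drbd_md_last_bitmap_sector(). */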
1017 drbd_err(device, "Invalid offset during on-disk bitmap access: " in bm_page_io_async()
1020 ctx->error = -EIO; in bm_page_io_async()
1021 bm_set_page_io_err(b->bm_pages[page_nr]); in bm_page_io_async()
1022 if (atomic_dec_and_test(&ctx->in_flight)) { in bm_page_io_async()
1023 ctx->done = 1; in bm_page_io_async()
1024 wake_up(&device->misc_wait); in bm_page_io_async()
1025 kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy); in bm_page_io_async()
1034 bm_set_page_unchanged(b->bm_pages[page_nr]); in bm_page_io_async()
1036 if (ctx->flags & BM_AIO_COPY_PAGES) { in bm_page_io_async()
1039 copy_highpage(page, b->bm_pages[page_nr]); in bm_page_io_async()
1042 page = b->bm_pages[page_nr]; in bm_page_io_async()
1043 bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, GFP_NOIO, in bm_page_io_async()
1045 bio->bi_iter.bi_sector = on_disk_sector; in bm_page_io_async()
1047 bio->bi_private = ctx; in bm_page_io_async()
1048 bio->bi_end_io = drbd_bm_endio; in bm_page_io_async()
1055 * resync to throttle -- see drbd_rs_should_slow_down(). */ in bm_page_io_async()
1056 atomic_add(len >> 9, &device->rs_sect_ev); in bm_page_io_async()
1066 struct drbd_bitmap *b = device->bitmap; in bm_rw()
1083 return -ENOMEM; in bm_rw()
1098 return -ENODEV; in bm_rw()
1101 drbd_adm_attach(), after device->ldev was assigned. */ in bm_rw()
1103 if (0 == (ctx->flags & ~BM_AIO_READ)) in bm_rw()
1104 WARN_ON(!(BM_LOCKED_MASK & b->bm_flags)); in bm_rw()
1106 spin_lock_irq(&device->resource->req_lock); in bm_rw()
1107 list_add_tail(&ctx->list, &device->pending_bitmap_io); in bm_rw()
1108 spin_unlock_irq(&device->resource->req_lock); in bm_rw()
1110 num_pages = b->bm_number_of_pages; in bm_rw()
1118 atomic_inc(&ctx->in_flight); in bm_rw()
1126 for (hint = 0; hint < b->n_bitmap_hints; hint++) { in bm_rw()
1127 i = b->al_bitmap_hints[hint]; in bm_rw()
1128 if (i >= num_pages) /* == -1U: no hint here. */ in bm_rw()
1130 /* Several AL-extents may point to the same page. */ in bm_rw()
1132 &page_private(b->bm_pages[i]))) in bm_rw()
1135 if (bm_test_page_unchanged(b->bm_pages[i])) in bm_rw()
1137 atomic_inc(&ctx->in_flight); in bm_rw()
1147 bm_test_page_unchanged(b->bm_pages[i])) { in bm_rw()
1154 !bm_test_page_lazy_writeout(b->bm_pages[i])) { in bm_rw()
1158 atomic_inc(&ctx->in_flight); in bm_rw()
1166 * We initialize ctx->in_flight to one to make sure drbd_bm_endio in bm_rw()
1167 * will not set ctx->done early, and decrement / test it here. If there in bm_rw()
1173 if (!atomic_dec_and_test(&ctx->in_flight)) in bm_rw()
1174 wait_until_done_or_force_detached(device, device->ldev, &ctx->done); in bm_rw()
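/* [Editor's note] This is the usual "bias reference" pattern: in_flight
 * starts at 1 so completions can never reach zero while submission is
 * still looping. If the atomic_dec_and_test() above drops that bias to
 * zero, every bio already completed and there is nothing to wait for;
 * otherwise we sleep until drbd_bm_endio() sets ctx->done (or the device
 * is force-detached). */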
1176 kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy); in bm_rw()
1180 unsigned int ms = jiffies_to_msecs(jiffies - now); in bm_rw() local
1181 if (ms > 5) { in bm_rw()
1182 drbd_info(device, "bitmap %s of %u pages took %u ms\n", in bm_rw()
1184 count, ms); in bm_rw()
1188 if (ctx->error) { in bm_rw()
1191 err = -EIO; /* ctx->error ? */ in bm_rw()
1194 if (atomic_read(&ctx->in_flight)) in bm_rw()
1195 err = -EIO; /* Disk timeout/force-detach during IO... */ in bm_rw()
1199 b->bm_set = bm_count_bits(b); in bm_rw()
1201 jiffies - now); in bm_rw()
1203 now = b->bm_set; in bm_rw()
1206 drbd_info(device, "%s (%lu bits) marked out-of-sync by on-disk bitmap.\n", in bm_rw()
1207 ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now); in bm_rw()
1209 kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy); in bm_rw()
1214 * drbd_bm_read() - Read the whole bitmap from its on-disk location.
1225 * drbd_bm_write() - Write the whole bitmap to its on-disk location.
1237 * drbd_bm_write_all() - Write the whole bitmap to its on-disk location.
1249 * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
1259 * drbd_bm_write_copy_pages() - Write the whole bitmap to its on-disk location.
1264 * to temporary writeout pages. It is intended to trigger a full write-out
1276 * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
1295 struct drbd_bitmap *b = device->bitmap; in __bm_find_next()
1301 if (bm_fo > b->bm_bits) { in __bm_find_next()
1302 drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); in __bm_find_next()
1305 while (bm_fo < b->bm_bits) { in __bm_find_next()
1320 if (bm_fo >= b->bm_bits) in __bm_find_next()
1335 struct drbd_bitmap *b = device->bitmap; in bm_find_next()
1340 if (!expect(device, b->bm_pages)) in bm_find_next()
1343 spin_lock_irq(&b->bm_lock); in bm_find_next()
1344 if (BM_DONT_TEST & b->bm_flags) in bm_find_next()
1349 spin_unlock_irq(&b->bm_lock); in bm_find_next()
1370 /* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */ in _drbd_bm_find_next()
1376 /* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */ in _drbd_bm_find_next_zero()
1381 * for val != 0, we change 0 -> 1, return code positive
1382 * for val == 0, we change 1 -> 0, return code negative
1384 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
1385 * Must hold bitmap lock already. */
1389 struct drbd_bitmap *b = device->bitmap; in __bm_change_bits_to()
1392 unsigned int last_page_nr = -1U; in __bm_change_bits_to()
1396 if (e >= b->bm_bits) { in __bm_change_bits_to()
1398 s, e, b->bm_bits); in __bm_change_bits_to()
1399 e = b->bm_bits ? b->bm_bits -1 : 0; in __bm_change_bits_to()
1407 bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); in __bm_change_bits_to()
1409 bm_set_page_need_writeout(b->bm_pages[last_page_nr]); in __bm_change_bits_to()
1418 c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr)); in __bm_change_bits_to()
1423 bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); in __bm_change_bits_to()
1425 bm_set_page_need_writeout(b->bm_pages[last_page_nr]); in __bm_change_bits_to()
1427 b->bm_set += changed_total; in __bm_change_bits_to()
1432 * for val != 0, we change 0 -> 1, return code positive
1433 * for val == 0, we change 1 -> 0, return code negative
1439 struct drbd_bitmap *b = device->bitmap; in bm_change_bits_to()
1444 if (!expect(device, b->bm_pages)) in bm_change_bits_to()
1447 spin_lock_irqsave(&b->bm_lock, flags); in bm_change_bits_to()
1448 if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags) in bm_change_bits_to()
1453 spin_unlock_irqrestore(&b->bm_lock, flags); in bm_change_bits_to()
1457 /* returns number of bits changed 0 -> 1 */
1463 /* returns number of bits changed 1 -> 0 */
1466 return -bm_change_bits_to(device, s, e, 0); in drbd_bm_clear_bits()
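/* [Editor's usage sketch, hypothetical caller] The sign convention lets
 * both wrappers report a non-negative count of changed bits:
 *
 *	n_set     = drbd_bm_set_bits(device, s, e);	// bits flipped 0 -> 1
 *	n_cleared = drbd_bm_clear_bits(device, s, e);	// bits flipped 1 -> 0
 *
 * bm_change_bits_to(device, s, e, 0) returns a negative count for clears,
 * which drbd_bm_clear_bits() negates. */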
1477 unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]); in bm_set_full_words_within_one_page()
1484 changed += BITS_PER_LONG - bits; in bm_set_full_words_within_one_page()
1491 bm_set_page_lazy_writeout(b->bm_pages[page_nr]); in bm_set_full_words_within_one_page()
1492 b->bm_set += changed; in bm_set_full_words_within_one_page()
1511 struct drbd_bitmap *b = device->bitmap; in _drbd_bm_set_bits()
1513 unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1); in _drbd_bm_set_bits()
1520 if (e - s <= 3*BITS_PER_LONG) { in _drbd_bm_set_bits()
1522 spin_lock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1524 spin_unlock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1530 spin_lock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1534 __bm_change_bits_to(device, s, sl-1, 1); in _drbd_bm_set_bits()
1546 bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word); in _drbd_bm_set_bits()
1547 spin_unlock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1550 spin_lock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1555 /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples). in _drbd_bm_set_bits()
1558 * We do not want to touch last_page in this case, in _drbd_bm_set_bits()
1559 * as we did not allocate it; it is not present in bitmap->bm_pages. in _drbd_bm_set_bits()
1562 bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word); in _drbd_bm_set_bits()
1571 spin_unlock_irq(&b->bm_lock); in _drbd_bm_set_bits()
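/* [Editor's standalone sketch of the same split into unaligned head bits,
 * full aligned words, and unaligned tail bits; userspace, 64bit longs,
 * names illustrative.] */
#include <stdio.h>

#define BPL 64UL	/* bits per long */

static void split_range(unsigned long s, unsigned long e)
{
	unsigned long sl = (s + BPL - 1) & ~(BPL - 1);	/* round s up */
	unsigned long el = (e + 1) & ~(BPL - 1);	/* round e + 1 down */

	if (el <= sl) {		/* too short: bit-by-bit only */
		printf("bits %lu..%lu one by one\n", s, e);
		return;
	}
	if (sl > s)
		printf("head bits %lu..%lu one by one\n", s, sl - 1);
	printf("bits %lu..%lu via full-word fast path\n", sl, el - 1);
	if (el <= e)
		printf("tail bits %lu..%lu one by one\n", el, e);
}

int main(void)
{
	split_range(3, 200);	/* head 3..63, words 64..191, tail 192..200 */
	return 0;
}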
1579 * -1 ... first out of bounds access, stop testing for bits!
1584 struct drbd_bitmap *b = device->bitmap; in drbd_bm_test_bit()
1590 if (!expect(device, b->bm_pages)) in drbd_bm_test_bit()
1593 spin_lock_irqsave(&b->bm_lock, flags); in drbd_bm_test_bit()
1594 if (BM_DONT_TEST & b->bm_flags) in drbd_bm_test_bit()
1596 if (bitnr < b->bm_bits) { in drbd_bm_test_bit()
1600 } else if (bitnr == b->bm_bits) { in drbd_bm_test_bit()
1601 i = -1; in drbd_bm_test_bit()
1602 } else { /* (bitnr > b->bm_bits) */ in drbd_bm_test_bit()
1603 drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits); in drbd_bm_test_bit()
1607 spin_unlock_irqrestore(&b->bm_lock, flags); in drbd_bm_test_bit()
1615 struct drbd_bitmap *b = device->bitmap; in drbd_bm_count_bits()
1618 unsigned int page_nr = -1U; in drbd_bm_count_bits()
1627 if (!expect(device, b->bm_pages)) in drbd_bm_count_bits()
1630 spin_lock_irqsave(&b->bm_lock, flags); in drbd_bm_count_bits()
1631 if (BM_DONT_TEST & b->bm_flags) in drbd_bm_count_bits()
1641 if (expect(device, bitnr < b->bm_bits)) in drbd_bm_count_bits()
1642 c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); in drbd_bm_count_bits()
1644 drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); in drbd_bm_count_bits()
1648 spin_unlock_irqrestore(&b->bm_lock, flags); in drbd_bm_count_bits()
1654 * return value may already be out-of-date when this function returns.
1659 * enr is bm-extent number, since we chose to name one sector (512 bytes)
1669 struct drbd_bitmap *b = device->bitmap; in drbd_bm_e_weight()
1676 if (!expect(device, b->bm_pages)) in drbd_bm_e_weight()
1679 spin_lock_irqsave(&b->bm_lock, flags); in drbd_bm_e_weight()
1680 if (BM_DONT_TEST & b->bm_flags) in drbd_bm_e_weight()
1684 e = min((size_t)S2W(enr+1), b->bm_words); in drbd_bm_e_weight()
1686 if (s < b->bm_words) { in drbd_bm_e_weight()
1687 int n = e-s; in drbd_bm_e_weight()
1695 spin_unlock_irqrestore(&b->bm_lock, flags); in drbd_bm_e_weight()