/*
   drbd_bitmap.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/drbd.h>
#include <linux/slab.h>
#include <asm/kmap_types.h>
#include "drbd_int.h"

/* OPAQUE outside this file!
 * interface defined in drbd_int.h

 * convention:
 * function name drbd_bm_... => used elsewhere, "public".
 * function name      bm_... => internal to implementation, "private".

 * Note that since find_first_bit returns int, at the current granularity of
 * the bitmap (4KB per bit), this implementation "only" supports up to
 * 1<<(32+12) == 16 TB...
 */

/*
 * NOTE
 *  Access to the *bm_pages is protected by bm_lock.
 *  It is safe to read the other members within the lock.
 *
 *  drbd_bm_set_bits is called from bio_endio callbacks;
 *  we may be called with irq already disabled,
 *  so we need spin_lock_irqsave().
 *  And we need the kmap_atomic.
 */
struct drbd_bitmap {
	struct page **bm_pages;
	spinlock_t bm_lock;
	/* WARNING unsigned long bm_*:
	 * 32bit number of bit offset is just enough for 512 MB bitmap.
	 * it will blow up if we make the bitmap bigger...
	 * not that it makes much sense to have a bitmap that large,
	 * rather change the granularity to 16k or 64k or something.
	 * (that implies other problems, however...)
	 */
	unsigned long bm_set;	/* nr of set bits; THINK maybe atomic_t? */
	unsigned long bm_bits;
	size_t bm_words;
	size_t bm_number_of_pages;
	sector_t bm_dev_capacity;
	struct mutex bm_change;	/* serializes resize operations */

	atomic_t bm_async_io;
	wait_queue_head_t bm_io_wait;

	unsigned long bm_flags;

	/* debugging aid, in case we are still racy somewhere */
	char *bm_why;
	struct task_struct *bm_task;
};

/* definition of bits in bm_flags */
#define BM_LOCKED	0
#define BM_MD_IO_ERROR	1
#define BM_P_VMALLOCED	2

static int bm_is_locked(struct drbd_bitmap *b)
{
	return test_bit(BM_LOCKED, &b->bm_flags);
}

#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
	    current == mdev->receiver.task ? "receiver" :
	    current == mdev->asender.task  ? "asender"  :
	    current == mdev->worker.task   ? "worker"   : current->comm,
	    func, b->bm_why ?: "?",
	    b->bm_task == mdev->receiver.task ? "receiver" :
	    b->bm_task == mdev->asender.task  ? "asender"  :
	    b->bm_task == mdev->worker.task   ? "worker"   : "?");
}

void drbd_bm_lock(struct drbd_conf *mdev, char *why)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int trylock_failed;

	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
		return;
	}

	trylock_failed = !mutex_trylock(&b->bm_change);

	if (trylock_failed) {
		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
		    current == mdev->receiver.task ? "receiver" :
		    current == mdev->asender.task  ? "asender"  :
		    current == mdev->worker.task   ? "worker"   : current->comm,
		    why, b->bm_why ?: "?",
		    b->bm_task == mdev->receiver.task ? "receiver" :
		    b->bm_task == mdev->asender.task  ? "asender"  :
		    b->bm_task == mdev->worker.task   ? "worker"   : "?");
		mutex_lock(&b->bm_change);
	}
	if (__test_and_set_bit(BM_LOCKED, &b->bm_flags))
		dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");

	b->bm_why  = why;
	b->bm_task = current;
}

void drbd_bm_unlock(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
		return;
	}

	if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags))
		dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");

	b->bm_why  = NULL;
	b->bm_task = NULL;
	mutex_unlock(&b->bm_change);
}

/* word offset to long pointer */
static unsigned long *__bm_map_paddr(struct drbd_bitmap *b, unsigned long offset, const enum km_type km)
{
	struct page *page;
	unsigned long page_nr;

	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
	page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	page = b->bm_pages[page_nr];

	return (unsigned long *) kmap_atomic(page, km);
}

static unsigned long *bm_map_paddr(struct drbd_bitmap *b, unsigned long offset)
{
	return __bm_map_paddr(b, offset, KM_IRQ1);
}

static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
{
	kunmap_atomic(p_addr, km);
}

static void bm_unmap(unsigned long *p_addr)
{
	return __bm_unmap(p_addr, KM_IRQ1);
}

/* long word offset of _bitmap_ sector */
#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
/* word offset from start of bitmap to word number _in_page_
 * modulo longs per page
#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
 hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
 so do it explicitly:
 */
#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))

/* Long words per page */
#define LWPP (PAGE_SIZE/sizeof(long))

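/* Quick reference for the conversions above (illustrative only, assuming
 * 4 KiB pages, 64-bit longs and the 4 KiB-per-bit bitmap granularity):
 *   LWPP == 4096/8 == 512, so word offset 1000 lives in page 1000/512 == 1
 *   (bm_map_paddr), at word 1000 & 511 == 488 within that page (MLPP);
 *   S2W() turns a 512-byte on-disk bitmap sector number into its first long
 *   word: one such sector carries 512*8 == 4096 bits, i.e. 64 longs here.
 * Overall geometry example: a 1 TiB device needs 2^40/2^12 == 2^28 bits,
 * i.e. bm_words == 2^22 longs in bm_number_of_pages == 8192 pages (32 MiB).
 */
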
/*
 * actually most functions herein should take a struct drbd_bitmap*, not a
 * struct drbd_conf*, but for the debug macros I like to have the mdev around
 * to be able to report device specific messages.
 */

static void bm_free_pages(struct page **pages, unsigned long number)
{
	unsigned long i;
	if (!pages)
		return;

	for (i = 0; i < number; i++) {
		if (!pages[i]) {
			printk(KERN_ALERT "drbd: bm_free_pages tried to free "
			       "a NULL pointer; i=%lu n=%lu\n",
			       i, number);
			continue;
		}
		__free_page(pages[i]);
		pages[i] = NULL;
	}
}

static void bm_vk_free(void *ptr, int v)
{
	if (v)
		vfree(ptr);
	else
		kfree(ptr);
}

/*
 * "have" and "want" are NUMBER OF PAGES.
 */
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
	unsigned int i, bytes, vmalloced = 0;
	unsigned long have = b->bm_number_of_pages;

	BUG_ON(have == 0 && old_pages != NULL);
	BUG_ON(have != 0 && old_pages == NULL);

	if (have == want)
		return old_pages;

	/* Trying kmalloc first, falling back to vmalloc.
	 * GFP_KERNEL is ok, as this is done when a lower level disk is
	 * "attached" to the drbd.  Context is receiver thread or cqueue
	 * thread.  As we have no disk yet, we are not in the IO path,
	 * not even the IO path of the peer. */
	bytes = sizeof(struct page *)*want;
	new_pages = kmalloc(bytes, GFP_KERNEL);
	if (!new_pages) {
		new_pages = vmalloc(bytes);
		if (!new_pages)
			return NULL;
		vmalloced = 1;
	}

	memset(new_pages, 0, bytes);
	if (want >= have) {
		for (i = 0; i < have; i++)
			new_pages[i] = old_pages[i];
		for (; i < want; i++) {
			page = alloc_page(GFP_HIGHUSER);
			if (!page) {
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages, vmalloced);
				return NULL;
			}
			new_pages[i] = page;
		}
	} else {
		for (i = 0; i < want; i++)
			new_pages[i] = old_pages[i];
		/* NOT HERE, we are outside the spinlock!
		bm_free_pages(old_pages + want, have - want);
		*/
	}

	if (vmalloced)
		set_bit(BM_P_VMALLOCED, &b->bm_flags);
	else
		clear_bit(BM_P_VMALLOCED, &b->bm_flags);

	return new_pages;
}

/*
 * called on driver init only. TODO call when a device is created.
 * allocates the drbd_bitmap, and stores it in mdev->bitmap.
 */
int drbd_bm_init(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	WARN_ON(b != NULL);
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	spin_lock_init(&b->bm_lock);
	mutex_init(&b->bm_change);
	init_waitqueue_head(&b->bm_io_wait);

	mdev->bitmap = b;

	return 0;
}

sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
	ERR_IF(!mdev->bitmap) return 0;
	return mdev->bitmap->bm_dev_capacity;
}

/* called on driver unload. TODO: call when a device is destroyed.
 */
void drbd_bm_cleanup(struct drbd_conf *mdev)
{
	ERR_IF (!mdev->bitmap) return;
	bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
	bm_vk_free(mdev->bitmap->bm_pages, test_bit(BM_P_VMALLOCED, &mdev->bitmap->bm_flags));
	kfree(mdev->bitmap);
	mdev->bitmap = NULL;
}

/*
 * since (b->bm_bits % BITS_PER_LONG) != 0,
 * this masks out the remaining bits.
 * Returns the number of bits cleared.
 */
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1;
	size_t w = b->bm_bits >> LN2_BPL;
	int cleared = 0;
	unsigned long *p_addr, *bm;

	p_addr = bm_map_paddr(b, w);
	bm = p_addr + MLPP(w);
	if (w < b->bm_words) {
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		w++; bm++;
	}

	if (w < b->bm_words) {
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}

static void bm_set_surplus(struct drbd_bitmap *b)
{
	const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1;
	size_t w = b->bm_bits >> LN2_BPL;
	unsigned long *p_addr, *bm;

	p_addr = bm_map_paddr(b, w);
	bm = p_addr + MLPP(w);
	if (w < b->bm_words) {
		*bm |= ~mask;
		bm++; w++;
	}

	if (w < b->bm_words) {
		*bm = ~(0UL);
	}
	bm_unmap(p_addr);
}

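/* Illustration of the surplus masking above (assuming BITS_PER_LONG == 64):
 * with bm_bits == 100 the last used long word carries bits 64..99, so
 * mask == (1UL << (100 & 63)) - 1 keeps its low 36 bits; bm_clear_surplus()
 * zeroes the remaining bits of that word (and bm_set_surplus() sets them),
 * so the padding added by 64-bit word alignment never skews the bm_set
 * accounting or what is written to disk.
 */
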
static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endian)
{
	unsigned long *p_addr, *bm, offset = 0;
	unsigned long bits = 0;
	unsigned long i, do_now;

	while (offset < b->bm_words) {
		i = do_now = min_t(size_t, b->bm_words-offset, LWPP);
		p_addr = __bm_map_paddr(b, offset, KM_USER0);
		bm = p_addr + MLPP(offset);
		while (i--) {
#ifndef __LITTLE_ENDIAN
			if (swap_endian)
				*bm = lel_to_cpu(*bm);
#endif
			bits += hweight_long(*bm++);
		}
		__bm_unmap(p_addr, KM_USER0);
		offset += do_now;
		cond_resched();
	}

	return bits;
}

static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	return __bm_count_bits(b, 0);
}

static unsigned long bm_count_bits_swap_endian(struct drbd_bitmap *b)
{
	return __bm_count_bits(b, 1);
}

/* offset and len in long words.*/
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	size_t do_now, end;

#define BM_SECTORS_PER_BIT (BM_BLOCK_SIZE/512)

	end = offset + len;

	if (end > b->bm_words) {
		printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		p_addr = bm_map_paddr(b, offset);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
			break; /* breaks to after catch_oob_access_end() only! */
		}
		memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		offset += do_now;
	}
}

/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long bits, words, owords, obits, *p_addr, *bm;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	ERR_IF(!b) return -ENOMEM;

	drbd_bm_lock(mdev, "resize");

	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out;

	opages_vmalloced = test_bit(BM_P_VMALLOCED, &b->bm_flags);

	if (capacity == 0) {
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set   =
		b->bm_bits  =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}
	bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(mdev)) {
		D_ASSERT((u64)bits <= (((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12));
		put_ldev(mdev);
	}

	/* one extra long to catch off by one errors */
	want = ALIGN((words+1)*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		D_ASSERT(b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (FAULT_ACTIVE(mdev, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits  = b->bm_bits;

	growing = bits > obits;
	if (opages)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits  = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		bm_memset(b, owords, 0xff, words-owords);
		b->bm_set += bits - obits;
	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	p_addr = bm_map_paddr(b, words);
	bm = p_addr + MLPP(words);
	*bm = DRBD_MAGIC;
	bm_unmap(p_addr);

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	if (!growing)
		b->bm_set = bm_count_bits(b);
	dev_info(DEV, "resync bitmap: bits=%lu words=%lu\n", bits, words);

 out:
	drbd_bm_unlock(mdev);
	return err;
}

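/* Sizing example for drbd_bm_resize() above (illustrative, assuming a 64-bit
 * host and 4 KiB pages): capacity == 2097152 sectors (1 GiB) gives
 *   bits  = 2097152 / 8 = 262144	(one bit per 4 KiB block)
 *   words = 262144 / 64 = 4096
 *   want  = ALIGN((4096 + 1) * 8, 4096) / 4096 = 9 pages
 * i.e. the extra long that receives the DRBD_MAGIC marker is accounted for
 * before rounding up to whole pages.
 */
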
/* inherently racy:
 * if not protected by other means, return value may be out of date when
 * leaving this function...
 * we still need to lock it, since it is important that this returns
 * bm_set == 0 precisely.
 *
 * maybe bm_set should be atomic_t ?
 */
static unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long s;
	unsigned long flags;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	s = b->bm_set;
	spin_unlock_irqrestore(&b->bm_lock, flags);

	return s;
}

unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
{
	unsigned long s;
	/* if I don't have a disk, I don't know about out-of-sync status */
	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;
	s = _drbd_bm_total_weight(mdev);
	put_ldev(mdev);
	return s;
}

size_t drbd_bm_words(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	return b->bm_words;
}

unsigned long drbd_bm_bits(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return 0;

	return b->bm_bits;
}

/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		p_addr = bm_map_paddr(b, offset);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			bits = hweight_long(*bm);
			word = *bm | lel_to_cpu(*buffer++);
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
}

/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */
void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	if ((offset >= b->bm_words) ||
	    (end    >  b->bm_words) ||
	    (number <= 0))
		dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long)	offset,
			(unsigned long)	number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_paddr(b, offset);
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = cpu_to_lel(*bm++);
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}

/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0xff, b->bm_words);
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}

static void bm_async_io_complete(struct bio *bio, int error)
{
	struct drbd_bitmap *b = bio->bi_private;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?!
	 * do we want to WARN() on this? */
	if (!error && !uptodate)
		error = -EIO;

	if (error) {
		/* doh. what now?
		 * for now, set all bits, and flag MD_IO_ERROR */
		__set_bit(BM_MD_IO_ERROR, &b->bm_flags);
	}
	if (atomic_dec_and_test(&b->bm_async_io))
		wake_up(&b->bm_io_wait);

	bio_put(bio);
}

static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int page_nr, int rw) __must_hold(local)
{
	/* we are process context. we always get a bio */
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	unsigned int len;
	sector_t on_disk_sector =
		mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);

	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;
	bio_add_page(bio, b->bm_pages[page_nr], len, 0);
	bio->bi_private = b;
	bio->bi_end_io = bm_async_io_complete;

	if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		bio->bi_rw |= rw;
		bio_endio(bio, -EIO);
	} else {
		submit_bio(rw, bio);
	}
}

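/* On-disk placement used by bm_page_io_async() above (illustrative, 4 KiB
 * pages assumed): each bitmap page occupies PAGE_SIZE >> 9 == 8 consecutive
 * 512-byte sectors, starting at md_offset + bm_offset of the meta-data
 * device; page_nr 3 therefore starts 3 * 8 = 24 sectors into the bitmap area.
 */
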
# if defined(__LITTLE_ENDIAN)
	/* nothing to do, on disk == in memory */
# define bm_cpu_to_lel(x) ((void)0)
# else
void bm_cpu_to_lel(struct drbd_bitmap *b)
{
	/* need to cpu_to_lel all the pages ...
	 * this may be optimized by using
	 * cpu_to_lel(-1) == -1 and cpu_to_lel(0) == 0;
	 * the following is still not optimal, but better than nothing */
	unsigned int i;
	unsigned long *p_addr, *bm;
	if (b->bm_set == 0) {
		/* no page at all; avoid swap if all is 0 */
		i = b->bm_number_of_pages;
	} else if (b->bm_set == b->bm_bits) {
		/* only the last page */
		i = b->bm_number_of_pages - 1;
	} else {
		/* all pages */
		i = 0;
	}
	for (; i < b->bm_number_of_pages; i++) {
		p_addr = kmap_atomic(b->bm_pages[i], KM_USER0);
		for (bm = p_addr; bm < p_addr + PAGE_SIZE/sizeof(long); bm++)
			*bm = cpu_to_lel(*bm);
		kunmap_atomic(p_addr, KM_USER0);
	}
}
# endif
/* lel_to_cpu == cpu_to_lel */
# define bm_lel_to_cpu(x) bm_cpu_to_lel(x)

/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 */
static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
{
	struct drbd_bitmap *b = mdev->bitmap;
	/* sector_t sector; */
	int bm_words, num_pages, i;
	unsigned long now;
	char ppb[10];
	int err = 0;

	WARN_ON(!bm_is_locked(b));

	/* no spinlock here, the drbd_bm_lock should be enough! */

	bm_words  = drbd_bm_words(mdev);
	num_pages = (bm_words*sizeof(long) + PAGE_SIZE-1) >> PAGE_SHIFT;

	/* on disk bitmap is little endian */
	if (rw == WRITE)
		bm_cpu_to_lel(b);

	now = jiffies;
	atomic_set(&b->bm_async_io, num_pages);
	__clear_bit(BM_MD_IO_ERROR, &b->bm_flags);

	/* let the layers below us try to merge these bios... */
	for (i = 0; i < num_pages; i++)
		bm_page_io_async(mdev, b, i, rw);

	drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
	wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);

	if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(mdev, 1, TRUE);
		err = -EIO;
	}

	now = jiffies;
	if (rw == WRITE) {
		/* swap back endianness */
		bm_lel_to_cpu(b);
		/* flush bitmap to stable storage */
		drbd_md_flush(mdev);
	} else /* rw == READ */ {
		/* just read, if necessary adjust endianness */
		b->bm_set = bm_count_bits_swap_endian(b);
		dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
		     jiffies - now);
	}
	now = b->bm_set;

	dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
	     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	return err;
}

/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev:	DRBD device.
 */
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, READ);
}

/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 */
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE);
}

/**
 * drbd_bm_write_sect: Writes a 512 (MD_SECTOR_SIZE) byte piece of the bitmap
 * @mdev:	DRBD device.
 * @enr:	Extent number in the resync lru (happens to be sector offset)
 *
 * The BM_EXT_SIZE is on purpose exactly the amount of the bitmap covered
 * by a single sector write. Therefore enr == sector offset from the
 * start of the bitmap.
 */
int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local)
{
	sector_t on_disk_sector = enr + mdev->ldev->md.md_offset
				      + mdev->ldev->md.bm_offset;
	int bm_words, num_words, offset;
	int err = 0;

	mutex_lock(&mdev->md_io_mutex);
	bm_words  = drbd_bm_words(mdev);
	offset    = S2W(enr);	/* word offset into bitmap */
	num_words = min(S2W(1), bm_words - offset);
	if (num_words < S2W(1))
		memset(page_address(mdev->md_io_page), 0, MD_SECTOR_SIZE);
	drbd_bm_get_lel(mdev, offset, num_words,
			page_address(mdev->md_io_page));
	if (!drbd_md_sync_page_io(mdev, mdev->ldev, on_disk_sector, WRITE)) {
		int i;
		err = -EIO;
		dev_err(DEV, "IO ERROR writing bitmap sector %lu "
		    "(meta-disk sector %llus)\n",
		    enr, (unsigned long long)on_disk_sector);
		drbd_chk_io_error(mdev, 1, TRUE);
		for (i = 0; i < AL_EXT_PER_BM_SECT; i++)
			drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i);
	}
	mdev->bm_writ_cnt++;
	mutex_unlock(&mdev->md_io_mutex);
	return err;
}

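/* Illustration for drbd_bm_write_sect() above (assuming 4 KiB bitmap
 * granularity and 64-bit longs): one 512-byte on-disk bitmap sector holds
 * S2W(1) == 64 longs == 4096 bits, so a single sector write covers
 * 4096 * 4 KiB = 16 MiB of device data, i.e. one bitmap extent.
 */
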
/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * should not make much difference anyways, but ...
 *
 * this returns a bit number, NOT a sector!
 */
#define BPP_MASK ((1UL << (PAGE_SHIFT+3)) - 1)
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
	const int find_zero_bit, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long i = -1UL;
	unsigned long *p_addr;
	unsigned long bit_offset; /* bit offset of the mapped page. */

	if (bm_fo > b->bm_bits) {
		dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
	} else {
		while (bm_fo < b->bm_bits) {
			unsigned long offset;
			bit_offset = bm_fo & ~BPP_MASK; /* bit offset of the page */
			offset = bit_offset >> LN2_BPL;	/* word offset of the page */
			p_addr = __bm_map_paddr(b, offset, km);

			if (find_zero_bit)
				i = find_next_zero_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);
			else
				i = find_next_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);

			__bm_unmap(p_addr, km);
			if (i < PAGE_SIZE*8) {
				i = bit_offset + i;
				if (i >= b->bm_bits)
					break;
				goto found;
			}
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		i = -1UL;
	}
 found:
	return i;
}

static unsigned long bm_find_next(struct drbd_conf *mdev,
	unsigned long bm_fo, const int find_zero_bit)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long i = -1UL;

	ERR_IF(!b) return i;
	ERR_IF(!b->bm_pages) return i;

	spin_lock_irq(&b->bm_lock);
	if (bm_is_locked(b))
		bm_print_lock_info(mdev);

	i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);

	spin_unlock_irq(&b->bm_lock);
	return i;
}

unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 0);
}

#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 1);
}
#endif

/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!bm_is_locked(mdev)); */
	return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
}

unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!bm_is_locked(mdev)); */
	return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already. */
int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	unsigned long e, int val, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned long last_page_nr = -1UL;
	int c = 0;

	if (e >= b->bm_bits) {
		dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits - 1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned long offset = bitnr>>LN2_BPL;
		unsigned long page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr, km);
			p_addr = __bm_map_paddr(b, offset, km);
			last_page_nr = page_nr;
		}
		if (val)
			c += (0 == __test_and_set_bit(bitnr & BPP_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit(bitnr & BPP_MASK, p_addr));
	}
	if (p_addr)
		__bm_unmap(p_addr, km);
	b->bm_set += c;
	return c;
}

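/* Return convention of __bm_change_bits_to() above, by example: setting
 * bits 5..8 when two of them were already set returns +2 (only 0 -> 1
 * transitions are counted); clearing the same four bits afterwards
 * returns -4. bm_set is adjusted by exactly that delta.
 */
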
/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector */
int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	const unsigned long e, int val)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	int c = 0;

	ERR_IF(!b) return 1;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (bm_is_locked(b))
		bm_print_lock_info(mdev);

	c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(mdev, s, e, 1);
}

/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(mdev, s, e, 0);
}

/* sets all bits in full words,
 * from first_word up to, but not including, last_word */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
		int page_nr, int first_word, int last_word)
{
	int i;
	int bits;
	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
	for (i = first_word; i < last_word; i++) {
		bits = hweight_long(paddr[i]);
		paddr[i] = ~0UL;
		b->bm_set += BITS_PER_LONG - bits;
	}
	kunmap_atomic(paddr, KM_USER0);
}

/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	unsigned long sl = ALIGN(s, BITS_PER_LONG);
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		__bm_change_bits_to(mdev, s, e, 1, KM_USER0);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
		cond_resched();
		first_word = 0;
	}

	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);
	bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(mdev, el, e, 1, KM_USER0);
}

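/* Example of the split performed by _drbd_bm_set_bits() above (assuming
 * BITS_PER_LONG == 64): for s = 70, e = 1000 it sets bits 70..127
 * individually (up to sl == 128), fills whole long words for bits 128..959
 * page by page (el == 960), and finally sets bits 960..1000 individually
 * again.
 */
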
/* returns bit state
 * wants bitnr, NOT sector.
 * inherently racy... area needs to be locked by means of {al,rs}_lru
 *  1 ... bit set
 *  0 ... bit not set
 * -1 ... first out of bounds access, stop testing for bits!
 */
int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	int i;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (bm_is_locked(b))
		bm_print_lock_info(mdev);
	if (bitnr < b->bm_bits) {
		unsigned long offset = bitnr>>LN2_BPL;
		p_addr = bm_map_paddr(b, offset);
		i = test_bit(bitnr & BPP_MASK, p_addr) ? 1 : 0;
		bm_unmap(p_addr);
	} else if (bitnr == b->bm_bits) {
		i = -1;
	} else { /* (bitnr > b->bm_bits) */
		dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
		i = 0;
	}

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return i;
}

/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL, page_nr = -1;
	unsigned long bitnr;
	int c = 0;
	size_t w;

	/* If this is called without a bitmap, that is a bug.  But just to be
	 * robust in case we screwed up elsewhere, in that case pretend there
	 * was one dirty bit in the requested area, so we won't try to do a
	 * local read there (no bitmap probably implies no disk) */
	ERR_IF(!b) return 1;
	ERR_IF(!b->bm_pages) return 1;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (bm_is_locked(b))
		bm_print_lock_info(mdev);
	for (bitnr = s; bitnr <= e; bitnr++) {
		w = bitnr >> LN2_BPL;
		if (page_nr != w >> (PAGE_SHIFT - LN2_BPL + 3)) {
			page_nr = w >> (PAGE_SHIFT - LN2_BPL + 3);
			if (p_addr)
				bm_unmap(p_addr);
			p_addr = bm_map_paddr(b, w);
		}
		ERR_IF (bitnr >= b->bm_bits) {
			dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
		} else {
			c += (0 != test_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
		}
	}
	if (p_addr)
		bm_unmap(p_addr);
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* inherently racy...
 * return value may be already out-of-date when this function returns.
 * but the general usage is that this is only used during a cstate when bits
 * are only cleared, not set, and we typically only care for the case when
 * the return value is zero, or we already "locked" this "bitmap extent" by
 * other means.
 *
 * enr is bm-extent number, since we chose to name one sector (512 bytes)
 * worth of the bitmap a "bitmap extent".
 *
 * TODO
 * I think since we use it like a reference count, we should use the real
 * reference count of some bitmap extent element from some lru instead...
 *
 */
int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (bm_is_locked(b))
		bm_print_lock_info(mdev);

	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
	count = 0;
	if (s < b->bm_words) {
		int n = e-s;
		p_addr = bm_map_paddr(b, s);
		bm = p_addr + MLPP(s);
		while (n--)
			count += hweight_long(*bm++);
		bm_unmap(p_addr);
	} else {
		dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
	}
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return count;
}

/* set all bits covered by the AL-extent al_enr */
unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long weight;
	int count, s, e, i, do_now;
	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irq(&b->bm_lock);
	if (bm_is_locked(b))
		bm_print_lock_info(mdev);
	weight = b->bm_set;

	s = al_enr * BM_WORDS_PER_AL_EXT;
	e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
	/* assert that s and e are on the same page */
	D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
	      ==  s    >> (PAGE_SHIFT - LN2_BPL + 3));
	count = 0;
	if (s < b->bm_words) {
		i = do_now = e-s;
		p_addr = bm_map_paddr(b, s);
		bm = p_addr + MLPP(s);
		while (i--) {
			count += hweight_long(*bm);
			*bm = -1UL;
			bm++;
		}
		bm_unmap(p_addr);
		b->bm_set += do_now*BITS_PER_LONG - count;
		if (e == b->bm_words)
			b->bm_set -= bm_clear_surplus(b);
	} else {
		dev_err(DEV, "start offset (%d) too large in drbd_bm_ALe_set_all\n", s);
	}
	weight = b->bm_set - weight;
	spin_unlock_irq(&b->bm_lock);
	return weight;
}