/*
   drbd_actlog.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"

/* We maintain a trivial checksum in our on disk activity log.
 * With that we can ensure correct operation even when the storage
 * device might do a partial (last) sector write while losing power.
 */
struct __packed al_transaction {
	u32 magic;
	u32 tr_number;
	struct __packed {
		u32 pos;
		u32 extent; } updates[1 + AL_EXTENTS_PT];
	u32 xor_sum;
};

struct update_odbm_work {
	struct drbd_work w;
	unsigned int enr;
};

struct update_al_work {
	struct drbd_work w;
	struct lc_element *al_ext;
	struct completion event;
	unsigned int enr;
	/* if old_enr != LC_FREE, write corresponding bitmap sector, too */
	unsigned int old_enr;
};

struct drbd_atodb_wait {
	atomic_t count;
	struct completion io_done;
	struct drbd_conf *mdev;
	int error;
};


int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);

static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
				 struct drbd_backing_dev *bdev,
				 struct page *page, sector_t sector,
				 int rw, int size)
{
	struct bio *bio;
	struct drbd_md_io md_io;
	int ok;

	md_io.mdev = mdev;
	init_completion(&md_io.event);
	md_io.error = 0;

	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
		rw |= REQ_HARDBARRIER;
	rw |= REQ_UNPLUG | REQ_SYNC;

 retry:
	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_sector = sector;
	ok = (bio_add_page(bio, page, size, 0) == size);
	if (!ok)
		goto out;
	bio->bi_private = &md_io;
	bio->bi_end_io = drbd_md_io_complete;
	bio->bi_rw = rw;

	if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
		submit_bio(rw, bio);
	wait_for_completion(&md_io.event);
	ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;

	/* check for unsupported barrier op.
	 * would rather check on EOPNOTSUPP, but that is not reliable.
	 * don't try again for ANY return value != 0 */
	if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
		/* Try again with no barrier */
		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
		set_bit(MD_NO_BARRIER, &mdev->flags);
		rw &= ~REQ_HARDBARRIER;
		bio_put(bio);
		goto retry;
	}
 out:
	bio_put(bio);
	return ok;
}

int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
			 sector_t sector, int rw)
{
	int logical_block_size, mask, ok;
	int offset = 0;
	struct page *iop = mdev->md_io_page;

	D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));

	BUG_ON(!bdev->md_bdev);

	logical_block_size = bdev_logical_block_size(bdev->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	/* in case logical_block_size != 512 [ s390 only? ] */
	if (logical_block_size != MD_SECTOR_SIZE) {
		mask = (logical_block_size / MD_SECTOR_SIZE) - 1;
		D_ASSERT(mask == 1 || mask == 3 || mask == 7);
		D_ASSERT(logical_block_size == (mask+1) * MD_SECTOR_SIZE);
		offset = sector & mask;
		sector = sector & ~mask;
		iop = mdev->md_io_tmpp;

		if (rw & WRITE) {
			/* these are GFP_KERNEL pages, pre-allocated
			 * on device initialization */
			void *p = page_address(mdev->md_io_page);
			void *hp = page_address(mdev->md_io_tmpp);

			ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector,
					READ, logical_block_size);

			if (unlikely(!ok)) {
				dev_err(DEV, "drbd_md_sync_page_io(,%llus,"
				    "READ [logical_block_size!=512]) failed!\n",
				    (unsigned long long)sector);
				return 0;
			}

			memcpy(hp + offset*MD_SECTOR_SIZE, p, MD_SECTOR_SIZE);
		}
	}

	if (sector < drbd_md_first_sector(bdev) ||
	    sector > drbd_md_last_sector(bdev))
		dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
		     current->comm, current->pid, __func__,
		     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");

	ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, logical_block_size);
	if (unlikely(!ok)) {
		dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed!\n",
		    (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
		return 0;
	}

	if (logical_block_size != MD_SECTOR_SIZE && !(rw & WRITE)) {
		void *p = page_address(mdev->md_io_page);
		void *hp = page_address(mdev->md_io_tmpp);

		memcpy(p, hp + offset*MD_SECTOR_SIZE, MD_SECTOR_SIZE);
	}

	return ok;
}

static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *al_ext;
	struct lc_element *tmp;
	unsigned long al_flags = 0;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
	if (unlikely(tmp != NULL)) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
			return NULL;
		}
	}
	al_ext = lc_get(mdev->act_log, enr);
	al_flags = mdev->act_log->flags;
	spin_unlock_irq(&mdev->al_lock);

	/*
	if (!al_ext) {
		if (al_flags & LC_STARVING)
			dev_warn(DEV, "Have to wait for LRU element (AL too small?)\n");
		if (al_flags & LC_DIRTY)
			dev_warn(DEV, "Ongoing AL update (AL device too slow?)\n");
	}
	*/

	return al_ext;
}

void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
	struct lc_element *al_ext;
	struct update_al_work al_work;

	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);

	wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));

	if (al_ext->lc_number != enr) {
		/* drbd_al_write_transaction(mdev,al_ext,enr);
		 * recurses into generic_make_request(), which
		 * disallows recursion, bios being serialized on the
		 * current->bio_tail list now.
		 * we have to delegate updates to the activity log
		 * to the worker thread. */
		init_completion(&al_work.event);
		al_work.al_ext = al_ext;
		al_work.enr = enr;
		al_work.old_enr = al_ext->lc_number;
		al_work.w.cb = w_al_write_transaction;
		drbd_queue_work_front(&mdev->data.work, &al_work.w);
		wait_for_completion(&al_work.event);

		mdev->al_writ_cnt++;

		spin_lock_irq(&mdev->al_lock);
		lc_changed(mdev->act_log, al_ext);
		spin_unlock_irq(&mdev->al_lock);
		wake_up(&mdev->al_wait);
	}
}

void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
	struct lc_element *extent;
	unsigned long flags;

	spin_lock_irqsave(&mdev->al_lock, flags);

	extent = lc_find(mdev->act_log, enr);

	if (!extent) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
		return;
	}

	if (lc_put(mdev->act_log, extent) == 0)
		wake_up(&mdev->al_wait);

	spin_unlock_irqrestore(&mdev->al_lock, flags);
}

int
w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct update_al_work *aw = container_of(w, struct update_al_work, w);
	struct lc_element *updated = aw->al_ext;
	const unsigned int new_enr = aw->enr;
	const unsigned int evicted = aw->old_enr;
	struct al_transaction *buffer;
	sector_t sector;
	int i, n, mx;
	unsigned int extent_nr;
	u32 xor_sum = 0;

	if (!get_ldev(mdev)) {
		dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
		complete(&((struct update_al_work *)w)->event);
		return 1;
	}
	/* do we have to do a bitmap write, first?
	 * TODO reduce maximum latency:
	 * submit both bios, then wait for both,
	 * instead of doing two synchronous sector writes. */
	if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
		drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);

	mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
	buffer = (struct al_transaction *)page_address(mdev->md_io_page);

	buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
	buffer->tr_number = cpu_to_be32(mdev->al_tr_number);

	n = lc_index_of(mdev->act_log, updated);

	buffer->updates[0].pos = cpu_to_be32(n);
	buffer->updates[0].extent = cpu_to_be32(new_enr);

	xor_sum ^= new_enr;

	mx = min_t(int, AL_EXTENTS_PT,
		   mdev->act_log->nr_elements - mdev->al_tr_cycle);
	for (i = 0; i < mx; i++) {
		unsigned idx = mdev->al_tr_cycle + i;
		extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
		buffer->updates[i+1].pos = cpu_to_be32(idx);
		buffer->updates[i+1].extent = cpu_to_be32(extent_nr);
		xor_sum ^= extent_nr;
	}
	for (; i < AL_EXTENTS_PT; i++) {
		buffer->updates[i+1].pos = __constant_cpu_to_be32(-1);
		buffer->updates[i+1].extent = __constant_cpu_to_be32(LC_FREE);
		xor_sum ^= LC_FREE;
	}
	mdev->al_tr_cycle += AL_EXTENTS_PT;
	if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
		mdev->al_tr_cycle = 0;

	buffer->xor_sum = cpu_to_be32(xor_sum);

	sector = mdev->ldev->md.md_offset
	       + mdev->ldev->md.al_offset + mdev->al_tr_pos;

	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
		drbd_chk_io_error(mdev, 1, TRUE);

	if (++mdev->al_tr_pos >
	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
		mdev->al_tr_pos = 0;

	D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
	mdev->al_tr_number++;

	mutex_unlock(&mdev->md_io_mutex);

	complete(&((struct update_al_work *)w)->event);
	put_ldev(mdev);

	return 1;
}

/**
 * drbd_al_read_tr() - Read a single transaction from the on disk activity log
 * @mdev: DRBD device.
 * @bdev: Block device to read from.
 * @b: pointer to an al_transaction.
 * @index: On disk slot of the transaction to read.
 *
 * Returns -1 on IO error, 0 on checksum error and 1 upon success.
 */
static int drbd_al_read_tr(struct drbd_conf *mdev,
			   struct drbd_backing_dev *bdev,
			   struct al_transaction *b,
			   int index)
{
	sector_t sector;
	int rv, i;
	u32 xor_sum = 0;

	sector = bdev->md.md_offset + bdev->md.al_offset + index;

	/* Don't process the error normally,
	 * as this is done before the disk is attached! */
	if (!drbd_md_sync_page_io(mdev, bdev, sector, READ))
		return -1;

	rv = (be32_to_cpu(b->magic) == DRBD_MAGIC);

	for (i = 0; i < AL_EXTENTS_PT + 1; i++)
		xor_sum ^= be32_to_cpu(b->updates[i].extent);
	rv &= (xor_sum == be32_to_cpu(b->xor_sum));

	return rv;
}

/**
 * drbd_al_read_log() - Restores the activity log from its on disk representation.
 * @mdev: DRBD device.
 * @bdev: Block device to read from.
 *
 * Returns 1 on success, returns 0 when reading the log failed due to IO errors.
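 *
 * The on disk activity log is scanned as a ring of
 * div_ceil(act_log->nr_elements, AL_EXTENTS_PT) + 1 transaction slots;
 * the slot holding the highest tr_number is the most recent one, and the
 * valid transactions are replayed from the oldest towards the newest slot.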
 */
int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct al_transaction *buffer;
	int i;
	int rv;
	int mx;
	int active_extents = 0;
	int transactions = 0;
	int found_valid = 0;
	int from = 0;
	int to = 0;
	u32 from_tnr = 0;
	u32 to_tnr = 0;
	u32 cnr;

	mx = div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT);

	/* lock out all other meta data io for now,
	 * and make sure the page is mapped.
	 */
	mutex_lock(&mdev->md_io_mutex);
	buffer = page_address(mdev->md_io_page);

	/* Find the valid transaction in the log */
	for (i = 0; i <= mx; i++) {
		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
		if (rv == 0)
			continue;
		if (rv == -1) {
			mutex_unlock(&mdev->md_io_mutex);
			return 0;
		}
		cnr = be32_to_cpu(buffer->tr_number);

		if (++found_valid == 1) {
			from = i;
			to = i;
			from_tnr = cnr;
			to_tnr = cnr;
			continue;
		}
		if ((int)cnr - (int)from_tnr < 0) {
			D_ASSERT(from_tnr - cnr + i - from == mx+1);
			from = i;
			from_tnr = cnr;
		}
		if ((int)cnr - (int)to_tnr > 0) {
			D_ASSERT(cnr - to_tnr == i - to);
			to = i;
			to_tnr = cnr;
		}
	}

	if (!found_valid) {
		dev_warn(DEV, "No usable activity log found.\n");
		mutex_unlock(&mdev->md_io_mutex);
		return 1;
	}

	/* Read the valid transactions.
	 * dev_info(DEV, "Reading from %d to %d.\n",from,to); */
	i = from;
	while (1) {
		int j, pos;
		unsigned int extent_nr;
		unsigned int trn;

		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
		ERR_IF(rv == 0) goto cancel;
		if (rv == -1) {
			mutex_unlock(&mdev->md_io_mutex);
			return 0;
		}

		trn = be32_to_cpu(buffer->tr_number);

		spin_lock_irq(&mdev->al_lock);

		/* This loop runs backwards because in the cyclic
		   elements there might be an old version of the
		   updated element (in slot 0). So the element in slot 0
		   can overwrite old versions. */
		for (j = AL_EXTENTS_PT; j >= 0; j--) {
			pos = be32_to_cpu(buffer->updates[j].pos);
			extent_nr = be32_to_cpu(buffer->updates[j].extent);

			if (extent_nr == LC_FREE)
				continue;

			lc_set(mdev->act_log, extent_nr, pos);
			active_extents++;
		}
		spin_unlock_irq(&mdev->al_lock);

		transactions++;

	cancel:
		if (i == to)
			break;
		i++;
		if (i > mx)
			i = 0;
	}

	mdev->al_tr_number = to_tnr+1;
	mdev->al_tr_pos = to;
	if (++mdev->al_tr_pos >
	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
		mdev->al_tr_pos = 0;

	/* ok, we are done with it */
	mutex_unlock(&mdev->md_io_mutex);

	dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
	     transactions, active_extents);

	return 1;
}

static void atodb_endio(struct bio *bio, int error)
{
	struct drbd_atodb_wait *wc = bio->bi_private;
	struct drbd_conf *mdev = wc->mdev;
	struct page *page;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?! */
	if (!error && !uptodate)
		error = -EIO;

	drbd_chk_io_error(mdev, error, TRUE);
	if (error && wc->error == 0)
		wc->error = error;

	if (atomic_dec_and_test(&wc->count))
		complete(&wc->io_done);

	page = bio->bi_io_vec[0].bv_page;
	put_page(page);
	bio_put(bio);
	mdev->bm_writ_cnt++;
	put_ldev(mdev);
}

/* sector to word */
#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))

/* activity log to on disk bitmap -- prepare bio unless that sector
 * is already covered by previously prepared bios */
static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
					struct bio **bios,
					unsigned int enr,
					struct drbd_atodb_wait *wc) __must_hold(local)
{
	struct bio *bio;
	struct page *page;
	sector_t on_disk_sector;
	unsigned int page_offset = PAGE_SIZE;
	int offset;
	int i = 0;
	int err = -ENOMEM;

	/* We always write aligned, full 4k blocks,
	 * so we can ignore the logical_block_size (for now) */
	enr &= ~7U;
	on_disk_sector = enr + mdev->ldev->md.md_offset
			     + mdev->ldev->md.bm_offset;

	D_ASSERT(!(on_disk_sector & 7U));

	/* Check if that enr is already covered by an already created bio.
	 * Caution, bios[] is not NULL terminated,
	 * but only initialized to all NULL.
	 * For completely scattered activity log,
	 * the last invocation iterates over all bios,
	 * and finds the last NULL entry.
	 */
	while ((bio = bios[i])) {
		if (bio->bi_sector == on_disk_sector)
			return 0;
		i++;
	}
	/* bios[i] == NULL, the next not yet used slot */

	/* GFP_KERNEL, we are not in the write-out path */
	bio = bio_alloc(GFP_KERNEL, 1);
	if (bio == NULL)
		return -ENOMEM;

	if (i > 0) {
		const struct bio_vec *prev_bv = bios[i-1]->bi_io_vec;
		page_offset = prev_bv->bv_offset + prev_bv->bv_len;
		page = prev_bv->bv_page;
	}
	if (page_offset == PAGE_SIZE) {
		page = alloc_page(__GFP_HIGHMEM);
		if (page == NULL)
			goto out_bio_put;
		page_offset = 0;
	} else {
		get_page(page);
	}

	offset = S2W(enr);
	drbd_bm_get_lel(mdev, offset,
			min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset),
			kmap(page) + page_offset);
	kunmap(page);

	bio->bi_private = wc;
	bio->bi_end_io = atodb_endio;
	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;

	if (bio_add_page(bio, page, 4096, page_offset) != 4096)
		goto out_put_page;

	atomic_inc(&wc->count);
	/* we already know that we may do this...
	 * get_ldev_if_state(mdev,D_ATTACHING);
	 * just get the extra reference, so that the local_cnt reflects
	 * the number of pending IO requests DRBD has at its backing device.
	 */
	atomic_inc(&mdev->local_cnt);

	bios[i] = bio;

	return 0;

out_put_page:
	err = -EINVAL;
	put_page(page);
out_bio_put:
	bio_put(bio);
	return err;
}

/**
 * drbd_al_to_on_disk_bm() - Writes bitmap parts covered by active AL extents
 * @mdev: DRBD device.
 *
 * Called when we detach (unconfigure) local storage,
 * or when we go from R_PRIMARY to R_SECONDARY role.
 */
void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
{
	int i, nr_elements;
	unsigned int enr;
	struct bio **bios;
	struct drbd_atodb_wait wc;

	ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING))
		return; /* sorry, I don't have any act_log etc... */

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	nr_elements = mdev->act_log->nr_elements;

	/* GFP_KERNEL, we are not in anyone's write-out path */
	bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL);
	if (!bios)
		goto submit_one_by_one;

	atomic_set(&wc.count, 0);
	init_completion(&wc.io_done);
	wc.mdev = mdev;
	wc.error = 0;

	for (i = 0; i < nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		/* next statement also does atomic_inc wc.count and local_cnt */
		if (atodb_prepare_unless_covered(mdev, bios,
						enr/AL_EXT_PER_BM_SECT,
						&wc))
			goto free_bios_submit_one_by_one;
	}

	/* unnecessary optimization? */
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	/* all prepared, submit them */
	for (i = 0; i < nr_elements; i++) {
		if (bios[i] == NULL)
			break;
		if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) {
			bios[i]->bi_rw = WRITE;
			bio_endio(bios[i], -EIO);
		} else {
			submit_bio(WRITE, bios[i]);
		}
	}

	drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));

	/* always (try to) flush bitmap to stable storage */
	drbd_md_flush(mdev);

	/* In case we did not submit a single IO do not wait for
	 * them to complete. ( Because we would wait forever here. )
	 *
	 * In case we had IOs and they are already complete, there
	 * is no point in waiting anyway.
	 * Therefore this if () ... */
	if (atomic_read(&wc.count))
		wait_for_completion(&wc.io_done);

	put_ldev(mdev);

	kfree(bios);
	return;

free_bios_submit_one_by_one:
	/* free everything by calling the endio callback directly. */
	for (i = 0; i < nr_elements && bios[i]; i++)
		bio_endio(bios[i], 0);

	kfree(bios);

submit_one_by_one:
	dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n");

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		/* Really slow: if we have al-extents 16..19 active,
		 * sector 4 will be written four times! Synchronous! */
		drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT);
	}

	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	put_ldev(mdev);
}

/**
 * drbd_al_apply_to_bm() - Sets the bitmap to dirty (1) where covered by active AL extents
 * @mdev: DRBD device.
 */
void drbd_al_apply_to_bm(struct drbd_conf *mdev)
{
	unsigned int enr;
	unsigned long add = 0;
	char ppb[10];
	int i;

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		add += drbd_bm_ALe_set_all(mdev, enr);
	}

	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	dev_info(DEV, "Marked additional %s as out-of-sync based on AL.\n",
		 ppsize(ppb, Bit2KB(add)));
}

static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
{
	int rv;

	spin_lock_irq(&mdev->al_lock);
	rv = (al_ext->refcnt == 0);
	if (likely(rv))
		lc_del(mdev->act_log, al_ext);
	spin_unlock_irq(&mdev->al_lock);

	return rv;
}

/**
 * drbd_al_shrink() - Removes all active extents from the activity log
 * @mdev: DRBD device.
 *
 * Removes all active extents from the activity log, waiting until
 * the reference count of each entry has dropped to 0 first, of course.
 *
 * You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
 */
void drbd_al_shrink(struct drbd_conf *mdev)
{
	struct lc_element *al_ext;
	int i;

	D_ASSERT(test_bit(__LC_DIRTY, &mdev->act_log->flags));

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		al_ext = lc_element_by_index(mdev->act_log, i);
		if (al_ext->lc_number == LC_FREE)
			continue;
		wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
	}

	wake_up(&mdev->al_wait);
}

static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);

	if (!get_ldev(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
		kfree(udw);
		return 1;
	}

	drbd_bm_write_sect(mdev, udw->enr);
	put_ldev(mdev);

	kfree(udw);

	if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
		switch (mdev->state.conn) {
		case C_SYNC_SOURCE:  case C_SYNC_TARGET:
		case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
			drbd_resync_finished(mdev);
		default:
			/* nothing to do */
			break;
		}
	}
	drbd_bcast_sync_progress(mdev);

	return 1;
}


/* ATTENTION. The AL's extents are 4MB each, while the extents in the
 * resync LRU-cache are 16MB each.
 * The caller of this function has to hold a get_ldev() reference.
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
				      int count, int success)
{
	struct lc_element *e;
	struct update_odbm_work *udw;

	unsigned int enr;

	D_ASSERT(atomic_read(&mdev->local_cnt));

	/* I simply assume that a sector/size pair never crosses
	 * a 16 MB extent border. (Currently this is true...) */
	enr = BM_SECT_TO_EXT(sector);

	e = lc_get(mdev->resync, enr);
	if (e) {
		struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
		if (ext->lce.lc_number == enr) {
			if (success)
				ext->rs_left -= count;
			else
				ext->rs_failed += count;
			if (ext->rs_left < ext->rs_failed) {
				dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
				    "rs_failed=%d count=%d\n",
				     (unsigned long long)sector,
				     ext->lce.lc_number, ext->rs_left,
				     ext->rs_failed, count);
				dump_stack();

				lc_put(mdev->resync, &ext->lce);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return;
			}
		} else {
			/* Normally this element should be in the cache,
			 * since drbd_rs_begin_io() pulled it already in.
			 *
			 * But maybe an application write finished, and we set
			 * something outside the resync lru_cache in sync.
			 */
			int rs_left = drbd_bm_e_weight(mdev, enr);
			if (ext->flags != 0) {
				dev_warn(DEV, "changing resync lce: %d[%u;%02lx]"
				     " -> %d[%u;00]\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->flags, enr, rs_left);
				ext->flags = 0;
			}
			if (ext->rs_failed) {
				dev_warn(DEV, "Kicking resync_lru element enr=%u "
				     "out with rs_failed=%d\n",
				     ext->lce.lc_number, ext->rs_failed);
				set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
			}
			ext->rs_left = rs_left;
			ext->rs_failed = success ? 0 : count;
			lc_changed(mdev->resync, &ext->lce);
		}
		lc_put(mdev->resync, &ext->lce);
		/* no race, we are within the al_lock! */

		if (ext->rs_left == ext->rs_failed) {
			ext->rs_failed = 0;

			udw = kmalloc(sizeof(*udw), GFP_ATOMIC);
			if (udw) {
				udw->enr = ext->lce.lc_number;
				udw->w.cb = w_update_odbm;
				drbd_queue_work_front(&mdev->data.work, &udw->w);
			} else {
				dev_warn(DEV, "Could not kmalloc an udw\n");
				set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
			}
		}
	} else {
		dev_err(DEV, "lc_get() failed! locked=%d/%d flags=%lu\n",
		    mdev->resync_locked,
		    mdev->resync->nr_elements,
		    mdev->resync->flags);
	}
}

/* clear the bit corresponding to the piece of storage in question:
 * size bytes of data starting from sector. Only clear the bits of the affected
 * one or more _aligned_ BM_BLOCK_SIZE blocks.
 *
 * called by worker on C_SYNC_TARGET and receiver on SyncSource.
 *
 */
void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
		       const char *file, const unsigned int line)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count = 0;
	sector_t esector, nr_sectors;
	int wake_up = 0;
	unsigned long flags;

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
		dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
				(unsigned long long)sector, size);
		return;
	}
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	ERR_IF(sector >= nr_sectors) return;
	ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/* we clear it (in sync).
	 * round up start sector, round down end sector. we make sure we only
	 * clear full, aligned, BM_BLOCK_SIZE (4K) blocks */
	if (unlikely(esector < BM_SECT_PER_BIT-1))
		return;
	if (unlikely(esector == (nr_sectors-1)))
		ebnr = lbnr;
	else
		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);

	if (sbnr > ebnr)
		return;

	/*
	 * ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors.
	 */
	count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
	if (count && get_ldev(mdev)) {
		unsigned long now = jiffies;
		unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
		int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
		if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
			unsigned long tw = drbd_bm_total_weight(mdev);
			if (mdev->rs_mark_left[mdev->rs_last_mark] != tw &&
			    mdev->state.conn != C_PAUSED_SYNC_T &&
			    mdev->state.conn != C_PAUSED_SYNC_S) {
				mdev->rs_mark_time[next] = now;
				mdev->rs_mark_left[next] = tw;
				mdev->rs_last_mark = next;
			}
		}
		spin_lock_irqsave(&mdev->al_lock, flags);
		drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
		spin_unlock_irqrestore(&mdev->al_lock, flags);

		/* just wake_up unconditional now, various lc_changed(),
		 * lc_put() in drbd_try_clear_on_disk_bm(). */
		wake_up = 1;
		put_ldev(mdev);
	}
	if (wake_up)
		wake_up(&mdev->al_wait);
}

/*
 * this is intended to set one request worth of data out of sync.
 * affects at least 1 bit,
 * and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits.
 *
 * called by tl_clear and drbd_send_dblock (==drbd_make_request).
 * so this can be _any_ process.
 */
void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
			    const char *file, const unsigned int line)
{
	unsigned long sbnr, ebnr, lbnr, flags;
	sector_t esector, nr_sectors;
	unsigned int enr, count;
	struct lc_element *e;

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
		dev_err(DEV, "sector: %llus, size: %d\n",
			(unsigned long long)sector, size);
		return;
	}

	if (!get_ldev(mdev))
		return; /* no disk, no metadata, no bitmap to set bits in */

	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	ERR_IF(sector >= nr_sectors)
		goto out;
	ERR_IF(esector >= nr_sectors)
		esector = (nr_sectors-1);

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/* we set it out of sync,
	 * we do not need to round anything here */
	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	/* ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors. */
	spin_lock_irqsave(&mdev->al_lock, flags);
	count = drbd_bm_set_bits(mdev, sbnr, ebnr);

	enr = BM_SECT_TO_EXT(sector);
	e = lc_find(mdev->resync, enr);
	if (e)
		lc_entry(e, struct bm_extent, lce)->rs_left += count;
	spin_unlock_irqrestore(&mdev->al_lock, flags);

out:
	put_ldev(mdev);
}

static
struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int wakeup = 0;
	unsigned long rs_flags;

	spin_lock_irq(&mdev->al_lock);
	if (mdev->resync_locked > mdev->resync->nr_elements/2) {
		spin_unlock_irq(&mdev->al_lock);
		return NULL;
	}
	e = lc_get(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
			bm_ext->rs_failed = 0;
			lc_changed(mdev->resync, &bm_ext->lce);
			wakeup = 1;
		}
		if (bm_ext->lce.refcnt == 1)
			mdev->resync_locked++;
		set_bit(BME_NO_WRITES, &bm_ext->flags);
	}
	rs_flags = mdev->resync->flags;
	spin_unlock_irq(&mdev->al_lock);
	if (wakeup)
		wake_up(&mdev->al_wait);

	if (!bm_ext) {
		if (rs_flags & LC_STARVING)
			dev_warn(DEV, "Have to wait for element"
			     " (resync LRU too small?)\n");
		BUG_ON(rs_flags & LC_DIRTY);
	}

	return bm_ext;
}

static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *al_ext;
	int rv = 0;

	spin_lock_irq(&mdev->al_lock);
	if (unlikely(enr == mdev->act_log->new_number))
		rv = 1;
	else {
		al_ext = lc_find(mdev->act_log, enr);
		if (al_ext) {
			if (al_ext->refcnt)
				rv = 1;
		}
	}
	spin_unlock_irq(&mdev->al_lock);

	/*
	if (unlikely(rv)) {
		dev_info(DEV, "Delaying sync read until app's write is done\n");
	}
	*/
	return rv;
}

/**
 * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
 * @mdev: DRBD device.
 * @sector: The sector number.
 *
 * This function sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
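 * Unlike drbd_try_rs_begin_io(), it blocks until no application writes are
 * active in any of the AL extents covered by this resync extent.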
 */
int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct bm_extent *bm_ext;
	int i, sig;

	sig = wait_event_interruptible(mdev->al_wait,
			(bm_ext = _bme_get(mdev, enr)));
	if (sig)
		return -EINTR;

	if (test_bit(BME_LOCKED, &bm_ext->flags))
		return 0;

	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		sig = wait_event_interruptible(mdev->al_wait,
				!_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i));
		if (sig) {
			spin_lock_irq(&mdev->al_lock);
			if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				mdev->resync_locked--;
				wake_up(&mdev->al_wait);
			}
			spin_unlock_irq(&mdev->al_lock);
			return -EINTR;
		}
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
	return 0;
}

/**
 * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
 * @mdev: DRBD device.
 * @sector: The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
 * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&mdev->al_lock);
	if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
		/* in case you have very heavy scattered io, it may
		 * stall the syncer for an undefined time if we give up
		 * the ref count when we try again and requeue.
		 *
		 * if we don't give up the refcount, but the next time
		 * we are scheduled this extent has been "synced" by new
		 * application writes, we'd miss the lc_put on the
		 * extent we keep the refcount on.
		 * so we remembered which extent we had to try again, and
		 * if the next requested one is something else, we do
		 * the lc_put here...
		 * we also have to wake_up
		 */
		e = lc_find(mdev->resync, mdev->resync_wenr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (bm_ext) {
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
			clear_bit(BME_NO_WRITES, &bm_ext->flags);
			mdev->resync_wenr = LC_FREE;
			if (lc_put(mdev->resync, &bm_ext->lce) == 0)
				mdev->resync_locked--;
			wake_up(&mdev->al_wait);
		} else {
			dev_alert(DEV, "LOGIC BUG\n");
		}
	}
	/* TRY. */
	e = lc_try_get(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (test_bit(BME_LOCKED, &bm_ext->flags))
			goto proceed;
		if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
			mdev->resync_locked++;
		} else {
			/* we did set the BME_NO_WRITES,
			 * but then could not set BME_LOCKED,
			 * so we tried again.
			 * drop the extra reference. */
			bm_ext->lce.refcnt--;
			D_ASSERT(bm_ext->lce.refcnt > 0);
		}
		goto check_al;
	} else {
		/* do we rather want to try later? */
		if (mdev->resync_locked > mdev->resync->nr_elements-3)
			goto try_again;
		/* Do or do not. There is no try. -- Yoda */
		e = lc_get(mdev->resync, enr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (!bm_ext) {
			const unsigned long rs_flags = mdev->resync->flags;
			if (rs_flags & LC_STARVING)
				dev_warn(DEV, "Have to wait for element"
				     " (resync LRU too small?)\n");
			BUG_ON(rs_flags & LC_DIRTY);
			goto try_again;
		}
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
			bm_ext->rs_failed = 0;
			lc_changed(mdev->resync, &bm_ext->lce);
			wake_up(&mdev->al_wait);
			D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
		}
		set_bit(BME_NO_WRITES, &bm_ext->flags);
		D_ASSERT(bm_ext->lce.refcnt == 1);
		mdev->resync_locked++;
		goto check_al;
	}
check_al:
	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		if (unlikely(al_enr+i == mdev->act_log->new_number))
			goto try_again;
		if (lc_is_used(mdev->act_log, al_enr+i))
			goto try_again;
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
	mdev->resync_wenr = LC_FREE;
	spin_unlock_irq(&mdev->al_lock);
	return 0;

try_again:
	if (bm_ext)
		mdev->resync_wenr = enr;
	spin_unlock_irq(&mdev->al_lock);
	return -EAGAIN;
}

void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct lc_element *e;
	struct bm_extent *bm_ext;
	unsigned long flags;

	spin_lock_irqsave(&mdev->al_lock, flags);
	e = lc_find(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (!bm_ext) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "drbd_rs_complete_io() called, but extent not found\n");
		return;
	}

	if (bm_ext->lce.refcnt == 0) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		dev_err(DEV, "drbd_rs_complete_io(,%llu [=%u]) called, "
		    "but refcnt is 0!?\n",
		    (unsigned long long)sector, enr);
		return;
	}

	if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
		clear_bit(BME_LOCKED, &bm_ext->flags);
		clear_bit(BME_NO_WRITES, &bm_ext->flags);
		mdev->resync_locked--;
		wake_up(&mdev->al_wait);
	}

	spin_unlock_irqrestore(&mdev->al_lock, flags);
}

/**
 * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
 * @mdev: DRBD device.
 */
void drbd_rs_cancel_all(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
		lc_reset(mdev->resync);
		put_ldev(mdev);
	}
	mdev->resync_locked = 0;
	mdev->resync_wenr = LC_FREE;
	spin_unlock_irq(&mdev->al_lock);
	wake_up(&mdev->al_wait);
}

/**
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @mdev: DRBD device.
 *
 * Returns 0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
int drbd_rs_del_all(struct drbd_conf *mdev)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		/* ok, ->resync is there. */
		for (i = 0; i < mdev->resync->nr_elements; i++) {
			e = lc_element_by_index(mdev->resync, i);
			bm_ext = lc_entry(e, struct bm_extent, lce);
			if (bm_ext->lce.lc_number == LC_FREE)
				continue;
			if (bm_ext->lce.lc_number == mdev->resync_wenr) {
				dev_info(DEV, "dropping %u in drbd_rs_del_all, apparently"
				     " got 'synced' by application io\n",
				     mdev->resync_wenr);
				D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
				D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				mdev->resync_wenr = LC_FREE;
				lc_put(mdev->resync, &bm_ext->lce);
			}
			if (bm_ext->lce.refcnt != 0) {
				dev_info(DEV, "Retrying drbd_rs_del_all() later. "
				     "refcnt=%d\n", bm_ext->lce.refcnt);
				put_ldev(mdev);
				spin_unlock_irq(&mdev->al_lock);
				return -EAGAIN;
			}
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
			lc_del(mdev->resync, &bm_ext->lce);
		}
		D_ASSERT(mdev->resync->used == 0);
		put_ldev(mdev);
	}
	spin_unlock_irq(&mdev->al_lock);

	return 0;
}

/**
 * drbd_rs_failed_io() - Record information on a failure to resync the specified blocks
 * @mdev: DRBD device.
 * @sector: The sector number.
 * @size: Size of failed IO operation, in bytes.
 */
void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count;
	sector_t esector, nr_sectors;
	int wake_up = 0;

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
		dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
				(unsigned long long)sector, size);
		return;
	}
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	ERR_IF(sector >= nr_sectors) return;
	ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/*
	 * round up start sector, round down end sector. we make sure we only
	 * handle full, aligned, BM_BLOCK_SIZE (4K) blocks */
	if (unlikely(esector < BM_SECT_PER_BIT-1))
		return;
	if (unlikely(esector == (nr_sectors-1)))
		ebnr = lbnr;
	else
		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);

	if (sbnr > ebnr)
		return;

	/*
	 * ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors.
	 */
	spin_lock_irq(&mdev->al_lock);
	count = drbd_bm_count_bits(mdev, sbnr, ebnr);
	if (count) {
		mdev->rs_failed += count;

		if (get_ldev(mdev)) {
			drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE);
			put_ldev(mdev);
		}

		/* just wake_up unconditional now, various lc_changed(),
		 * lc_put() in drbd_try_clear_on_disk_bm(). */
		wake_up = 1;
	}
	spin_unlock_irq(&mdev->al_lock);
	if (wake_up)
		wake_up(&mdev->al_wait);
}