// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
#include <linux/jump_label.h>

#include "dm.h"

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when the number of dirty buffers exceeds
 *	DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_RATIO	3
#define DM_BUFIO_LOW_WATERMARK_RATIO	16

/*
 * The number of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

#define SCAN_RESCHED_CYCLE	16

/*--------------------------------------------------------------*/

/*
 * Rather than use an LRU list, we use a clock algorithm where entries
 * are held in a circular list.  When an entry is 'hit' a reference bit
 * is set.  The least recently used entry is approximated by running a
 * cursor around the list selecting unreferenced entries.  Referenced
 * entries have their reference bit cleared as the cursor passes them.
 */
struct lru_entry {
	struct list_head list;
	atomic_t referenced;
};

struct lru_iter {
	struct lru *lru;
	struct list_head list;
	struct lru_entry *stop;
	struct lru_entry *e;
};

struct lru {
	struct list_head *cursor;
	unsigned long count;

	struct list_head iterators;
};

/*--------------*/

static void lru_init(struct lru *lru)
{
	lru->cursor = NULL;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->iterators);
}

static void lru_destroy(struct lru *lru)
{
	WARN_ON_ONCE(lru->cursor);
	WARN_ON_ONCE(!list_empty(&lru->iterators));
}

/*
 * Insert a new entry into the lru.
 */
static void lru_insert(struct lru *lru, struct lru_entry *le)
{
	/*
	 * Don't be tempted to set to 1, makes the lru aspect
	 * perform poorly.
	 */
	atomic_set(&le->referenced, 0);

	if (lru->cursor) {
		list_add_tail(&le->list, lru->cursor);
	} else {
		INIT_LIST_HEAD(&le->list);
		lru->cursor = &le->list;
	}
	lru->count++;
}

/*--------------*/

/*
 * Convert a list_head pointer to an lru_entry pointer.
 */
static inline struct lru_entry *to_le(struct list_head *l)
{
	return container_of(l, struct lru_entry, list);
}

/*
 * Initialize an lru_iter and add it to the list of cursors in the lru.
 */
static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
{
	it->lru = lru;
	it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL;
	it->e = lru->cursor ? to_le(lru->cursor) : NULL;
	list_add(&it->list, &lru->iterators);
}
/*
 * Remove an lru_iter from the list of cursors in the lru.
 */
static inline void lru_iter_end(struct lru_iter *it)
{
	list_del(&it->list);
}

/* Predicate function type to be used with lru_iter_next */
typedef bool (*iter_predicate)(struct lru_entry *le, void *context);

/*
 * Advance the cursor to the next entry that passes the
 * predicate, and return that entry.  Returns NULL if the
 * iteration is complete.
 */
static struct lru_entry *lru_iter_next(struct lru_iter *it,
				       iter_predicate pred, void *context)
{
	struct lru_entry *e;

	while (it->e) {
		e = it->e;

		/* advance the cursor */
		if (it->e == it->stop)
			it->e = NULL;
		else
			it->e = to_le(it->e->list.next);

		if (pred(e, context))
			return e;
	}

	return NULL;
}

/*
 * Invalidate a specific lru_entry and update all cursors in
 * the lru accordingly.
 */
static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
{
	struct lru_iter *it;

	list_for_each_entry(it, &lru->iterators, list) {
		/* Move it->e forwards if necessary. */
		if (it->e == e) {
			it->e = to_le(it->e->list.next);
			if (it->e == e)
				it->e = NULL;
		}

		/* Move it->stop backwards if necessary. */
		if (it->stop == e) {
			it->stop = to_le(it->stop->list.prev);
			if (it->stop == e)
				it->stop = NULL;
		}
	}
}

/*--------------*/

/*
 * Remove a specific entry from the lru.
 */
static void lru_remove(struct lru *lru, struct lru_entry *le)
{
	lru_iter_invalidate(lru, le);
	if (lru->count == 1) {
		lru->cursor = NULL;
	} else {
		if (lru->cursor == &le->list)
			lru->cursor = lru->cursor->next;
		list_del(&le->list);
	}
	lru->count--;
}

/*
 * Mark as referenced.
 */
static inline void lru_reference(struct lru_entry *le)
{
	atomic_set(&le->referenced, 1);
}

/*--------------*/

/*
 * Remove the least recently used entry (approx), that passes the predicate.
 * Returns NULL on failure.
 */
enum evict_result {
	ER_EVICT,
	ER_DONT_EVICT,
	ER_STOP, /* stop looking for something to evict */
};

typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);

static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
{
	unsigned long tested = 0;
	struct list_head *h = lru->cursor;
	struct lru_entry *le;

	if (!h)
		return NULL;
	/*
	 * In the worst case we have to loop around twice.  Once to clear
	 * the reference flags, and then again to discover the predicate
	 * fails for all entries.
	 */
	while (tested < lru->count) {
		le = container_of(h, struct lru_entry, list);

		if (atomic_read(&le->referenced)) {
			atomic_set(&le->referenced, 0);
		} else {
			tested++;
			switch (pred(le, context)) {
			case ER_EVICT:
				/*
				 * Adjust the cursor, so we start the next
				 * search from here.
				 */
				lru->cursor = le->list.next;
				lru_remove(lru, le);
				return le;

			case ER_DONT_EVICT:
				break;

			case ER_STOP:
				lru->cursor = le->list.next;
				return NULL;
			}
		}

		h = h->next;

		if (!no_sleep)
			cond_resched();
	}

	return NULL;
}
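/*
 * Illustrative sketch (not part of the driver): how the clock hand in
 * lru_evict() approximates LRU.  Suppose the circular list holds A, B, C
 * with the cursor at A, and B was recently hit (referenced == 1):
 *
 *	pass A: referenced == 0, predicate says ER_EVICT -> A is removed
 *		and the cursor moves to B, so the next search starts there.
 *	pass B: referenced == 1 -> the bit is cleared and B is skipped;
 *		it gets a "second chance" and is only evicted if it is not
 *		referenced again before the hand comes back around.
 *
 * The 'tested' counter bounds the walk to at most two laps: one that may
 * only clear reference bits, and one that proves nothing is evictable.
 */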
/*--------------------------------------------------------------*/

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), kmalloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_KMALLOC = 1,
	DATA_MODE_GET_FREE_PAGES = 2,
	DATA_MODE_VMALLOC = 3,
	DATA_MODE_LIMIT = 4
};

struct dm_buffer {
	/* protected by the locks in dm_buffer_cache */
	struct rb_node node;

	/* immutable, so don't need protecting */
	sector_t block;
	void *data;
	unsigned char data_mode;		/* DATA_MODE_* */

	/*
	 * These two fields are used in isolation, so do not need
	 * a surrounding lock.
	 */
	atomic_t hold_count;
	unsigned long last_accessed;

	/*
	 * Everything else is protected by the mutex in
	 * dm_bufio_client
	 */
	unsigned long state;
	struct lru_entry lru;
	unsigned char list_mode;		/* LIST_* */
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned int dirty_start;
	unsigned int dirty_end;
	unsigned int write_start;
	unsigned int write_end;
	struct list_head write_list;
	struct dm_bufio_client *c;
	void (*end_io)(struct dm_buffer *b, blk_status_t bs);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	unsigned int stack_len;
	unsigned long stack_entries[MAX_STACK];
#endif
};

/*--------------------------------------------------------------*/

/*
 * The buffer cache manages buffers, particularly:
 *  - inc/dec of holder count
 *  - setting the last_accessed field
 *  - maintains clean/dirty state along with lru
 *  - selecting buffers that match predicates
 *
 * It does *not* handle:
 *  - allocation/freeing of buffers.
 *  - IO
 *  - Eviction or cache sizing.
 *
 * cache_get() and cache_put_and_wake() are threadsafe, you do not need
 * to protect these calls with a surrounding mutex.  All the other
 * methods are not threadsafe; they do use locking primitives, but only
 * enough to ensure get/put are threadsafe.
 */

struct buffer_tree {
	union {
		struct rw_semaphore lock;
		rwlock_t spinlock;
	} u;
	struct rb_root root;
} ____cacheline_aligned_in_smp;

struct dm_buffer_cache {
	struct lru lru[LIST_SIZE];
	/*
	 * We spread entries across multiple trees to reduce contention
	 * on the locks.
	 */
	unsigned int num_locks;
	bool no_sleep;
	struct buffer_tree trees[];
};

static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);

static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
{
	return dm_hash_locks_index(block, num_locks);
}

/* Get the buffer tree in the cache for the given block.  Doesn't lock it. */
static inline struct buffer_tree *cache_get_tree(struct dm_buffer_cache *bc,
						 sector_t block)
{
	return &bc->trees[cache_index(block, bc->num_locks)];
}
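/*
 * Editorial note on the lock striping above: dm_hash_locks_index()
 * (from "dm.h") hashes the block number down to [0, num_locks), so
 * blocks are spread across the trees and two threads touching
 * different blocks usually take different locks.  A minimal sketch of
 * the idea, assuming num_locks is a power of two:
 *
 *	index = hash(block) & (num_locks - 1);
 *	tree  = &bc->trees[index];
 *
 * Everything that operates on one buffer locks only that buffer's
 * tree; multi-buffer operations (see lock_history below) take the
 * locks one at a time as they walk.
 */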
/* Lock the given buffer tree in the cache for reading. */
static inline void cache_read_lock(struct dm_buffer_cache *bc,
				   struct buffer_tree *tree)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_lock_bh(&tree->u.spinlock);
	else
		down_read(&tree->u.lock);
}

/* Unlock the given buffer tree in the cache for reading. */
static inline void cache_read_unlock(struct dm_buffer_cache *bc,
				     struct buffer_tree *tree)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_unlock_bh(&tree->u.spinlock);
	else
		up_read(&tree->u.lock);
}

/* Lock the given buffer tree in the cache for writing. */
static inline void cache_write_lock(struct dm_buffer_cache *bc,
				    struct buffer_tree *tree)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_lock_bh(&tree->u.spinlock);
	else
		down_write(&tree->u.lock);
}

/* Unlock the given buffer tree in the cache for writing. */
static inline void cache_write_unlock(struct dm_buffer_cache *bc,
				      struct buffer_tree *tree)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_unlock_bh(&tree->u.spinlock);
	else
		up_write(&tree->u.lock);
}

/*
 * Sometimes we want to repeatedly get and drop locks as part of an iteration.
 * This struct helps avoid redundant drop and gets of the same lock.
 */
struct lock_history {
	struct dm_buffer_cache *cache;
	bool write;
	unsigned int previous;
	unsigned int no_previous;
};

static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
{
	lh->cache = cache;
	lh->write = write;
	lh->no_previous = cache->num_locks;
	lh->previous = lh->no_previous;
}

static void __lh_lock(struct lock_history *lh, unsigned int index)
{
	if (lh->write) {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			write_lock_bh(&lh->cache->trees[index].u.spinlock);
		else
			down_write(&lh->cache->trees[index].u.lock);
	} else {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			read_lock_bh(&lh->cache->trees[index].u.spinlock);
		else
			down_read(&lh->cache->trees[index].u.lock);
	}
}

static void __lh_unlock(struct lock_history *lh, unsigned int index)
{
	if (lh->write) {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			write_unlock_bh(&lh->cache->trees[index].u.spinlock);
		else
			up_write(&lh->cache->trees[index].u.lock);
	} else {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			read_unlock_bh(&lh->cache->trees[index].u.spinlock);
		else
			up_read(&lh->cache->trees[index].u.lock);
	}
}

/*
 * Make sure you call this since it will unlock the final lock.
 */
static void lh_exit(struct lock_history *lh)
{
	if (lh->previous != lh->no_previous) {
		__lh_unlock(lh, lh->previous);
		lh->previous = lh->no_previous;
	}
}
/*
 * Named 'next' because there is no corresponding
 * 'up/unlock' call since it's done automatically.
 */
static void lh_next(struct lock_history *lh, sector_t b)
{
	unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */

	if (lh->previous != lh->no_previous) {
		if (lh->previous != index) {
			__lh_unlock(lh, lh->previous);
			__lh_lock(lh, index);
			lh->previous = index;
		}
	} else {
		__lh_lock(lh, index);
		lh->previous = index;
	}
}
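/*
 * Illustrative sketch (not part of the driver): the intended calling
 * pattern for a multi-buffer walk.  lh_next() only drops and retakes a
 * lock when the walk crosses from one tree to another, so consecutive
 * buffers that hash to the same tree pay for a single lock:
 *
 *	struct lock_history lh;
 *
 *	lh_init(&lh, &c->cache, true);		// take write locks
 *	for (each buffer b of interest)
 *		lh_next(&lh, b->block);		// lock b's tree, unlocking
 *						// the previous tree if needed
 *	lh_exit(&lh);				// drop the final lock
 */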
static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
{
	return container_of(le, struct dm_buffer, lru);
}

static struct dm_buffer *list_to_buffer(struct list_head *l)
{
	struct lru_entry *le = list_entry(l, struct lru_entry, list);

	return le_to_buffer(le);
}

static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
{
	unsigned int i;

	bc->num_locks = num_locks;
	bc->no_sleep = no_sleep;

	for (i = 0; i < bc->num_locks; i++) {
		if (no_sleep)
			rwlock_init(&bc->trees[i].u.spinlock);
		else
			init_rwsem(&bc->trees[i].u.lock);
		bc->trees[i].root = RB_ROOT;
	}

	lru_init(&bc->lru[LIST_CLEAN]);
	lru_init(&bc->lru[LIST_DIRTY]);
}

static void cache_destroy(struct dm_buffer_cache *bc)
{
	unsigned int i;

	for (i = 0; i < bc->num_locks; i++)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));

	lru_destroy(&bc->lru[LIST_CLEAN]);
	lru_destroy(&bc->lru[LIST_DIRTY]);
}

/*--------------*/

/*
 * not threadsafe, or racy depending on how you look at it
 */
static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
{
	return bc->lru[list_mode].count;
}

static inline unsigned long cache_total(struct dm_buffer_cache *bc)
{
	return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
}

/*--------------*/

/*
 * Gets a specific buffer, indexed by block.
 * If the buffer is found then its holder count will be incremented and
 * lru_reference will be called.
 *
 * threadsafe
 */
static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
{
	struct rb_node *n = root->rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = block < b->block ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static void __cache_inc_buffer(struct dm_buffer *b)
{
	atomic_inc(&b->hold_count);
	WRITE_ONCE(b->last_accessed, jiffies);
}

static struct dm_buffer *cache_get(struct dm_buffer_cache *bc,
				   struct buffer_tree *tree, sector_t block)
{
	struct dm_buffer *b;

	/* Assuming tree == cache_get_tree(bc, block) */
	cache_read_lock(bc, tree);
	b = __cache_get(&tree->root, block);
	if (b) {
		lru_reference(&b->lru);
		__cache_inc_buffer(b);
	}
	cache_read_unlock(bc, tree);

	return b;
}

/*--------------*/

typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *);

/*
 * Evicts a buffer based on a predicate.  The oldest buffer that
 * matches the predicate will be selected.  In addition to the
 * predicate the hold_count of the selected buffer will be zero.
 */
struct evict_wrapper {
	struct lock_history *lh;
	b_predicate pred;
	void *context;
};

/*
 * Wraps the buffer predicate turning it into an lru predicate.  Adds
 * extra test for hold_count.
 */
static enum evict_result __evict_pred(struct lru_entry *le, void *context)
{
	struct evict_wrapper *w = context;
	struct dm_buffer *b = le_to_buffer(le);

	lh_next(w->lh, b->block);

	if (atomic_read(&b->hold_count))
		return ER_DONT_EVICT;

	return w->pred(b, w->context);
}

static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
				       b_predicate pred, void *context,
				       struct lock_history *lh)
{
	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
	struct lru_entry *le;
	struct dm_buffer *b;

	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
	if (!le)
		return NULL;

	b = le_to_buffer(le);
	/* __evict_pred will have locked the appropriate tree. */
	rb_erase(&b->node, &cache_get_tree(bc, b->block)->root);

	return b;
}

static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
				     b_predicate pred, void *context)
{
	struct dm_buffer *b;
	struct lock_history lh;

	lh_init(&lh, bc, true);
	b = __cache_evict(bc, list_mode, pred, context, &lh);
	lh_exit(&lh);

	return b;
}
/*--------------*/

/*
 * Mark a buffer as clean or dirty.  Not threadsafe.
 */
static void cache_mark(struct dm_buffer_cache *bc, struct buffer_tree *tree,
		       struct dm_buffer *b, int list_mode)
{
	/* Assuming tree == cache_get_tree(bc, b->block) */
	cache_write_lock(bc, tree);
	if (list_mode != b->list_mode) {
		lru_remove(&bc->lru[b->list_mode], &b->lru);
		b->list_mode = list_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
	cache_write_unlock(bc, tree);
}

/*--------------*/

/*
 * Runs through the lru associated with 'old_mode'; if the predicate matches
 * then it moves them to 'new_mode'.  Not threadsafe.
 */
static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			      b_predicate pred, void *context, struct lock_history *lh)
{
	struct lru_entry *le;
	struct dm_buffer *b;
	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};

	while (true) {
		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
		if (!le)
			break;

		b = le_to_buffer(le);
		b->list_mode = new_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
}

static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			    b_predicate pred, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, true);
	__cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
	lh_exit(&lh);
}

/*--------------*/

/*
 * Iterates through all clean or dirty entries calling a function for each
 * entry.  The callback may terminate the iteration early.  Not threadsafe.
 */

/*
 * Iterator functions should return one of these actions to indicate
 * how the iteration should proceed.
 */
enum it_action {
	IT_NEXT,
	IT_COMPLETE,
};

typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);

static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			    iter_fn fn, void *context, struct lock_history *lh)
{
	struct lru *lru = &bc->lru[list_mode];
	struct lru_entry *le, *first;

	if (!lru->cursor)
		return;

	first = le = to_le(lru->cursor);
	do {
		struct dm_buffer *b = le_to_buffer(le);

		lh_next(lh, b->block);

		switch (fn(b, context)) {
		case IT_NEXT:
			break;

		case IT_COMPLETE:
			return;
		}
		cond_resched();

		le = to_le(le->list.next);
	} while (le != first);
}

static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			  iter_fn fn, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, false);
	__cache_iterate(bc, list_mode, fn, context, &lh);
	lh_exit(&lh);
}
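/*
 * Illustrative sketch (not part of the driver): what an iter_fn looks
 * like.  write_one() below is the real in-tree user; this hypothetical
 * callback just counts buffers and stops early at a limit:
 *
 *	struct count_ctx { unsigned int n, limit; };
 *
 *	static enum it_action count_one(struct dm_buffer *b, void *context)
 *	{
 *		struct count_ctx *ctx = context;
 *
 *		return (++ctx->n >= ctx->limit) ? IT_COMPLETE : IT_NEXT;
 *	}
 *
 * It would be invoked as cache_iterate(&c->cache, LIST_DIRTY, count_one,
 * &ctx).
 */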
/*--------------*/

/*
 * Passes ownership of the buffer to the cache.  Returns false if the
 * buffer was already present (in which case ownership does not pass),
 * e.g. because of a race with another thread.
 *
 * Holder count should be 1 on insertion.
 *
 * Not threadsafe.
 */
static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block)
			return false;

		parent = *new;
		new = b->block < found->block ?
			&found->node.rb_left : &found->node.rb_right;
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, root);

	return true;
}

static bool cache_insert(struct dm_buffer_cache *bc, struct buffer_tree *tree,
			 struct dm_buffer *b)
{
	bool r;

	if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
		return false;

	/* Assuming tree == cache_get_tree(bc, b->block) */
	cache_write_lock(bc, tree);
	BUG_ON(atomic_read(&b->hold_count) != 1);
	r = __cache_insert(&tree->root, b);
	if (r)
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	cache_write_unlock(bc, tree);

	return r;
}

/*--------------*/

/*
 * Removes buffer from cache, ownership of the buffer passes back to the caller.
 * Fails if the hold_count is not one (i.e. unless the caller holds the only
 * reference).
 *
 * Not threadsafe.
 */
static bool cache_remove(struct dm_buffer_cache *bc, struct buffer_tree *tree,
			 struct dm_buffer *b)
{
	bool r;

	/* Assuming tree == cache_get_tree(bc, b->block) */
	cache_write_lock(bc, tree);

	if (atomic_read(&b->hold_count) != 1) {
		r = false;
	} else {
		r = true;
		rb_erase(&b->node, &tree->root);
		lru_remove(&bc->lru[b->list_mode], &b->lru);
	}

	cache_write_unlock(bc, tree);

	return r;
}

/*--------------*/

typedef void (*b_release)(struct dm_buffer *);

static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
{
	struct rb_node *n = root->rb_node;
	struct dm_buffer *b;
	struct dm_buffer *best = NULL;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		if (block <= b->block) {
			n = n->rb_left;
			best = b;
		} else {
			n = n->rb_right;
		}
	}

	return best;
}

static void __remove_range(struct dm_buffer_cache *bc,
			   struct rb_root *root,
			   sector_t begin, sector_t end,
			   b_predicate pred, b_release release)
{
	struct dm_buffer *b;

	while (true) {
		cond_resched();

		b = __find_next(root, begin);
		if (!b || (b->block >= end))
			break;

		begin = b->block + 1;

		if (atomic_read(&b->hold_count))
			continue;

		if (pred(b, NULL) == ER_EVICT) {
			rb_erase(&b->node, root);
			lru_remove(&bc->lru[b->list_mode], &b->lru);
			release(b);
		}
	}
}

static void cache_remove_range(struct dm_buffer_cache *bc,
			       sector_t begin, sector_t end,
			       b_predicate pred, b_release release)
{
	unsigned int i;

	BUG_ON(bc->no_sleep);
	for (i = 0; i < bc->num_locks; i++) {
		down_write(&bc->trees[i].u.lock);
		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
		up_write(&bc->trees[i].u.lock);
	}
}
/*----------------------------------------------------------------*/

/*
 * Linking of buffers:
 *	All buffers are linked to buffer_cache with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru field.  When the write finishes,
 *	the buffer cannot be relinked immediately (because we are in an
 *	interrupt context and relinking requires process context), so
 *	some clean-not-writing buffers can be held on the dirty lru too.
 *	They are later added to the clean lru in the process context.
 */
struct dm_bufio_client {
	struct block_device *bdev;
	unsigned int block_size;
	s8 sectors_per_block_bits;

	bool no_sleep;
	struct mutex lock;
	spinlock_t spinlock;

	int async_write_error;

	void (*alloc_callback)(struct dm_buffer *buf);
	void (*write_callback)(struct dm_buffer *buf);
	struct kmem_cache *slab_buffer;
	struct kmem_cache *slab_cache;
	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned int need_reserved_buffers;

	unsigned int minimum_buffers;

	sector_t start;

	struct shrinker *shrinker;
	struct work_struct shrink_work;
	atomic_long_t need_shrink;

	wait_queue_head_t free_buffer_wait;

	struct list_head client_list;

	/*
	 * Used by global_cleanup to sort the clients list.
	 */
	unsigned long oldest_buffer;

	struct dm_buffer_cache cache; /* must be last member */
};

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_lock_bh(&c->spinlock);
	else
		mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_unlock_bh(&c->spinlock);
	else
		mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(global_spinlock);

static unsigned int dm_bufio_max_age; /* no longer does anything */

static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_kmalloc;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count.
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct work_struct dm_bufio_replacement_work;

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif
/*----------------------------------------------------------------*/

static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{
	unsigned char data_mode;
	long diff;

	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_kmalloc,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	data_mode = b->data_mode;
	diff = (long)b->c->block_size;
	if (unlink)
		diff = -diff;

	spin_lock(&global_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	if (!unlink) {
		if (dm_bufio_current_allocated > dm_bufio_cache_size)
			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
	}

	spin_unlock(&global_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock)))
		return;
	if (WARN_ON(dm_bufio_client_count < 0))
		return;

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}
}
/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages.  Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed.  Don't use __get_free_pages if it
 * always fails (i.e. order > MAX_PAGE_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc.  This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (unlikely(c->block_size < PAGE_SIZE)) {
		*data_mode = DATA_MODE_KMALLOC;
		return kmalloc(c->block_size, gfp_mask | __GFP_RECLAIMABLE);
	}

	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
				c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	*data_mode = DATA_MODE_VMALLOC;

	return __vmalloc(c->block_size, gfp_mask);
}

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;

	case DATA_MODE_KMALLOC:
		kfree(data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}
	adjust_total_allocated(b, false);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b, true);
	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
}
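/*
 * Illustrative sketch (not part of the driver): how alloc_buffer_data()
 * picks a data mode, assuming a client with no dedicated slab cache and
 * PAGE_SIZE == 4096:
 *
 *	block_size	gfp_mask has __GFP_NORETRY	-> data mode
 *	----------	--------------------------	   ---------
 *	512		(any)				   DATA_MODE_KMALLOC
 *	65536		yes (may-fail path)		   DATA_MODE_GET_FREE_PAGES
 *	65536		no  (must-not-fail path)	   DATA_MODE_VMALLOC
 *
 * free_buffer_data() must mirror this choice exactly, which is why the
 * mode is recorded in dm_buffer::data_mode at allocation time.
 */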
/*
 *--------------------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------------------
 */

/*
 * dm-io completion routine.  It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
		     unsigned int n_sectors, unsigned int offset,
		     unsigned short ioprio)
{
	int r;
	struct dm_io_request io_req = {
		.bi_opf = op,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	r = dm_io(&io_req, 1, &region, NULL, ioprio);
	if (unlikely(r))
		b->end_io(b, errno_to_blk_status(r));
}

static void bio_complete(struct bio *bio)
{
	struct dm_buffer *b = bio->bi_private;
	blk_status_t status = bio->bi_status;

	bio_uninit(bio);
	kfree(bio);
	b->end_io(b, status);
}

static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
		    unsigned int n_sectors, unsigned int offset,
		    unsigned short ioprio)
{
	struct bio *bio;
	char *ptr;
	unsigned int len;

	bio = bio_kmalloc(1, GFP_NOWAIT);
	if (!bio) {
		use_dmio(b, op, sector, n_sectors, offset, ioprio);
		return;
	}
	bio_init_inline(bio, b->c->bdev, 1, op);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;
	bio->bi_ioprio = ioprio;

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	bio_add_virt_nofail(bio, ptr, len);

	submit_bio(bio);
}

static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{
	sector_t sector;

	if (likely(c->sectors_per_block_bits >= 0))
		sector = block << c->sectors_per_block_bits;
	else
		sector = block * (c->block_size >> SECTOR_SHIFT);
	sector += c->start;

	return sector;
}

static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio,
		      void (*end_io)(struct dm_buffer *, blk_status_t))
{
	unsigned int n_sectors;
	sector_t sector;
	unsigned int offset, end, align;

	b->end_io = end_io;

	sector = block_to_sector(b->c, b->block);

	if (op != REQ_OP_WRITE) {
		n_sectors = b->c->block_size >> SECTOR_SHIFT;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
		align = max(DM_BUFIO_WRITE_ALIGN,
			    bdev_physical_block_size(b->c->bdev));
		offset &= -align;
		end += align - 1;
		end &= -align;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (b->data_mode != DATA_MODE_VMALLOC)
		use_bio(b, op, sector, n_sectors, offset, ioprio);
	else
		use_dmio(b, op, sector, n_sectors, offset, ioprio);
}
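/*
 * Illustrative sketch (not part of the driver): the write-rounding
 * arithmetic above with concrete numbers.  Assume align == 4096 and a
 * dirty range of bytes [5000, 9000) within the block:
 *
 *	offset = 5000 & ~4095         = 4096	(round the start down)
 *	end    = (9000 + 4095) & ~4095 = 12288	(round the end up)
 *
 * so 8192 bytes (16 sectors) are written starting 4096 bytes into the
 * block.  'offset &= -align' works because align is a power of two, so
 * -align is the same mask as ~(align - 1).
 */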
/*
 *--------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------
 */

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
{
	b->write_error = status;
	if (unlikely(status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
			      blk_status_to_errno(status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it.  We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	b->write_start = b->dirty_start;
	b->write_end = b->dirty_end;

	if (!write_list)
		submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(atomic_read(&b->hold_count));

	/* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
	if (!smp_load_acquire(&b->state))	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

static enum evict_result is_clean(struct dm_buffer *b, void *context)
{
	struct dm_bufio_client *c = context;

	/* These should never happen */
	if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state)))
		return ER_DONT_EVICT;
	if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state)))
		return ER_DONT_EVICT;
	if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN))
		return ER_DONT_EVICT;

	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
	    unlikely(test_bit(B_READING, &b->state)))
		return ER_DONT_EVICT;

	return ER_EVICT;
}

static enum evict_result is_dirty(struct dm_buffer *b, void *context)
{
	/* These should never happen */
	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
		return ER_DONT_EVICT;
	if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY))
		return ER_DONT_EVICT;

	return ER_EVICT;
}
/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c);
	if (b) {
		/* this also waits for pending reads */
		__make_buffer_clean(b);
		return b;
	}

	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		return NULL;

	b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL);
	if (b) {
		__make_buffer_clean(b);
		return b;
	}

	return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	/*
	 * It's possible to miss a wake up event since we don't always
	 * hold c->lock when wake_up is called.  So we have a timeout here,
	 * just in case.
	 */
	io_schedule_timeout(5 * HZ);

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer.  If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOWAIT: don't wait and don't print a warning in case of
	 *		    failure; if we need to sleep we'll release our
	 *		    mutex and wait ourselves.
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_to_buffer(c->reserved_buffers.next);
			list_del(&b->lru.list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}
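/*
 * Editorial summary of the fallback ladder above, in order:
 *
 *	1. GFP_NOWAIT allocation with the client lock held (cheap try).
 *	2. GFP_NOIO allocation with the lock dropped (may sleep, never
 *	   recurses into I/O, so it is safe on the writeback path).
 *	3. Take a buffer from the client's reserved pool.
 *	4. Evict and reuse some unheld buffer (__get_unclaimed_buffer).
 *	5. Sleep until another thread frees or releases a buffer, then
 *	   start over.
 *
 * NF_PREFETCH bails out after step 1 because prefetch is best-effort.
 */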
/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	b->block = -1;
	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru.list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	/*
	 * We hold the bufio lock here, so no one can add entries to the
	 * wait queue anyway.
	 */
	if (unlikely(waitqueue_active(&c->free_buffer_wait)))
		wake_up(&c->free_buffer_wait);
}

static enum evict_result cleaned(struct dm_buffer *b, void *context)
{
	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
		return ER_DONT_EVICT; /* should never happen */

	if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state))
		return ER_DONT_EVICT;
	else
		return ER_EVICT;
}

static void __move_clean_buffers(struct dm_bufio_client *c)
{
	cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL);
}

struct write_context {
	int no_wait;
	struct list_head *write_list;
};

static enum it_action write_one(struct dm_buffer *b, void *context)
{
	struct write_context *wc = context;

	if (wc->no_wait && test_bit(B_WRITING, &b->state))
		return IT_COMPLETE;

	__write_dirty_buffer(b, wc->write_list);
	return IT_NEXT;
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct write_context wc = {.no_wait = no_wait, .write_list = write_list};

	__move_clean_buffers(c);
	cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc);
}

/*
 * Check if we're over the watermark.
 * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times
 * the number of clean buffers, start writing dirty buffers back
 * asynchronously.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	if (cache_count(&c->cache, LIST_DIRTY) >
	    cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO)
		__write_dirty_buffers_async(c, 1, write_list);
}
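/*
 * Illustrative numbers (not part of the driver): with
 * DM_BUFIO_WRITEBACK_RATIO == 3, a cache holding 100 clean and 301
 * dirty buffers is over the watermark (301 > 100 * 3), so the next
 * buffer acquisition kicks off asynchronous writeback of the dirty
 * list; at 300 dirty buffers it would not.
 */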
/*
 *--------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------
 */

static void cache_put_and_wake(struct dm_bufio_client *c,
			       struct buffer_tree *tree, struct dm_buffer *b)
{
	bool wake;

	/* Assuming tree == cache_get_tree(&c->cache, b->block) */
	cache_read_lock(&c->cache, tree);
	BUG_ON(!atomic_read(&b->hold_count));
	wake = atomic_dec_and_test(&b->hold_count);
	cache_read_unlock(&c->cache, tree);

	/*
	 * Relying on waitqueue_active() is racy, but we sleep
	 * with schedule_timeout anyway.
	 */
	if (wake && unlikely(waitqueue_active(&c->free_buffer_wait)))
		wake_up(&c->free_buffer_wait);
}

/*
 * This assumes you have already checked the cache to see if the buffer
 * is already present (it will recheck after dropping the lock for allocation).
 */
static struct dm_buffer *__bufio_new(struct dm_bufio_client *c,
				     struct buffer_tree *tree, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	/* This can't be called with NF_GET */
	if (WARN_ON_ONCE(nf == NF_GET))
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the buffer tree.
	 */
	b = cache_get(&c->cache, tree, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	atomic_set(&b->hold_count, 1);
	WRITE_ONCE(b->last_accessed, jiffies);
	b->block = block;
	b->read_error = 0;
	b->write_error = 0;
	b->list_mode = LIST_CLEAN;

	if (nf == NF_FRESH)
		b->state = 0;
	else {
		b->state = 1 << B_READING;
		*need_submit = 1;
	}

	/*
	 * We mustn't insert into the cache until the B_READING state
	 * is set.  Otherwise another thread could get it and use
	 * it before it had been read.
	 */
	cache_insert(&c->cache, tree, b);

	return b;

found_buffer:
	if (nf == NF_PREFETCH) {
		cache_put_and_wake(c, tree, b);
		return NULL;
	}

	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used.  Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
		cache_put_and_wake(c, tree, b);
		return NULL;
	}

	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct dm_buffer *b, blk_status_t status)
{
	b->read_error = status;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}
/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp,
		      unsigned short ioprio)
{
	struct buffer_tree *tree;
	int need_submit = 0;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	*bp = NULL;

	/*
	 * Fast path, hopefully the block is already in the cache.  No need
	 * to get the client lock for this.
	 */
	tree = cache_get_tree(&c->cache, block);
	b = cache_get(&c->cache, tree, block);
	if (b) {
		if (nf == NF_PREFETCH) {
			cache_put_and_wake(c, tree, b);
			return NULL;
		}

		/*
		 * Note: it is essential that we don't wait for the buffer to be
		 * read if dm_bufio_get function is used.  Both dm_bufio_get and
		 * dm_bufio_prefetch can be used in the driver request routine.
		 * If the user called both dm_bufio_prefetch and dm_bufio_get on
		 * the same buffer, it would deadlock if we waited.
		 */
		if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
			cache_put_and_wake(c, tree, b);
			return NULL;
		}
	}

	if (!b) {
		if (nf == NF_GET)
			return NULL;

		dm_bufio_lock(c);
		b = __bufio_new(c, tree, block, nf, &need_submit, &write_list);
		dm_bufio_unlock(c);
	}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	if (b && (atomic_read(&b->hold_count) == 1))
		buffer_record_stack(b);
#endif

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, REQ_OP_READ, ioprio, read_endio);

	if (nf != NF_GET)	/* we already tested this condition above */
		wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = blk_status_to_errno(b->read_error);

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block,
			     struct dm_buffer **bp, unsigned short ioprio)
{
	if (WARN_ON_ONCE(dm_bufio_in_request()))
		return ERR_PTR(-EINVAL);

	return new_read(c, block, NF_READ, bp, ioprio);
}

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
				struct dm_buffer **bp, unsigned short ioprio)
{
	return __dm_bufio_read(c, block, bp, ioprio);
}
EXPORT_SYMBOL_GPL(dm_bufio_read_with_ioprio);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	if (WARN_ON_ONCE(dm_bufio_in_request()))
		return ERR_PTR(-EINVAL);

	return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
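/*
 * Illustrative sketch (not part of the driver): a typical read-modify-
 * write cycle from a dm target, using only functions exported above and
 * below.  'c' is a client previously set up with dm_bufio_client_create()
 * (declared in <linux/dm-bufio.h>); new_contents/len are hypothetical:
 *
 *	struct dm_buffer *bp;
 *	void *data = dm_bufio_read(c, block, &bp);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memcpy(data, new_contents, len);
 *	dm_bufio_mark_buffer_dirty(bp);
 *	dm_bufio_release(bp);
 *	return dm_bufio_write_dirty_buffers(c);	// sync the write out
 *
 * The hold count taken by dm_bufio_read() must always be dropped with
 * dm_bufio_release(), even on the error-free path.
 */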
static void __dm_bufio_prefetch(struct dm_bufio_client *c,
				sector_t block, unsigned int n_blocks,
				unsigned short ioprio)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	if (WARN_ON_ONCE(dm_bufio_in_request()))
		return; /* should never happen */

	blk_start_plug(&plug);

	for (; n_blocks--; block++) {
		struct buffer_tree *tree;
		struct dm_buffer *b;
		int need_submit;

		tree = cache_get_tree(&c->cache, block);
		b = cache_get(&c->cache, tree, block);
		if (b) {
			/* already in cache */
			cache_put_and_wake(c, tree, b);
			continue;
		}

		dm_bufio_lock(c);
		b = __bufio_new(c, tree, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, REQ_OP_READ, ioprio, read_endio);
			dm_bufio_release(b);

			cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
		dm_bufio_unlock(c);
	}

flush_plug:
	blk_finish_plug(&plug);
}

void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks)
{
	return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block,
				   unsigned int n_blocks, unsigned short ioprio)
{
	return __dm_bufio_prefetch(c, block, n_blocks, ioprio);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch_with_ioprio);

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;
	struct buffer_tree *tree = cache_get_tree(&c->cache, b->block);

	/*
	 * If there were errors on the buffer, and the buffer is not
	 * to be written, free the buffer.  There is no point in caching
	 * an invalid buffer.
	 */
	if ((b->read_error || b->write_error) &&
	    !test_bit_acquire(B_READING, &b->state) &&
	    !test_bit(B_WRITING, &b->state) &&
	    !test_bit(B_DIRTY, &b->state)) {
		dm_bufio_lock(c);

		/* cache remove can fail if there are other holders */
		if (cache_remove(&c->cache, tree, b)) {
			__free_buffer_wake(b);
			dm_bufio_unlock(c);
			return;
		}

		dm_bufio_unlock(c);
	}

	cache_put_and_wake(c, tree, b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned int start, unsigned int end)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(start >= end);
	BUG_ON(end > b->c->block_size);

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state)) {
		b->dirty_start = start;
		b->dirty_end = end;
		cache_mark(&c->cache, cache_get_tree(&c->cache, b->block), b,
			   LIST_DIRTY);
	} else {
		if (start < b->dirty_start)
			b->dirty_start = start;
		if (end > b->dirty_end)
			b->dirty_end = end;
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
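/*
 * Illustrative numbers (not part of the driver): successive calls to
 * dm_bufio_mark_partial_buffer_dirty() merge into a single range, so
 * marking bytes [0, 512) and then [4096, 8192) of one buffer yields
 * dirty_start == 0, dirty_end == 8192, and the eventual write covers
 * the whole span including the untouched middle.  Callers that dirty
 * widely separated regions therefore don't save I/O by using the
 * partial variant twice.
 */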
void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;
	struct buffer_tree *tree = cache_get_tree(&c->cache, b->block);

	/*
	 * If there were errors on the buffer, and the buffer is not
	 * to be written, free the buffer. There is no point in caching
	 * an invalid buffer.
	 */
	if ((b->read_error || b->write_error) &&
	    !test_bit_acquire(B_READING, &b->state) &&
	    !test_bit(B_WRITING, &b->state) &&
	    !test_bit(B_DIRTY, &b->state)) {
		dm_bufio_lock(c);

		/* cache remove can fail if there are other holders */
		if (cache_remove(&c->cache, tree, b)) {
			__free_buffer_wake(b);
			dm_bufio_unlock(c);
			return;
		}

		dm_bufio_unlock(c);
	}

	cache_put_and_wake(c, tree, b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned int start, unsigned int end)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(start >= end);
	BUG_ON(end > b->c->block_size);

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state)) {
		b->dirty_start = start;
		b->dirty_end = end;
		cache_mark(&c->cache, cache_get_tree(&c->cache, b->block), b,
			   LIST_DIRTY);
	} else {
		if (start < b->dirty_start)
			b->dirty_start = start;
		if (end > b->dirty_end)
			b->dirty_end = end;
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	if (WARN_ON_ONCE(dm_bufio_in_request()))
		return; /* should never happen */

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush the hardware disk cache.
 */
static bool is_writing(struct lru_entry *e, void *context)
{
	struct dm_buffer *b = le_to_buffer(e);

	return test_bit(B_WRITING, &b->state);
}

int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long nr_buffers;
	struct lru_entry *e;
	struct lru_iter it;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

	nr_buffers = cache_count(&c->cache, LIST_DIRTY);
	lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it);
	while ((e = lru_iter_next(&it, is_writing, c))) {
		struct dm_buffer *b = le_to_buffer(e);
		struct buffer_tree *tree;

		__cache_inc_buffer(b);

		BUG_ON(test_bit(B_READING, &b->state));

		if (nr_buffers) {
			nr_buffers--;
			dm_bufio_unlock(c);
			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
			dm_bufio_lock(c);
		} else {
			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
		}

		tree = cache_get_tree(&c->cache, b->block);

		if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state))
			cache_mark(&c->cache, tree, b, LIST_CLEAN);

		cache_put_and_wake(c, tree, b);

		cond_resched();
	}
	lru_iter_end(&it);

	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
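
/*
 * Illustrative write-back pattern (a sketch, not taken from a real
 * target): modify a buffer in place, mark the modified byte range dirty
 * and use dm_bufio_write_dirty_buffers() as a commit point; it returns
 * only after the dirty data has reached the device and the disk cache
 * has been flushed.
 *
 *	data = dm_bufio_read(c, block, &bp);
 *	if (!IS_ERR(data)) {
 *		memcpy(data + offset, src, len);
 *		dm_bufio_mark_partial_buffer_dirty(bp, offset, offset + len);
 *		dm_bufio_release(bp);
 *		r = dm_bufio_write_dirty_buffers(c);
 *	}
 */
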
/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	if (WARN_ON_ONCE(dm_bufio_in_request()))
		return -EINVAL;

	return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * Use dm-io to send a discard request to the device.
 */
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
{
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_DISCARD | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = block_to_sector(c, block),
		.count = block_to_sector(c, count),
	};

	if (WARN_ON_ONCE(dm_bufio_in_request()))
		return -EINVAL; /* discards are optional */

	return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);

static void forget_buffer(struct dm_bufio_client *c, sector_t block)
{
	struct buffer_tree *tree = cache_get_tree(&c->cache, block);
	struct dm_buffer *b;

	b = cache_get(&c->cache, tree, block);
	if (b) {
		if (likely(!smp_load_acquire(&b->state))) {
			if (cache_remove(&c->cache, tree, b))
				__free_buffer_wake(b);
			else
				cache_put_and_wake(c, tree, b);
		} else {
			cache_put_and_wake(c, tree, b);
		}
	}
}

/*
 * Free the given buffer.
 *
 * This is just a hint; if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	dm_bufio_lock(c);
	forget_buffer(c, block);
	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_forget);

static enum evict_result idle(struct dm_buffer *b, void *context)
{
	return b->state ? ER_DONT_EVICT : ER_EVICT;
}

void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
{
	dm_bufio_lock(c);
	cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake);
	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);

unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	sector_t s = bdev_nr_sectors(c->bdev);

	if (s >= c->start)
		s -= c->start;
	else
		s = 0;
	if (likely(c->sectors_per_block_bits >= 0))
		s >>= c->sectors_per_block_bits;
	else
		sector_div(s, c->block_size >> SECTOR_SHIFT);
	return s;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
{
	return c->dm_io;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);
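
/*
 * Sketch of per-buffer auxiliary data (illustrative; "struct my_aux" is
 * a made-up name): a client created with aux_size == sizeof(struct
 * my_aux) can keep private state next to each buffer.
 *
 *	struct my_aux *aux = dm_bufio_get_aux_data(bp);
 *
 * The space lives directly behind struct dm_buffer (hence "return b + 1"
 * above); it is not initialized by dm-bufio and goes away when the
 * buffer is freed or evicted.
 */
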
static enum it_action warn_leak(struct dm_buffer *b, void *context)
{
	bool *warned = context;

	WARN_ON(!(*warned));
	*warned = true;
	DMERR("leaked buffer %llx, hold count %u, list %d",
	      (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	stack_trace_print(b->stack_entries, b->stack_len, 1);
	/* mark unclaimed to avoid WARN_ON at end of drop_buffers() */
	atomic_set(&b->hold_count, 0);
#endif
	return IT_NEXT;
}

static void drop_buffers(struct dm_bufio_client *c)
{
	int i;
	struct dm_buffer *b;

	if (WARN_ON(dm_bufio_in_request()))
		return; /* should never happen */

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++) {
		bool warned = false;

		cache_iterate(&c->cache, i, warn_leak, &warned);
	}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);
#endif

	for (i = 0; i < LIST_SIZE; i++)
		WARN_ON(cache_count(&c->cache, i));

	dm_bufio_unlock(c);
}

static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);

	if (likely(c->sectors_per_block_bits >= 0))
		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
	else
		retain_bytes /= c->block_size;

	return retain_bytes;
}

static void __scan(struct dm_bufio_client *c)
{
	int l;
	struct dm_buffer *b;
	unsigned long freed = 0;
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long count = cache_total(&c->cache);

	for (l = 0; l < LIST_SIZE; l++) {
		while (true) {
			if (count - freed <= retain_target)
				atomic_long_set(&c->need_shrink, 0);
			if (!atomic_long_read(&c->need_shrink))
				break;

			b = cache_evict(&c->cache, l,
					l == LIST_CLEAN ? is_clean : is_dirty, c);
			if (!b)
				break;

			__make_buffer_clean(b);
			__free_buffer_wake(b);

			atomic_long_dec(&c->need_shrink);
			freed++;

			if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) {
				dm_bufio_unlock(c);
				cond_resched();
				dm_bufio_lock(c);
			}
		}
	}
}

static void shrink_work(struct work_struct *w)
{
	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);

	dm_bufio_lock(c);
	__scan(c);
	dm_bufio_unlock(c);
}

static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;

	c = shrink->private_data;
	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
	queue_work(dm_bufio_wq, &c->shrink_work);

	return sc->nr_to_scan;
}

static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c = shrink->private_data;
	unsigned long count = cache_total(&c->cache);
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);

	if (unlikely(count < retain_target))
		count = 0;
	else
		count -= retain_target;

	if (unlikely(count < queued_for_cleanup))
		count = 0;
	else
		count -= queued_for_cleanup;

	return count;
}
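
/*
 * Worked example of the shrinker arithmetic above (all values made up):
 * with retain_bytes set to 1 MiB and 4 KiB blocks, sectors_per_block_bits
 * is 3, so get_retain_buffers() yields 1 MiB >> (3 + SECTOR_SHIFT), i.e.
 * 256 buffers.  If the cache holds 1000 buffers and 100 are already
 * queued for cleanup, dm_bufio_shrink_count() reports 1000 - 256 - 100 =
 * 644 reclaimable objects.
 */
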
/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
					       unsigned int reserved_buffers, unsigned int aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *),
					       unsigned int flags)
{
	int r;
	unsigned int num_locks;
	struct dm_bufio_client *c;
	char slab_name[64];
	static atomic_t seqno = ATOMIC_INIT(0);

	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
		DMERR("%s: block size not specified or is not multiple of 512b", __func__);
		r = -EINVAL;
		goto bad_client;
	}

	num_locks = dm_num_hash_locks();
	c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);

	c->bdev = bdev;
	c->block_size = block_size;
	if (is_power_of_2(block_size))
		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
	else
		c->sectors_per_block_bits = -1;

	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
		c->no_sleep = true;
		static_branch_inc(&no_sleep_enabled);
	}

	mutex_init(&c->lock);
	spin_lock_init(&c->spinlock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	if (block_size <= KMALLOC_MAX_SIZE && !is_power_of_2(block_size)) {
		unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);

		snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u-%u",
			 block_size, atomic_inc_return(&seqno));
		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
						  SLAB_RECLAIM_ACCOUNT, NULL);
		if (!c->slab_cache) {
			r = -ENOMEM;
			goto bad;
		}
	}
	if (aux_size)
		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u-%u",
			 aux_size, atomic_inc_return(&seqno));
	else
		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u",
			 atomic_inc_return(&seqno));
	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
					   0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (!c->slab_buffer) {
		r = -ENOMEM;
		goto bad;
	}

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad;
		}
		__free_buffer_wake(b);
	}

	INIT_WORK(&c->shrink_work, shrink_work);
	atomic_long_set(&c->need_shrink, 0);

	c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)",
				     MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
	if (!c->shrinker) {
		r = -ENOMEM;
		goto bad;
	}

	c->shrinker->count_objects = dm_bufio_shrink_count;
	c->shrinker->scan_objects = dm_bufio_shrink_scan;
	c->shrinker->seeks = 1;
	c->shrinker->batch = 0;
	c->shrinker->private_data = c;

	shrinker_register(c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	return c;

bad:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);

		list_del(&b->lru.list);
		free_buffer(b);
	}
	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	mutex_destroy(&c->lock);
	if (c->no_sleep)
		static_branch_dec(&no_sleep_enabled);
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
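
/*
 * Typical creation sketch (hypothetical caller; 4 KiB blocks, one
 * reserved buffer, no auxiliary data and no callbacks):
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	// ... dm_bufio_read(), dm_bufio_mark_buffer_dirty(), ...
 *	dm_bufio_client_destroy(c);
 */
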
/*
 * Free the buffering interface.
 * It is required that there are no references to any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned int i;

	drop_buffers(c);

	shrinker_free(c->shrinker);
	flush_work(&c->shrink_work);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	WARN_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);

		list_del(&b->lru.list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (cache_count(&c->cache, i))
			DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i));

	for (i = 0; i < LIST_SIZE; i++)
		WARN_ON(cache_count(&c->cache, i));

	cache_destroy(&c->cache);
	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
	mutex_destroy(&c->lock);
	if (c->no_sleep)
		static_branch_dec(&no_sleep_enabled);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

void dm_bufio_client_reset(struct dm_bufio_client *c)
{
	drop_buffers(c);
	flush_work(&c->shrink_work);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_reset);

void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
{
	c->start = start;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);

/*--------------------------------------------------------------*/

/*
 * Global cleanup tries to evict the oldest buffers from across _all_
 * the clients.  It does this by repeatedly evicting a few buffers from
 * the client that holds the oldest buffer.  It's approximate, but hopefully
 * good enough.
 */
static struct dm_bufio_client *__pop_client(void)
{
	struct list_head *h;

	if (list_empty(&dm_bufio_all_clients))
		return NULL;

	h = dm_bufio_all_clients.next;
	list_del(h);
	return container_of(h, struct dm_bufio_client, client_list);
}

/*
 * Inserts the client in the global client list based on its
 * 'oldest_buffer' field.
 */
static void __insert_client(struct dm_bufio_client *new_client)
{
	struct dm_bufio_client *c;
	struct list_head *h = dm_bufio_all_clients.next;

	while (h != &dm_bufio_all_clients) {
		c = container_of(h, struct dm_bufio_client, client_list);
		if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer))
			break;
		h = h->next;
	}

	list_add_tail(&new_client->client_list, h);
}
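
/*
 * Small illustration (hypothetical jiffies values): clients whose
 * oldest_buffer timestamps are 1000, 2000 and 3000 sit in the list in
 * that order, so __pop_client() hands back the client holding the
 * globally oldest buffer; after a few of its buffers are evicted,
 * __insert_client() re-files it according to its new oldest_buffer.
 */
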
static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
{
	/* In no-sleep mode, we cannot wait on IO. */
	if (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep) {
		if (test_bit_acquire(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return ER_DONT_EVICT;
	}
	return ER_EVICT;
}

static unsigned long __evict_a_few(unsigned long nr_buffers)
{
	struct dm_bufio_client *c;
	unsigned long oldest_buffer = jiffies;
	unsigned long last_accessed;
	unsigned long count;
	struct dm_buffer *b;

	c = __pop_client();
	if (!c)
		return 0;

	dm_bufio_lock(c);

	for (count = 0; count < nr_buffers; count++) {
		b = cache_evict(&c->cache, LIST_CLEAN, select_for_evict, NULL);
		if (!b)
			break;

		last_accessed = READ_ONCE(b->last_accessed);
		if (time_after_eq(oldest_buffer, last_accessed))
			oldest_buffer = last_accessed;

		__make_buffer_clean(b);
		__free_buffer_wake(b);

		if (need_resched()) {
			dm_bufio_unlock(c);
			cond_resched();
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

	if (count)
		c->oldest_buffer = oldest_buffer;
	__insert_client(c);

	return count;
}

static void check_watermarks(void)
{
	LIST_HEAD(write_list);
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);
	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
		dm_bufio_lock(c);
		__check_watermark(c, &write_list);
		dm_bufio_unlock(c);
	}
	mutex_unlock(&dm_bufio_clients_lock);

	__flush_write_list(&write_list);
}

static void evict_old(void)
{
	unsigned long threshold = dm_bufio_cache_size -
		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;

	mutex_lock(&dm_bufio_clients_lock);
	while (dm_bufio_current_allocated > threshold) {
		if (!__evict_a_few(64))
			break;
		cond_resched();
	}
	mutex_unlock(&dm_bufio_clients_lock);
}

static void do_global_cleanup(struct work_struct *w)
{
	check_watermarks();
	evict_old();
}

/*
 *--------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------
 */

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_kmalloc = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = alloc_workqueue("dm_bufio_cache",
				      WQ_MEM_RECLAIM | WQ_PERCPU, 0);
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);

	return 0;
}
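
/*
 * Worked example of the limit computed above (hypothetical machine,
 * assuming DM_BUFIO_MEMORY_PERCENT is 2): with 4 GiB of low memory the
 * default cache size comes out at roughly 82 MiB.  On 32-bit kernels
 * the vmalloc clamp usually dominates, e.g. DM_BUFIO_VMALLOC_PERCENT of
 * a 128 MiB vmalloc area.
 */
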
/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;

	destroy_workqueue(dm_bufio_wq);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	WARN_ON(bug); /* leaks are not worth crashing the system */
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
MODULE_PARM_DESC(max_age_seconds, "No longer does anything");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_kmalloc_bytes, dm_bufio_allocated_kmalloc, ulong, 0444);
MODULE_PARM_DESC(allocated_kmalloc_bytes, "Memory allocated with kmalloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");
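
/*
 * The parameters above appear under /sys/module/dm_bufio/parameters/.
 * For example (illustrative shell session, run as root):
 *
 *	echo 67108864 > /sys/module/dm_bufio/parameters/max_cache_size_bytes
 *
 * would cap the cache at 64 MiB across all clients.
 */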