/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
#include <linux/jump_label.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when the number of dirty buffers exceeds
 *	DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_RATIO	3
#define DM_BUFIO_LOW_WATERMARK_RATIO	16

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The nr of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to buffer_tree with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too. They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;
	spinlock_t spinlock;
	bool no_sleep;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	s8 sectors_per_block_bits;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);
	struct kmem_cache *slab_buffer;
	struct kmem_cache *slab_cache;
	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	sector_t start;

	int async_write_error;

	struct list_head client_list;

	struct shrinker shrinker;
	struct work_struct shrink_work;
	atomic_long_t need_shrink;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	struct list_head global_list;
	sector_t block;
	void *data;
	unsigned char data_mode;		/* DATA_MODE_* */
	unsigned char list_mode;		/* LIST_* */
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned accessed;
	unsigned hold_count;
	unsigned long state;
	unsigned long last_accessed;
	unsigned dirty_start;
	unsigned dirty_end;
	unsigned write_start;
	unsigned write_end;
	struct dm_bufio_client *c;
	struct list_head write_list;
	void (*end_io)(struct dm_buffer *, blk_status_t);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	unsigned int stack_len;
	unsigned long stack_entries[MAX_STACK];
#endif
};

static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_lock_bh(&c->spinlock);
	else
		mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		return spin_trylock_bh(&c->spinlock);
	else
		return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_unlock_bh(&c->spinlock);
	else
		mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(global_spinlock);

static LIST_HEAD(global_queue);

static unsigned long global_num = 0;

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;


#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = block < b->block ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;
	struct dm_buffer *best = NULL;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		if (block <= b->block) {
			n = n->rb_left;
			best = b;
		} else {
			n = n->rb_right;
		}
	}

	return best;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = b->block < found->block ?
			&found->node.rb_left : &found->node.rb_right;
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

/*----------------------------------------------------------------*/

static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{
	unsigned char data_mode;
	long diff;

	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	data_mode = b->data_mode;
	diff = (long)b->c->block_size;
	if (unlink)
		diff = -diff;

	spin_lock(&global_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	b->accessed = 1;

	if (!unlink) {
		list_add(&b->global_list, &global_queue);
		global_num++;
		if (dm_bufio_current_allocated > dm_bufio_cache_size)
			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
	} else {
		list_del(&b->global_list);
		global_num--;
	}

	spin_unlock(&global_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
				c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */
	if (gfp_mask & __GFP_NORETRY) {
		unsigned noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(c->block_size, gfp_mask);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}

	return __vmalloc(c->block_size, gfp_mask);
}

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
}

/*
 * Link buffer to the buffer tree and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;

	adjust_total_allocated(b, false);
}

/*
 * Unlink buffer from the buffer tree and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);

	adjust_total_allocated(b, true);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	b->accessed = 1;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is small enough and it is not vmalloced, try using the
 * bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
		     unsigned n_sectors, unsigned offset)
{
	int r;
	struct dm_io_request io_req = {
		.bi_opf = op,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	r = dm_io(&io_req, 1, &region, NULL);
	if (unlikely(r))
		b->end_io(b, errno_to_blk_status(r));
}

static void bio_complete(struct bio *bio)
{
	struct dm_buffer *b = bio->bi_private;
	blk_status_t status = bio->bi_status;
	bio_uninit(bio);
	kfree(bio);
	b->end_io(b, status);
}

static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
		    unsigned n_sectors, unsigned offset)
{
	struct bio *bio;
	char *ptr;
	unsigned vec_size, len;

	vec_size = b->c->block_size >> PAGE_SHIFT;
	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
		vec_size += 2;

	bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
	if (!bio) {
dmio:
		use_dmio(b, op, sector, n_sectors, offset);
		return;
	}
	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	do {
		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
				  offset_in_page(ptr))) {
			bio_put(bio);
			goto dmio;
		}

		len -= this_step;
		ptr += this_step;
	} while (len > 0);

	submit_bio(bio);
}

static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{
	sector_t sector;

	if (likely(c->sectors_per_block_bits >= 0))
		sector = block << c->sectors_per_block_bits;
	else
		sector = block * (c->block_size >> SECTOR_SHIFT);
	sector += c->start;

	return sector;
}

static void submit_io(struct dm_buffer *b, enum req_op op,
		      void (*end_io)(struct dm_buffer *, blk_status_t))
{
	unsigned n_sectors;
	sector_t sector;
	unsigned offset, end;

	b->end_io = end_io;

	sector = block_to_sector(b->c, b->block);

	if (op != REQ_OP_WRITE) {
		n_sectors = b->c->block_size >> SECTOR_SHIFT;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (b->data_mode != DATA_MODE_VMALLOC)
		use_bio(b, op, sector, n_sectors, offset);
	else
		use_dmio(b, op, sector, n_sectors, offset);
}
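/*
 * Worked example of the mapping above (illustrative only, assuming
 * block_size = 4096 and start = 0): sectors_per_block_bits is
 * __ffs(4096) - SECTOR_SHIFT = 12 - 9 = 3, so block 10 maps to sector
 * 10 << 3 = 80.  For a partial write with write_start = 100 and
 * write_end = 612, submit_io() widens the range to the
 * DM_BUFIO_WRITE_ALIGN boundaries (offset = 0, end = 4096), i.e. it
 * writes 8 sectors starting at the block's first sector.
 */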
/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
{
	b->write_error = status;
	if (unlikely(status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
			      blk_status_to_errno(status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	b->write_start = b->dirty_start;
	b->write_end = b->dirty_end;

	if (!write_list)
		submit_io(b, REQ_OP_WRITE, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;
	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, REQ_OP_WRITE, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
		    unlikely(test_bit(B_READING, &b->state)))
			continue;

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		return NULL;

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
	 *		    mutex and wait ourselves.
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		cond_resched();
	}
}

/*
 * Check if we're over the watermark.
 * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times the
 * number of clean buffers, start asynchronous writeback of the dirty buffers.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
		__write_dirty_buffers_async(c, 1, write_list);
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the buffer tree.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct dm_buffer *b, blk_status_t status)
{
	b->read_error = status;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	if (b && b->hold_count == 1)
		buffer_record_stack(b);
#endif
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, REQ_OP_READ, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = blk_status_to_errno(b->read_error);

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);

void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, REQ_OP_READ, read_endio);
			dm_bufio_release(b);

			cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);
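/*
 * Typical read-side usage of the interface above (illustrative sketch only;
 * "client" and "sb_block" are placeholders, not part of this file):
 *
 *	struct dm_buffer *bp;
 *	void *data = dm_bufio_read(client, sb_block, &bp);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);	// read error reported by read_endio
 *	// ... examine the block_size bytes at "data" ...
 *	dm_bufio_release(bp);		// drop the hold count taken by new_read
 *
 * dm_bufio_get() looks the block up without doing I/O and returns NULL if it
 * is not already cached; dm_bufio_prefetch() only starts reads and drops the
 * hold count immediately.
 */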
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned start, unsigned end)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(start >= end);
	BUG_ON(end > b->c->block_size);

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state)) {
		b->dirty_start = start;
		b->dirty_end = end;
		__relink_lru(b, LIST_DIRTY);
	} else {
		if (start < b->dirty_start)
			b->dirty_start = start;
		if (end > b->dirty_end)
			b->dirty_end = end;
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush the hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us. In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
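/*
 * Typical write-side usage of the interface above (illustrative sketch only;
 * "client" and "map_block" are placeholders, not part of this file):
 *
 *	struct dm_buffer *bp;
 *	void *data = dm_bufio_new(client, map_block, &bp);	// no read I/O
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memset(data, 0, dm_bufio_get_block_size(client));
 *	dm_bufio_mark_buffer_dirty(bp);	// queue the whole block for writeback
 *	dm_bufio_release(bp);
 *
 *	// make the new block durable before updating anything that points
 *	// to it:
 *	return dm_bufio_write_dirty_buffers(client);
 */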
/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * Use dm-io to send a discard request to the device.
 */
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
{
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_DISCARD | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = block_to_sector(c, block),
		.count = block_to_sector(c, count),
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
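/*
 * Illustrative use of dm_bufio_issue_discard() together with
 * dm_bufio_forget_buffers() (defined further below); "client",
 * "first_block" and "nr_blocks" are placeholders, not part of this file.
 * After discarding a range, a caller would typically also drop any cached
 * copies so the cache does not keep serving pre-discard data:
 *
 *	int r = dm_bufio_issue_discard(client, first_block, nr_blocks);
 *	if (!r)
 *		dm_bufio_forget_buffers(client, first_block, nr_blocks);
 *
 * dm_bufio_forget_buffers() is only a hint; held or dirty buffers are
 * left alone.
 */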
/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the buffer tree for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		b->dirty_start = 0;
		b->dirty_end = c->block_size;
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done under the bufio lock, so that the
		 * block number change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, REQ_OP_WRITE, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

static void forget_buffer_locked(struct dm_buffer *b)
{
	if (likely(!b->hold_count) && likely(!b->state)) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}
}

/*
 * Free the given buffer.
 *
 * This is just a hint; if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b)
		forget_buffer_locked(b);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_forget);

void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
{
	struct dm_buffer *b;
	sector_t end_block = block + n_blocks;

	while (block < end_block) {
		dm_bufio_lock(c);

		b = __find_next(c, block);
		if (b) {
			block = b->block + 1;
			forget_buffer_locked(b);
		}

		dm_bufio_unlock(c);

		if (!b)
			break;
	}
}
EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	sector_t s = bdev_nr_sectors(c->bdev);
	if (s >= c->start)
		s -= c->start;
	else
		s = 0;
	if (likely(c->sectors_per_block_bits >= 0))
		s >>= c->sectors_per_block_bits;
	else
		sector_div(s, c->block_size >> SECTOR_SHIFT);
	return s;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
{
	return c->dm_io;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;
	bool warned = false;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list) {
			WARN_ON(!warned);
			warned = true;
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			stack_trace_print(b->stack_entries, b->stack_len, 1);
			/* mark unclaimed to avoid BUG_ON below */
			b->hold_count = 0;
#endif
		}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);
#endif

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

/*
 * We may not be able to evict this buffer if IO pending or the client
 * is still using it. Caller is expected to know buffer is too old.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to different bufio client.
 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
	if (!(gfp & __GFP_FS) ||
	    (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return false;
	}

	if (b->hold_count)
		return false;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return true;
}

static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
	if (likely(c->sectors_per_block_bits >= 0))
		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
	else
		retain_bytes /= c->block_size;
	return retain_bytes;
}

static void __scan(struct dm_bufio_client *c)
{
	int l;
	struct dm_buffer *b, *tmp;
	unsigned long freed = 0;
	unsigned long count = c->n_buffers[LIST_CLEAN] +
			      c->n_buffers[LIST_DIRTY];
	unsigned long retain_target = get_retain_buffers(c);

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (count - freed <= retain_target)
				atomic_long_set(&c->need_shrink, 0);
			if (!atomic_long_read(&c->need_shrink))
				return;
			if (__try_evict_buffer(b, GFP_KERNEL)) {
				atomic_long_dec(&c->need_shrink);
				freed++;
			}
			cond_resched();
		}
	}
}

static void shrink_work(struct work_struct *w)
{
	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);

	dm_bufio_lock(c);
	__scan(c);
	dm_bufio_unlock(c);
}

static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
	queue_work(dm_bufio_wq, &c->shrink_work);

	return sc->nr_to_scan;
}

static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
	unsigned long count =
		READ_ONCE(c->n_buffers[LIST_CLEAN]) +
		READ_ONCE(c->n_buffers[LIST_DIRTY]);
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);

	if (unlikely(count < retain_target))
		count = 0;
	else
		count -= retain_target;

	if (unlikely(count < queued_for_cleanup))
		count = 0;
	else
		count -= queued_for_cleanup;

	return count;
}

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *),
					       unsigned int flags)
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;
	char slab_name[27];

	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
		DMERR("%s: block size not specified or is not multiple of 512b", __func__);
		r = -EINVAL;
		goto bad_client;
	}

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	if (is_power_of_2(block_size))
		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
	else
		c->sectors_per_block_bits = -1;

	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
		c->no_sleep = true;
		static_branch_inc(&no_sleep_enabled);
	}

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	spin_lock_init(&c->spinlock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	if (block_size <= KMALLOC_MAX_SIZE &&
	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
		unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
		snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
						  SLAB_RECLAIM_ACCOUNT, NULL);
		if (!c->slab_cache) {
			r = -ENOMEM;
			goto bad;
		}
	}
	if (aux_size)
		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
	else
		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
					   0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (!c->slab_buffer) {
		r = -ENOMEM;
		goto bad;
	}

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad;
		}
		__free_buffer_wake(b);
	}

	INIT_WORK(&c->shrink_work, shrink_work);
	atomic_long_set(&c->need_shrink, 0);

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
			      MAJOR(bdev->bd_dev),
			      MINOR(bdev->bd_dev));
	if (r)
		goto bad;

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	return c;

bad:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	mutex_destroy(&c->lock);
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);

/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);
	flush_work(&c->shrink_work);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
	mutex_destroy(&c->lock);
	if (c->no_sleep)
		static_branch_dec(&no_sleep_enabled);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
{
	c->start = start;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);

static unsigned get_max_age_hz(void)
{
	unsigned max_age = READ_ONCE(dm_bufio_max_age);

	if (max_age > UINT_MAX / HZ)
		max_age = UINT_MAX / HZ;

	return max_age * HZ;
}

static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
	return time_after_eq(jiffies, b->last_accessed + age_hz);
}

static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
	struct dm_buffer *b, *tmp;
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long count;
	LIST_HEAD(write_list);

	dm_bufio_lock(c);

	__check_watermark(c, &write_list);
	if (unlikely(!list_empty(&write_list))) {
		dm_bufio_unlock(c);
		__flush_write_list(&write_list);
		dm_bufio_lock(c);
	}

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
		if (count <= retain_target)
			break;

		if (!older_than(b, age_hz))
			break;

		if (__try_evict_buffer(b, 0))
			count--;

		cond_resched();
	}

	dm_bufio_unlock(c);
}

static void do_global_cleanup(struct work_struct *w)
{
	struct dm_bufio_client *locked_client = NULL;
	struct dm_bufio_client *current_client;
	struct dm_buffer *b;
	unsigned spinlock_hold_count;
	unsigned long threshold = dm_bufio_cache_size -
				  dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
	unsigned long loops = global_num * 2;

	mutex_lock(&dm_bufio_clients_lock);

	while (1) {
		cond_resched();

		spin_lock(&global_spinlock);
		if (unlikely(dm_bufio_current_allocated <= threshold))
			break;

		spinlock_hold_count = 0;
get_next:
		if (!loops--)
			break;
		if (unlikely(list_empty(&global_queue)))
			break;
		b = list_entry(global_queue.prev, struct dm_buffer, global_list);

		if (b->accessed) {
			b->accessed = 0;
			list_move(&b->global_list, &global_queue);
			if (likely(++spinlock_hold_count < 16))
				goto get_next;
			spin_unlock(&global_spinlock);
			continue;
		}

		current_client = b->c;
		if (unlikely(current_client != locked_client)) {
			if (locked_client)
				dm_bufio_unlock(locked_client);

			if (!dm_bufio_trylock(current_client)) {
				spin_unlock(&global_spinlock);
				dm_bufio_lock(current_client);
				locked_client = current_client;
				continue;
			}

			locked_client = current_client;
		}

		spin_unlock(&global_spinlock);

		if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
			spin_lock(&global_spinlock);
			list_move(&b->global_list, &global_queue);
			spin_unlock(&global_spinlock);
		}
	}

	spin_unlock(&global_spinlock);

	if (locked_client)
		dm_bufio_unlock(locked_client);

	mutex_unlock(&dm_bufio_clients_lock);
}

static void cleanup_old_buffers(void)
{
	unsigned long max_age_hz = get_max_age_hz();
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);

	__cache_size_refresh();

	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
		__evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
}

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;

	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
	destroy_workqueue(dm_bufio_wq);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	BUG_ON(bug);
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");
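/*
 * Client lifecycle sketch for the exported interface above (illustrative
 * only; "md_dev" and "superblock_sector" are placeholders, not part of
 * this file):
 *
 *	struct dm_bufio_client *client;
 *
 *	client = dm_bufio_client_create(md_dev->bdev, 4096, 1, 0,
 *					NULL, NULL, 0);
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	dm_bufio_set_sector_offset(client, superblock_sector);
 *
 *	// ... dm_bufio_read()/dm_bufio_new() ... dm_bufio_release() ...
 *	// all buffers must be released before the client is destroyed:
 *	dm_bufio_client_destroy(client);
 */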