// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>
#include <linux/delay.h>
#include "dm-io-tracker.h"

#define DM_MSG_PREFIX "writecache"

#define HIGH_WATERMARK			50
#define LOW_WATERMARK			45
#define MAX_WRITEBACK_JOBS		0
#define ENDIO_LATENCY			16
#define WRITEBACK_LATENCY		64
#define AUTOCOMMIT_BLOCKS_SSD		65536
#define AUTOCOMMIT_BLOCKS_PMEM		64
#define AUTOCOMMIT_MSEC			1000
#define MAX_AGE_DIV			16
#define MAX_AGE_UNSPECIFIED		-1UL
#define PAUSE_WRITEBACK			(HZ * 3)

#define BITMAP_GRANULARITY	65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY	PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src)					\
do {								\
	typeof(dest) uniq = (src);				\
	memcpy_flushcache(&(dest), &uniq, sizeof(dest));	\
} while (0)
#else
#define pmem_assign(dest, src)	((dest) = (src))
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC		0x23489321
#define MEMORY_SUPERBLOCK_VERSION	1

struct wc_memory_entry {
	__le64 original_sector;
	__le64 seq_count;
};

struct wc_memory_superblock {
	union {
		struct {
			__le32 magic;
			__le32 version;
			__le32 block_size;
			__le32 pad;
			__le64 n_blocks;
			__le64 seq_count;
		};
		__le64 padding[8];
	};
	struct wc_memory_entry entries[];
};

struct wc_entry {
	struct rb_node rb_node;
	struct list_head lru;
	unsigned short wc_list_contiguous;
	bool write_in_progress
#if BITS_PER_LONG == 64
		:1
#endif
	;
	unsigned long index
#if BITS_PER_LONG == 64
		:47
#endif
	;
	unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	uint64_t original_sector;
	uint64_t seq_count;
#endif
};

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc)			((wc)->pmem_mode)
#define WC_MODE_FUA(wc)				((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc)			false
#define WC_MODE_FUA(wc)				false
#endif
#define WC_MODE_SORT_FREELIST(wc)		(!WC_MODE_PMEM(wc))

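/*
 * Note: when the kernel lacks pmem/DAX support, WC_MODE_PMEM() is the
 * compile-time constant false, so all pmem-only paths below are eliminated
 * by the compiler.  The freelist is kept sorted (as an rbtree keyed by
 * entry address, which matches cache-device order) only in ssd mode, where
 * allocating consecutive cache blocks lets writecache_map() cover a
 * multi-block write with a single remapped bio.
 */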

struct dm_writecache {
	struct mutex lock;
	struct list_head lru;
	union {
		struct list_head freelist;
		struct {
			struct rb_root freetree;
			struct wc_entry *current_free;
		};
	};
	struct rb_root tree;

	size_t freelist_size;
	size_t writeback_size;
	size_t freelist_high_watermark;
	size_t freelist_low_watermark;
	unsigned long max_age;
	unsigned long pause;

	unsigned uncommitted_blocks;
	unsigned autocommit_blocks;
	unsigned max_writeback_jobs;

	int error;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	struct wait_queue_head freelist_wait;

	struct timer_list max_age_timer;

	atomic_t bio_in_progress[2];
	struct wait_queue_head bio_in_progress_wait[2];

	struct dm_target *ti;
	struct dm_dev *dev;
	struct dm_dev *ssd_dev;
	sector_t start_sector;
	void *memory_map;
	uint64_t memory_map_size;
	size_t metadata_sectors;
	size_t n_blocks;
	uint64_t seq_count;
	sector_t data_device_sectors;
	void *block_start;
	struct wc_entry *entries;
	unsigned block_size;
	unsigned char block_size_bits;

	bool pmem_mode:1;
	bool writeback_fua:1;

	bool overwrote_committed:1;
	bool memory_vmapped:1;

	bool start_sector_set:1;
	bool high_wm_percent_set:1;
	bool low_wm_percent_set:1;
	bool max_writeback_jobs_set:1;
	bool autocommit_blocks_set:1;
	bool autocommit_time_set:1;
	bool max_age_set:1;
	bool writeback_fua_set:1;
	bool flush_on_suspend:1;
	bool cleaner:1;
	bool cleaner_set:1;
	bool metadata_only:1;
	bool pause_set:1;

	unsigned high_wm_percent_value;
	unsigned low_wm_percent_value;
	unsigned autocommit_time_value;
	unsigned max_age_value;
	unsigned pause_value;

	unsigned writeback_all;
	struct workqueue_struct *writeback_wq;
	struct work_struct writeback_work;
	struct work_struct flush_work;

	struct dm_io_tracker iot;

	struct dm_io_client *dm_io;

	raw_spinlock_t endio_list_lock;
	struct list_head endio_list;
	struct task_struct *endio_thread;

	struct task_struct *flush_thread;
	struct bio_list flush_list;

	struct dm_kcopyd_client *dm_kcopyd;
	unsigned long *dirty_bitmap;
	unsigned dirty_bitmap_size;

	struct bio_set bio_set;
	mempool_t copy_pool;
};

#define WB_LIST_INLINE		16

struct writeback_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry **wc_list;
	unsigned wc_list_n;
	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
	struct bio bio;
};

struct copy_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry *e;
	unsigned n_entries;
	int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
					"A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
	mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
	mutex_unlock(&wc->lock);
}

#ifdef DM_WRITECACHE_HAS_PMEM
static int persistent_memory_claim(struct dm_writecache *wc)
{
	int r;
	loff_t s;
	long p, da;
	pfn_t pfn;
	int id;
	struct page **pages;
	sector_t offset;

	wc->memory_vmapped = false;

	s = wc->memory_map_size;
	p = s >> PAGE_SHIFT;
	if (!p) {
		r = -EINVAL;
		goto err1;
	}
	if (p != s >> PAGE_SHIFT) {
		r = -EOVERFLOW;
		goto err1;
	}

	offset = get_start_sect(wc->ssd_dev->bdev);
	if (offset & (PAGE_SIZE / 512 - 1)) {
		r = -EINVAL;
		goto err1;
	}
	offset >>= PAGE_SHIFT - 9;

	id = dax_read_lock();

	da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
	if (da < 0) {
		wc->memory_map = NULL;
		r = da;
		goto err2;
	}
	if (!pfn_t_has_page(pfn)) {
		wc->memory_map = NULL;
		r = -EOPNOTSUPP;
		goto err2;
	}
	if (da != p) {
		long i;
		wc->memory_map = NULL;
		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
		if (!pages) {
			r = -ENOMEM;
			goto err2;
		}
		i = 0;
		do {
			long daa;
			daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
						NULL, &pfn);
			if (daa <= 0) {
				r = daa ? daa : -EINVAL;
				goto err3;
			}
			if (!pfn_t_has_page(pfn)) {
				r = -EOPNOTSUPP;
				goto err3;
			}
			while (daa-- && i < p) {
				pages[i++] = pfn_t_to_page(pfn);
				pfn.val++;
				if (!(i & 15))
					cond_resched();
			}
		} while (i < p);
		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
		if (!wc->memory_map) {
			r = -ENOMEM;
			goto err3;
		}
		kvfree(pages);
		wc->memory_vmapped = true;
	}

	dax_read_unlock(id);

	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

	return 0;
err3:
	kvfree(pages);
err2:
	dax_read_unlock(id);
err1:
	return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
	return -EOPNOTSUPP;
}
#endif

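/*
 * Note on persistent_memory_claim() above: dax_direct_access() may map
 * fewer pages than requested when the region is not physically contiguous.
 * In that case the pages are gathered range by range and stitched together
 * with vmap(); memory_vmapped records this so that the release and
 * cache-flush helpers below can treat the two cases correctly.
 */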

static void persistent_memory_release(struct dm_writecache *wc)
{
	if (wc->memory_vmapped)
		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
	return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		invalidate_kernel_vmap_range(ptr, size);
}

static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
	return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
	return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
	return wc->start_sector + wc->metadata_sectors +
		((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->original_sector;
#else
	return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->seq_count;
#else
	return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->seq_count = -1;
#endif
	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
					    uint64_t original_sector, uint64_t seq_count)
{
	struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->original_sector = original_sector;
	e->seq_count = seq_count;
#endif
	me.original_sector = cpu_to_le64(original_sector);
	me.seq_count = cpu_to_le64(seq_count);
	pmem_assign(*memory_entry(wc, e), me);
}

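/*
 * With DM_WRITECACHE_HANDLE_HARDWARE_ERRORS, each wc_entry shadows
 * original_sector and seq_count in normal RAM, so lookups never have to
 * read from persistent memory that might take a machine check.  The pmem
 * copy is only written here, and read once at resume via copy_mc_to_kernel().
 */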

#define writecache_error(wc, err, msg, arg...)			\
do {								\
	if (!cmpxchg(&(wc)->error, 0, err))			\
		DMERR(msg, ##arg);				\
	wake_up(&(wc)->freelist_wait);				\
} while (0)

#define writecache_has_error(wc)	(unlikely(READ_ONCE((wc)->error)))

static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
	if (!WC_MODE_PMEM(wc))
		memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
	if (!WC_MODE_PMEM(wc))
		__set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
			  wc->dirty_bitmap);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
	struct dm_writecache *wc;
	struct completion c;
	atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
	struct io_notify *endio = context;

	if (unlikely(error != 0))
		writecache_error(endio->wc, -EIO, "error writing metadata");
	BUG_ON(atomic_read(&endio->count) <= 0);
	if (atomic_dec_and_test(&endio->count))
		complete(&endio->c);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
	wait_event(wc->bio_in_progress_wait[direction],
		   !atomic_read(&wc->bio_in_progress[direction]));
}

static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	struct dm_io_region region;
	struct dm_io_request req;
	struct io_notify endio = {
		wc,
		COMPLETION_INITIALIZER_ONSTACK(endio.c),
		ATOMIC_INIT(1),
	};
	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
	unsigned i = 0;

	while (1) {
		unsigned j;
		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
		if (unlikely(i == bitmap_bits))
			break;
		j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

		region.bdev = wc->ssd_dev->bdev;
		region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
		region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

		if (unlikely(region.sector >= wc->metadata_sectors))
			break;
		if (unlikely(region.sector + region.count > wc->metadata_sectors))
			region.count = wc->metadata_sectors - region.sector;

		region.sector += wc->start_sector;
		atomic_inc(&endio.count);
		req.bi_op = REQ_OP_WRITE;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
		req.client = wc->dm_io;
		req.notify.fn = writecache_notify_io;
		req.notify.context = &endio;

		/* writing via async dm-io (implied by notify.fn above) won't return an error */
		(void) dm_io(&req, 1, &region, NULL);
		i = j;
	}

	writecache_notify_io(0, &endio);
	wait_for_completion_io(&endio.c);

	if (wait_for_ios)
		writecache_wait_for_ios(wc, WRITE);

	writecache_disk_flush(wc, wc->ssd_dev);

	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

static void ssd_commit_superblock(struct dm_writecache *wc)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = 0;
	region.count = max(4096U, wc->block_size) >> SECTOR_SHIFT;

	if (unlikely(region.sector + region.count > wc->metadata_sectors))
		region.count = wc->metadata_sectors - region.sector;

	region.sector += wc->start_sector;

	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_SYNC | REQ_FUA;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;
	req.notify.context = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error writing superblock");
}

static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	if (WC_MODE_PMEM(wc))
		pmem_wmb();
	else
		ssd_commit_flushed(wc, wait_for_ios);
}

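/*
 * Commit protocol: in pmem mode a single pmem_wmb() suffices, since the
 * data was already flushed cacheline by cacheline.  In ssd mode the dirty
 * bitmap selects which BITMAP_GRANULARITY-sized chunks of the in-core
 * metadata copy get written back through dm-io; the writes are followed by
 * a flush of the cache device, and the bitmap is cleared only afterwards.
 */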

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = dev->bdev;
	region.sector = 0;
	region.count = 0;
	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_PREFLUSH;
	req.mem.type = DM_IO_KMEM;
	req.mem.ptr.addr = NULL;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error flushing metadata: %d", r);
}

#define WFE_RETURN_FOLLOWING	1
#define WFE_LOWEST_SEQ		2

static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
					      uint64_t block, int flags)
{
	struct wc_entry *e;
	struct rb_node *node = wc->tree.rb_node;

	if (unlikely(!node))
		return NULL;

	while (1) {
		e = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e) == block)
			break;

		node = (read_original_sector(wc, e) >= block ?
			e->rb_node.rb_left : e->rb_node.rb_right);
		if (unlikely(!node)) {
			if (!(flags & WFE_RETURN_FOLLOWING))
				return NULL;
			if (read_original_sector(wc, e) >= block) {
				return e;
			} else {
				node = rb_next(&e->rb_node);
				if (unlikely(!node))
					return NULL;
				e = container_of(node, struct wc_entry, rb_node);
				return e;
			}
		}
	}

	while (1) {
		struct wc_entry *e2;
		if (flags & WFE_LOWEST_SEQ)
			node = rb_prev(&e->rb_node);
		else
			node = rb_next(&e->rb_node);
		if (unlikely(!node))
			return e;
		e2 = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e2) != block)
			return e;
		e = e2;
	}
}

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
	struct wc_entry *e;
	struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

	while (*node) {
		e = container_of(*node, struct wc_entry, rb_node);
		parent = &e->rb_node;
		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, &wc->tree);
	list_add(&ins->lru, &wc->lru);
	ins->age = jiffies;
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
	list_del(&e->lru);
	rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
		if (unlikely(!*node))
			wc->current_free = e;
		while (*node) {
			parent = *node;
			if (&e->rb_node < *node)
				node = &parent->rb_left;
			else
				node = &parent->rb_right;
		}
		rb_link_node(&e->rb_node, parent, node);
		rb_insert_color(&e->rb_node, &wc->freetree);
	} else {
		list_add_tail(&e->lru, &wc->freelist);
	}
	wc->freelist_size++;
}

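/*
 * The watermarks are stored as counts of free entries, not percentages:
 * writeback is kicked when free + under-writeback entries drop to
 * freelist_high_watermark (i.e. the configured fraction of the cache is
 * dirty), and the writeback loop keeps selecting blocks until the count
 * rises back above freelist_low_watermark.
 */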

static inline void writecache_verify_watermark(struct dm_writecache *wc)
{
	if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
		queue_work(wc->writeback_wq, &wc->writeback_work);
}

static void writecache_max_age_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, max_age_timer);

	if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
		queue_work(wc->writeback_wq, &wc->writeback_work);
		mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
	}
}

static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
	struct wc_entry *e;

	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node *next;
		if (unlikely(!wc->current_free))
			return NULL;
		e = wc->current_free;
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		next = rb_next(&e->rb_node);
		rb_erase(&e->rb_node, &wc->freetree);
		if (unlikely(!next))
			next = rb_first(&wc->freetree);
		wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
	} else {
		if (unlikely(list_empty(&wc->freelist)))
			return NULL;
		e = container_of(wc->freelist.next, struct wc_entry, lru);
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		list_del(&e->lru);
	}
	wc->freelist_size--;

	writecache_verify_watermark(wc);

	return e;
}

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_unlink(wc, e);
	writecache_add_to_freelist(wc, e);
	clear_seq_count(wc, e);
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (unlikely(waitqueue_active(&wc->freelist_wait)))
		wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
	wc_unlock(wc);
	io_schedule();
	finish_wait(&wc->freelist_wait, &wait);
	wc_lock(wc);
}

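/*
 * Note: writecache_wait_on_freelist() drops wc->lock while sleeping, so
 * any state examined before calling it must be revalidated afterwards;
 * callers therefore loop (see writecache_map() and writecache_writeback()).
 */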

static void writecache_poison_lists(struct dm_writecache *wc)
{
	/*
	 * Catch incorrect access to these values while the device is suspended.
	 */
	memset(&wc->tree, -1, sizeof wc->tree);
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
	wc->freelist.next = LIST_POISON1;
	wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (WC_MODE_PMEM(wc))
		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
	return read_seq_count(wc, e) < wc->seq_count;
}

static void writecache_flush(struct dm_writecache *wc)
{
	struct wc_entry *e, *e2;
	bool need_flush_after_free;

	wc->uncommitted_blocks = 0;
	del_timer(&wc->autocommit_timer);

	if (list_empty(&wc->lru))
		return;

	e = container_of(wc->lru.next, struct wc_entry, lru);
	if (writecache_entry_is_committed(wc, e)) {
		if (wc->overwrote_committed) {
			writecache_wait_for_ios(wc, WRITE);
			writecache_disk_flush(wc, wc->ssd_dev);
			wc->overwrote_committed = false;
		}
		return;
	}
	while (1) {
		writecache_flush_entry(wc, e);
		if (unlikely(e->lru.next == &wc->lru))
			break;
		e2 = container_of(e->lru.next, struct wc_entry, lru);
		if (writecache_entry_is_committed(wc, e2))
			break;
		e = e2;
		cond_resched();
	}
	writecache_commit_flushed(wc, true);

	wc->seq_count++;
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
	if (WC_MODE_PMEM(wc))
		writecache_commit_flushed(wc, false);
	else
		ssd_commit_superblock(wc);

	wc->overwrote_committed = false;

	need_flush_after_free = false;
	while (1) {
		/* Free another committed entry with lower seq-count */
		struct rb_node *rb_node = rb_prev(&e->rb_node);

		if (rb_node) {
			e2 = container_of(rb_node, struct wc_entry, rb_node);
			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
			    likely(!e2->write_in_progress)) {
				writecache_free_entry(wc, e2);
				need_flush_after_free = true;
			}
		}
		if (unlikely(e->lru.prev == &wc->lru))
			break;
		e = container_of(e->lru.prev, struct wc_entry, lru);
		cond_resched();
	}

	if (need_flush_after_free)
		writecache_commit_flushed(wc, false);
}

static void writecache_flush_work(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

	wc_lock(wc);
	writecache_flush(wc);
	wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
	if (!writecache_has_error(wc))
		queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
	if (!timer_pending(&wc->autocommit_timer))
		mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

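/*
 * writecache_flush() commits a batch: flush every uncommitted entry (they
 * sit at the head of the LRU), commit, then increment seq_count and persist
 * it.  An entry counts as committed exactly when its seq_count is below the
 * superblock's, so the single seq_count update commits the whole batch
 * atomically.  Finally, entries superseded by a newer copy of the same
 * sector are returned to the freelist.
 */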

static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
	struct wc_entry *e;
	bool discarded_something = false;

	e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
	if (unlikely(!e))
		return;

	while (read_original_sector(wc, e) < end) {
		struct rb_node *node = rb_next(&e->rb_node);

		if (likely(!e->write_in_progress)) {
			if (!discarded_something) {
				if (!WC_MODE_PMEM(wc)) {
					writecache_wait_for_ios(wc, READ);
					writecache_wait_for_ios(wc, WRITE);
				}
				discarded_something = true;
			}
			if (!writecache_entry_is_committed(wc, e))
				wc->uncommitted_blocks--;
			writecache_free_entry(wc, e);
		}

		if (unlikely(!node))
			break;

		e = container_of(node, struct wc_entry, rb_node);
	}

	if (discarded_something)
		writecache_commit_flushed(wc, false);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
	if (wc->writeback_size) {
		writecache_wait_on_freelist(wc);
		return true;
	}
	return false;
}

static void writecache_suspend(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	bool flush_on_suspend;

	del_timer_sync(&wc->autocommit_timer);
	del_timer_sync(&wc->max_age_timer);

	wc_lock(wc);
	writecache_flush(wc);
	flush_on_suspend = wc->flush_on_suspend;
	if (flush_on_suspend) {
		wc->flush_on_suspend = false;
		wc->writeback_all++;
		queue_work(wc->writeback_wq, &wc->writeback_work);
	}
	wc_unlock(wc);

	drain_workqueue(wc->writeback_wq);

	wc_lock(wc);
	if (flush_on_suspend)
		wc->writeback_all--;
	while (writecache_wait_for_writeback(wc));

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	writecache_poison_lists(wc);

	wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
	size_t b;

	if (wc->entries)
		return 0;
	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
	if (!wc->entries)
		return -ENOMEM;
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		e->index = b;
		e->write_in_progress = false;
		cond_resched();
	}

	return 0;
}

static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
{
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = wc->start_sector;
	region.count = n_sectors;
	req.bi_op = REQ_OP_READ;
	req.bi_op_flags = REQ_SYNC;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	return dm_io(&req, 1, &region, NULL);
}

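/*
 * Suspend flushes the cache and, if a "flush_on_suspend" message was seen,
 * forces a full writeback (writeback_all) before draining the workqueue;
 * it then waits until writeback_size reaches zero, so no writeback is in
 * flight while the device is suspended and the lists are poisoned.
 */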

static void writecache_resume(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	size_t b;
	bool need_flush = false;
	__le64 sb_seq_count;
	int r;

	wc_lock(wc);

	wc->data_device_sectors = bdev_nr_sectors(wc->dev->bdev);

	if (WC_MODE_PMEM(wc)) {
		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
	} else {
		r = writecache_read_metadata(wc, wc->metadata_sectors);
		if (r) {
			size_t sb_entries_offset;
			writecache_error(wc, r, "unable to read metadata: %d", r);
			sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
			memset((char *)wc->memory_map + sb_entries_offset, -1,
			       (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
		}
	}

	wc->tree = RB_ROOT;
	INIT_LIST_HEAD(&wc->lru);
	if (WC_MODE_SORT_FREELIST(wc)) {
		wc->freetree = RB_ROOT;
		wc->current_free = NULL;
	} else {
		INIT_LIST_HEAD(&wc->freelist);
	}
	wc->freelist_size = 0;

	r = copy_mc_to_kernel(&sb_seq_count, &sb(wc)->seq_count,
			      sizeof(uint64_t));
	if (r) {
		writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
		sb_seq_count = cpu_to_le64(0);
	}
	wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		struct wc_memory_entry wme;
		if (writecache_has_error(wc)) {
			e->original_sector = -1;
			e->seq_count = -1;
			continue;
		}
		r = copy_mc_to_kernel(&wme, memory_entry(wc, e),
				      sizeof(struct wc_memory_entry));
		if (r) {
			writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
					 (unsigned long)b, r);
			e->original_sector = -1;
			e->seq_count = -1;
		} else {
			e->original_sector = le64_to_cpu(wme.original_sector);
			e->seq_count = le64_to_cpu(wme.seq_count);
		}
		cond_resched();
	}
#endif
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		if (!writecache_entry_is_committed(wc, e)) {
			if (read_seq_count(wc, e) != -1) {
erase_this:
				clear_seq_count(wc, e);
				need_flush = true;
			}
			writecache_add_to_freelist(wc, e);
		} else {
			struct wc_entry *old;

			old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
			if (!old) {
				writecache_insert_entry(wc, e);
			} else {
				if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
					writecache_error(wc, -EINVAL,
						 "two identical entries, position %llu, sector %llu, sequence %llu",
						 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
						 (unsigned long long)read_seq_count(wc, e));
				}
				if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
					goto erase_this;
				} else {
					writecache_free_entry(wc, old);
					writecache_insert_entry(wc, e);
					need_flush = true;
				}
			}
		}
		cond_resched();
	}

	if (need_flush) {
		writecache_flush_all_metadata(wc);
		writecache_commit_flushed(wc, false);
	}

	writecache_verify_watermark(wc);

	if (wc->max_age != MAX_AGE_UNSPECIFIED)
		mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);

	wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	if (dm_suspended(wc->ti)) {
		wc_unlock(wc);
		return -EBUSY;
	}
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}

	writecache_flush(wc);
	wc->writeback_all++;
	queue_work(wc->writeback_wq, &wc->writeback_work);
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	wc->writeback_all--;
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}
	wc_unlock(wc);

	return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	wc->flush_on_suspend = true;
	wc_unlock(wc);

	return 0;
}

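/*
 * The "flush" message is synchronous: it flushes, raises writeback_all so
 * the writeback path drains the whole cache, waits via flush_workqueue(),
 * and only then reports success or -EIO.  It is refused with -EBUSY while
 * the device is suspended.
 */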

static void activate_cleaner(struct dm_writecache *wc)
{
	wc->flush_on_suspend = true;
	wc->cleaner = true;
	wc->freelist_high_watermark = wc->n_blocks;
	wc->freelist_low_watermark = wc->n_blocks;
}

static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	activate_cleaner(wc);
	if (!dm_suspended(wc->ti))
		writecache_verify_watermark(wc);
	wc_unlock(wc);

	return 0;
}

static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_writecache *wc = ti->private;

	if (!strcasecmp(argv[0], "flush"))
		r = process_flush_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "flush_on_suspend"))
		r = process_flush_on_suspend_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "cleaner"))
		r = process_cleaner_mesg(argc, argv, wc);
	else
		DMERR("unrecognised message received: %s", argv[0]);

	return r;
}

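/*
 * Example (illustrative, with a target named "wc"): these messages are sent
 * from userspace with dmsetup, e.g.
 *
 *	dmsetup message wc 0 flush
 *	dmsetup message wc 0 flush_on_suspend
 *	dmsetup message wc 0 cleaner
 */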

static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
{
	/*
	 * clflushopt performs better with block size 1024, 2048, 4096
	 * non-temporal stores perform better with block size 512
	 *
	 * block size	512	1024	2048	4096
	 * movnti	496 MB/s	642 MB/s	725 MB/s	744 MB/s
	 * clflushopt	373 MB/s	688 MB/s	1.1 GB/s	1.2 GB/s
	 *
	 * We see that movnti performs better for 512-byte blocks, and
	 * clflushopt performs better for 1024-byte and larger blocks. So, we
	 * prefer clflushopt for sizes >= 768.
	 *
	 * NOTE: this happens to be the case now (with dm-writecache's single
	 * threaded model) but re-evaluate this once memcpy_flushcache() is
	 * enabled to use movdir64b which might invalidate this performance
	 * advantage seen with cache-allocating-writes plus flushing.
	 */
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
	    likely(boot_cpu_data.x86_clflush_size == 64) &&
	    likely(size >= 768)) {
		do {
			memcpy((void *)dest, (void *)source, 64);
			clflushopt((void *)dest);
			dest += 64;
			source += 64;
			size -= 64;
		} while (size >= 64);
		return;
	}
#endif
	memcpy_flushcache(dest, source, size);
}

static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
	void *buf;
	unsigned size;
	int rw = bio_data_dir(bio);
	unsigned remaining_size = wc->block_size;

	do {
		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
		buf = bvec_kmap_local(&bv);
		size = bv.bv_len;
		if (unlikely(size > remaining_size))
			size = remaining_size;

		if (rw == READ) {
			int r;
			r = copy_mc_to_kernel(buf, data, size);
			flush_dcache_page(bio_page(bio));
			if (unlikely(r)) {
				writecache_error(wc, r, "hardware memory error when reading data: %d", r);
				bio->bi_status = BLK_STS_IOERR;
			}
		} else {
			flush_dcache_page(bio_page(bio));
			memcpy_flushcache_optimized(data, buf, size);
		}

		kunmap_local(buf);

		data = (char *)data + size;
		remaining_size -= size;
		bio_advance(bio, size);
	} while (unlikely(remaining_size));
}

static int writecache_flush_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct bio *bio;

		wc_lock(wc);
		bio = bio_list_pop(&wc->flush_list);
		if (!bio) {
			set_current_state(TASK_INTERRUPTIBLE);
			wc_unlock(wc);

			if (unlikely(kthread_should_stop())) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}

		if (bio_op(bio) == REQ_OP_DISCARD) {
			writecache_discard(wc, bio->bi_iter.bi_sector,
					   bio_end_sector(bio));
			wc_unlock(wc);
			bio_set_dev(bio, wc->dev->bdev);
			submit_bio_noacct(bio);
		} else {
			writecache_flush(wc);
			wc_unlock(wc);
			if (writecache_has_error(wc))
				bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		}
	}

	return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
	if (bio_list_empty(&wc->flush_list))
		wake_up_process(wc->flush_thread);
	bio_list_add(&wc->flush_list, bio);
}

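/*
 * In ssd mode, REQ_PREFLUSH and REQ_OP_DISCARD bios are queued to the flush
 * thread above rather than handled in writecache_map(), because they require
 * issuing and waiting for I/O; in pmem mode a flush is just memory writes
 * and is done inline.
 */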

static int writecache_map(struct dm_target *ti, struct bio *bio)
{
	struct wc_entry *e;
	struct dm_writecache *wc = ti->private;

	bio->bi_private = NULL;

	wc_lock(wc);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_flush(wc);
			if (writecache_has_error(wc))
				goto unlock_error;
			if (unlikely(wc->cleaner) || unlikely(wc->metadata_only))
				goto unlock_remap_origin;
			goto unlock_submit;
		} else {
			if (dm_bio_get_target_bio_nr(bio))
				goto unlock_remap_origin;
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
				(wc->block_size / 512 - 1)) != 0)) {
		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio->bi_iter.bi_size, wc->block_size);
		goto unlock_error;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
			goto unlock_remap_origin;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	if (bio_data_dir(bio) == READ) {
read_next_block:
		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
				if (bio->bi_iter.bi_size)
					goto read_next_block;
				goto unlock_submit;
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (!writecache_entry_is_committed(wc, e))
					writecache_wait_for_ios(wc, WRITE);
				goto unlock_remap;
			}
		} else {
			if (e) {
				sector_t next_boundary =
					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
					dm_accept_partial_bio(bio, next_boundary);
				}
			}
			goto unlock_remap_origin;
		}
	} else {
		do {
			bool found_entry = false;
			bool search_used = false;
			if (writecache_has_error(wc))
				goto unlock_error;
			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
			if (e) {
				if (!writecache_entry_is_committed(wc, e)) {
					search_used = true;
					goto bio_copy;
				}
				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
					wc->overwrote_committed = true;
					search_used = true;
					goto bio_copy;
				}
				found_entry = true;
			} else {
				if (unlikely(wc->cleaner) ||
				    (wc->metadata_only && !(bio->bi_opf & REQ_META)))
					goto direct_write;
			}
			e = writecache_pop_from_freelist(wc, (sector_t)-1);
			if (unlikely(!e)) {
				if (!WC_MODE_PMEM(wc) && !found_entry) {
direct_write:
					e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
					if (e) {
						sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
						BUG_ON(!next_boundary);
						if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
							dm_accept_partial_bio(bio, next_boundary);
						}
					}
					goto unlock_remap_origin;
				}
				writecache_wait_on_freelist(wc);
				continue;
			}
			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
			writecache_insert_entry(wc, e);
			wc->uncommitted_blocks++;
bio_copy:
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
			} else {
				unsigned bio_size = wc->block_size;
				sector_t start_cache_sec = cache_sector(wc, e);
				sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);

				while (bio_size < bio->bi_iter.bi_size) {
					if (!search_used) {
						struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
						if (!f)
							break;
						write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
										(bio_size >> SECTOR_SHIFT), wc->seq_count);
						writecache_insert_entry(wc, f);
						wc->uncommitted_blocks++;
					} else {
						struct wc_entry *f;
						struct rb_node *next = rb_next(&e->rb_node);
						if (!next)
							break;
						f = container_of(next, struct wc_entry, rb_node);
						if (f != e + 1)
							break;
						if (read_original_sector(wc, f) !=
						    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
							break;
						if (unlikely(f->write_in_progress))
							break;
						if (writecache_entry_is_committed(wc, f))
							wc->overwrote_committed = true;
						e = f;
					}
					bio_size += wc->block_size;
					current_cache_sec += wc->block_size >> SECTOR_SHIFT;
				}

				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = start_cache_sec;
				dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);

				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
					wc->uncommitted_blocks = 0;
					queue_work(wc->writeback_wq, &wc->flush_work);
				} else {
					writecache_schedule_autocommit(wc);
				}
				goto unlock_remap;
			}
		} while (bio->bi_iter.bi_size);

		if (unlikely(bio->bi_opf & REQ_FUA ||
			     wc->uncommitted_blocks >= wc->autocommit_blocks))
			writecache_flush(wc);
		else
			writecache_schedule_autocommit(wc);
		goto unlock_submit;
	}

unlock_remap_origin:
	if (likely(wc->pause != 0)) {
		if (bio_op(bio) == REQ_OP_WRITE) {
			dm_iot_io_begin(&wc->iot, 1);
			bio->bi_private = (void *)2;
		}
	}
	bio_set_dev(bio, wc->dev->bdev);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_remap:
	/* make sure that writecache_end_io decrements bio_in_progress: */
	bio->bi_private = (void *)1;
	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_submit:
	wc_unlock(wc);
	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;

unlock_return:
	wc_unlock(wc);
	return DM_MAPIO_SUBMITTED;

unlock_error:
	wc_unlock(wc);
	bio_io_error(bio);
	return DM_MAPIO_SUBMITTED;
}

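/*
 * bio->bi_private doubles as a tag for writecache_end_io(): (void *)1 marks
 * bios remapped to the cache device, counted in bio_in_progress[] so that
 * flush and discard can wait for them; (void *)2 marks writes remapped to
 * the origin while pause-based throttling is active, accounted by the
 * dm-io-tracker.
 */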

static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
	struct dm_writecache *wc = ti->private;

	if (bio->bi_private == (void *)1) {
		int dir = bio_data_dir(bio);
		if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
			if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
				wake_up(&wc->bio_in_progress_wait[dir]);
	} else if (bio->bi_private == (void *)2) {
		dm_iot_io_end(&wc->iot, 1);
	}
	return 0;
}

static int writecache_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn, void *data)
{
	struct dm_writecache *wc = ti->private;

	return fn(ti, wc->dev, 0, ti->len, data);
}

static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_writecache *wc = ti->private;

	if (limits->logical_block_size < wc->block_size)
		limits->logical_block_size = wc->block_size;

	if (limits->physical_block_size < wc->block_size)
		limits->physical_block_size = wc->block_size;

	if (limits->io_min < wc->block_size)
		limits->io_min = wc->block_size;
}


static void writecache_writeback_endio(struct bio *bio)
{
	struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
	struct dm_writecache *wc = wb->wc;
	unsigned long flags;

	raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&wb->endio_entry, &wc->endio_list);
	raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
}

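/*
 * writecache_writeback_endio() above and writecache_copy_endio() below only
 * queue the finished unit on endio_list and wake the endio thread; freeing
 * entries and committing metadata happen in writecache_endio_thread().  A
 * raw spinlock is used here, presumably because these callbacks may run in
 * interrupt context.
 */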

static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
{
	struct copy_struct *c = ptr;
	struct dm_writecache *wc = c->wc;

	c->error = likely(!(read_err | write_err)) ? 0 : -EIO;

	raw_spin_lock_irq(&wc->endio_list_lock);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&c->endio_entry, &wc->endio_list);
	raw_spin_unlock_irq(&wc->endio_list_lock);
}

static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
{
	unsigned i;
	struct writeback_struct *wb;
	struct wc_entry *e;
	unsigned long n_walked = 0;

	do {
		wb = list_entry(list->next, struct writeback_struct, endio_entry);
		list_del(&wb->endio_entry);

		if (unlikely(wb->bio.bi_status != BLK_STS_OK))
			writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
					"write error %d", wb->bio.bi_status);
		i = 0;
		do {
			e = wb->wc_list[i];
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);
			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			n_walked++;
			if (unlikely(n_walked >= ENDIO_LATENCY)) {
				writecache_commit_flushed(wc, false);
				wc_unlock(wc);
				wc_lock(wc);
				n_walked = 0;
			}
		} while (++i < wb->wc_list_n);

		if (wb->wc_list != wb->wc_list_inline)
			kfree(wb->wc_list);
		bio_put(&wb->bio);
	} while (!list_empty(list));
}

static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
{
	struct copy_struct *c;
	struct wc_entry *e;

	do {
		c = list_entry(list->next, struct copy_struct, endio_entry);
		list_del(&c->endio_entry);

		if (unlikely(c->error))
			writecache_error(wc, c->error, "copy error");

		e = c->e;
		do {
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);

			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			e++;
		} while (--c->n_entries);
		mempool_free(c, &wc->copy_pool);
	} while (!list_empty(list));
}

static int writecache_endio_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct list_head list;

		raw_spin_lock_irq(&wc->endio_list_lock);
		if (!list_empty(&wc->endio_list))
			goto pop_from_list;
		set_current_state(TASK_INTERRUPTIBLE);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		continue;

pop_from_list:
		list = wc->endio_list;
		list.next->prev = list.prev->next = &list;
		INIT_LIST_HEAD(&wc->endio_list);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (!WC_MODE_FUA(wc))
			writecache_disk_flush(wc, wc->dev);

		wc_lock(wc);

		if (WC_MODE_PMEM(wc)) {
			__writecache_endio_pmem(wc, &list);
		} else {
			__writecache_endio_ssd(wc, &list);
			writecache_wait_for_ios(wc, READ);
		}

		writecache_commit_flushed(wc, false);

		wc_unlock(wc);
	}

	return 0;
}

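/*
 * The endio thread batches completions: it splices the whole endio_list out
 * under the lock, flushes the origin device once (unless FUA writeback
 * already persisted the data), frees all entries, and then commits the
 * metadata once for the entire batch.
 */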

static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e)
{
	struct dm_writecache *wc = wb->wc;
	unsigned block_size = wc->block_size;
	void *address = memory_data(wc, e);

	persistent_memory_flush_cache(address, block_size);

	if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
		return true;

	return bio_add_page(&wb->bio, persistent_memory_page(address),
			    block_size, persistent_memory_page_offset(address)) != 0;
}

struct writeback_list {
	struct list_head list;
	size_t size;
};

static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
{
	if (unlikely(wc->max_writeback_jobs)) {
		if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
			wc_lock(wc);
			while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
				writecache_wait_on_freelist(wc);
			wc_unlock(wc);
		}
	}
	cond_resched();
}

static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct bio *bio;
	struct writeback_struct *wb;
	unsigned max_pages;

	while (wbl->size) {
		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		max_pages = e->wc_list_contiguous;

		bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
		wb = container_of(bio, struct writeback_struct, bio);
		wb->wc = wc;
		bio->bi_end_io = writecache_writeback_endio;
		bio_set_dev(bio, wc->dev->bdev);
		bio->bi_iter.bi_sector = read_original_sector(wc, e);
		if (max_pages <= WB_LIST_INLINE ||
		    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
							   GFP_NOIO | __GFP_NORETRY |
							   __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			wb->wc_list = wb->wc_list_inline;
			max_pages = WB_LIST_INLINE;
		}

		BUG_ON(!wc_add_block(wb, e));

		wb->wc_list[0] = e;
		wb->wc_list_n = 1;

		while (wbl->size && wb->wc_list_n < max_pages) {
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			if (read_original_sector(wc, f) !=
			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (!wc_add_block(wb, f))
				break;
			wbl->size--;
			list_del(&f->lru);
			wb->wc_list[wb->wc_list_n++] = f;
			e = f;
		}
		bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
		if (writecache_has_error(wc)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else if (unlikely(!bio_sectors(bio))) {
			bio->bi_status = BLK_STS_OK;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}

		__writeback_throttle(wc, wbl);
	}
}

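/*
 * Pmem writeback builds one bio per run of entries that are contiguous on
 * the origin device, adding the cache blocks' pages to the bio directly.
 * If the wc_list array cannot be allocated, the small inline array is used
 * instead, which only caps how many blocks a single bio may carry.
 */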

static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct dm_io_region from, to;
	struct copy_struct *c;

	while (wbl->size) {
		unsigned n_sectors;

		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);

		from.bdev = wc->ssd_dev->bdev;
		from.sector = cache_sector(wc, e);
		from.count = n_sectors;
		to.bdev = wc->dev->bdev;
		to.sector = read_original_sector(wc, e);
		to.count = n_sectors;

		c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
		c->wc = wc;
		c->e = e;
		c->n_entries = e->wc_list_contiguous;

		while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
			wbl->size--;
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			BUG_ON(f != e + 1);
			list_del(&f->lru);
			e = f;
		}

		if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
			if (to.sector >= wc->data_device_sectors) {
				writecache_copy_endio(0, 0, c);
				continue;
			}
			from.count = to.count = wc->data_device_sectors - to.sector;
		}

		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);

		__writeback_throttle(wc, wbl);
	}
}

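/*
 * Ssd writeback copies each contiguous run with one dm-kcopyd operation;
 * writecache_writeback() sized wc_list_contiguous so that e .. e + n - 1
 * are adjacent both in the cache and on the origin.  Runs extending past
 * the end of a shrunken origin device are truncated, or completed
 * immediately as successful no-ops when fully out of range.
 */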

static void writecache_writeback(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
	struct blk_plug plug;
	struct wc_entry *f, *g, *e = NULL;
	struct rb_node *node, *next_node;
	struct list_head skipped;
	struct writeback_list wbl;
	unsigned long n_walked;

	if (!WC_MODE_PMEM(wc)) {
		/* Wait for any active kcopyd work on behalf of ssd writeback */
		dm_kcopyd_client_flush(wc->dm_kcopyd);
	}

	if (likely(wc->pause != 0)) {
		while (1) {
			unsigned long idle;
			if (unlikely(wc->cleaner) || unlikely(wc->writeback_all) ||
			    unlikely(dm_suspended(wc->ti)))
				break;
			idle = dm_iot_idle_time(&wc->iot);
			if (idle >= wc->pause)
				break;
			idle = wc->pause - idle;
			if (idle > HZ)
				idle = HZ;
			schedule_timeout_idle(idle);
		}
	}

	wc_lock(wc);
restart:
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return;
	}

	if (unlikely(wc->writeback_all)) {
		if (writecache_wait_for_writeback(wc))
			goto restart;
	}

	if (wc->overwrote_committed) {
		writecache_wait_for_ios(wc, WRITE);
	}

	n_walked = 0;
	INIT_LIST_HEAD(&skipped);
	INIT_LIST_HEAD(&wbl.list);
	wbl.size = 0;
	while (!list_empty(&wc->lru) &&
	       (wc->writeback_all ||
		wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
		(jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >=
		 wc->max_age - wc->max_age / MAX_AGE_DIV))) {

		n_walked++;
		if (unlikely(n_walked > WRITEBACK_LATENCY) &&
		    likely(!wc->writeback_all)) {
			if (likely(!dm_suspended(wc->ti)))
				queue_work(wc->writeback_wq, &wc->writeback_work);
			break;
		}

		if (unlikely(wc->writeback_all)) {
			if (unlikely(!e)) {
				writecache_flush(wc);
				e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
			} else
				e = g;
		} else
			e = container_of(wc->lru.prev, struct wc_entry, lru);
		BUG_ON(e->write_in_progress);
		if (unlikely(!writecache_entry_is_committed(wc, e))) {
			writecache_flush(wc);
		}
		node = rb_prev(&e->rb_node);
		if (node) {
			f = container_of(node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, f) ==
				     read_original_sector(wc, e))) {
				BUG_ON(!f->write_in_progress);
				list_move(&e->lru, &skipped);
				cond_resched();
				continue;
			}
		}
		wc->writeback_size++;
		list_move(&e->lru, &wbl.list);
		wbl.size++;
		e->write_in_progress = true;
		e->wc_list_contiguous = 1;

		f = e;

		while (1) {
			next_node = rb_next(&f->rb_node);
			if (unlikely(!next_node))
				break;
			g = container_of(next_node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, g) ==
			    read_original_sector(wc, f))) {
				f = g;
				continue;
			}
			if (read_original_sector(wc, g) !=
			    read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (unlikely(g->write_in_progress))
				break;
			if (unlikely(!writecache_entry_is_committed(wc, g)))
				break;

			if (!WC_MODE_PMEM(wc)) {
				if (g != f + 1)
					break;
			}

			n_walked++;
			//if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
			//	break;

			wc->writeback_size++;
			list_move(&g->lru, &wbl.list);
			wbl.size++;
			g->write_in_progress = true;
			g->wc_list_contiguous = BIO_MAX_VECS;
			f = g;
			e->wc_list_contiguous++;
			if (unlikely(e->wc_list_contiguous == BIO_MAX_VECS)) {
				if (unlikely(wc->writeback_all)) {
					next_node = rb_next(&f->rb_node);
					if (likely(next_node))
						g = container_of(next_node, struct wc_entry, rb_node);
				}
				break;
			}
		}
		cond_resched();
	}

	if (!list_empty(&skipped)) {
		list_splice_tail(&skipped, &wc->lru);
		/*
		 * If we didn't make any progress, we must wait until some
		 * writeback finishes to avoid burning CPU in a loop
		 */
		if (unlikely(!wbl.size))
			writecache_wait_for_writeback(wc);
	}

	wc_unlock(wc);

	blk_start_plug(&plug);

	if (WC_MODE_PMEM(wc))
		__writecache_writeback_pmem(wc, &wbl);
	else
		__writecache_writeback_ssd(wc, &wbl);

	blk_finish_plug(&plug);

	if (unlikely(wc->writeback_all)) {
		wc_lock(wc);
		while (writecache_wait_for_writeback(wc));
		wc_unlock(wc);
	}
}

static int calculate_memory_size(uint64_t device_size, unsigned block_size,
				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
{
	uint64_t n_blocks, offset;
	struct wc_entry e;

	n_blocks = device_size;
	do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));

	while (1) {
		if (!n_blocks)
			return -ENOSPC;
		/* Verify the following entries[n_blocks] won't overflow */
		if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
				 sizeof(struct wc_memory_entry)))
			return -EFBIG;
		offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
		offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
		if (offset + n_blocks * block_size <= device_size)
			break;
		n_blocks--;
	}

	/* check if the bit field overflows */
	e.index = n_blocks;
	if (e.index != n_blocks)
		return -EFBIG;

	if (n_blocks_p)
		*n_blocks_p = n_blocks;
	if (n_metadata_blocks_p)
		*n_metadata_blocks_p = offset >> __ffs(block_size);
	return 0;
}

static int init_memory(struct dm_writecache *wc)
{
	size_t b;
	int r;

	r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
	if (r)
		return r;

	r = writecache_alloc_entries(wc);
	if (r)
		return r;

	for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
		pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
	pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
	pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
	pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));

	for (b = 0; b < wc->n_blocks; b++) {
		write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
		cond_resched();
	}

	writecache_flush_all_metadata(wc);
	writecache_commit_flushed(wc, false);
	pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
	writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
	writecache_commit_flushed(wc, false);

	return 0;
}

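/*
 * On-media layout: the superblock and the entries[] array come first,
 * rounded up to a block boundary, followed by n_blocks data blocks;
 * calculate_memory_size() picks the largest n_blocks that fits.
 * init_memory() writes the magic number last and commits twice, so a crash
 * during initialization leaves the device detectably unformatted.
 */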
static void writecache_dtr(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;

	if (!wc)
		return;

	if (wc->endio_thread)
		kthread_stop(wc->endio_thread);

	if (wc->flush_thread)
		kthread_stop(wc->flush_thread);

	bioset_exit(&wc->bio_set);

	mempool_exit(&wc->copy_pool);

	if (wc->writeback_wq)
		destroy_workqueue(wc->writeback_wq);

	if (wc->dev)
		dm_put_device(ti, wc->dev);

	if (wc->ssd_dev)
		dm_put_device(ti, wc->ssd_dev);

	vfree(wc->entries);

	if (wc->memory_map) {
		if (WC_MODE_PMEM(wc))
			persistent_memory_release(wc);
		else
			vfree(wc->memory_map);
	}

	if (wc->dm_kcopyd)
		dm_kcopyd_client_destroy(wc->dm_kcopyd);

	if (wc->dm_io)
		dm_io_client_destroy(wc->dm_io);

	vfree(wc->dirty_bitmap);

	kfree(wc);
}

static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_writecache *wc;
	struct dm_arg_set as;
	const char *string;
	unsigned opt_params;
	size_t offset, data_size;
	int i, r;
	char dummy;
	int high_wm_percent = HIGH_WATERMARK;
	int low_wm_percent = LOW_WATERMARK;
	uint64_t x;
	struct wc_memory_superblock s;

	static struct dm_arg _args[] = {
		{0, 18, "Invalid number of feature args"},
	};

	as.argc = argc;
	as.argv = argv;

	wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
	if (!wc) {
		ti->error = "Cannot allocate writecache structure";
		r = -ENOMEM;
		goto bad;
	}
	ti->private = wc;
	wc->ti = ti;

	mutex_init(&wc->lock);
	wc->max_age = MAX_AGE_UNSPECIFIED;
	writecache_poison_lists(wc);
	init_waitqueue_head(&wc->freelist_wait);
	timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
	timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0);

	for (i = 0; i < 2; i++) {
		atomic_set(&wc->bio_in_progress[i], 0);
		init_waitqueue_head(&wc->bio_in_progress_wait[i]);
	}

	wc->dm_io = dm_io_client_create();
	if (IS_ERR(wc->dm_io)) {
		r = PTR_ERR(wc->dm_io);
		ti->error = "Unable to allocate dm-io client";
		wc->dm_io = NULL;
		goto bad;
	}

	wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
	if (!wc->writeback_wq) {
		r = -ENOMEM;
		ti->error = "Could not allocate writeback workqueue";
		goto bad;
	}
	INIT_WORK(&wc->writeback_work, writecache_writeback);
	INIT_WORK(&wc->flush_work, writecache_flush_work);

	dm_iot_init(&wc->iot);

	raw_spin_lock_init(&wc->endio_list_lock);
	INIT_LIST_HEAD(&wc->endio_list);
	wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
	if (IS_ERR(wc->endio_thread)) {
		r = PTR_ERR(wc->endio_thread);
		wc->endio_thread = NULL;
		ti->error = "Couldn't spawn endio thread";
		goto bad;
	}
	wake_up_process(wc->endio_thread);

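	/*
	 * The rest of the constructor parses the table line:
	 *   <mode: p|s> <origin dev> <cache dev> <block size>
	 *   [<#feature args> <feature arg>...]
	 * e.g. a hypothetical ssd-mode table could be
	 *   "0 409600 writecache s /dev/vdb /dev/vdc 4096 2 high_watermark 60"
	 * (device names and sizes here are illustrative only).
	 */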
	/*
	 * Parse the mode (pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	if (!strcasecmp(string, "s")) {
		wc->pmem_mode = false;
	} else if (!strcasecmp(string, "p")) {
#ifdef DM_WRITECACHE_HAS_PMEM
		wc->pmem_mode = true;
		wc->writeback_fua = true;
#else
		/*
		 * If the architecture doesn't support persistent memory or
		 * the kernel doesn't support any DAX drivers, this driver can
		 * only be used in SSD-only mode.
		 */
		r = -EOPNOTSUPP;
		ti->error = "Persistent memory or DAX not supported on this system";
		goto bad;
#endif
	} else {
		goto bad_arguments;
	}

	if (WC_MODE_PMEM(wc)) {
		r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
				offsetof(struct writeback_struct, bio),
				BIOSET_NEED_BVECS);
		if (r) {
			ti->error = "Could not allocate bio set";
			goto bad;
		}
	} else {
		wc->pause = PAUSE_WRITEBACK;
		r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
		if (r) {
			ti->error = "Could not allocate mempool";
			goto bad;
		}
	}

	/*
	 * Parse the origin data device
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
	if (r) {
		ti->error = "Origin data device lookup failed";
		goto bad;
	}

	/*
	 * Parse cache data device (be it pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
	if (r) {
		ti->error = "Cache data device lookup failed";
		goto bad;
	}
	wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);

	/*
	 * Parse the cache block size
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
	    wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
	    (wc->block_size & (wc->block_size - 1))) {
		r = -EINVAL;
		ti->error = "Invalid block size";
		goto bad;
	}
	if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
	    wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
		r = -EINVAL;
		ti->error = "Block size is smaller than device logical block size";
		goto bad;
	}
	wc->block_size_bits = __ffs(wc->block_size);

	wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
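	/*
	 * Committing is far cheaper on pmem (a cache flush) than on ssd
	 * (a metadata write plus a disk flush), which is presumably why the
	 * default autocommit block count is much higher in ssd mode.
	 */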
	wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
	wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);

	/*
	 * Parse optional arguments
	 */
	r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (r)
		goto bad;

	while (opt_params) {
		string = dm_shift_arg(&as), opt_params--;
		if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
			unsigned long long start_sector;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
				goto invalid_optional;
			wc->start_sector = start_sector;
			wc->start_sector_set = true;
			if (wc->start_sector != start_sector ||
			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
				goto invalid_optional;
		} else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (high_wm_percent < 0 || high_wm_percent > 100)
				goto invalid_optional;
			wc->high_wm_percent_value = high_wm_percent;
			wc->high_wm_percent_set = true;
		} else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (low_wm_percent < 0 || low_wm_percent > 100)
				goto invalid_optional;
			wc->low_wm_percent_value = low_wm_percent;
			wc->low_wm_percent_set = true;
		} else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
				goto invalid_optional;
			wc->max_writeback_jobs_set = true;
		} else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
				goto invalid_optional;
			wc->autocommit_blocks_set = true;
		} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
			unsigned autocommit_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
				goto invalid_optional;
			if (autocommit_msecs > 3600000)
				goto invalid_optional;
			wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
			wc->autocommit_time_value = autocommit_msecs;
			wc->autocommit_time_set = true;
		} else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
			unsigned max_age_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
				goto invalid_optional;
			if (max_age_msecs > 86400000)
				goto invalid_optional;
			wc->max_age = msecs_to_jiffies(max_age_msecs);
			wc->max_age_set = true;
			wc->max_age_value = max_age_msecs;
		} else if (!strcasecmp(string, "cleaner")) {
			wc->cleaner_set = true;
			wc->cleaner = true;
		} else if (!strcasecmp(string, "fua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = true;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else if (!strcasecmp(string, "nofua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = false;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else if (!strcasecmp(string, "metadata_only")) {
			wc->metadata_only = true;
		} else if (!strcasecmp(string, "pause_writeback") && opt_params >= 1) {
			unsigned pause_msecs;
			if (WC_MODE_PMEM(wc))
				goto invalid_optional;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &pause_msecs, &dummy) != 1)
				goto invalid_optional;
			if (pause_msecs > 60000)
				goto invalid_optional;
			wc->pause = msecs_to_jiffies(pause_msecs);
			wc->pause_set = true;
			wc->pause_value = pause_msecs;
		} else {
invalid_optional:
			r = -EINVAL;
			ti->error = "Invalid optional argument";
			goto bad;
		}
	}

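	/*
	 * high_watermark and low_watermark are percentages of used cache
	 * blocks: writeback kicks in when usage rises above high_watermark
	 * and stops when it drops below low_watermark, so high must not be
	 * less than low.
	 */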
"pause_writeback") && opt_params >= 1) { 2363 unsigned pause_msecs; 2364 if (WC_MODE_PMEM(wc)) 2365 goto invalid_optional; 2366 string = dm_shift_arg(&as), opt_params--; 2367 if (sscanf(string, "%u%c", &pause_msecs, &dummy) != 1) 2368 goto invalid_optional; 2369 if (pause_msecs > 60000) 2370 goto invalid_optional; 2371 wc->pause = msecs_to_jiffies(pause_msecs); 2372 wc->pause_set = true; 2373 wc->pause_value = pause_msecs; 2374 } else { 2375 invalid_optional: 2376 r = -EINVAL; 2377 ti->error = "Invalid optional argument"; 2378 goto bad; 2379 } 2380 } 2381 2382 if (high_wm_percent < low_wm_percent) { 2383 r = -EINVAL; 2384 ti->error = "High watermark must be greater than or equal to low watermark"; 2385 goto bad; 2386 } 2387 2388 if (WC_MODE_PMEM(wc)) { 2389 if (!dax_synchronous(wc->ssd_dev->dax_dev)) { 2390 r = -EOPNOTSUPP; 2391 ti->error = "Asynchronous persistent memory not supported as pmem cache"; 2392 goto bad; 2393 } 2394 2395 r = persistent_memory_claim(wc); 2396 if (r) { 2397 ti->error = "Unable to map persistent memory for cache"; 2398 goto bad; 2399 } 2400 } else { 2401 size_t n_blocks, n_metadata_blocks; 2402 uint64_t n_bitmap_bits; 2403 2404 wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT; 2405 2406 bio_list_init(&wc->flush_list); 2407 wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush"); 2408 if (IS_ERR(wc->flush_thread)) { 2409 r = PTR_ERR(wc->flush_thread); 2410 wc->flush_thread = NULL; 2411 ti->error = "Couldn't spawn flush thread"; 2412 goto bad; 2413 } 2414 wake_up_process(wc->flush_thread); 2415 2416 r = calculate_memory_size(wc->memory_map_size, wc->block_size, 2417 &n_blocks, &n_metadata_blocks); 2418 if (r) { 2419 ti->error = "Invalid device size"; 2420 goto bad; 2421 } 2422 2423 n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) + 2424 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY; 2425 /* this is limitation of test_bit functions */ 2426 if (n_bitmap_bits > 1U << 31) { 2427 r = -EFBIG; 2428 ti->error = "Invalid device size"; 2429 goto bad; 2430 } 2431 2432 wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits); 2433 if (!wc->memory_map) { 2434 r = -ENOMEM; 2435 ti->error = "Unable to allocate memory for metadata"; 2436 goto bad; 2437 } 2438 2439 wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle); 2440 if (IS_ERR(wc->dm_kcopyd)) { 2441 r = PTR_ERR(wc->dm_kcopyd); 2442 ti->error = "Unable to allocate dm-kcopyd client"; 2443 wc->dm_kcopyd = NULL; 2444 goto bad; 2445 } 2446 2447 wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT); 2448 wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) / 2449 BITS_PER_LONG * sizeof(unsigned long); 2450 wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size); 2451 if (!wc->dirty_bitmap) { 2452 r = -ENOMEM; 2453 ti->error = "Unable to allocate dirty bitmap"; 2454 goto bad; 2455 } 2456 2457 r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT); 2458 if (r) { 2459 ti->error = "Unable to read first block of metadata"; 2460 goto bad; 2461 } 2462 } 2463 2464 r = copy_mc_to_kernel(&s, sb(wc), sizeof(struct wc_memory_superblock)); 2465 if (r) { 2466 ti->error = "Hardware memory error when reading superblock"; 2467 goto bad; 2468 } 2469 if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) { 2470 r = init_memory(wc); 2471 if (r) { 2472 ti->error = "Unable to initialize device"; 2473 goto bad; 2474 } 2475 r = copy_mc_to_kernel(&s, sb(wc), 2476 sizeof(struct wc_memory_superblock)); 2477 if (r) { 2478 ti->error = 
"Hardware memory error when reading superblock"; 2479 goto bad; 2480 } 2481 } 2482 2483 if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) { 2484 ti->error = "Invalid magic in the superblock"; 2485 r = -EINVAL; 2486 goto bad; 2487 } 2488 2489 if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) { 2490 ti->error = "Invalid version in the superblock"; 2491 r = -EINVAL; 2492 goto bad; 2493 } 2494 2495 if (le32_to_cpu(s.block_size) != wc->block_size) { 2496 ti->error = "Block size does not match superblock"; 2497 r = -EINVAL; 2498 goto bad; 2499 } 2500 2501 wc->n_blocks = le64_to_cpu(s.n_blocks); 2502 2503 offset = wc->n_blocks * sizeof(struct wc_memory_entry); 2504 if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) { 2505 overflow: 2506 ti->error = "Overflow in size calculation"; 2507 r = -EINVAL; 2508 goto bad; 2509 } 2510 offset += sizeof(struct wc_memory_superblock); 2511 if (offset < sizeof(struct wc_memory_superblock)) 2512 goto overflow; 2513 offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1); 2514 data_size = wc->n_blocks * (size_t)wc->block_size; 2515 if (!offset || (data_size / wc->block_size != wc->n_blocks) || 2516 (offset + data_size < offset)) 2517 goto overflow; 2518 if (offset + data_size > wc->memory_map_size) { 2519 ti->error = "Memory area is too small"; 2520 r = -EINVAL; 2521 goto bad; 2522 } 2523 2524 wc->metadata_sectors = offset >> SECTOR_SHIFT; 2525 wc->block_start = (char *)sb(wc) + offset; 2526 2527 x = (uint64_t)wc->n_blocks * (100 - high_wm_percent); 2528 x += 50; 2529 do_div(x, 100); 2530 wc->freelist_high_watermark = x; 2531 x = (uint64_t)wc->n_blocks * (100 - low_wm_percent); 2532 x += 50; 2533 do_div(x, 100); 2534 wc->freelist_low_watermark = x; 2535 2536 if (wc->cleaner) 2537 activate_cleaner(wc); 2538 2539 r = writecache_alloc_entries(wc); 2540 if (r) { 2541 ti->error = "Cannot allocate memory"; 2542 goto bad; 2543 } 2544 2545 ti->num_flush_bios = WC_MODE_PMEM(wc) ? 1 : 2; 2546 ti->flush_supported = true; 2547 ti->num_discard_bios = 1; 2548 2549 if (WC_MODE_PMEM(wc)) 2550 persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size); 2551 2552 return 0; 2553 2554 bad_arguments: 2555 r = -EINVAL; 2556 ti->error = "Bad arguments"; 2557 bad: 2558 writecache_dtr(ti); 2559 return r; 2560 } 2561 2562 static void writecache_status(struct dm_target *ti, status_type_t type, 2563 unsigned status_flags, char *result, unsigned maxlen) 2564 { 2565 struct dm_writecache *wc = ti->private; 2566 unsigned extra_args; 2567 unsigned sz = 0; 2568 2569 switch (type) { 2570 case STATUSTYPE_INFO: 2571 DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc), 2572 (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size, 2573 (unsigned long long)wc->writeback_size); 2574 break; 2575 case STATUSTYPE_TABLE: 2576 DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 
	case STATUSTYPE_TABLE:
		DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
		       wc->dev->name, wc->ssd_dev->name, wc->block_size);
		extra_args = 0;
		if (wc->start_sector_set)
			extra_args += 2;
		if (wc->high_wm_percent_set)
			extra_args += 2;
		if (wc->low_wm_percent_set)
			extra_args += 2;
		if (wc->max_writeback_jobs_set)
			extra_args += 2;
		if (wc->autocommit_blocks_set)
			extra_args += 2;
		if (wc->autocommit_time_set)
			extra_args += 2;
		if (wc->max_age_set)
			extra_args += 2;
		if (wc->cleaner_set)
			extra_args++;
		if (wc->writeback_fua_set)
			extra_args++;
		if (wc->metadata_only)
			extra_args++;
		if (wc->pause_set)
			extra_args += 2;

		DMEMIT("%u", extra_args);
		if (wc->start_sector_set)
			DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
		if (wc->high_wm_percent_set)
			DMEMIT(" high_watermark %u", wc->high_wm_percent_value);
		if (wc->low_wm_percent_set)
			DMEMIT(" low_watermark %u", wc->low_wm_percent_value);
		if (wc->max_writeback_jobs_set)
			DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
		if (wc->autocommit_blocks_set)
			DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
		if (wc->autocommit_time_set)
			DMEMIT(" autocommit_time %u", wc->autocommit_time_value);
		if (wc->max_age_set)
			DMEMIT(" max_age %u", wc->max_age_value);
		if (wc->cleaner_set)
			DMEMIT(" cleaner");
		if (wc->writeback_fua_set)
			DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
		if (wc->metadata_only)
			DMEMIT(" metadata_only");
		if (wc->pause_set)
			DMEMIT(" pause_writeback %u", wc->pause_value);
		break;
	}
}

static struct target_type writecache_target = {
	.name			= "writecache",
	.version		= {1, 5, 0},
	.module			= THIS_MODULE,
	.ctr			= writecache_ctr,
	.dtr			= writecache_dtr,
	.status			= writecache_status,
	.postsuspend		= writecache_suspend,
	.resume			= writecache_resume,
	.message		= writecache_message,
	.map			= writecache_map,
	.end_io			= writecache_end_io,
	.iterate_devices	= writecache_iterate_devices,
	.io_hints		= writecache_io_hints,
};

static int __init dm_writecache_init(void)
{
	int r;

	r = dm_register_target(&writecache_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		return r;
	}

	return 0;
}

static void __exit dm_writecache_exit(void)
{
	dm_unregister_target(&writecache_target);
}

module_init(dm_writecache_init);
module_exit(dm_writecache_exit);

MODULE_DESCRIPTION(DM_NAME " writecache target");
MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");