1 /* 2 * Copyright (C) 2014 Facebook. All rights reserved. 3 * 4 * This file is released under the GPL. 5 */ 6 7 #include <linux/device-mapper.h> 8 9 #include <linux/module.h> 10 #include <linux/init.h> 11 #include <linux/blkdev.h> 12 #include <linux/bio.h> 13 #include <linux/slab.h> 14 #include <linux/kthread.h> 15 #include <linux/freezer.h> 16 17 #define DM_MSG_PREFIX "log-writes" 18 19 /* 20 * This target will sequentially log all writes to the target device onto the 21 * log device. This is helpful for replaying writes to check for fs consistency 22 * at all times. This target provides a mechanism to mark specific events to 23 * check data at a later time. So for example you would: 24 * 25 * write data 26 * fsync 27 * dmsetup message /dev/whatever mark mymark 28 * unmount /mnt/test 29 * 30 * Then replay the log up to mymark and check the contents of the replay to 31 * verify it matches what was written. 32 * 33 * We log writes only after they have been flushed, this makes the log describe 34 * close to the order in which the data hits the actual disk, not its cache. So 35 * for example the following sequence (W means write, C means complete) 36 * 37 * Wa,Wb,Wc,Cc,Ca,FLUSH,FUAd,Cb,CFLUSH,CFUAd 38 * 39 * Would result in the log looking like this: 40 * 41 * c,a,flush,fuad,b,<other writes>,<next flush> 42 * 43 * This is meant to help expose problems where file systems do not properly wait 44 * on data being written before invoking a FLUSH. FUA bypasses cache so once it 45 * completes it is added to the log as it should be on disk. 46 * 47 * We treat DISCARDs as if they don't bypass cache so that they are logged in 48 * order of completion along with the normal writes. If we didn't do it this 49 * way we would process all the discards first and then write all the data, when 50 * in fact we want to do the data and the discard in the order that they 51 * completed. 52 */ 53 #define LOG_FLUSH_FLAG (1 << 0) 54 #define LOG_FUA_FLAG (1 << 1) 55 #define LOG_DISCARD_FLAG (1 << 2) 56 #define LOG_MARK_FLAG (1 << 3) 57 58 #define WRITE_LOG_VERSION 1ULL 59 #define WRITE_LOG_MAGIC 0x6a736677736872ULL 60 61 /* 62 * The disk format for this is braindead simple. 63 * 64 * At byte 0 we have our super, followed by the following sequence for 65 * nr_entries: 66 * 67 * [ 1 sector ][ entry->nr_sectors ] 68 * [log_write_entry][ data written ] 69 * 70 * The log_write_entry takes up a full sector so we can have arbitrary length 71 * marks and it leaves us room for extra content in the future. 72 */ 73 74 /* 75 * Basic info about the log for userspace. 76 */ 77 struct log_write_super { 78 __le64 magic; 79 __le64 version; 80 __le64 nr_entries; 81 __le32 sectorsize; 82 }; 83 84 /* 85 * sector - the sector we wrote. 86 * nr_sectors - the number of sectors we wrote. 87 * flags - flags for this log entry. 88 * data_len - the size of the data in this log entry, this is for private log 89 * entry stuff, the MARK data provided by userspace for example. 90 */ 91 struct log_write_entry { 92 __le64 sector; 93 __le64 nr_sectors; 94 __le64 flags; 95 __le64 data_len; 96 }; 97 98 struct log_writes_c { 99 struct dm_dev *dev; 100 struct dm_dev *logdev; 101 u64 logged_entries; 102 u32 sectorsize; 103 atomic_t io_blocks; 104 atomic_t pending_blocks; 105 sector_t next_sector; 106 sector_t end_sector; 107 bool logging_enabled; 108 bool device_supports_discard; 109 spinlock_t blocks_lock; 110 struct list_head unflushed_blocks; 111 struct list_head logging_blocks; 112 wait_queue_head_t wait; 113 struct task_struct *log_kthread; 114 }; 115 116 struct pending_block { 117 int vec_cnt; 118 u64 flags; 119 sector_t sector; 120 sector_t nr_sectors; 121 char *data; 122 u32 datalen; 123 struct list_head list; 124 struct bio_vec vecs[0]; 125 }; 126 127 struct per_bio_data { 128 struct pending_block *block; 129 }; 130 131 static void put_pending_block(struct log_writes_c *lc) 132 { 133 if (atomic_dec_and_test(&lc->pending_blocks)) { 134 smp_mb__after_atomic(); 135 if (waitqueue_active(&lc->wait)) 136 wake_up(&lc->wait); 137 } 138 } 139 140 static void put_io_block(struct log_writes_c *lc) 141 { 142 if (atomic_dec_and_test(&lc->io_blocks)) { 143 smp_mb__after_atomic(); 144 if (waitqueue_active(&lc->wait)) 145 wake_up(&lc->wait); 146 } 147 } 148 149 static void log_end_io(struct bio *bio) 150 { 151 struct log_writes_c *lc = bio->bi_private; 152 153 if (bio->bi_status) { 154 unsigned long flags; 155 156 DMERR("Error writing log block, error=%d", bio->bi_status); 157 spin_lock_irqsave(&lc->blocks_lock, flags); 158 lc->logging_enabled = false; 159 spin_unlock_irqrestore(&lc->blocks_lock, flags); 160 } 161 162 bio_free_pages(bio); 163 put_io_block(lc); 164 bio_put(bio); 165 } 166 167 /* 168 * Meant to be called if there is an error, it will free all the pages 169 * associated with the block. 170 */ 171 static void free_pending_block(struct log_writes_c *lc, 172 struct pending_block *block) 173 { 174 int i; 175 176 for (i = 0; i < block->vec_cnt; i++) { 177 if (block->vecs[i].bv_page) 178 __free_page(block->vecs[i].bv_page); 179 } 180 kfree(block->data); 181 kfree(block); 182 put_pending_block(lc); 183 } 184 185 static int write_metadata(struct log_writes_c *lc, void *entry, 186 size_t entrylen, void *data, size_t datalen, 187 sector_t sector) 188 { 189 struct bio *bio; 190 struct page *page; 191 void *ptr; 192 size_t ret; 193 194 bio = bio_alloc(GFP_KERNEL, 1); 195 if (!bio) { 196 DMERR("Couldn't alloc log bio"); 197 goto error; 198 } 199 bio->bi_iter.bi_size = 0; 200 bio->bi_iter.bi_sector = sector; 201 bio->bi_bdev = lc->logdev->bdev; 202 bio->bi_end_io = log_end_io; 203 bio->bi_private = lc; 204 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 205 206 page = alloc_page(GFP_KERNEL); 207 if (!page) { 208 DMERR("Couldn't alloc log page"); 209 bio_put(bio); 210 goto error; 211 } 212 213 ptr = kmap_atomic(page); 214 memcpy(ptr, entry, entrylen); 215 if (datalen) 216 memcpy(ptr + entrylen, data, datalen); 217 memset(ptr + entrylen + datalen, 0, 218 lc->sectorsize - entrylen - datalen); 219 kunmap_atomic(ptr); 220 221 ret = bio_add_page(bio, page, lc->sectorsize, 0); 222 if (ret != lc->sectorsize) { 223 DMERR("Couldn't add page to the log block"); 224 goto error_bio; 225 } 226 submit_bio(bio); 227 return 0; 228 error_bio: 229 bio_put(bio); 230 __free_page(page); 231 error: 232 put_io_block(lc); 233 return -1; 234 } 235 236 static int log_one_block(struct log_writes_c *lc, 237 struct pending_block *block, sector_t sector) 238 { 239 struct bio *bio; 240 struct log_write_entry entry; 241 size_t ret; 242 int i; 243 244 entry.sector = cpu_to_le64(block->sector); 245 entry.nr_sectors = cpu_to_le64(block->nr_sectors); 246 entry.flags = cpu_to_le64(block->flags); 247 entry.data_len = cpu_to_le64(block->datalen); 248 if (write_metadata(lc, &entry, sizeof(entry), block->data, 249 block->datalen, sector)) { 250 free_pending_block(lc, block); 251 return -1; 252 } 253 254 if (!block->vec_cnt) 255 goto out; 256 sector++; 257 258 atomic_inc(&lc->io_blocks); 259 bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES)); 260 if (!bio) { 261 DMERR("Couldn't alloc log bio"); 262 goto error; 263 } 264 bio->bi_iter.bi_size = 0; 265 bio->bi_iter.bi_sector = sector; 266 bio->bi_bdev = lc->logdev->bdev; 267 bio->bi_end_io = log_end_io; 268 bio->bi_private = lc; 269 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 270 271 for (i = 0; i < block->vec_cnt; i++) { 272 /* 273 * The page offset is always 0 because we allocate a new page 274 * for every bvec in the original bio for simplicity sake. 275 */ 276 ret = bio_add_page(bio, block->vecs[i].bv_page, 277 block->vecs[i].bv_len, 0); 278 if (ret != block->vecs[i].bv_len) { 279 atomic_inc(&lc->io_blocks); 280 submit_bio(bio); 281 bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES)); 282 if (!bio) { 283 DMERR("Couldn't alloc log bio"); 284 goto error; 285 } 286 bio->bi_iter.bi_size = 0; 287 bio->bi_iter.bi_sector = sector; 288 bio->bi_bdev = lc->logdev->bdev; 289 bio->bi_end_io = log_end_io; 290 bio->bi_private = lc; 291 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 292 293 ret = bio_add_page(bio, block->vecs[i].bv_page, 294 block->vecs[i].bv_len, 0); 295 if (ret != block->vecs[i].bv_len) { 296 DMERR("Couldn't add page on new bio?"); 297 bio_put(bio); 298 goto error; 299 } 300 } 301 sector += block->vecs[i].bv_len >> SECTOR_SHIFT; 302 } 303 submit_bio(bio); 304 out: 305 kfree(block->data); 306 kfree(block); 307 put_pending_block(lc); 308 return 0; 309 error: 310 free_pending_block(lc, block); 311 put_io_block(lc); 312 return -1; 313 } 314 315 static int log_super(struct log_writes_c *lc) 316 { 317 struct log_write_super super; 318 319 super.magic = cpu_to_le64(WRITE_LOG_MAGIC); 320 super.version = cpu_to_le64(WRITE_LOG_VERSION); 321 super.nr_entries = cpu_to_le64(lc->logged_entries); 322 super.sectorsize = cpu_to_le32(lc->sectorsize); 323 324 if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) { 325 DMERR("Couldn't write super"); 326 return -1; 327 } 328 329 return 0; 330 } 331 332 static inline sector_t logdev_last_sector(struct log_writes_c *lc) 333 { 334 return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT; 335 } 336 337 static int log_writes_kthread(void *arg) 338 { 339 struct log_writes_c *lc = (struct log_writes_c *)arg; 340 sector_t sector = 0; 341 342 while (!kthread_should_stop()) { 343 bool super = false; 344 bool logging_enabled; 345 struct pending_block *block = NULL; 346 int ret; 347 348 spin_lock_irq(&lc->blocks_lock); 349 if (!list_empty(&lc->logging_blocks)) { 350 block = list_first_entry(&lc->logging_blocks, 351 struct pending_block, list); 352 list_del_init(&block->list); 353 if (!lc->logging_enabled) 354 goto next; 355 356 sector = lc->next_sector; 357 if (block->flags & LOG_DISCARD_FLAG) 358 lc->next_sector++; 359 else 360 lc->next_sector += block->nr_sectors + 1; 361 362 /* 363 * Apparently the size of the device may not be known 364 * right away, so handle this properly. 365 */ 366 if (!lc->end_sector) 367 lc->end_sector = logdev_last_sector(lc); 368 if (lc->end_sector && 369 lc->next_sector >= lc->end_sector) { 370 DMERR("Ran out of space on the logdev"); 371 lc->logging_enabled = false; 372 goto next; 373 } 374 lc->logged_entries++; 375 atomic_inc(&lc->io_blocks); 376 377 super = (block->flags & (LOG_FUA_FLAG | LOG_MARK_FLAG)); 378 if (super) 379 atomic_inc(&lc->io_blocks); 380 } 381 next: 382 logging_enabled = lc->logging_enabled; 383 spin_unlock_irq(&lc->blocks_lock); 384 if (block) { 385 if (logging_enabled) { 386 ret = log_one_block(lc, block, sector); 387 if (!ret && super) 388 ret = log_super(lc); 389 if (ret) { 390 spin_lock_irq(&lc->blocks_lock); 391 lc->logging_enabled = false; 392 spin_unlock_irq(&lc->blocks_lock); 393 } 394 } else 395 free_pending_block(lc, block); 396 continue; 397 } 398 399 if (!try_to_freeze()) { 400 set_current_state(TASK_INTERRUPTIBLE); 401 if (!kthread_should_stop() && 402 !atomic_read(&lc->pending_blocks)) 403 schedule(); 404 __set_current_state(TASK_RUNNING); 405 } 406 } 407 return 0; 408 } 409 410 /* 411 * Construct a log-writes mapping: 412 * log-writes <dev_path> <log_dev_path> 413 */ 414 static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) 415 { 416 struct log_writes_c *lc; 417 struct dm_arg_set as; 418 const char *devname, *logdevname; 419 int ret; 420 421 as.argc = argc; 422 as.argv = argv; 423 424 if (argc < 2) { 425 ti->error = "Invalid argument count"; 426 return -EINVAL; 427 } 428 429 lc = kzalloc(sizeof(struct log_writes_c), GFP_KERNEL); 430 if (!lc) { 431 ti->error = "Cannot allocate context"; 432 return -ENOMEM; 433 } 434 spin_lock_init(&lc->blocks_lock); 435 INIT_LIST_HEAD(&lc->unflushed_blocks); 436 INIT_LIST_HEAD(&lc->logging_blocks); 437 init_waitqueue_head(&lc->wait); 438 lc->sectorsize = 1 << SECTOR_SHIFT; 439 atomic_set(&lc->io_blocks, 0); 440 atomic_set(&lc->pending_blocks, 0); 441 442 devname = dm_shift_arg(&as); 443 ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev); 444 if (ret) { 445 ti->error = "Device lookup failed"; 446 goto bad; 447 } 448 449 logdevname = dm_shift_arg(&as); 450 ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table), 451 &lc->logdev); 452 if (ret) { 453 ti->error = "Log device lookup failed"; 454 dm_put_device(ti, lc->dev); 455 goto bad; 456 } 457 458 lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write"); 459 if (IS_ERR(lc->log_kthread)) { 460 ret = PTR_ERR(lc->log_kthread); 461 ti->error = "Couldn't alloc kthread"; 462 dm_put_device(ti, lc->dev); 463 dm_put_device(ti, lc->logdev); 464 goto bad; 465 } 466 467 /* We put the super at sector 0, start logging at sector 1 */ 468 lc->next_sector = 1; 469 lc->logging_enabled = true; 470 lc->end_sector = logdev_last_sector(lc); 471 lc->device_supports_discard = true; 472 473 ti->num_flush_bios = 1; 474 ti->flush_supported = true; 475 ti->num_discard_bios = 1; 476 ti->discards_supported = true; 477 ti->per_io_data_size = sizeof(struct per_bio_data); 478 ti->private = lc; 479 return 0; 480 481 bad: 482 kfree(lc); 483 return ret; 484 } 485 486 static int log_mark(struct log_writes_c *lc, char *data) 487 { 488 struct pending_block *block; 489 size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry); 490 491 block = kzalloc(sizeof(struct pending_block), GFP_KERNEL); 492 if (!block) { 493 DMERR("Error allocating pending block"); 494 return -ENOMEM; 495 } 496 497 block->data = kstrndup(data, maxsize, GFP_KERNEL); 498 if (!block->data) { 499 DMERR("Error copying mark data"); 500 kfree(block); 501 return -ENOMEM; 502 } 503 atomic_inc(&lc->pending_blocks); 504 block->datalen = strlen(block->data); 505 block->flags |= LOG_MARK_FLAG; 506 spin_lock_irq(&lc->blocks_lock); 507 list_add_tail(&block->list, &lc->logging_blocks); 508 spin_unlock_irq(&lc->blocks_lock); 509 wake_up_process(lc->log_kthread); 510 return 0; 511 } 512 513 static void log_writes_dtr(struct dm_target *ti) 514 { 515 struct log_writes_c *lc = ti->private; 516 517 spin_lock_irq(&lc->blocks_lock); 518 list_splice_init(&lc->unflushed_blocks, &lc->logging_blocks); 519 spin_unlock_irq(&lc->blocks_lock); 520 521 /* 522 * This is just nice to have since it'll update the super to include the 523 * unflushed blocks, if it fails we don't really care. 524 */ 525 log_mark(lc, "dm-log-writes-end"); 526 wake_up_process(lc->log_kthread); 527 wait_event(lc->wait, !atomic_read(&lc->io_blocks) && 528 !atomic_read(&lc->pending_blocks)); 529 kthread_stop(lc->log_kthread); 530 531 WARN_ON(!list_empty(&lc->logging_blocks)); 532 WARN_ON(!list_empty(&lc->unflushed_blocks)); 533 dm_put_device(ti, lc->dev); 534 dm_put_device(ti, lc->logdev); 535 kfree(lc); 536 } 537 538 static void normal_map_bio(struct dm_target *ti, struct bio *bio) 539 { 540 struct log_writes_c *lc = ti->private; 541 542 bio->bi_bdev = lc->dev->bdev; 543 } 544 545 static int log_writes_map(struct dm_target *ti, struct bio *bio) 546 { 547 struct log_writes_c *lc = ti->private; 548 struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); 549 struct pending_block *block; 550 struct bvec_iter iter; 551 struct bio_vec bv; 552 size_t alloc_size; 553 int i = 0; 554 bool flush_bio = (bio->bi_opf & REQ_PREFLUSH); 555 bool fua_bio = (bio->bi_opf & REQ_FUA); 556 bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD); 557 558 pb->block = NULL; 559 560 /* Don't bother doing anything if logging has been disabled */ 561 if (!lc->logging_enabled) 562 goto map_bio; 563 564 /* 565 * Map reads as normal. 566 */ 567 if (bio_data_dir(bio) == READ) 568 goto map_bio; 569 570 /* No sectors and not a flush? Don't care */ 571 if (!bio_sectors(bio) && !flush_bio) 572 goto map_bio; 573 574 /* 575 * Discards will have bi_size set but there's no actual data, so just 576 * allocate the size of the pending block. 577 */ 578 if (discard_bio) 579 alloc_size = sizeof(struct pending_block); 580 else 581 alloc_size = sizeof(struct pending_block) + sizeof(struct bio_vec) * bio_segments(bio); 582 583 block = kzalloc(alloc_size, GFP_NOIO); 584 if (!block) { 585 DMERR("Error allocating pending block"); 586 spin_lock_irq(&lc->blocks_lock); 587 lc->logging_enabled = false; 588 spin_unlock_irq(&lc->blocks_lock); 589 return DM_MAPIO_KILL; 590 } 591 INIT_LIST_HEAD(&block->list); 592 pb->block = block; 593 atomic_inc(&lc->pending_blocks); 594 595 if (flush_bio) 596 block->flags |= LOG_FLUSH_FLAG; 597 if (fua_bio) 598 block->flags |= LOG_FUA_FLAG; 599 if (discard_bio) 600 block->flags |= LOG_DISCARD_FLAG; 601 602 block->sector = bio->bi_iter.bi_sector; 603 block->nr_sectors = bio_sectors(bio); 604 605 /* We don't need the data, just submit */ 606 if (discard_bio) { 607 WARN_ON(flush_bio || fua_bio); 608 if (lc->device_supports_discard) 609 goto map_bio; 610 bio_endio(bio); 611 return DM_MAPIO_SUBMITTED; 612 } 613 614 /* Flush bio, splice the unflushed blocks onto this list and submit */ 615 if (flush_bio && !bio_sectors(bio)) { 616 spin_lock_irq(&lc->blocks_lock); 617 list_splice_init(&lc->unflushed_blocks, &block->list); 618 spin_unlock_irq(&lc->blocks_lock); 619 goto map_bio; 620 } 621 622 /* 623 * We will write this bio somewhere else way later so we need to copy 624 * the actual contents into new pages so we know the data will always be 625 * there. 626 * 627 * We do this because this could be a bio from O_DIRECT in which case we 628 * can't just hold onto the page until some later point, we have to 629 * manually copy the contents. 630 */ 631 bio_for_each_segment(bv, bio, iter) { 632 struct page *page; 633 void *src, *dst; 634 635 page = alloc_page(GFP_NOIO); 636 if (!page) { 637 DMERR("Error allocing page"); 638 free_pending_block(lc, block); 639 spin_lock_irq(&lc->blocks_lock); 640 lc->logging_enabled = false; 641 spin_unlock_irq(&lc->blocks_lock); 642 return DM_MAPIO_KILL; 643 } 644 645 src = kmap_atomic(bv.bv_page); 646 dst = kmap_atomic(page); 647 memcpy(dst, src + bv.bv_offset, bv.bv_len); 648 kunmap_atomic(dst); 649 kunmap_atomic(src); 650 block->vecs[i].bv_page = page; 651 block->vecs[i].bv_len = bv.bv_len; 652 block->vec_cnt++; 653 i++; 654 } 655 656 /* Had a flush with data in it, weird */ 657 if (flush_bio) { 658 spin_lock_irq(&lc->blocks_lock); 659 list_splice_init(&lc->unflushed_blocks, &block->list); 660 spin_unlock_irq(&lc->blocks_lock); 661 } 662 map_bio: 663 normal_map_bio(ti, bio); 664 return DM_MAPIO_REMAPPED; 665 } 666 667 static int normal_end_io(struct dm_target *ti, struct bio *bio, 668 blk_status_t *error) 669 { 670 struct log_writes_c *lc = ti->private; 671 struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); 672 673 if (bio_data_dir(bio) == WRITE && pb->block) { 674 struct pending_block *block = pb->block; 675 unsigned long flags; 676 677 spin_lock_irqsave(&lc->blocks_lock, flags); 678 if (block->flags & LOG_FLUSH_FLAG) { 679 list_splice_tail_init(&block->list, &lc->logging_blocks); 680 list_add_tail(&block->list, &lc->logging_blocks); 681 wake_up_process(lc->log_kthread); 682 } else if (block->flags & LOG_FUA_FLAG) { 683 list_add_tail(&block->list, &lc->logging_blocks); 684 wake_up_process(lc->log_kthread); 685 } else 686 list_add_tail(&block->list, &lc->unflushed_blocks); 687 spin_unlock_irqrestore(&lc->blocks_lock, flags); 688 } 689 690 return DM_ENDIO_DONE; 691 } 692 693 /* 694 * INFO format: <logged entries> <highest allocated sector> 695 */ 696 static void log_writes_status(struct dm_target *ti, status_type_t type, 697 unsigned status_flags, char *result, 698 unsigned maxlen) 699 { 700 unsigned sz = 0; 701 struct log_writes_c *lc = ti->private; 702 703 switch (type) { 704 case STATUSTYPE_INFO: 705 DMEMIT("%llu %llu", lc->logged_entries, 706 (unsigned long long)lc->next_sector - 1); 707 if (!lc->logging_enabled) 708 DMEMIT(" logging_disabled"); 709 break; 710 711 case STATUSTYPE_TABLE: 712 DMEMIT("%s %s", lc->dev->name, lc->logdev->name); 713 break; 714 } 715 } 716 717 static int log_writes_prepare_ioctl(struct dm_target *ti, 718 struct block_device **bdev, fmode_t *mode) 719 { 720 struct log_writes_c *lc = ti->private; 721 struct dm_dev *dev = lc->dev; 722 723 *bdev = dev->bdev; 724 /* 725 * Only pass ioctls through if the device sizes match exactly. 726 */ 727 if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) 728 return 1; 729 return 0; 730 } 731 732 static int log_writes_iterate_devices(struct dm_target *ti, 733 iterate_devices_callout_fn fn, 734 void *data) 735 { 736 struct log_writes_c *lc = ti->private; 737 738 return fn(ti, lc->dev, 0, ti->len, data); 739 } 740 741 /* 742 * Messages supported: 743 * mark <mark data> - specify the marked data. 744 */ 745 static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv) 746 { 747 int r = -EINVAL; 748 struct log_writes_c *lc = ti->private; 749 750 if (argc != 2) { 751 DMWARN("Invalid log-writes message arguments, expect 2 arguments, got %d", argc); 752 return r; 753 } 754 755 if (!strcasecmp(argv[0], "mark")) 756 r = log_mark(lc, argv[1]); 757 else 758 DMWARN("Unrecognised log writes target message received: %s", argv[0]); 759 760 return r; 761 } 762 763 static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits) 764 { 765 struct log_writes_c *lc = ti->private; 766 struct request_queue *q = bdev_get_queue(lc->dev->bdev); 767 768 if (!q || !blk_queue_discard(q)) { 769 lc->device_supports_discard = false; 770 limits->discard_granularity = 1 << SECTOR_SHIFT; 771 limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT); 772 } 773 } 774 775 static struct target_type log_writes_target = { 776 .name = "log-writes", 777 .version = {1, 0, 0}, 778 .module = THIS_MODULE, 779 .ctr = log_writes_ctr, 780 .dtr = log_writes_dtr, 781 .map = log_writes_map, 782 .end_io = normal_end_io, 783 .status = log_writes_status, 784 .prepare_ioctl = log_writes_prepare_ioctl, 785 .message = log_writes_message, 786 .iterate_devices = log_writes_iterate_devices, 787 .io_hints = log_writes_io_hints, 788 }; 789 790 static int __init dm_log_writes_init(void) 791 { 792 int r = dm_register_target(&log_writes_target); 793 794 if (r < 0) 795 DMERR("register failed %d", r); 796 797 return r; 798 } 799 800 static void __exit dm_log_writes_exit(void) 801 { 802 dm_unregister_target(&log_writes_target); 803 } 804 805 module_init(dm_log_writes_init); 806 module_exit(dm_log_writes_exit); 807 808 MODULE_DESCRIPTION(DM_NAME " log writes target"); 809 MODULE_AUTHOR("Josef Bacik <jbacik@fb.com>"); 810 MODULE_LICENSE("GPL"); 811