/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
enum dax_access_mode;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);

/*
 * Returns:
 * < 0 : error (currently ignored)
 *   0 : ended successfully
 *   1 : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 *   2 : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned int status_flags, char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
			      char *result, unsigned int maxlen);

/*
 * Called with *forward == true. If it remains true, the ioctl should be
 * forwarded to bdev. If it is reset to false, the target already fully handled
 * the ioctl and the return value is the return value for the whole ioctl.
 */
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev,
				    unsigned int cmd, unsigned long arg,
				    bool *forward);
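/*
 * Illustrative sketch (not part of this API): a minimal bio-based map
 * function in the style of dm-linear, remapping every bio onto a single
 * underlying device. struct example_ctx and its fields are hypothetical;
 * DM_MAPIO_REMAPPED and dm_target_offset() are defined later in this
 * header.
 *
 *	struct example_ctx {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		bio_set_dev(bio, ec->dev->bdev);
 *		bio->bi_iter.bi_sector = ec->start +
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *
 *		return DM_MAPIO_REMAPPED;
 *	}
 */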
#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can assign to NULL if
 * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target needs to do
 * awkward #ifdefs in their target_type, etc.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device. State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		unsigned long *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);

/*
 * Returns:
 * != 0 : number of bytes transferred
 *    0 : recovery write failed
 */
typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct file *bdev_file;
	struct dax_device *dax_dev;
	blk_mode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
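/*
 * Illustrative sketch (assumed names, not an in-tree target): a
 * constructor and destructor pairing dm_get_device() with
 * dm_put_device(). Setting ti->error before returning an error from the
 * ctr gives userspace a readable failure message. dm_table_get_mode()
 * is declared later in this header.
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 *	{
 *		struct example_ctx *ec;
 *		int r;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev);
 *		if (r) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return r;
 *		}
 *
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 */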
/*
 * Helper function for getting devices
 */
int dm_devt_from_path(const char *path, dev_t *dev_p);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned int version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_report_zones_fn report_zones;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_zero_page_range_fn dax_zero_page_range;
	dm_dax_recovery_write_fn dax_recovery_write;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target, even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
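/*
 * Illustrative sketch (hypothetical "example" target): a minimal
 * bio-based target_type wiring up the example_ctr/example_dtr/example_map
 * sketches above. Registration is typically done with module_dm(),
 * defined near the end of this header.
 *
 *	static struct target_type example_target = {
 *		.name		= "example",
 *		.version	= {1, 0, 0},
 *		.features	= DM_TARGET_NOWAIT,
 *		.module		= THIS_MODULE,
 *		.ctr		= example_ctr,
 *		.dtr		= example_dtr,
 *		.map		= example_map,
 *	};
 */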
/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   devices with different zoned models.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM		0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT		0x00000080
#define dm_target_supports_nowait(type)	((type)->features & DM_TARGET_NOWAIT)

/*
 * A target supports passing through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO		0x00000100
#define dm_target_passes_crypto(type)	((type)->features & DM_TARGET_PASSES_CRYPTO)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

#define DM_TARGET_ATOMIC_WRITES		0x00000400
#define dm_target_supports_atomic_writes(type) ((type)->features & DM_TARGET_ATOMIC_WRITES)
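/*
 * Illustrative sketch (assumed names): combining a property across
 * underlying devices with the iterate_devices hooks declared earlier.
 * The callout runs once per contiguous section of each device and a
 * non-zero return stops the iteration, so this reports whether any
 * underlying device lacks discard support.
 *
 *	static int example_not_discard_capable(struct dm_target *ti,
 *					       struct dm_dev *dev,
 *					       sector_t start, sector_t len,
 *					       void *data)
 *	{
 *		return !bdev_max_discard_sectors(dev->bdev);
 *	}
 *
 *	...
 *	if (ti->type->iterate_devices &&
 *	    ti->type->iterate_devices(ti, example_not_discard_capable, NULL))
 *		... at least one underlying device cannot discard ...
 */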
struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned int num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_secure_erase_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned int per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Automatically set by dm-core if this target supports
	 * REQ_OP_ZONE_RESET_ALL. Otherwise, this operation will be emulated
	 * using REQ_OP_ZONE_RESET. Target drivers must not set this manually.
	 */
	bool zone_reset_all_supported:1;

	/*
	 * Set if this target requires that discards be split on
	 * 'max_discard_sectors' boundaries.
	 */
	bool max_discard_granularity:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;

	/*
	 * Set if this target implements a zoned device and needs emulation of
	 * zone append operations using regular writes.
	 */
	bool emulate_zone_append:1;

	/*
	 * Set if the target will submit IO using dm_submit_bio_remap()
	 * after returning DM_MAPIO_SUBMITTED from its map function.
	 */
	bool accounts_remapped_io:1;

	/*
	 * Set if the target will submit the DM bio without first calling
	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
	 */
	bool needs_bio_set_dev:1;

	/*
	 * Set if the target supports flush optimization. If all the targets in
	 * a table have flush_bypasses_map set, the dm core will not send
	 * flushes to the targets via a ->map method. It will iterate over
	 * dm_table->devices and send flushes to the devices directly. This
	 * optimization reduces the number of flushes being sent when multiple
	 * targets in a table use the same underlying device.
	 *
	 * This optimization may be enabled on targets that just pass the
	 * flushes to the underlying devices without performing any other
	 * actions on the flush request. Currently, dm-linear and dm-stripe
	 * support it.
	 */
	bool flush_bypasses_map:1;

	/*
	 * Set if the target calls bio_integrity_alloc on bios received
	 * in the map method.
	 */
	bool mempool_needs_integrity:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);

u64 dm_start_time_ns_from_clone(struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned int argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned int min;
	unsigned int max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned int *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned int *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);

/*
 *----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *----------------------------------------------------------------
 */

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);
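/*
 * Illustrative sketch (assumed argument layout): parsing an optional
 * feature-argument group inside a ctr with struct dm_arg and the helpers
 * above, in the style used by several in-tree targets. The bounds and
 * message in _args are examples only.
 *
 *	static const struct dm_arg _args[] = {
 *		{0, 4, "Invalid number of feature args"},
 *	};
 *
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned int num_features;
 *	const char *arg_name;
 *	int r;
 *
 *	r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *	if (r)
 *		return r;
 *
 *	while (num_features--) {
 *		arg_name = dm_shift_arg(&as);
 *		... match arg_name against the supported features ...
 *	}
 */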
/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	struct gendisk *disk;
	sector_t next_sector;

	unsigned int zone_idx;

	/* for block layer ->report_zones */
	struct blk_report_zones_args *rep_args;

	/* for internal users */
	report_zones_cb cb;
	void *data;

	/* must be filled by ->report_zones before calling dm_report_zones_cb */
	sector_t start;
};
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
		    struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*
 *---------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------
 */

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, blk_mode_t mode,
		    unsigned int num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" targets that support both bio-based
 * and request-based I/O.
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);
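/*
 * Illustrative sketch (assumed parameters; in-kernel table creation is
 * normally driven by the ioctl path): the intended create/add/complete
 * sequence described above, here building a one-target "linear" table.
 * dev_sectors and the "8:16 0" params string (<major:minor> <offset>)
 * are placeholders.
 *
 *	struct dm_table *t;
 *	int r;
 *
 *	r = dm_table_create(&t, BLK_OPEN_READ | BLK_OPEN_WRITE, 1, md);
 *	if (r)
 *		return r;
 *
 *	r = dm_table_add_target(t, "linear", 0, dev_sectors, "8:16 0");
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r) {
 *		dm_table_destroy(t);
 *		return r;
 *	}
 */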
/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
blk_mode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * Table blk_crypto_profile functions
 */
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);

/*
 *---------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------
 */
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))

#define DMEMIT_TARGET_NAME_VERSION(y) \
		DMEMIT("target_name=%s,target_version=%u.%u.%u", \
		       (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])

/**
 * module_dm() - Helper macro for DM targets that don't do anything
 * special in their module_init and module_exit.
 * Each module may only use this macro once, and calling it replaces
 * module_init() and module_exit().
 *
 * @name: DM target's name
 */
#define module_dm(name) \
static int __init dm_##name##_init(void) \
{ \
	return dm_register_target(&(name##_target)); \
} \
module_init(dm_##name##_init) \
static void __exit dm_##name##_exit(void) \
{ \
	dm_unregister_target(&(name##_target)); \
} \
module_exit(dm_##name##_exit)

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3
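/*
 * Illustrative sketch (assumed names): a status function built on DMEMIT.
 * DMEMIT expands to code that references local variables named sz, result
 * and maxlen, so status implementations conventionally declare
 * "size_t sz = 0;" before emitting, alongside the result/maxlen
 * parameters of dm_status_fn.
 *
 *	static void example_status(struct dm_target *ti, status_type_t type,
 *				   unsigned int status_flags, char *result,
 *				   unsigned int maxlen)
 *	{
 *		struct example_ctx *ec = ti->private;
 *		size_t sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *		case STATUSTYPE_IMA:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long)ec->start);
 *			break;
 *		}
 *	}
 */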
/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * Ceiling(n / sz) * sz
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */