/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
#include <linux/jump_label.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"

#define DM_RESERVED_MAX_IOS	1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference (see the usage sketch below this struct).
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};
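
/*
 * Example (illustrative sketch only, not used by DM core): md->map must not
 * be dereferenced directly.  Readers take an SRCU reference through
 * dm_get_live_table() (declared in linux/device-mapper.h, pulled in via
 * "dm.h") and drop it with dm_put_live_table().  The helper name below is
 * hypothetical and exists purely to show the pattern.
 */
static inline sector_t dm_example_live_table_size(struct mapped_device *md)
{
	struct dm_table *map;
	sector_t size = 0;
	int srcu_idx;

	/* Pin the live table under SRCU; may return NULL if none is loaded. */
	map = dm_get_live_table(md, &srcu_idx);
	if (map)
		size = dm_table_get_size(map);
	dm_put_live_table(md, srcu_idx);

	return size;
}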

/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9

void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

DECLARE_STATIC_KEY_FALSE(stats_enabled);
DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
DECLARE_STATIC_KEY_FALSE(zoned_enabled);

static inline bool dm_emulate_zone_append(struct mapped_device *md)
{
	if (blk_queue_is_zoned(md->queue))
		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	return false;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;
	blk_short_t flags;
	unsigned int target_bio_nr;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	struct bio clone;
};

/*
 * dm_target_io flags
 */
enum {
	DM_TIO_INSIDE_DM_IO,
	DM_TIO_IS_DUPLICATE_BIO
};

static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
{
	return (tio->flags & (1U << bit)) != 0;
}

static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
{
	tio->flags |= (1U << bit);
}

static inline bool dm_tio_is_normal(struct dm_target_io *tio)
{
	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
}
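
/*
 * Example (illustrative sketch only, not used by DM core): because
 * struct dm_target_io embeds its clone bio as the last member, a clone
 * bio can be mapped back to its owning dm_target_io with container_of().
 * The helper name below is hypothetical; dm.c keeps its own private
 * equivalent of this lookup.
 */
static inline bool dm_example_clone_is_duplicate(struct bio *clone)
{
	/* Recover the per-clone context that wraps this bio. */
	struct dm_target_io *tio = container_of(clone, struct dm_target_io, clone);

	/* Flags are queried through the helpers above, never open-coded. */
	return dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO);
}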

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;
	blk_short_t flags;
	spinlock_t lock;
	unsigned long start_time;
	void *data;
	struct dm_io *next;
	struct dm_stats_aux stats_aux;
	blk_status_t status;
	atomic_t io_count;
	struct mapped_device *md;

	/* The three fields represent mapped part of original bio */
	struct bio *orig_bio;
	unsigned int sector_offset; /* offset to end of orig_bio */
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

/*
 * dm_io flags
 */
enum {
	DM_IO_ACCOUNTED,
	DM_IO_WAS_SPLIT
};

static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
{
	return (io->flags & (1U << bit)) != 0;
}

static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
{
	io->flags |= (1U << bit);
}

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif