/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structure that is used directly by dm.c and dm-rq.c.
 * DM targets must _not_ dereference a mapped_device to directly access
 * its members!
 */
struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock to dereference;
	 * see the usage sketch after this struct.
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	wait_queue_head_t wait;
	atomic_t pending[2];
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct block_device *bdev;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool use_blk_mq:1;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
};
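
/*
 * Usage sketch (hypothetical caller, not part of this header): the live
 * table hanging off 'map' is protected by io_barrier (SRCU), so readers
 * pair dm_get_live_table() with dm_put_live_table(), both declared in
 * <linux/device-mapper.h>:
 *
 *	unsigned int nr = 0;
 *	int srcu_idx;
 *	struct dm_table *table = dm_get_live_table(md, &srcu_idx);
 *
 *	if (table)
 *		nr = dm_table_get_num_targets(table);
 *	dm_put_live_table(md, srcu_idx);
 *
 * Writers instead swap in a new table while holding suspend_lock.
 */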

int md_in_flight(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
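
/*
 * Sketch of the expected behaviour (an assumption drawn from dm.c of this
 * vintage, not a contract): when a device under a target fails a WRITE SAME
 * or WRITE ZEROES bio with BLK_STS_NOTSUPP, the core turns the feature off
 * for the whole mapped device by zeroing the corresponding queue limit:
 *
 *	void disable_write_zeroes(struct mapped_device *md)
 *	{
 *		struct queue_limits *limits = dm_get_queue_limits(md);
 *
 *		limits->max_write_zeroes_sectors = 0;
 *	}
 */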

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

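/*
 * Usage sketch (hypothetical release handler, mirroring the dm-sysfs.c
 * pattern): embed a dm_kobject_holder in an object and let the kobject
 * release callback signal whoever is tearing the object down:
 *
 *	static void dm_kobject_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 *
 * so the destroy path can kobject_put() and then wait_for_completion() on
 * kobj_holder.completion.
 */
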
unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);

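/*
 * Usage sketch (hypothetical wrapper): the helper reads a writable module
 * parameter, substitutes 'def' when it is 0, and caps it at 'max', e.g.:
 *
 *	static unsigned reserved_bio_based_ios = 16;
 *
 *	unsigned dm_get_reserved_bio_based_ios(void)
 *	{
 *		return __dm_get_module_param(&reserved_bio_based_ios,
 *					     16, DM_RESERVED_MAX_IOS);
 *	}
 */
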
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

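/*
 * Usage sketch (hypothetical ->message() handler; DMEMIT comes from
 * <linux/device-mapper.h>): 'result'/'maxlen' describe the sprintf-style
 * output buffer, and the helper returns true once the NUL-terminated
 * output (or a zero-sized buffer) leaves no room for more text:
 *
 *	unsigned sz = 0;
 *	bool truncated;
 *
 *	DMEMIT("%u %u", hits, misses);
 *	truncated = dm_message_test_buffer_overflow(result, maxlen);
 */
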
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

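/*
 * Sketch of the expected definition in dm.c (an assumption, recorded here
 * only to capture the contract: bump the global event counter and wake any
 * process sleeping on dm_global_eventq, e.g. a poller of the DM control
 * device):
 *
 *	void dm_issue_global_event(void)
 *	{
 *		atomic_inc(&dm_global_event_nr);
 *		wake_up(&dm_global_eventq);
 *	}
 */
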
#endif