/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2001, 2002 Sistina Software
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H

#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>

#include "dm-stats.h"

/*
 * Suspend feature flags
 */
#define DM_SUSPEND_LOCKFS_FLAG		(1 << 0)
#define DM_SUSPEND_NOFLUSH_FLAG		(1 << 1)
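
/*
 * Illustrative sketch (not part of the original header): these flags are
 * OR'd into the suspend_flags argument of dm_suspend(), declared in
 * <linux/device-mapper.h>.  A caller that wants the filesystem locked
 * while the device is suspended might do:
 *
 *	int r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *
 *	if (r)
 *		return r;
 */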

/*
 * Status feature flags
 */
#define DM_STATUS_NOFLUSH_FLAG		(1 << 0)

/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev_internal {
	struct list_head list;
	atomic_t count;
	struct dm_dev *dm_dev;
};
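
/*
 * Illustrative sketch (not part of the original header): entries of this
 * type hang off the list returned by dm_table_get_devices() below, so a
 * walk over the devices a table uses might look like:
 *
 *	struct dm_dev_internal *dd;
 *
 *	list_for_each_entry(dd, dm_table_get_devices(t), list)
 *		pr_info("table uses %s\n", dd->dm_dev->name);
 */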

struct dm_table;
struct dm_md_mempools;

/*-----------------------------------------------------------------
 * Internal table functions.
 *---------------------------------------------------------------*/
void dm_table_destroy(struct dm_table *t);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits);
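
/*
 * Illustrative sketch (not part of the original header): the two functions
 * above are used as a pair when a table is bound to a device's queue,
 * roughly:
 *
 *	struct queue_limits limits;
 *	int r = dm_calculate_queue_limits(t, &limits);
 *
 *	if (r)
 *		return r;
 *	dm_table_set_restrictions(t, q, &limits);
 */
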
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
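
/*
 * Illustrative sketch (not part of the original header): the suspend hooks
 * above bracket the actual quiescing of I/O, roughly in this order:
 *
 *	dm_table_presuspend_targets(map);
 *	// ... quiesce I/O; if that fails:
 *	//	dm_table_presuspend_undo_targets(map);
 *	dm_table_postsuspend_targets(map);
 *	// ... later, on resume:
 *	r = dm_table_resume_targets(map);
 */
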
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
enum dm_queue_mode dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
bool dm_table_all_blk_mq_devices(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);

void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);

int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
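
/*
 * Illustrative sketch (not part of the original header): on the first table
 * load the queue mode of a mapped_device is fixed under the type lock
 * before the queue is set up, roughly as the ioctl path does it:
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE) {
 *		dm_set_md_type(md, dm_table_get_type(t));
 *		r = dm_setup_md_queue(md, t);
 *	} else if (dm_get_md_type(md) != dm_table_get_type(t)) {
 *		r = -EINVAL;	// type cannot change once established
 *	}
 *	dm_unlock_md_type(md);
 */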

/*
 * To check the return value from dm_table_find_target().
 */
#define dm_target_is_valid(t) ((t)->table)
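
/*
 * Illustrative sketch (not part of the original header): typical use when
 * mapping I/O to the target that covers a given sector:
 *
 *	struct dm_target *ti = dm_table_find_target(map, sector);
 *
 *	if (!dm_target_is_valid(ti))
 *		return -EIO;
 */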

/*
 * To check whether the target type is bio-based or not (request-based).
 */
#define dm_target_bio_based(t) ((t)->type->map != NULL)

/*
 * To check whether the target type is request-based or not (bio-based).
 */
#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)

/*
 * To check whether the target type is a hybrid (capable of being
 * either request-based or bio-based).
 */
#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
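
/*
 * Illustrative sketch (not part of the original header): when the queue
 * mode of a new table is derived from its target type, the classification
 * is roughly:
 *
 *	if (dm_target_hybrid(tgt))
 *		hybrid = 1;		// follow the existing md type
 *	else if (dm_target_request_based(tgt))
 *		request_based = 1;
 *	else
 *		bio_based = 1;
 */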

/*-----------------------------------------------------------------
 * A registry of target types.
 *---------------------------------------------------------------*/
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *tt);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param);
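
/*
 * Illustrative sketch (not part of the original header): dm_get_target_type()
 * takes a reference on the target type (and its module) that must be dropped
 * with dm_put_target_type() once the caller is done with it:
 *
 *	struct target_type *tt = dm_get_target_type("linear");
 *
 *	if (!tt)
 *		return -EINVAL;
 *	// ... use tt ...
 *	dm_put_target_type(tt);
 */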

int dm_split_args(int *argc, char ***argvp, char *input);
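
/*
 * Illustrative sketch (not part of the original header): dm_split_args()
 * splits a writable argument string in place and allocates the argv array,
 * which the caller frees with kfree():
 *
 *	int argc, r;
 *	char **argv;
 *
 *	r = dm_split_args(&argc, &argv, input);
 *	if (r)
 *		return r;
 *	// ... consume argc/argv ...
 *	kfree(argv);
 */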

/*
 * Is this mapped_device being deleted?
 */
int dm_deleting_md(struct mapped_device *md);

/*
 * Is this mapped_device suspended?
 */
int dm_suspended_md(struct mapped_device *md);
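
/*
 * Illustrative sketch (not part of the original header): a control path
 * that must not run against a suspended or dying device can bail early:
 *
 *	if (dm_deleting_md(md) || dm_suspended_md(md))
 *		return -EAGAIN;
 */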

/*
 * Internal suspend and resume methods.
 */
int dm_suspended_internally_md(struct mapped_device *md);
void dm_internal_suspend_fast(struct mapped_device *md);
void dm_internal_resume_fast(struct mapped_device *md);
void dm_internal_suspend_noflush(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);
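
/*
 * Illustrative sketch (not part of the original header): the _fast variants
 * are used as a bracket around work that needs the device quiesced without
 * a full user-visible suspend:
 *
 *	dm_internal_suspend_fast(md);
 *	// ... operate on the quiesced device ...
 *	dm_internal_resume_fast(md);
 */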

/*
 * Test if the device is scheduled for deferred remove.
 */
int dm_test_deferred_remove_flag(struct mapped_device *md);

/*
 * Try to remove devices marked for deferred removal.
 */
void dm_deferred_remove(void);

/*
 * The device-mapper can be driven through one of two interfaces:
 * ioctl or filesystem, depending on which patch you have applied.
 */
int dm_interface_init(void);
void dm_interface_exit(void);

/*
 * sysfs interface
 */
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

/*
 * The kobject helper
 */
void dm_kobject_release(struct kobject *kobj);

/*
 * Targets for linear and striped mappings
 */
int dm_linear_init(void);
void dm_linear_exit(void);

int dm_stripe_init(void);
void dm_stripe_exit(void);

/*
 * mapped_device operations
 */
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
struct request_queue *dm_get_md_queue(struct mapped_device *md);
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
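
/*
 * Illustrative sketch (not part of the original header): table devices are
 * reference counted per mapped_device, so every successful
 * dm_get_table_device() is balanced by dm_put_table_device():
 *
 *	struct dm_dev *d;
 *	int r = dm_get_table_device(md, dev, FMODE_READ | FMODE_WRITE, &d);
 *
 *	if (r)
 *		return r;
 *	// ... use d->bdev ...
 *	dm_put_table_device(md, d);
 */
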
struct dm_stats *dm_get_stats(struct mapped_device *md);

int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie);

void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

int dm_io_init(void);
void dm_io_exit(void);

int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);

/*
 * Mempool operations
 */
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
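
/*
 * Illustrative sketch (not part of the original header): per-table mempools
 * are sized for the table's queue mode when the table is created and freed
 * when the table goes away:
 *
 *	struct dm_md_mempools *pools =
 *		dm_alloc_md_mempools(md, type, integrity, per_bio_data_size);
 *
 *	if (!pools)
 *		return -ENOMEM;
 *	// ... attach pools to the table; on teardown:
 *	dm_free_md_mempools(pools);
 */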

/*
 * Various helpers
 */
unsigned dm_get_reserved_bio_based_ios(void);

#endif