/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2001, 2002 Sistina Software
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H

#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/refcount.h>

#include "dm-stats.h"

/*
 * Suspend feature flags
 */
#define DM_SUSPEND_LOCKFS_FLAG		(1 << 0)
#define DM_SUSPEND_NOFLUSH_FLAG		(1 << 1)
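
/*
 * Example (illustrative sketch only, not taken from the kernel):
 * callers OR these flags into the suspend_flags argument of
 * dm_suspend(), e.g. to suspend without flushing pending I/O and
 * without locking the filesystem:
 *
 *	int r = dm_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
 */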

/*
 * Status feature flags
 */
#define DM_STATUS_NOFLUSH_FLAG		(1 << 0)

/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev_internal {
	struct list_head list;
	refcount_t count;
	struct dm_dev *dm_dev;
};
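
/*
 * Example (illustrative sketch only): entries of this type are linked
 * on the list returned by dm_table_get_devices() and are typically
 * walked with list_for_each_entry():
 *
 *	struct dm_dev_internal *dd;
 *
 *	list_for_each_entry(dd, dm_table_get_devices(t), list)
 *		pr_debug("table uses %s\n", dd->dm_dev->name);
 */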

struct dm_table;
struct dm_md_mempools;

/*-----------------------------------------------------------------
 * Internal table functions.
 *---------------------------------------------------------------*/
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
enum dm_queue_mode dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);

void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);

int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);

/*
 * To check the return value from dm_table_find_target().
 */
#define dm_target_is_valid(t) ((t)->table)
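
/*
 * Example (illustrative sketch only): for a sector beyond the end of
 * the table, dm_table_find_target() may return an empty sentinel entry
 * rather than NULL, so callers check the result before using it:
 *
 *	struct dm_target *ti = dm_table_find_target(map, sector);
 *
 *	if (!dm_target_is_valid(ti))
 *		return -EIO;
 */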

/*
 * To check whether the target type is bio-based (as opposed to
 * request-based).
 */
#define dm_target_bio_based(t) ((t)->type->map != NULL)

/*
 * To check whether the target type is request-based (as opposed to
 * bio-based).
 */
#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)

/*
 * To check whether the target type is a hybrid (capable of being
 * either request-based or bio-based).
 */
#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
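
/*
 * Example (illustrative sketch only): a hybrid target type supplies
 * both hooks, as the "error" target in dm-target.c does; the effective
 * type is then resolved from context when the table is loaded:
 *
 *	static struct target_type error_target = {
 *		.name             = "error",
 *		.map              = io_err_map,
 *		.clone_and_map_rq = io_err_clone_and_map_rq,
 *	};
 */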

/*-----------------------------------------------------------------
 * A registry of target types.
 *---------------------------------------------------------------*/
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *tt);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param);
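
/*
 * Example (illustrative sketch only): dm_target_iterate() calls
 * iter_func once for every registered target type, passing param
 * through unchanged; a hypothetical counter:
 *
 *	static void count_target(struct target_type *tt, void *param)
 *	{
 *		(*(unsigned *)param)++;
 *	}
 *
 *	unsigned n = 0;
 *	dm_target_iterate(count_target, &n);
 */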

int dm_split_args(int *argc, char ***argvp, char *input);
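
/*
 * Example (illustrative sketch only): dm_split_args() splits a
 * whitespace-separated argument string in place and hands back an
 * argv-style vector that the caller must kfree(); for a (made-up)
 * input string "foo bar baz":
 *
 *	int argc;
 *	char **argv;
 *
 *	if (!dm_split_args(&argc, &argv, input)) {
 *		...	argc == 3, argv[1] == "bar"
 *		kfree(argv);
 *	}
 */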

/*
 * Is this mapped_device being deleted?
 */
int dm_deleting_md(struct mapped_device *md);

/*
 * Is this mapped_device suspended?
 */
int dm_suspended_md(struct mapped_device *md);

/*
 * Internal suspend and resume methods.
 */
int dm_suspended_internally_md(struct mapped_device *md);
void dm_internal_suspend_fast(struct mapped_device *md);
void dm_internal_resume_fast(struct mapped_device *md);
void dm_internal_suspend_noflush(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);
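
/*
 * Example (illustrative sketch only): internal suspend/resume calls
 * nest and are used in matched pairs, e.g. the way dm-stats quiesces
 * a device before touching its statistics areas:
 *
 *	dm_internal_suspend_fast(md);
 *	...	manipulate state while no I/O is in flight
 *	dm_internal_resume_fast(md);
 */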

/*
 * Test if the device is scheduled for deferred remove.
 */
int dm_test_deferred_remove_flag(struct mapped_device *md);

/*
 * Try to remove devices marked for deferred removal.
 */
void dm_deferred_remove(void);

/*
 * The device-mapper can be driven through one of two interfaces:
 * ioctl or filesystem, depending on which patch you have applied.
 */
int dm_interface_init(void);
void dm_interface_exit(void);

/*
 * sysfs interface
 */
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

/*
 * The kobject helper
 */
void dm_kobject_release(struct kobject *kobj);

/*
 * Targets for linear and striped mappings
 */
int dm_linear_init(void);
void dm_linear_exit(void);

int dm_stripe_init(void);
void dm_stripe_exit(void);

/*
 * mapped_device operations
 */
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
struct request_queue *dm_get_md_queue(struct mapped_device *md);
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
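
/*
 * Example (illustrative sketch only): dm_get_table_device() and
 * dm_put_table_device() are used in matched pairs; the dm_dev that is
 * handed back stays valid until it is put:
 *
 *	struct dm_dev *d;
 *	int r = dm_get_table_device(md, dev, FMODE_READ, &d);
 *
 *	if (r)
 *		return r;
 *	...	use d->bdev
 *	dm_put_table_device(md, d);
 */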

struct dm_stats *dm_get_stats(struct mapped_device *md);

int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie);

int dm_io_init(void);
void dm_io_exit(void);

int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);

/*
 * Mempool operations
 */
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_bio_data_size,
					    unsigned min_pool_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
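
/*
 * Example (illustrative sketch only): mempools are sized for the
 * table's queue mode and released with the matching call once the
 * table is torn down:
 *
 *	struct dm_md_mempools *pools =
 *		dm_alloc_md_mempools(md, DM_TYPE_BIO_BASED, 0,
 *				     per_bio_data_size, min_pool_size);
 *
 *	if (!pools)
 *		return -ENOMEM;
 *	...
 *	dm_free_md_mempools(pools);
 */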

/*
 * Various helpers
 */
unsigned dm_get_reserved_bio_based_ios(void);

#endif