xref: /linux/drivers/md/dm-target.c (revision ebc733e54a1a79ea2dde2ba5121ae73a188e20d4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/bio.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "target"

static LIST_HEAD(_targets);
static DECLARE_RWSEM(_lock);

static inline struct target_type *__find_target_type(const char *name)
{
	struct target_type *tt;

	list_for_each_entry(tt, &_targets, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

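/*
 * Look up a target type by name and take a reference on the module
 * that provides it, so the module cannot be unloaded while the target
 * type is in use.  dm_put_target_type() drops the reference.
 */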
static struct target_type *get_target_type(const char *name)
{
	struct target_type *tt;

	down_read(&_lock);

	tt = __find_target_type(name);
	if (tt && !try_module_get(tt->module))
		tt = NULL;

	up_read(&_lock);
	return tt;
}

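/*
 * Target modules follow the "dm-<target name>" naming convention, so
 * looking up e.g. the "mirror" target requests the dm-mirror module.
 */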
static void load_module(const char *name)
{
	request_module("dm-%s", name);
}

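/*
 * Get a reference to a target type by name.  If it is not registered
 * yet, ask userspace to load the corresponding module and retry the
 * lookup once.
 */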
struct target_type *dm_get_target_type(const char *name)
{
	struct target_type *tt = get_target_type(name);

	if (!tt) {
		load_module(name);
		tt = get_target_type(name);
	}

	return tt;
}

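/*
 * Drop the module reference taken by dm_get_target_type().  Taking
 * _lock for read keeps the put from racing with a concurrent
 * (un)registration of the target type.
 */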
void dm_put_target_type(struct target_type *tt)
{
	down_read(&_lock);
	module_put(tt->module);
	up_read(&_lock);
}

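/*
 * Call iter_func on every registered target type.  The read side of
 * _lock is held across the callbacks, so they must not register or
 * unregister target types themselves.
 */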
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param)
{
	struct target_type *tt;

	down_read(&_lock);
	list_for_each_entry(tt, &_targets, list)
		iter_func(tt, param);
	up_read(&_lock);

	return 0;
}

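/*
 * Register a target type under its unique name, making it available
 * to table loads; a duplicate name fails with -EEXIST.
 */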
int dm_register_target(struct target_type *tt)
{
	int rv = 0;

	down_write(&_lock);
	if (__find_target_type(tt->name)) {
		DMERR("%s: '%s' target already registered",
		      __func__, tt->name);
		rv = -EEXIST;
	} else {
		list_add(&tt->list, &_targets);
	}
	up_write(&_lock);

	return rv;
}
EXPORT_SYMBOL(dm_register_target);

void dm_unregister_target(struct target_type *tt)
{
	down_write(&_lock);
	if (!__find_target_type(tt->name)) {
		DMCRIT("Unregistering unrecognised target: %s", tt->name);
		BUG();
	}

	list_del(&tt->list);

	up_write(&_lock);
}
EXPORT_SYMBOL(dm_unregister_target);

/*
 * io-err: always fails an I/O, which is useful for bringing
 * up LVs that have holes in them.
 */
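/*
 * A minimal usage sketch (device name and sizes are illustrative, not
 * taken from this file): with no extra arguments the target has no
 * backing device,
 *
 *	dmsetup create broken --table "0 2048 error"
 *
 * while the optional <dev_path> <start_sector> pair (parsed by
 * io_err_get_args() below) pins a backing device so its limits, such
 * as the zone layout, can be inherited:
 *
 *	dmsetup create broken --table "0 2048 error /dev/sdb 0"
 */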
struct io_err_c {
	struct dm_dev *dev;
	sector_t start;
};

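/*
 * Parse the optional "<dev_path> <start_sector>" arguments and pin the
 * backing device.  The device is never the destination of mapped I/O;
 * it is only used for its limits and for zone reporting.
 */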
static int io_err_get_args(struct dm_target *tt, unsigned int argc, char **args)
{
	unsigned long long start;
	struct io_err_c *ioec;
	char dummy;
	int ret;

	ioec = kmalloc(sizeof(*ioec), GFP_KERNEL);
	if (!ioec) {
		tt->error = "Cannot allocate io_err context";
		return -ENOMEM;
	}

	ret = -EINVAL;
	if (sscanf(args[1], "%llu%c", &start, &dummy) != 1 ||
	    start != (sector_t)start) {
		tt->error = "Invalid device sector";
		goto bad;
	}
	ioec->start = start;

	ret = dm_get_device(tt, args[0], dm_table_get_mode(tt->table), &ioec->dev);
	if (ret) {
		tt->error = "Device lookup failed";
		goto bad;
	}

	tt->private = ioec;

	return 0;

bad:
	kfree(ioec);

	return ret;
}

static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
{
	/*
	 * If we have arguments, assume it is the path to the backing
	 * block device and its mapping start sector (same as dm-linear).
	 * In this case, get the device so that we can get its limits.
	 */
	if (argc == 2) {
		int ret = io_err_get_args(tt, argc, args);

		if (ret)
			return ret;
	}

	/*
	 * Return error for discards instead of -EOPNOTSUPP
	 */
	tt->num_discard_bios = 1;
	tt->discards_supported = true;

	return 0;
}

static void io_err_dtr(struct dm_target *tt)
{
	struct io_err_c *ioec = tt->private;

	if (ioec) {
		dm_put_device(tt, ioec->dev);
		kfree(ioec);
	}
}

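/*
 * Fail every bio and every request-based clone outright: DM_MAPIO_KILL
 * tells the core to complete the I/O with an error instead of
 * submitting it anywhere.
 */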
static int io_err_map(struct dm_target *tt, struct bio *bio)
{
	return DM_MAPIO_KILL;
}

static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return DM_MAPIO_KILL;
}

static void io_err_release_clone_rq(struct request *clone,
				    union map_info *map_context)
{
}

#ifdef CONFIG_BLK_DEV_ZONED
static sector_t io_err_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct io_err_c *ioec = ti->private;

	return ioec->start + dm_target_offset(ti, bi_sector);
}

static int io_err_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct io_err_c *ioec = ti->private;

	/*
	 * This should never be called when we do not have a backing
	 * device, as that means the target is not a zoned one.
	 */
	if (WARN_ON_ONCE(!ioec))
		return -EIO;

	return dm_report_zones(ioec->dev->bdev, ioec->start,
			       io_err_map_sector(ti, args->next_sector),
			       args, nr_zones);
}
#else
#define io_err_report_zones NULL
#endif

static int io_err_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct io_err_c *ioec = ti->private;

	if (!ioec)
		return 0;

	return fn(ti, ioec->dev, ioec->start, ti->len, data);
}

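/*
 * Advertise discard support so that discard bios are passed down to
 * the target and failed there, matching the flags set in io_err_ctr(),
 * instead of being rejected up front with -EOPNOTSUPP.
 */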
static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	limits->max_discard_sectors = UINT_MAX;
	limits->max_hw_discard_sectors = UINT_MAX;
	limits->discard_granularity = 512;
}

static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	return -EIO;
}

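/*
 * The "error" target is registered from dm_target_init() below, so it
 * is available as soon as device-mapper itself is.  DM_TARGET_WILDCARD
 * lets it replace any other target, even immutable ones.
 */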
static struct target_type error_target = {
	.name = "error",
	.version = {1, 7, 0},
	.features = DM_TARGET_WILDCARD | DM_TARGET_ZONED_HM,
	.ctr  = io_err_ctr,
	.dtr  = io_err_dtr,
	.map  = io_err_map,
	.clone_and_map_rq = io_err_clone_and_map_rq,
	.release_clone_rq = io_err_release_clone_rq,
	.iterate_devices = io_err_iterate_devices,
	.io_hints = io_err_io_hints,
	.direct_access = io_err_dax_direct_access,
	.report_zones = io_err_report_zones,
};

int __init dm_target_init(void)
{
	return dm_register_target(&error_target);
}

void dm_target_exit(void)
{
	dm_unregister_target(&error_target);
}