// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "linear"

/*
 * Linear: maps a linear range of a device.
 */
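/*
 * The context holds the backing device and the first sector of the mapped
 * range on that device (in 512-byte sectors, like all sector_t values).
 */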
struct linear_c {
	struct dm_dev *dev;
	sector_t start;
};

/*
 * Construct a linear mapping: <dev_path> <offset>
 */
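/*
 * For illustration (hypothetical device and sizes): a table line has the
 * form "<logical_start> <len> linear <dev_path> <offset>", all in 512-byte
 * sectors, so
 *
 *   dmsetup create lin --table "0 2097152 linear /dev/sdb 2048"
 *
 * would map a 1 GiB target onto /dev/sdb starting at sector 2048.
 */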
static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct linear_c *lc;
	unsigned long long tmp;
	char dummy;
	int ret;

	if (argc != 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
	if (lc == NULL) {
		ti->error = "Cannot allocate linear context";
		return -ENOMEM;
	}

	ret = -EINVAL;
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	lc->start = tmp;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

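	/*
	 * One flush/discard/secure-erase/write-zeroes bio per target is
	 * enough: everything is simply remapped onto a single backing
	 * device.  flush_bypasses_map signals that flush bios need no
	 * remapping here, which lets the core route flushes directly to
	 * the underlying device when every target in the table agrees.
	 */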
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_secure_erase_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->flush_bypasses_map = true;
	ti->private = lc;
	return 0;

bad:
	kfree(lc);
	return ret;
}

static void linear_dtr(struct dm_target *ti)
{
	struct linear_c *lc = ti->private;

	dm_put_device(ti, lc->dev);
	kfree(lc);
}

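/*
 * Translate a sector offset within the target into an absolute sector on
 * the backing device: the target-relative offset plus the configured start.
 */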
static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct linear_c *lc = ti->private;

	return lc->start + dm_target_offset(ti, bi_sector);
}

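/*
 * Redirect the bio to the backing device and return DM_MAPIO_REMAPPED so
 * the core submits it there.
 */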
static int linear_map(struct dm_target *ti, struct bio *bio)
{
	struct linear_c *lc = ti->private;

	bio_set_dev(bio, lc->dev->bdev);
	bio->bi_iter.bi_sector = linear_map_sector(ti, bio->bi_iter.bi_sector);

	return DM_MAPIO_REMAPPED;
}

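/*
 * Status: INFO is empty for this target; TABLE reports the backing device
 * and start sector; IMA emits the same information as key=value pairs.
 */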
static void linear_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct linear_c *lc = ti->private;
	size_t sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu", lc->dev->name, (unsigned long long)lc->start);
		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",device_name=%s,start=%llu;", lc->dev->name,
		       (unsigned long long)lc->start);
		break;
	}
}

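/*
 * A positive return tells the dm core that the target covers only part of
 * the underlying device, so forwarding the ioctl requires extra privileges
 * (CAP_SYS_RAWIO).
 */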
static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
				unsigned int cmd, unsigned long arg,
				bool *forward)
{
	struct linear_c *lc = ti->private;
	struct dm_dev *dev = lc->dev;

	*bdev = dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (lc->start || ti->len != bdev_nr_sectors(dev->bdev))
		return 1;
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
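/*
 * Zone reporting is translated back into the target's sector space by the
 * dm_report_zones() helper, using the same linear offset as regular I/O.
 */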
static int linear_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct linear_c *lc = ti->private;

	return dm_report_zones(lc->dev->bdev, lc->start,
			       linear_map_sector(ti, args->next_sector),
			       args, nr_zones);
}
#else
#define linear_report_zones NULL
#endif

static int linear_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct linear_c *lc = ti->private;

	return fn(ti, lc->dev, lc->start, ti->len, data);
}

#if IS_ENABLED(CONFIG_FS_DAX)
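/*
 * Convert a page offset within the target into a page offset on the backing
 * DAX device, accounting for both the linear start and the start of the
 * underlying partition (get_start_sect()).
 */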
static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
{
	struct linear_c *lc = ti->private;
	sector_t sector = linear_map_sector(ti, *pgoff << PAGE_SECTORS_SHIFT);

	*pgoff = (get_start_sect(lc->dev->bdev) + sector) >> PAGE_SECTORS_SHIFT;
	return lc->dev->dax_dev;
}

static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		unsigned long *pfn)
{
	struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);

	return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
}

static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
				      size_t nr_pages)
{
	struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);

	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}

static size_t linear_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);

	return dax_recovery_write(dax_dev, pgoff, addr, bytes, i);
}

#else
#define linear_dax_direct_access NULL
#define linear_dax_zero_page_range NULL
#define linear_dax_recovery_write NULL
#endif

static struct target_type linear_target = {
	.name   = "linear",
	.version = {1, 5, 0},
	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
		    DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO |
		    DM_TARGET_ATOMIC_WRITES,
	.report_zones = linear_report_zones,
	.module = THIS_MODULE,
	.ctr    = linear_ctr,
	.dtr    = linear_dtr,
	.map    = linear_map,
	.status = linear_status,
	.prepare_ioctl = linear_prepare_ioctl,
	.iterate_devices = linear_iterate_devices,
	.direct_access = linear_dax_direct_access,
	.dax_zero_page_range = linear_dax_zero_page_range,
	.dax_recovery_write = linear_dax_recovery_write,
};

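/*
 * dm-linear is built into the dm core module; these init/exit functions are
 * declared in dm.h and called from dm core's own init/exit paths, which is
 * why they are not static and there is no module_init() here.
 */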
int __init dm_linear_init(void)
{
	int r = dm_register_target(&linear_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

void dm_linear_exit(void)
{
	dm_unregister_target(&linear_target);
}