// SPDX-License-Identifier: GPL-2.0-only
/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/debugfs.h>

#include <linux/uaccess.h>

/*
 * Each block ramdisk device has an xarray brd_pages of pages that stores
 * the pages containing the block device's contents.
 */
struct brd_device {
	int			brd_number;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages. This is the contents of the block device.
	 */
	struct xarray		brd_pages;
	u64			brd_nr_pages;
};

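/*
 * Pages are indexed by sector >> PAGE_SECTORS_SHIFT. As a worked example,
 * with 4 KiB pages PAGE_SECTORS is 8 and PAGE_SECTORS_SHIFT is 3, so 512-byte
 * sectors 0-7 live in the page at index 0, sectors 8-15 in the page at
 * index 1, and so on.
 */
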
/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	return xa_load(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
}

/*
 * Insert a new page for a given sector, if one does not already exist.
 */
static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
{
	pgoff_t idx = sector >> PAGE_SECTORS_SHIFT;
	struct page *page;
	int ret = 0;

	page = brd_lookup_page(brd, sector);
	if (page)
		return 0;

	page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
	if (!page)
		return -ENOMEM;

	xa_lock(&brd->brd_pages);
	ret = __xa_insert(&brd->brd_pages, idx, page, gfp);
	if (!ret)
		brd->brd_nr_pages++;
	xa_unlock(&brd->brd_pages);

	if (ret < 0) {
		__free_page(page);
		/*
		 * -EBUSY means we raced with another inserter and a page for
		 * this sector already exists, which is just as good as having
		 * inserted our own, so report success.
		 */
		if (ret == -EBUSY)
			ret = 0;
	}
	return ret;
}

/*
 * Free all backing store pages and xarray. This must only be called when
 * there are no other users of the device.
 */
static void brd_free_pages(struct brd_device *brd)
{
	struct page *page;
	pgoff_t idx;

	xa_for_each(&brd->brd_pages, idx, page) {
		__free_page(page);
		cond_resched();
	}

	xa_destroy(&brd->brd_pages);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
			     gfp_t gfp)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;
	int ret;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	ret = brd_insert_page(brd, sector, gfp);
	if (ret)
		return ret;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		ret = brd_insert_page(brd, sector, gfp);
	}
	return ret;
}

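/*
 * Note that the copy helpers below assume a single copy is never larger than
 * PAGE_SIZE (which holds for the per-segment lengths brd_submit_bio() feeds
 * them), so a copy straddles at most one page boundary in the backing store
 * and at most two pages need to be touched per call.
 */
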
/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 * Sectors that were never written have no backing page and read as zeroes.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			  sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
		       unsigned int len, unsigned int off, blk_opf_t opf,
		       sector_t sector)
{
	void *mem;
	int err = 0;

	if (op_is_write(opf)) {
		/*
		 * Must use NOIO because we don't want to recurse back into the
		 * block or filesystem layers from page reclaim.
		 */
		gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;

		err = copy_to_brd_setup(brd, sector, len, gfp);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
	if (!op_is_write(opf)) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}

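/*
 * Discard handling: only pages that are entirely covered by the discarded
 * range are freed; a partially covered page at either end is left in place so
 * the data outside the range is preserved. rd_size is in KiB, so rd_size * 2
 * is the device size in 512-byte sectors.
 */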
static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
{
	sector_t aligned_sector = round_up(sector, PAGE_SECTORS);
	struct page *page;

	/* Nothing to do if the range does not reach the next page boundary. */
	if ((aligned_sector - sector) * SECTOR_SIZE > size)
		return;

	size -= (aligned_sector - sector) * SECTOR_SIZE;
	xa_lock(&brd->brd_pages);
	while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) {
		page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
		if (page) {
			__free_page(page);
			brd->brd_nr_pages--;
		}
		aligned_sector += PAGE_SECTORS;
		size -= PAGE_SIZE;
	}
	xa_unlock(&brd->brd_pages);
}

static void brd_submit_bio(struct bio *bio)
{
	struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
	sector_t sector = bio->bi_iter.bi_sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	if (unlikely(op_is_discard(bio->bi_opf))) {
		brd_do_discard(brd, sector, bio->bi_iter.bi_size);
		bio_endio(bio);
		return;
	}

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		/* Unaligned buffers are not supported. */
		WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||
			     (len & (SECTOR_SIZE - 1)));

		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
				  bio->bi_opf, sector);
		if (err) {
			if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
				bio_wouldblock_error(bio);
				return;
			}
			bio_io_error(bio);
			return;
		}
		sector += len >> SECTOR_SHIFT;
	}

	bio_endio(bio);
}

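/*
 * brd is a bio-based driver: bios are handled directly in ->submit_bio()
 * with no request queue or I/O scheduler involved.
 */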
static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		brd_submit_bio,
};

/*
 * And now the module code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, 0444);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, 0444);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Number of minors to reserve between devices");

MODULE_DESCRIPTION("Ram backed block device driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

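/*
 * Example usage when brd is built as a module: loading it with
 *
 *	modprobe brd rd_nr=4 rd_size=1048576
 *
 * creates /dev/ram0 through /dev/ram3, each 1 GiB in size (rd_size is given
 * in KiB).
 */
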
#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

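/*
 * For example, booting with ramdisk_size=65536 on the kernel command line
 * makes each ram disk 64 MiB.
 */
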
/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static struct dentry *brd_debugfs_dir;

static int brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;
	char buf[DISK_NAME_LEN];
	int err = -ENOMEM;
	struct queue_limits lim = {
		/*
		 * This is so fdisk will align partitions on 4k, because of
		 * the direct_access API needing 4k alignment, returning a PFN.
		 * (This is only a problem on very small devices <= 4M,
		 * otherwise fdisk will align on 1M. Regardless, this setting
		 * is harmless.)
		 */
		.physical_block_size	= PAGE_SIZE,
		.max_hw_discard_sectors	= UINT_MAX,
		.max_discard_segments	= 1,
		.discard_granularity	= PAGE_SIZE,
		.features		= BLK_FEAT_SYNCHRONOUS |
					  BLK_FEAT_NOWAIT,
	};

	list_for_each_entry(brd, &brd_devices, brd_list)
		if (brd->brd_number == i)
			return -EEXIST;
	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		return -ENOMEM;
	brd->brd_number = i;
	list_add_tail(&brd->brd_list, &brd_devices);

	xa_init(&brd->brd_pages);

	snprintf(buf, DISK_NAME_LEN, "ram%d", i);
	if (!IS_ERR_OR_NULL(brd_debugfs_dir))
		debugfs_create_u64(buf, 0444, brd_debugfs_dir,
				   &brd->brd_nr_pages);

	disk = brd->brd_disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(disk)) {
		err = PTR_ERR(disk);
		goto out_free_dev;
	}
	disk->major = RAMDISK_MAJOR;
	disk->first_minor = i * max_part;
	disk->minors = max_part;
	disk->fops = &brd_fops;
	disk->private_data = brd;
	strscpy(disk->disk_name, buf, DISK_NAME_LEN);
	/* rd_size is in KiB; capacity is counted in 512-byte sectors. */
	set_capacity(disk, rd_size * 2);

	err = add_disk(disk);
	if (err)
		goto out_cleanup_disk;

	return 0;

out_cleanup_disk:
	put_disk(disk);
out_free_dev:
	list_del(&brd->brd_list);
	kfree(brd);
	return err;
}

/* On-demand device creation: called on first access to an unbound minor. */
static void brd_probe(dev_t dev)
{
	brd_alloc(MINOR(dev) / max_part);
}

static void brd_cleanup(void)
{
	struct brd_device *brd, *next;

	debugfs_remove_recursive(brd_debugfs_dir);

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		del_gendisk(brd->brd_disk);
		put_disk(brd->brd_disk);
		brd_free_pages(brd);
		list_del(&brd->brd_list);
		kfree(brd);
	}
}

static inline void brd_check_and_reset_par(void)
{
	if (unlikely(!max_part))
		max_part = 1;

	/*
	 * Make sure (1U << MINORBITS) is divisible by max_part; otherwise it
	 * is possible to get the same dev_t when adding partitions. If it is
	 * not, round max_part up to the next power of two (e.g. 3 becomes 4).
	 */
	if ((1U << MINORBITS) % max_part != 0)
		max_part = 1UL << fls(max_part);

	if (max_part > DISK_MAX_PARTS) {
		pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
			DISK_MAX_PARTS, DISK_MAX_PARTS);
		max_part = DISK_MAX_PARTS;
	}
}

static int __init brd_init(void)
{
	int err, i;

	brd_check_and_reset_par();

	brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);

	for (i = 0; i < rd_nr; i++) {
		err = brd_alloc(i);
		if (err)
			goto out_free;
	}

	/*
	 * brd can also instantiate devices on demand, provided the device
	 * node is accessed:
	 *
	 * (1) If rd_nr is specified, that many devices are created upfront;
	 *     otherwise it defaults to CONFIG_BLK_DEV_RAM_COUNT.
	 * (2) Further brd devices can be created by making the device node
	 *     and letting the kernel instantiate the actual device on-demand.
	 *     Example:
	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
	 *		fdisk -l /path/devnod_name
	 *     If the device (X / max_part) was not already created, it will
	 *     be created dynamically.
	 */

	if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) {
		err = -EIO;
		goto out_free;
	}

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	brd_cleanup();

	pr_info("brd: module NOT loaded !!!\n");
	return err;
}

static void __exit brd_exit(void)
{
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
	brd_cleanup();

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);