/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
#ifdef CONFIG_BLK_DEV_RAM_DAX
#include <linux/pfn_t.h>
#include <linux/dax.h>
#include <linux/uio.h>
#endif

#include <linux/uaccess.h>

#define SECTOR_SHIFT            9
#define PAGE_SECTORS_SHIFT      (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS            (1 << PAGE_SECTORS_SHIFT)

/*
 * Each block ramdisk device has a radix_tree, brd_pages, which stores the
 * pages containing the block device's contents. A brd page's ->index is its
 * offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
        int                     brd_number;

        struct request_queue    *brd_queue;
        struct gendisk          *brd_disk;
#ifdef CONFIG_BLK_DEV_RAM_DAX
        struct dax_device       *dax_dev;
#endif
        struct list_head        brd_list;

        /*
         * Backing store of pages and lock to protect it. This is the contents
         * of the block device.
         */
        spinlock_t              brd_lock;
        struct radix_tree_root  brd_pages;
};

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
        pgoff_t idx;
        struct page *page;

        /*
         * The page lifetime is protected by the fact that we have opened the
         * device node -- brd pages will never be deleted under us, so we
         * don't need any further locking or refcounting.
         *
         * This is strictly true for the radix-tree nodes as well (i.e. we
         * don't actually need the rcu_read_lock()), however that is not a
         * documented feature of the radix-tree API so it is better to be
         * safe here (we don't have total exclusion from radix tree updates
         * here, only deletes).
         */
        rcu_read_lock();
        idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
        page = radix_tree_lookup(&brd->brd_pages, idx);
        rcu_read_unlock();

        BUG_ON(page && page->index != idx);

        return page;
}
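/*
 * Worked example of the sector/page arithmetic above (commentary only, not
 * driver code, assuming a 4 KiB PAGE_SIZE): PAGE_SECTORS_SHIFT is
 * 12 - 9 = 3 and PAGE_SECTORS is 8, so sector 9 maps to page index
 * 9 >> 3 = 1, at byte offset (9 & 7) << 9 = 512 within that page.
 */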
/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
        pgoff_t idx;
        struct page *page;
        gfp_t gfp_flags;

        page = brd_lookup_page(brd, sector);
        if (page)
                return page;

        /*
         * Must use NOIO because we don't want to recurse back into the
         * block or filesystem layers from page reclaim.
         *
         * Cannot support DAX and highmem, because our ->direct_access
         * routine for DAX must return memory that is always addressable.
         * If DAX was reworked to use pfns and kmap throughout, this
         * restriction might be able to be lifted.
         */
        gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_RAM_DAX
        gfp_flags |= __GFP_HIGHMEM;
#endif
        page = alloc_page(gfp_flags);
        if (!page)
                return NULL;

        if (radix_tree_preload(GFP_NOIO)) {
                __free_page(page);
                return NULL;
        }

        spin_lock(&brd->brd_lock);
        idx = sector >> PAGE_SECTORS_SHIFT;
        page->index = idx;
        if (radix_tree_insert(&brd->brd_pages, idx, page)) {
                __free_page(page);
                page = radix_tree_lookup(&brd->brd_pages, idx);
                BUG_ON(!page);
                BUG_ON(page->index != idx);
        }
        spin_unlock(&brd->brd_lock);

        radix_tree_preload_end();

        return page;
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
        unsigned long pos = 0;
        struct page *pages[FREE_BATCH];
        int nr_pages;

        do {
                int i;

                nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
                                (void **)pages, pos, FREE_BATCH);

                for (i = 0; i < nr_pages; i++) {
                        void *ret;

                        BUG_ON(pages[i]->index < pos);
                        pos = pages[i]->index;
                        ret = radix_tree_delete(&brd->brd_pages, pos);
                        BUG_ON(!ret || ret != pages[i]);
                        __free_page(pages[i]);
                }

                pos++;

                /*
                 * This assumes radix_tree_gang_lookup always returns as
                 * many pages as possible. If the radix-tree code changes,
                 * so will this have to.
                 */
        } while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
        size_t copy;

        copy = min_t(size_t, n, PAGE_SIZE - offset);
        if (!brd_insert_page(brd, sector))
                return -ENOSPC;
        if (copy < n) {
                sector += copy >> SECTOR_SHIFT;
                if (!brd_insert_page(brd, sector))
                        return -ENOSPC;
        }
        return 0;
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
                        sector_t sector, size_t n)
{
        struct page *page;
        void *dst;
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
        size_t copy;

        copy = min_t(size_t, n, PAGE_SIZE - offset);
        page = brd_lookup_page(brd, sector);
        BUG_ON(!page);

        dst = kmap_atomic(page);
        memcpy(dst + offset, src, copy);
        kunmap_atomic(dst);

        if (copy < n) {
                src += copy;
                sector += copy >> SECTOR_SHIFT;
                copy = n - copy;
                page = brd_lookup_page(brd, sector);
                BUG_ON(!page);

                dst = kmap_atomic(page);
                memcpy(dst, src, copy);
                kunmap_atomic(dst);
        }
}
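/*
 * Worked example of the split logic in copy_to_brd_setup()/copy_to_brd()
 * (commentary only, assuming 4 KiB pages): a 1024-byte write starting at
 * sector 7 has offset (7 & 7) << 9 = 3584, so the first chunk is
 * min(1024, 4096 - 3584) = 512 bytes into one page; the remaining 512 bytes
 * continue at sector 8, which is offset 0 of the next page. Since n here
 * never exceeds PAGE_SIZE, a copy spans at most two brd pages, which is why
 * only one split is handled.
 */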
/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
                        sector_t sector, size_t n)
{
        struct page *page;
        void *src;
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
        size_t copy;

        copy = min_t(size_t, n, PAGE_SIZE - offset);
        page = brd_lookup_page(brd, sector);
        if (page) {
                src = kmap_atomic(page);
                memcpy(dst, src + offset, copy);
                kunmap_atomic(src);
        } else
                memset(dst, 0, copy);

        if (copy < n) {
                dst += copy;
                sector += copy >> SECTOR_SHIFT;
                copy = n - copy;
                page = brd_lookup_page(brd, sector);
                if (page) {
                        src = kmap_atomic(page);
                        memcpy(dst, src, copy);
                        kunmap_atomic(src);
                } else
                        memset(dst, 0, copy);
        }
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
                        unsigned int len, unsigned int off, bool is_write,
                        sector_t sector)
{
        void *mem;
        int err = 0;

        if (is_write) {
                err = copy_to_brd_setup(brd, sector, len);
                if (err)
                        goto out;
        }

        mem = kmap_atomic(page);
        if (!is_write) {
                copy_from_brd(mem + off, brd, sector, len);
                flush_dcache_page(page);
        } else {
                flush_dcache_page(page);
                copy_to_brd(brd, mem + off, sector, len);
        }
        kunmap_atomic(mem);

out:
        return err;
}

static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
        struct brd_device *brd = bio->bi_disk->private_data;
        struct bio_vec bvec;
        sector_t sector;
        struct bvec_iter iter;

        sector = bio->bi_iter.bi_sector;
        if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
                goto io_error;

        bio_for_each_segment(bvec, bio, iter) {
                unsigned int len = bvec.bv_len;
                int err;

                err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
                                        op_is_write(bio_op(bio)), sector);
                if (err)
                        goto io_error;
                sector += len >> SECTOR_SHIFT;
        }

        bio_endio(bio);
        return BLK_QC_T_NONE;
io_error:
        bio_io_error(bio);
        return BLK_QC_T_NONE;
}

static int brd_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, bool is_write)
{
        struct brd_device *brd = bdev->bd_disk->private_data;
        int err;

        if (PageTransHuge(page))
                return -ENOTSUPP;
        err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
        page_endio(page, is_write, err);
        return err;
}

#ifdef CONFIG_BLK_DEV_RAM_DAX
static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
                long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct page *page;

        if (!brd)
                return -ENODEV;
        page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT);
        if (!page)
                return -ENOSPC;
        *kaddr = page_address(page);
        *pfn = page_to_pfn_t(page);

        return 1;
}

static long brd_dax_direct_access(struct dax_device *dax_dev,
                pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct brd_device *brd = dax_get_private(dax_dev);

        return __brd_direct_access(brd, pgoff, nr_pages, kaddr, pfn);
}

static size_t brd_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        return copy_from_iter(addr, bytes, i);
}

static const struct dax_operations brd_dax_ops = {
        .direct_access = brd_dax_direct_access,
        .copy_from_iter = brd_dax_copy_from_iter,
};
#endif
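/*
 * Note on the DAX path above (commentary, not driver code): despite taking
 * an nr_pages argument, __brd_direct_access() only ever maps the single
 * page at pgoff and returns 1, so DAX callers make forward progress one
 * page at a time. pgoff is converted to a sector by shifting left by
 * PAGE_SECTORS_SHIFT, e.g. page offset 2 becomes sector 16 with 4 KiB pages.
 */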
static const struct block_device_operations brd_fops = {
        .owner =                THIS_MODULE,
        .rw_page =              brd_rw_page,
};

/*
 * And now the module code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Number of minors to reserve between devices");

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
        rd_size = simple_strtol(str, NULL, 0);
        return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);
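/*
 * Parameter example (illustrative shell command, not part of the driver):
 *
 *      modprobe brd rd_nr=4 rd_size=16384 max_part=4
 *
 * should create /dev/ram0 through /dev/ram3, each 16 MiB (rd_size is in
 * KiB), with four minor numbers reserved per device for partitions.
 */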
static struct brd_device *brd_alloc(int i)
{
        struct brd_device *brd;
        struct gendisk *disk;

        brd = kzalloc(sizeof(*brd), GFP_KERNEL);
        if (!brd)
                goto out;
        brd->brd_number         = i;
        spin_lock_init(&brd->brd_lock);
        INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

        brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
        if (!brd->brd_queue)
                goto out_free_dev;

        blk_queue_make_request(brd->brd_queue, brd_make_request);
        blk_queue_max_hw_sectors(brd->brd_queue, 1024);

        /*
         * This is so fdisk will align partitions on 4k, because the
         * direct_access API needs 4k alignment, returning a PFN.
         * (This is only a problem on very small devices <= 4M;
         * otherwise fdisk will align on 1M. Regardless, this call
         * is harmless.)
         */
        blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
        disk = brd->brd_disk = alloc_disk(max_part);
        if (!disk)
                goto out_free_queue;
        disk->major             = RAMDISK_MAJOR;
        disk->first_minor       = i * max_part;
        disk->fops              = &brd_fops;
        disk->private_data      = brd;
        disk->queue             = brd->brd_queue;
        disk->flags             = GENHD_FL_EXT_DEVT;
        sprintf(disk->disk_name, "ram%d", i);
        set_capacity(disk, rd_size * 2);

#ifdef CONFIG_BLK_DEV_RAM_DAX
        queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
        brd->dax_dev = alloc_dax(brd, disk->disk_name, &brd_dax_ops);
        if (!brd->dax_dev)
                goto out_free_inode;
#endif

        return brd;

#ifdef CONFIG_BLK_DEV_RAM_DAX
out_free_inode:
        kill_dax(brd->dax_dev);
        put_dax(brd->dax_dev);
#endif
out_free_queue:
        blk_cleanup_queue(brd->brd_queue);
out_free_dev:
        kfree(brd);
out:
        return NULL;
}

static void brd_free(struct brd_device *brd)
{
        put_disk(brd->brd_disk);
        blk_cleanup_queue(brd->brd_queue);
        brd_free_pages(brd);
        kfree(brd);
}

static struct brd_device *brd_init_one(int i, bool *new)
{
        struct brd_device *brd;

        *new = false;
        list_for_each_entry(brd, &brd_devices, brd_list) {
                if (brd->brd_number == i)
                        goto out;
        }

        brd = brd_alloc(i);
        if (brd) {
                add_disk(brd->brd_disk);
                list_add_tail(&brd->brd_list, &brd_devices);
        }
        *new = true;
out:
        return brd;
}

static void brd_del_one(struct brd_device *brd)
{
        list_del(&brd->brd_list);
#ifdef CONFIG_BLK_DEV_RAM_DAX
        kill_dax(brd->dax_dev);
        put_dax(brd->dax_dev);
#endif
        del_gendisk(brd->brd_disk);
        brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
        struct brd_device *brd;
        struct kobject *kobj;
        bool new;

        mutex_lock(&brd_devices_mutex);
        brd = brd_init_one(MINOR(dev) / max_part, &new);
        kobj = brd ? get_disk(brd->brd_disk) : NULL;
        mutex_unlock(&brd_devices_mutex);

        if (new)
                *part = 0;

        return kobj;
}
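/*
 * Minor-number layout example for brd_probe() (commentary only): each device
 * reserves max_part consecutive minors starting at i * max_part. With
 * max_part == 4, minors 0-3 belong to ram0 and 4-7 to ram1, so opening a
 * node created with "mknod b 1 9" probes device index 9 / 4 = 2.
 */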
static int __init brd_init(void)
{
        struct brd_device *brd, *next;
        int i;

        /*
         * The brd module now has a feature to instantiate the underlying
         * device structure on demand, provided that a device node is
         * accessed.
         *
         * (1) if rd_nr is specified, create that many upfront; otherwise
         *     it defaults to CONFIG_BLK_DEV_RAM_COUNT
         * (2) User can further extend brd devices by creating dev nodes
         *     themselves and have the kernel automatically instantiate the
         *     actual device on demand. Example:
         *              mknod /path/devnod_name b 1 X   # 1 is the rd major
         *              fdisk -l /path/devnod_name
         *     If (X / max_part) was not already created it will be created
         *     dynamically.
         */

        if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
                return -EIO;

        if (unlikely(!max_part))
                max_part = 1;

        for (i = 0; i < rd_nr; i++) {
                brd = brd_alloc(i);
                if (!brd)
                        goto out_free;
                list_add_tail(&brd->brd_list, &brd_devices);
        }

        /* point of no return */

        list_for_each_entry(brd, &brd_devices, brd_list)
                add_disk(brd->brd_disk);

        blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
                                  THIS_MODULE, brd_probe, NULL, NULL);

        pr_info("brd: module loaded\n");
        return 0;

out_free:
        list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
                list_del(&brd->brd_list);
                brd_free(brd);
        }
        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

        pr_info("brd: module NOT loaded !!!\n");
        return -ENOMEM;
}

static void __exit brd_exit(void)
{
        struct brd_device *brd, *next;

        list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
                brd_del_one(brd);

        blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

        pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);
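/*
 * End-to-end usage sketch (illustrative shell commands, not driver code):
 *
 *      modprobe brd rd_nr=1 rd_size=16384      # one 16 MiB RAM disk
 *      mkfs.ext4 /dev/ram0
 *      mount /dev/ram0 /mnt
 *      umount /mnt && rmmod brd                # contents are discarded
 */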