/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <linux/radix-tree.h>
#include <linux/buffer_head.h> /* invalidate_bh_lrus() */
#include <linux/slab.h>

#include <asm/uaccess.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)

/*
 * Each block ramdisk device has a radix_tree brd_pages that stores the
 * pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int		brd_number;
	int		brd_refcnt;
	loff_t		brd_offset;
	loff_t		brd_sizelimit;
	unsigned	brd_blocksize;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (ie. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}
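/*
 * Worked example of the sector-to-page mapping used above (a sketch,
 * assuming the common PAGE_SIZE of 4096, i.e. PAGE_SECTORS_SHIFT == 3
 * and PAGE_SECTORS == 8). brd_mapping_example() is purely illustrative
 * and kept under #if 0 so it is never built.
 */
#if 0
static void brd_mapping_example(void)
{
	sector_t sector = 12345;
	/* radix-tree index: 12345 >> 3 == 1543 */
	pgoff_t idx = sector >> PAGE_SECTORS_SHIFT;
	/* byte offset within that page: (12345 & 7) << 9 == 512 */
	unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
}
#endif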
/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support XIP and highmem, because our ->direct_access
	 * routine for XIP must return memory that is always addressable.
	 * If XIP was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_XIP
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	} else
		page->index = idx;
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

static void brd_free_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;
	pgoff_t idx;

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page = radix_tree_delete(&brd->brd_pages, idx);
	spin_unlock(&brd->brd_lock);
	if (page)
		__free_page(page);
}

static void brd_zero_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = brd_lookup_page(brd, sector);
	if (page)
		clear_highpage(page);
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOMEM;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOMEM;
	}
	return 0;
}

static void discard_from_brd(struct brd_device *brd,
			sector_t sector, size_t n)
{
	while (n >= PAGE_SIZE) {
		/*
		 * Don't want to actually discard pages here because
		 * re-allocating the pages can result in writeback
		 * deadlocks under heavy load.
		 */
		if (0)
			brd_free_page(brd, sector);
		else
			brd_zero_page(brd, sector);
		sector += PAGE_SIZE >> SECTOR_SHIFT;
		n -= PAGE_SIZE;
	}
}
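/*
 * Illustration of the page-straddling case that copy_to_brd_setup()
 * above pre-allocates for and that copy_to_brd()/copy_from_brd() below
 * handle (hypothetical numbers, assuming PAGE_SIZE == 4096): a 1024-byte
 * write starting at sector 7 begins at byte offset (7 & 7) << 9 == 3584
 * in page 0, so copy = min(1024, 4096 - 3584) == 512 bytes land there
 * and the remaining 512 bytes go to offset 0 of page 1. Both pages must
 * already exist, because the copy itself runs under kmap_atomic() and
 * must not sleep.
 */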
/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page, KM_USER1);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst, KM_USER1);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page, KM_USER1);
		memcpy(dst, src, copy);
		kunmap_atomic(dst, KM_USER1);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page, KM_USER1);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src, KM_USER1);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page, KM_USER1);
			memcpy(dst, src, copy);
			kunmap_atomic(src, KM_USER1);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (rw != READ) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page, KM_USER0);
	if (rw == READ) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem, KM_USER0);

out:
	return err;
}

static int brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	int rw;
	struct bio_vec *bvec;
	sector_t sector;
	int i;
	int err = -EIO;

	sector = bio->bi_sector;
	if (sector + (bio->bi_size >> SECTOR_SHIFT) >
						get_capacity(bdev->bd_disk))
		goto out;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		err = 0;
		discard_from_brd(brd, sector, bio->bi_size);
		goto out;
	}

	rw = bio_rw(bio);
	if (rw == READA)
		rw = READ;

	bio_for_each_segment(bvec, bio, i) {
		unsigned int len = bvec->bv_len;
		err = brd_do_bvec(brd, bvec->bv_page, len,
					bvec->bv_offset, rw, sector);
		if (err)
			break;
		sector += len >> SECTOR_SHIFT;
	}

out:
	bio_endio(bio, err);

	return 0;
}
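/*
 * Example of the per-segment walk in brd_make_request() above
 * (hypothetical bio, assuming 512-byte sectors): a two-segment write
 * bio at sector 100 with bv_len values 4096 and 1024 is processed as
 * brd_do_bvec(..., 4096, ..., 100) followed by
 * brd_do_bvec(..., 1024, ..., 108), since each segment advances sector
 * by bv_len >> SECTOR_SHIFT (4096 >> 9 == 8).
 */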
#ifdef CONFIG_BLK_DEV_XIP
static int brd_direct_access(struct block_device *bdev, sector_t sector,
			void **kaddr, unsigned long *pfn)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	if (sector & (PAGE_SECTORS-1))
		return -EINVAL;
	if (sector + PAGE_SECTORS > get_capacity(bdev->bd_disk))
		return -ERANGE;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOMEM;
	*kaddr = page_address(page);
	*pfn = page_to_pfn(page);

	return 0;
}
#endif

static int brd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int error;
	struct brd_device *brd = bdev->bd_disk->private_data;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;

	/*
	 * ram device BLKFLSBUF has special semantics: we want to actually
	 * release and destroy the ramdisk data.
	 */
	lock_kernel();
	mutex_lock(&bdev->bd_mutex);
	error = -EBUSY;
	if (bdev->bd_openers <= 1) {
		/*
		 * Invalidate the cache first, so it isn't written
		 * back to the device.
		 *
		 * Another thread might instantiate more buffercache here,
		 * but there is not much we can do to close that race.
		 */
		invalidate_bh_lrus();
		truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
		brd_free_pages(brd);
		error = 0;
	}
	mutex_unlock(&bdev->bd_mutex);
	unlock_kernel();

	return error;
}

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.ioctl =		brd_ioctl,
#ifdef CONFIG_BLK_DEV_XIP
	.direct_access =	brd_direct_access,
#endif
};

/*
 * And now the module code and kernel interface.
 */
static int rd_nr;
int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
static int max_part;
static int part_shift;
module_param(rd_nr, int, 0);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
module_param(rd_size, int, 0);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
module_param(max_part, int, 0);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif
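/*
 * Example usage of the parameters above (illustrative values only):
 *
 *	modprobe brd rd_nr=4 rd_size=16384 max_part=4
 *
 * creates /dev/ram0 through /dev/ram3, each 16 MB, with
 * part_shift = fls(4) == 3, i.e. 8 minors reserved per disk for
 * partitions. On a non-modular kernel, booting with
 * "ramdisk_size=65536" makes each RAM disk 64 MB instead.
 */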
/*
 * The device scheme is derived from loop.c. Keep them in synch where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number = i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;
	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
	brd->brd_queue->limits.discard_zeroes_data = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);

	disk = brd->brd_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;
	disk->major = RAMDISK_MAJOR;
	disk->first_minor = i << part_shift;
	disk->fops = &brd_fops;
	disk->private_data = brd;
	disk->queue = brd->brd_queue;
	disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
	sprintf(disk->disk_name, "ram%d", i);
	set_capacity(disk, rd_size * 2);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

static struct brd_device *brd_init_one(int i)
{
	struct brd_device *brd;

	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(dev & MINORMASK);
	kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
	mutex_unlock(&brd_devices_mutex);

	*part = 0;
	return kobj;
}
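/*
 * Example of the on-demand path through brd_probe() above (illustrative
 * shell commands): with the module loaded and rd_nr unset,
 *
 *	mknod /dev/ram5 b 1 5
 *	dd if=/dev/zero of=/dev/ram5 bs=4k count=1
 *
 * the first open of the node enters brd_probe() via the block region
 * registered in brd_init() below, and brd_init_one() instantiates ram5.
 */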
static int __init brd_init(void)
{
	int i, nr;
	unsigned long range;
	struct brd_device *brd, *next;

	/*
	 * brd module now has a feature to instantiate underlying device
	 * structure on-demand, provided that its device node is accessed.
	 * However, this will not work well with user space tools that don't
	 * know about such a "feature". In order not to break any existing
	 * tools, we do the following:
	 *
	 * (1) if rd_nr is specified, create that many upfront, and this
	 *     also becomes a hard limit.
	 * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT
	 *     devices on module load; the user can further extend brd by
	 *     creating device nodes themselves and having the kernel
	 *     automatically instantiate the actual device on-demand.
	 */

	part_shift = 0;
	if (max_part > 0)
		part_shift = fls(max_part);

	if (rd_nr > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (rd_nr) {
		nr = rd_nr;
		range = rd_nr;
	} else {
		nr = CONFIG_BLK_DEV_RAM_COUNT;
		range = 1UL << (MINORBITS - part_shift);
	}

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	for (i = 0; i < nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), range,
				THIS_MODULE, brd_probe, NULL, NULL);

	printk(KERN_INFO "brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	unsigned long range;
	struct brd_device *brd, *next;

	range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift);

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), range);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
}

module_init(brd_init);
module_exit(brd_exit);
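/*
 * Worked example of the minor-range arithmetic in brd_init() above
 * (illustrative values): with max_part == 4, part_shift becomes
 * fls(4) == 3, so disks get first_minor values 0, 8, 16, ... and at
 * most 1UL << (MINORBITS - 3) == 131072 devices are allowed; when
 * rd_nr is unset, brd_probe() is registered over that whole range so
 * any ramN node can be instantiated on-demand.
 */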