1 /* 2 * block2mtd.c - create an mtd from a block device 3 * 4 * Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk> 5 * Copyright (C) 2004-2006 Joern Engel <joern@wh.fh-wedel.de> 6 * 7 * Licence: GPL 8 */ 9 10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 11 12 /* 13 * When the first attempt at device initialization fails, we may need to 14 * wait a little bit and retry. This timeout, by default 3 seconds, gives 15 * device time to start up. Required on BCM2708 and a few other chipsets. 16 */ 17 #define MTD_DEFAULT_TIMEOUT 3 18 19 #include <linux/module.h> 20 #include <linux/delay.h> 21 #include <linux/fs.h> 22 #include <linux/blkdev.h> 23 #include <linux/backing-dev.h> 24 #include <linux/bio.h> 25 #include <linux/pagemap.h> 26 #include <linux/list.h> 27 #include <linux/init.h> 28 #include <linux/mtd/mtd.h> 29 #include <linux/mutex.h> 30 #include <linux/mount.h> 31 #include <linux/slab.h> 32 #include <linux/major.h> 33 34 /* Maximum number of comma-separated items in the 'block2mtd=' parameter */ 35 #define BLOCK2MTD_PARAM_MAX_COUNT 3 36 37 /* Info for the block device */ 38 struct block2mtd_dev { 39 struct list_head list; 40 struct bdev_handle *bdev_handle; 41 struct mtd_info mtd; 42 struct mutex write_mutex; 43 }; 44 45 46 /* Static info about the MTD, used in cleanup_module */ 47 static LIST_HEAD(blkmtd_device_list); 48 49 50 static struct page *page_read(struct address_space *mapping, pgoff_t index) 51 { 52 return read_mapping_page(mapping, index, NULL); 53 } 54 55 /* erase a specified part of the device */ 56 static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len) 57 { 58 struct address_space *mapping = 59 dev->bdev_handle->bdev->bd_inode->i_mapping; 60 struct page *page; 61 pgoff_t index = to >> PAGE_SHIFT; // page index 62 int pages = len >> PAGE_SHIFT; 63 u_long *p; 64 u_long *max; 65 66 while (pages) { 67 page = page_read(mapping, index); 68 if (IS_ERR(page)) 69 return PTR_ERR(page); 70 71 max = page_address(page) + PAGE_SIZE; 72 for (p=page_address(page); p<max; p++) 73 if (*p != -1UL) { 74 lock_page(page); 75 memset(page_address(page), 0xff, PAGE_SIZE); 76 set_page_dirty(page); 77 unlock_page(page); 78 balance_dirty_pages_ratelimited(mapping); 79 break; 80 } 81 82 put_page(page); 83 pages--; 84 index++; 85 } 86 return 0; 87 } 88 static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr) 89 { 90 struct block2mtd_dev *dev = mtd->priv; 91 size_t from = instr->addr; 92 size_t len = instr->len; 93 int err; 94 95 mutex_lock(&dev->write_mutex); 96 err = _block2mtd_erase(dev, from, len); 97 mutex_unlock(&dev->write_mutex); 98 if (err) 99 pr_err("erase failed err = %d\n", err); 100 101 return err; 102 } 103 104 105 static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len, 106 size_t *retlen, u_char *buf) 107 { 108 struct block2mtd_dev *dev = mtd->priv; 109 struct address_space *mapping = 110 dev->bdev_handle->bdev->bd_inode->i_mapping; 111 struct page *page; 112 pgoff_t index = from >> PAGE_SHIFT; 113 int offset = from & (PAGE_SIZE-1); 114 int cpylen; 115 116 while (len) { 117 if ((offset + len) > PAGE_SIZE) 118 cpylen = PAGE_SIZE - offset; // multiple pages 119 else 120 cpylen = len; // this page 121 len = len - cpylen; 122 123 page = page_read(mapping, index); 124 if (IS_ERR(page)) 125 return PTR_ERR(page); 126 127 memcpy(buf, page_address(page) + offset, cpylen); 128 put_page(page); 129 130 if (retlen) 131 *retlen += cpylen; 132 buf += cpylen; 133 offset = 0; 134 index++; 135 } 136 return 0; 137 } 138 139 140 /* write data to the underlying device */ 141 static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf, 142 loff_t to, size_t len, size_t *retlen) 143 { 144 struct page *page; 145 struct address_space *mapping = 146 dev->bdev_handle->bdev->bd_inode->i_mapping; 147 pgoff_t index = to >> PAGE_SHIFT; // page index 148 int offset = to & ~PAGE_MASK; // page offset 149 int cpylen; 150 151 while (len) { 152 if ((offset+len) > PAGE_SIZE) 153 cpylen = PAGE_SIZE - offset; // multiple pages 154 else 155 cpylen = len; // this page 156 len = len - cpylen; 157 158 page = page_read(mapping, index); 159 if (IS_ERR(page)) 160 return PTR_ERR(page); 161 162 if (memcmp(page_address(page)+offset, buf, cpylen)) { 163 lock_page(page); 164 memcpy(page_address(page) + offset, buf, cpylen); 165 set_page_dirty(page); 166 unlock_page(page); 167 balance_dirty_pages_ratelimited(mapping); 168 } 169 put_page(page); 170 171 if (retlen) 172 *retlen += cpylen; 173 174 buf += cpylen; 175 offset = 0; 176 index++; 177 } 178 return 0; 179 } 180 181 182 static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len, 183 size_t *retlen, const u_char *buf) 184 { 185 struct block2mtd_dev *dev = mtd->priv; 186 int err; 187 188 mutex_lock(&dev->write_mutex); 189 err = _block2mtd_write(dev, buf, to, len, retlen); 190 mutex_unlock(&dev->write_mutex); 191 if (err > 0) 192 err = 0; 193 return err; 194 } 195 196 197 /* sync the device - wait until the write queue is empty */ 198 static void block2mtd_sync(struct mtd_info *mtd) 199 { 200 struct block2mtd_dev *dev = mtd->priv; 201 sync_blockdev(dev->bdev_handle->bdev); 202 return; 203 } 204 205 206 static void block2mtd_free_device(struct block2mtd_dev *dev) 207 { 208 if (!dev) 209 return; 210 211 kfree(dev->mtd.name); 212 213 if (dev->bdev_handle) { 214 invalidate_mapping_pages( 215 dev->bdev_handle->bdev->bd_inode->i_mapping, 0, -1); 216 bdev_release(dev->bdev_handle); 217 } 218 219 kfree(dev); 220 } 221 222 /* 223 * This function is marked __ref because it calls the __init marked 224 * early_lookup_bdev when called from the early boot code. 225 */ 226 static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname, 227 blk_mode_t mode, int timeout, struct block2mtd_dev *dev) 228 { 229 struct bdev_handle *bdev_handle = ERR_PTR(-ENODEV); 230 #ifndef MODULE 231 int i; 232 233 /* 234 * We can't use early_lookup_bdev from a running system. 235 */ 236 if (system_state >= SYSTEM_RUNNING) 237 return bdev_handle; 238 239 /* 240 * We might not have the root device mounted at this point. 241 * Try to resolve the device name by other means. 242 */ 243 for (i = 0; i <= timeout; i++) { 244 dev_t devt; 245 246 if (i) 247 /* 248 * Calling wait_for_device_probe in the first loop 249 * was not enough, sleep for a bit in subsequent 250 * go-arounds. 251 */ 252 msleep(1000); 253 wait_for_device_probe(); 254 255 if (!early_lookup_bdev(devname, &devt)) { 256 bdev_handle = bdev_open_by_dev(devt, mode, dev, NULL); 257 if (!IS_ERR(bdev_handle)) 258 break; 259 } 260 } 261 #endif 262 return bdev_handle; 263 } 264 265 static struct block2mtd_dev *add_device(char *devname, int erase_size, 266 char *label, int timeout) 267 { 268 const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE; 269 struct bdev_handle *bdev_handle; 270 struct block_device *bdev; 271 struct block2mtd_dev *dev; 272 char *name; 273 274 if (!devname) 275 return NULL; 276 277 dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL); 278 if (!dev) 279 return NULL; 280 281 /* Get a handle on the device */ 282 bdev_handle = bdev_open_by_path(devname, mode, dev, NULL); 283 if (IS_ERR(bdev_handle)) 284 bdev_handle = mdtblock_early_get_bdev(devname, mode, timeout, 285 dev); 286 if (IS_ERR(bdev_handle)) { 287 pr_err("error: cannot open device %s\n", devname); 288 goto err_free_block2mtd; 289 } 290 dev->bdev_handle = bdev_handle; 291 bdev = bdev_handle->bdev; 292 293 if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) { 294 pr_err("attempting to use an MTD device as a block device\n"); 295 goto err_free_block2mtd; 296 } 297 298 if ((long)bdev->bd_inode->i_size % erase_size) { 299 pr_err("erasesize must be a divisor of device size\n"); 300 goto err_free_block2mtd; 301 } 302 303 mutex_init(&dev->write_mutex); 304 305 /* Setup the MTD structure */ 306 /* make the name contain the block device in */ 307 if (!label) 308 name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname); 309 else 310 name = kstrdup(label, GFP_KERNEL); 311 if (!name) 312 goto err_destroy_mutex; 313 314 dev->mtd.name = name; 315 316 dev->mtd.size = bdev->bd_inode->i_size & PAGE_MASK; 317 dev->mtd.erasesize = erase_size; 318 dev->mtd.writesize = 1; 319 dev->mtd.writebufsize = PAGE_SIZE; 320 dev->mtd.type = MTD_RAM; 321 dev->mtd.flags = MTD_CAP_RAM; 322 dev->mtd._erase = block2mtd_erase; 323 dev->mtd._write = block2mtd_write; 324 dev->mtd._sync = block2mtd_sync; 325 dev->mtd._read = block2mtd_read; 326 dev->mtd.priv = dev; 327 dev->mtd.owner = THIS_MODULE; 328 329 if (mtd_device_register(&dev->mtd, NULL, 0)) { 330 /* Device didn't get added, so free the entry */ 331 goto err_destroy_mutex; 332 } 333 334 list_add(&dev->list, &blkmtd_device_list); 335 pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n", 336 dev->mtd.index, 337 label ? label : dev->mtd.name + strlen("block2mtd: "), 338 dev->mtd.erasesize >> 10, dev->mtd.erasesize); 339 return dev; 340 341 err_destroy_mutex: 342 mutex_destroy(&dev->write_mutex); 343 err_free_block2mtd: 344 block2mtd_free_device(dev); 345 return NULL; 346 } 347 348 349 /* This function works similar to reguler strtoul. In addition, it 350 * allows some suffixes for a more human-readable number format: 351 * ki, Ki, kiB, KiB - multiply result with 1024 352 * Mi, MiB - multiply result with 1024^2 353 * Gi, GiB - multiply result with 1024^3 354 */ 355 static int ustrtoul(const char *cp, char **endp, unsigned int base) 356 { 357 unsigned long result = simple_strtoul(cp, endp, base); 358 switch (**endp) { 359 case 'G' : 360 result *= 1024; 361 fallthrough; 362 case 'M': 363 result *= 1024; 364 fallthrough; 365 case 'K': 366 case 'k': 367 result *= 1024; 368 /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */ 369 if ((*endp)[1] == 'i') { 370 if ((*endp)[2] == 'B') 371 (*endp) += 3; 372 else 373 (*endp) += 2; 374 } 375 } 376 return result; 377 } 378 379 380 static int parse_num(size_t *num, const char *token) 381 { 382 char *endp; 383 size_t n; 384 385 n = (size_t) ustrtoul(token, &endp, 0); 386 if (*endp) 387 return -EINVAL; 388 389 *num = n; 390 return 0; 391 } 392 393 394 static inline void kill_final_newline(char *str) 395 { 396 char *newline = strrchr(str, '\n'); 397 if (newline && !newline[1]) 398 *newline = 0; 399 } 400 401 402 #ifndef MODULE 403 static int block2mtd_init_called = 0; 404 /* 80 for device, 12 for erase size */ 405 static char block2mtd_paramline[80 + 12]; 406 #endif 407 408 static int block2mtd_setup2(const char *val) 409 { 410 /* 80 for device, 12 for erase size, 80 for name, 8 for timeout */ 411 char buf[80 + 12 + 80 + 8]; 412 char *str = buf; 413 char *token[BLOCK2MTD_PARAM_MAX_COUNT]; 414 char *name; 415 char *label = NULL; 416 size_t erase_size = PAGE_SIZE; 417 unsigned long timeout = MTD_DEFAULT_TIMEOUT; 418 int i, ret; 419 420 if (strnlen(val, sizeof(buf)) >= sizeof(buf)) { 421 pr_err("parameter too long\n"); 422 return 0; 423 } 424 425 strcpy(str, val); 426 kill_final_newline(str); 427 428 for (i = 0; i < BLOCK2MTD_PARAM_MAX_COUNT; i++) 429 token[i] = strsep(&str, ","); 430 431 if (str) { 432 pr_err("too many arguments\n"); 433 return 0; 434 } 435 436 if (!token[0]) { 437 pr_err("no argument\n"); 438 return 0; 439 } 440 441 name = token[0]; 442 if (strlen(name) + 1 > 80) { 443 pr_err("device name too long\n"); 444 return 0; 445 } 446 447 /* Optional argument when custom label is used */ 448 if (token[1] && strlen(token[1])) { 449 ret = parse_num(&erase_size, token[1]); 450 if (ret) { 451 pr_err("illegal erase size\n"); 452 return 0; 453 } 454 } 455 456 if (token[2]) { 457 label = token[2]; 458 pr_info("Using custom MTD label '%s' for dev %s\n", label, name); 459 } 460 461 add_device(name, erase_size, label, timeout); 462 463 return 0; 464 } 465 466 467 static int block2mtd_setup(const char *val, const struct kernel_param *kp) 468 { 469 #ifdef MODULE 470 return block2mtd_setup2(val); 471 #else 472 /* If more parameters are later passed in via 473 /sys/module/block2mtd/parameters/block2mtd 474 and block2mtd_init() has already been called, 475 we can parse the argument now. */ 476 477 if (block2mtd_init_called) 478 return block2mtd_setup2(val); 479 480 /* During early boot stage, we only save the parameters 481 here. We must parse them later: if the param passed 482 from kernel boot command line, block2mtd_setup() is 483 called so early that it is not possible to resolve 484 the device (even kmalloc() fails). Deter that work to 485 block2mtd_setup2(). */ 486 487 strscpy(block2mtd_paramline, val, sizeof(block2mtd_paramline)); 488 489 return 0; 490 #endif 491 } 492 493 494 module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200); 495 MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,[<erasesize>][,<label>]]\""); 496 497 static int __init block2mtd_init(void) 498 { 499 int ret = 0; 500 501 #ifndef MODULE 502 if (strlen(block2mtd_paramline)) 503 ret = block2mtd_setup2(block2mtd_paramline); 504 block2mtd_init_called = 1; 505 #endif 506 507 return ret; 508 } 509 510 511 static void block2mtd_exit(void) 512 { 513 struct list_head *pos, *next; 514 515 /* Remove the MTD devices */ 516 list_for_each_safe(pos, next, &blkmtd_device_list) { 517 struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list); 518 block2mtd_sync(&dev->mtd); 519 mtd_device_unregister(&dev->mtd); 520 mutex_destroy(&dev->write_mutex); 521 pr_info("mtd%d: [%s] removed\n", 522 dev->mtd.index, 523 dev->mtd.name + strlen("block2mtd: ")); 524 list_del(&dev->list); 525 block2mtd_free_device(dev); 526 } 527 } 528 529 late_initcall(block2mtd_init); 530 module_exit(block2mtd_exit); 531 532 MODULE_LICENSE("GPL"); 533 MODULE_AUTHOR("Joern Engel <joern@lazybastard.org>"); 534 MODULE_DESCRIPTION("Emulate an MTD using a block device"); 535