/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём),
 *         Frank Haverkamp
 */

/*
 * This file includes UBI initialization and building of UBI devices.
 *
 * When UBI is initialized, it attaches all the MTD devices specified as the
 * module load parameters or the kernel boot parameters. If MTD devices were
 * not specified, UBI does not attach any MTD device, but it is possible to do
 * so later using the "UBI control device".
 *
 * At the moment we only attach UBI devices by scanning, which will become a
 * bottleneck when flashes reach a certain large size. Then one may improve UBI
 * and add other methods, although it does not seem to be easy to do.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/reboot.h>
#include <linux/kernel.h>
#include "ubi.h"

/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD character device node path, MTD device name, or MTD device number
 *        string
 * @vid_hdr_offs: VID header offset
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int vid_hdr_offs;
};

/* Number of elements set in the @mtd_dev_param array */
static int __initdata mtd_devs;

/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];

/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};

/* All UBI devices in system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);

/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class,
				struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}

/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static struct class_attribute ubi_version =
	__ATTR(version, S_IRUGO, ubi_version_show, NULL);

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);

/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 *
 * This is a helper function which notifies all subscribers about a volume
 * change event (creation, removal, re-sizing, re-naming, updating). Returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);
	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}

/**
 * ubi_notify_all - send a notification to all volumes.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 * @nb: the notifier to call
 *
 * This function walks all volumes of UBI device @ubi and sends the @ntype
 * notification for each volume. If @nb is %NULL, then all registered notifiers
 * are called, otherwise only the @nb notifier is called. Returns the number of
 * sent notifications.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/*
		 * Since @ubi->device_mutex is locked, and we are not going to
		 * change @ubi->volumes, we do not have to lock
		 * @ubi->volumes_lock.
		 */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			blocking_notifier_call_chain(&ubi_notifiers, ntype,
						     &nt);
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}

/**
 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
 * @nb: the notifier to call
 *
 * This function walks all UBI devices and volumes and sends the
 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
 * registered notifiers are called, otherwise only the @nb notifier is called.
 * Returns the number of sent notifications.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	/*
	 * Since the @ubi_devices_mutex is locked, and we are not going to
	 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}

/**
 * ubi_get_device - get UBI device.
 * @ubi_num: UBI device number
 *
 * This function returns the UBI device description object for UBI device
 * number @ubi_num, or %NULL if the device does not exist. This function
 * increases the device reference count to prevent removal of the device. In
 * other words, the device cannot be removed if its reference count is not
 * zero.
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}

/**
 * ubi_put_device - drop an UBI device reference.
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}

/**
 * ubi_get_by_major - get UBI device by character device major number.
 * @major: major number
 *
 * This function is similar to 'ubi_get_device()', but it searches the device
 * by its major number.
 */
struct ubi_device *ubi_get_by_major(int major)
{
	int i;
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_assert(ubi->ref_count >= 0);
			ubi->ref_count += 1;
			get_device(&ubi->dev);
			spin_unlock(&ubi_devices_lock);
			return ubi;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return NULL;
}

/**
 * ubi_major2num - get UBI device number by character device major number.
 * @major: major number
 *
 * This function searches for the UBI device number by its character device
 * major number. If the UBI device was not found, this function returns
 * -ENODEV, otherwise the UBI device number is returned.
 */
int ubi_major2num(int major)
{
	int i, ubi_num = -ENODEV;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_num = ubi->ubi_num;
			break;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return ubi_num;
}

/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The below code looks weird, but it actually makes sense. We get the
	 * UBI device reference from the contained 'struct ubi_device'. But it
	 * is unclear if the device was removed or not yet. Indeed, if the
	 * device was removed before we increased its reference count,
	 * 'ubi_get_device()' will return %NULL and we fail with -ENODEV.
	 *
	 * Remember, 'struct ubi_device' is freed in the release function, so
	 * we still can use 'ubi->ubi_num'.
	 */
	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else
		ret = -EINVAL;

	ubi_put_device(ubi);
	return ret;
}

static void dev_release(struct device *dev)
{
	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);

	kfree(ubi);
}

/**
 * ubi_sysfs_init - initialize sysfs for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
{
	int err;

	ubi->dev.release = dev_release;
	ubi->dev.devt = ubi->cdev.dev;
	ubi->dev.class = ubi_class;
	dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
	err = device_register(&ubi->dev);
	if (err)
		return err;

	*ref = 1;
	err = device_create_file(&ubi->dev, &dev_eraseblock_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_volumes_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_ec);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bad_peb_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_vol_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_min_io_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bgt_enabled);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_mtd_num);
	return err;
}

/**
 * ubi_sysfs_close - close sysfs for an UBI device.
 * @ubi: UBI device description object
 */
static void ubi_sysfs_close(struct ubi_device *ubi)
{
	device_remove_file(&ubi->dev, &dev_mtd_num);
	device_remove_file(&ubi->dev, &dev_bgt_enabled);
	device_remove_file(&ubi->dev, &dev_min_io_size);
	device_remove_file(&ubi->dev, &dev_max_vol_count);
	device_remove_file(&ubi->dev, &dev_bad_peb_count);
	device_remove_file(&ubi->dev, &dev_reserved_for_bad);
	device_remove_file(&ubi->dev, &dev_max_ec);
	device_remove_file(&ubi->dev, &dev_volumes_count);
	device_remove_file(&ubi->dev, &dev_total_eraseblocks);
	device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
	device_remove_file(&ubi->dev, &dev_eraseblock_size);
	device_unregister(&ubi->dev);
}

/**
 * kill_volumes - destroy all user volumes.
 * @ubi: UBI device description object
 */
static void kill_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i])
			ubi_free_volume(ubi, ubi->volumes[i]);
}

/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken, otherwise set to %0
 *
 * This function initializes various user interfaces for an UBI device. If the
 * initialization fails at an early stage, this function frees all the
 * resources it allocated, returns an error, and @ref is set to %0. However,
 * if the initialization fails after the UBI device was registered in the
 * driver core subsystem, this function takes a reference to @ubi->dev, because
 * otherwise the release function ('dev_release()') would free the whole @ubi
 * object. The @ref argument is set to %1 in this case. The caller has to put
 * this reference.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int uif_init(struct ubi_device *ubi, int *ref)
{
	int i, err;
	dev_t dev;

	*ref = 0;
	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for the UBI character devices are allocated
	 * dynamically. Major numbers of volume character devices are
	 * equivalent to ones of the corresponding UBI character device. Minor
	 * numbers of UBI character devices are 0, while minor numbers of
	 * volume character devices start from 1. Thus, we allocate one major
	 * number and ubi->vtbl_slots + 1 minor numbers.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err("cannot register UBI character devices");
		return err;
	}

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	err = cdev_add(&ubi->cdev, dev, 1);
	if (err) {
		ubi_err("cannot add character device");
		goto out_unreg;
	}

	err = ubi_sysfs_init(ubi, ref);
	if (err)
		goto out_sysfs;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err("cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
out_sysfs:
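	/*
	 * If a reference to @ubi->dev was taken ('device_register()'
	 * succeeded), take one more reference before 'ubi_sysfs_close()'
	 * un-registers the device, so that the @ubi object is not freed
	 * before the caller drops the reference with 'put_device()'.
	 */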
	if (*ref)
		get_device(&ubi->dev);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
	return err;
}

/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Note, since this function un-registers UBI volume device objects
 * (@vol->dev), the memory allocated for the volumes is freed as well (in the
 * release function).
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}

/**
 * free_internal_volumes - free internal volumes.
 * @ubi: UBI device description object
 */
static void free_internal_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = ubi->vtbl_slots;
	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		kfree(ubi->volumes[i]->eba_tbl);
		kfree(ubi->volumes[i]);
	}
}

/**
 * attach_by_scanning - attach an MTD device using the scanning method.
 * @ubi: UBI device descriptor
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 *
 * Note, currently this is the only method to attach UBI devices. Hopefully in
 * the future we'll have more scalable attaching methods and avoid full media
 * scanning. But even in this case scanning will be needed as a fall-back
 * attaching method if there are some on-flash table corruptions.
 */
static int attach_by_scanning(struct ubi_device *ubi)
{
	int err;
	struct ubi_scan_info *si;

	si = ubi_scan(ubi);
	if (IS_ERR(si))
		return PTR_ERR(si);

	ubi->bad_peb_count = si->bad_peb_count;
	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
	ubi->max_ec = si->max_ec;
	ubi->mean_ec = si->mean_ec;

	err = ubi_read_volume_table(ubi, si);
	if (err)
		goto out_si;

	err = ubi_wl_init_scan(ubi, si);
	if (err)
		goto out_vtbl;

	err = ubi_eba_init_scan(ubi, si);
	if (err)
		goto out_wl;

	ubi_scan_destroy_si(si);
	return 0;

out_wl:
	ubi_wl_close(ubi);
out_vtbl:
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_si:
	ubi_scan_destroy_si(si);
	return err;
}

/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 *   o EC header is always at offset zero - this cannot be changed;
 *   o VID header starts just after the EC header at the closest address
 *     aligned to @io->hdrs_min_io_size;
 *   o data starts just after the VID header at the closest address aligned to
 *     @io->min_io_size
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi)
{
	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err("multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */

	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
		ubi->bad_allowed = 1;

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
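	/*
	 * On flashes with sub-pages (non-zero mtd->subpage_sft) the EC and
	 * VID headers may be written in sub-page sized chunks, so
	 * @hdrs_min_io_size ends up smaller than @min_io_size; otherwise the
	 * two are equal.
	 */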
	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption. It is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err("min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_msg("min_io_size %d", ubi->min_io_size);
	dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
	dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
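		/*
		 * The user may choose a @vid_hdr_offset which is not sub-page
		 * aligned; @vid_hdr_aloffset is that offset rounded down to
		 * the nearest sub-page boundary, and @vid_hdr_shift is the
		 * remainder within that sub-page.
		 */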
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
						~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
						ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
	dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
	dbg_msg("leb_start %d", ubi->leb_start);

	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err("unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err("bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set maximum amount of physical erroneous eraseblocks to be 10%.
	 * Erroneous PEBs are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_msg("max_erroneous %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn("EC and VID headers are in the same minimal I/O unit, "
			 "switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg("MTD device %d is write-protected, attach in "
			"read-only mode", ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
		ubi->peb_size, ubi->peb_size >> 10);
	ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
	ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
	if (ubi->hdrs_min_io_size != ubi->min_io_size)
		ubi_msg("sub-page size: %d",
			ubi->hdrs_min_io_size);
	ubi_msg("VID header offset: %d (aligned %d)",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
	ubi_msg("data offset: %d", ubi->leb_start);

	/*
	 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We would have
	 * to loop over all physical eraseblocks and invoke mtd->block_is_bad()
	 * for each physical eraseblock. So, we leave @ubi->bad_peb_count
	 * uninitialized here and initialize it after scanning.
	 */

	return 0;
}

/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
 * the volume table to the largest possible size. See comments in ubi-header.h
 * for more description of the flag. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	/*
	 * Clear the auto-resize flag in the volume in-memory copy of the
	 * volume table, and 'ubi_resize_volume()' will propagate this change
	 * to the flash.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to re-size the volume, clear the flag on
		 * flash and exit.
		 */
		memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
		       sizeof(struct ubi_vtbl_record));
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err("cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err("cannot auto-resize volume %d", vol_id);
	}

	if (err)
		return err;

	ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
		vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}

/**
 * ubi_reboot_notifier - halt UBI transactions immediately prior to a reboot.
 * @n: reboot notifier object
 * @state: SYS_RESTART, SYS_HALT, or SYS_POWER_OFF
 * @cmd: pointer to command string for RESTART2
 *
 * This function stops the UBI background thread so that the flash device
 * remains quiescent when Linux restarts the system. Any queued work will be
 * discarded, but this function will block until do_work() finishes if an
 * operation is already in progress.
 *
 * This function solves a real-life problem observed on NOR flashes when a
 * PEB erase operation starts, then the system is rebooted before the erase
 * finishes, and the boot loader gets confused and dies. So we prefer to
 * finish the ongoing operation before rebooting.
 */
static int ubi_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct ubi_device *ubi;

	ubi = container_of(n, struct ubi_device, reboot_notifier);
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);
	ubi_sync(ubi->ubi_num);
	return NOTIFY_DONE;
}

/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 *
 * This function attaches MTD device @mtd to UBI and assigns number @ubi_num
 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
 * which case this function finds a vacant device number and assigns it
 * automatically. Returns the new UBI device number in case of success and a
 * negative error code in case of failure.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
{
	struct ubi_device *ubi;
	int i, err, ref = 0;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI devices creations and deletions
	 * are serialized, so it does not take the &ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			dbg_err("mtd%d is already attached to ubi%d",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not emulated on top of an UBI volume
	 * already. Well, generally this recursion works fine, but there are
	 * problems: for example, the UBI module takes a reference to itself
	 * by attaching (and thus, opening) the emulated MTD device, which
	 * makes it impossible to unload the module. And in general it makes
	 * no sense to attach emulated MTD devices, so we prohibit this.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		ubi_err("refuse attaching mtd%d - it is already emulated on "
			"top of UBI", mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			dbg_err("only %d UBI devices may be created",
				UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			dbg_err("ubi%d already exists", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

	mutex_init(&ubi->buf_mutex);
	mutex_init(&ubi->ckvol_mutex);
	mutex_init(&ubi->device_mutex);
	spin_lock_init(&ubi->volumes_lock);

	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);

	err = io_init(ubi);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf1 = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf1)
		goto out_free;

	ubi->peb_buf2 = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf2)
		goto out_free;

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
	mutex_init(&ubi->dbg_buf_mutex);
	ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
	if (!ubi->dbg_peb_buf)
		goto out_free;
#endif

	err = attach_by_scanning(ubi);
	if (err) {
		dbg_err("failed to attach by scanning, error %d", err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	err = uif_init(ubi, &ref);
	if (err)
		goto out_detach;

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
			err);
		goto out_uif;
	}

	ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
	ubi_msg("MTD device name: \"%s\"", mtd->name);
	ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
	ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
	ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
	ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
	ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
	ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
	ubi_msg("number of user volumes: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT);
	ubi_msg("available PEBs: %d", ubi->avail_pebs);
	ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
	ubi_msg("number of PEBs reserved for bad PEB handling: %d",
		ubi->beb_rsvd_pebs);
	ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
	ubi_msg("image sequence number: %d", ubi->image_seq);

	/*
	 * The below lock makes sure we do not race with 'ubi_thread()' which
	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
	 */
	spin_lock(&ubi->wl_lock);
	if (!DBG_DISABLE_BGT)
		ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);

	/* Flash device priority is 0 - UBI needs to shut down first */
	ubi->reboot_notifier.priority = 1;
	ubi->reboot_notifier.notifier_call = ubi_reboot_notifier;
	register_reboot_notifier(&ubi->reboot_notifier);

	ubi_devices[ubi_num] = ubi;
	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
	return ubi_num;

out_uif:
	uif_close(ubi);
out_detach:
	ubi_wl_close(ubi);
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf1);
	vfree(ubi->peb_buf2);
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
	vfree(ubi->dbg_peb_buf);
#endif
	if (ref)
		put_device(&ubi->dev);
	else
		kfree(ubi);
	return err;
}

/**
 * ubi_detach_mtd_dev - detach an MTD device.
 * @ubi_num: UBI device number to detach from
 * @anyway: detach MTD even if device reference count is not zero
 *
 * This function destroys an UBI device number @ubi_num and detaches the
 * underlying MTD device. Returns zero in case of success, %-EBUSY if the
 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
 * exist.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
	struct ubi_device *ubi;

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return -EINVAL;

	ubi = ubi_get_device(ubi_num);
	if (!ubi)
		return -EINVAL;

	spin_lock(&ubi_devices_lock);
	put_device(&ubi->dev);
	ubi->ref_count -= 1;
	if (ubi->ref_count) {
		if (!anyway) {
			spin_unlock(&ubi_devices_lock);
			return -EBUSY;
		}
		/* This may only happen if there is a bug */
		ubi_err("%s reference count %d, destroy anyway",
			ubi->ubi_name, ubi->ref_count);
	}
	ubi_devices[ubi_num] = NULL;
	spin_unlock(&ubi_devices_lock);

	ubi_assert(ubi_num == ubi->ubi_num);
	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
	dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);

	/*
	 * Before freeing anything, we have to stop the background thread to
	 * prevent it from doing anything on this device while we are freeing.
	 */
	unregister_reboot_notifier(&ubi->reboot_notifier);
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

	/*
	 * Get a reference to the device in order to prevent 'dev_release()'
	 * from freeing the @ubi object.
	 */
	get_device(&ubi->dev);

	uif_close(ubi);
	ubi_wl_close(ubi);
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
	put_mtd_device(ubi->mtd);
	vfree(ubi->peb_buf1);
	vfree(ubi->peb_buf2);
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
	vfree(ubi->dbg_peb_buf);
#endif
	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
	put_device(&ubi->dev);
	return 0;
}

/**
 * open_mtd_by_chdev - open an MTD device by its character device node path.
 * @mtd_dev: MTD character device node path
 *
 * This helper function opens an MTD device by its character device node path.
 * Returns MTD device description object in case of success and a negative
 * error code in case of failure.
 */
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
	int err, major, minor, mode;
	struct path path;

	/* Probably this is an MTD character device node path */
	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
	if (err)
		return ERR_PTR(err);

	/* MTD device number is defined by the major / minor numbers */
	major = imajor(path.dentry->d_inode);
	minor = iminor(path.dentry->d_inode);
	mode = path.dentry->d_inode->i_mode;
	path_put(&path);
	if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
		return ERR_PTR(-EINVAL);

	if (minor & 1)
		/*
		 * We do not think support for the read-only "/dev/mtdrX"
		 * devices is needed, so we do not support them to avoid
		 * doing extra work.
		 */
		return ERR_PTR(-EINVAL);
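	/*
	 * Each MTD device gets two character device minors: an even one for
	 * "/dev/mtdX" and an odd one for "/dev/mtdrX", so the MTD device
	 * number is the minor number divided by 2.
	 */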
	return get_mtd_device(NULL, minor / 2);
}

/**
 * open_mtd_device - open MTD device by name, character device path, or number.
 * @mtd_dev: name, character device node path, or MTD device number
 *
 * This function tries to open an MTD device described by the @mtd_dev string,
 * which is first treated as an ASCII MTD device number, and if that fails, it
 * is treated as an MTD device name, and if that also fails, it is treated as
 * an MTD character device node path. Returns MTD device description object in
 * case of success and a negative error code in case of failure.
 */
static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
{
	struct mtd_info *mtd;
	int mtd_num;
	char *endp;

	mtd_num = simple_strtoul(mtd_dev, &endp, 0);
	if (*endp != '\0' || mtd_dev == endp) {
		/*
		 * This does not look like an ASCII integer, probably this is
		 * MTD device name.
		 */
		mtd = get_mtd_device_nm(mtd_dev);
		if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
			/* Probably this is an MTD character device node path */
			mtd = open_mtd_by_chdev(mtd_dev);
	} else
		mtd = get_mtd_device(NULL, mtd_num);

	return mtd;
}

static int __init ubi_init(void)
{
	int err, i, k;

	/* Ensure that EC and VID headers have correct size */
	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);

	if (mtd_devs > UBI_MAX_DEVICES) {
		ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
		return -EINVAL;
	}

	/* Create base sysfs directory and sysfs files */
	ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
	if (IS_ERR(ubi_class)) {
		err = PTR_ERR(ubi_class);
		ubi_err("cannot create UBI class");
		goto out;
	}

	err = class_create_file(ubi_class, &ubi_version);
	if (err) {
		ubi_err("cannot create sysfs file");
		goto out_class;
	}

	err = misc_register(&ubi_ctrl_cdev);
	if (err) {
		ubi_err("cannot register device");
		goto out_version;
	}

	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
					      sizeof(struct ubi_wl_entry),
					      0, 0, NULL);
	if (!ubi_wl_entry_slab)
		goto out_dev_unreg;

	/* Attach MTD devices */
	for (i = 0; i < mtd_devs; i++) {
		struct mtd_dev_param *p = &mtd_dev_param[i];
		struct mtd_info *mtd;

		cond_resched();

		mtd = open_mtd_device(p->name);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			goto out_detach;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
					 p->vid_hdr_offs);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0) {
			put_mtd_device(mtd);
			ubi_err("cannot attach mtd%d", mtd->index);
			goto out_detach;
		}
	}

	return 0;

out_detach:
	for (k = 0; k < i; k++)
		if (ubi_devices[k]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
	misc_deregister(&ubi_ctrl_cdev);
out_version:
	class_remove_file(ubi_class, &ubi_version);
out_class:
	class_destroy(ubi_class);
out:
	ubi_err("UBI error: cannot initialize UBI, error %d", err);
	return err;
}
module_init(ubi_init);

static void __exit ubi_exit(void)
{
	int i;

	for (i = 0; i < UBI_MAX_DEVICES; i++)
		if (ubi_devices[i]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	kmem_cache_destroy(ubi_wl_entry_slab);
	misc_deregister(&ubi_ctrl_cdev);
	class_remove_file(ubi_class, &ubi_version);
	class_destroy(ubi_class);
}
module_exit(ubi_exit);

/**
 * bytes_str_to_int - convert a number of bytes string into an integer.
 * @str: the string to convert
 *
 * This function returns a positive resulting integer in case of success and a
 * negative error code in case of failure.
 */
static int __init bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result;

	result = simple_strtoul(str, &endp, 0);
	if (str == endp || result >= INT_MAX) {
		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
		       str);
		return -EINVAL;
	}
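	/*
	 * Note, the 'G', 'M', and 'K' cases below intentionally fall through,
	 * so e.g. an "8MiB" suffix multiplies the result by 1024 twice.
	 */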
	switch (*endp) {
	case 'G':
		result *= 1024;
	case 'M':
		result *= 1024;
	case 'K':
		result *= 1024;
		if (endp[1] == 'i' && endp[2] == 'B')
			endp += 2;
	case '\0':
		break;
	default:
		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
		       str);
		return -EINVAL;
	}

	return result;
}

/**
 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
 * @val: the parameter value to parse
 * @kp: not used
 *
 * This function returns zero in case of success and a negative error code in
 * case of error.
 */
static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
{
	int i, len;
	struct mtd_dev_param *p;
	char buf[MTD_PARAM_LEN_MAX];
	char *pbuf = &buf[0];
	char *tokens[2] = {NULL, NULL};

	if (!val)
		return -EINVAL;

	if (mtd_devs == UBI_MAX_DEVICES) {
		printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
		       UBI_MAX_DEVICES);
		return -EINVAL;
	}

	len = strnlen(val, MTD_PARAM_LEN_MAX);
	if (len == MTD_PARAM_LEN_MAX) {
		printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
		       "max. is %d\n", val, MTD_PARAM_LEN_MAX);
		return -EINVAL;
	}

	if (len == 0) {
		printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
		       "ignored\n");
		return 0;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < 2; i++)
		tokens[i] = strsep(&pbuf, ",");

	if (pbuf) {
		printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
		       val);
		return -EINVAL;
	}

	p = &mtd_dev_param[mtd_devs];
	strcpy(&p->name[0], tokens[0]);

	if (tokens[1])
		p->vid_hdr_offs = bytes_str_to_int(tokens[1]);

	if (p->vid_hdr_offs < 0)
		return p->vid_hdr_offs;

	mtd_devs += 1;
	return 0;
}

module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
		      "mtd=<name|num|path>[,<vid_hdr_offs>].\n"
		      "Multiple \"mtd\" parameters may be specified.\n"
		      "MTD devices may be specified by their number, name, or "
		      "path to the MTD character device node.\n"
		      "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
		      "header position to be used by UBI.\n"
		      "Example 1: mtd=/dev/mtd0 - attach MTD device "
		      "/dev/mtd0.\n"
		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device "
		      "with name \"content\" using VID header offset 1984, and "
		      "MTD device number 4 with default VID header offset.");

MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
MODULE_LICENSE("GPL");