/*
 * Copyright (c) 2014 Ezequiel Garcia
 * Copyright (c) 2011 Free Electrons
 *
 * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
 *   Copyright (c) International Business Machines Corp., 2006
 *   Copyright (c) Nokia Corporation, 2007
 *   Authors: Artem Bityutskiy, Frank Haverkamp
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */

/*
 * Read-only block devices on top of UBI volumes
 *
 * A simple implementation to allow a block device to be layered on top of a
 * UBI volume. The implementation is provided by creating a static 1-to-1
 * mapping between the block device and the UBI volume.
 *
 * The addressed byte is obtained from the addressed block sector, which is
 * mapped linearly into the corresponding LEB:
 *
 *   LEB number = addressed byte / LEB size
 *
 * This feature is compiled into the UBI core, and adds a 'block' parameter
 * to allow early creation of block devices on top of UBI volumes. Runtime
 * block creation/removal for UBI volumes is provided through two UBI ioctls:
 * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
 */
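
/*
 * Worked example (illustrative numbers): with a usable LEB size of
 * 126976 bytes, 512-byte sector 390 addresses byte 390 * 512 = 199680,
 * which maps to LEB 199680 / 126976 = 1 at offset 199680 % 126976 = 72704.
 * ubiblock_read() below performs exactly this computation with do_div().
 *
 * Runtime creation from userspace is typically done with the 'ubiblock'
 * tool from mtd-utils (e.g. 'ubiblock --create /dev/ubi0_0'), which wraps
 * the UBI_IOCVOLCRBLK ioctl on the volume character device.
 */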

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>
#include <asm/div64.h>

#include "ubi-media.h"
#include "ubi.h"

/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

struct ubiblock_param {
	int ubi_num;
	int vol_id;
	char name[UBIBLOCK_PARAM_LEN+1];
};

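/*
 * Per-request driver data: blk-mq allocates one pdu alongside every
 * request (see 'cmd_size' in the tag set below); blk_mq_rq_to_pdu()
 * and blk_mq_rq_from_pdu() convert between a request and its pdu.
 */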
struct ubiblock_pdu {
	struct work_struct work;
	struct ubi_sgl usgl;
};

/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* ubiblock devices specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

struct ubiblock {
	struct ubi_volume_desc *desc;
	int ubi_num;
	int vol_id;
	int refcnt;
	int leb_size;

	struct gendisk *gd;
	struct request_queue *rq;

	struct workqueue_struct *wq;

	struct mutex dev_mutex;
	struct list_head list;
	struct blk_mq_tag_set tag_set;
};

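/*
 * Locking: 'devices_mutex' protects the 'ubiblock_devices' list, while
 * each device's 'dev_mutex' protects its 'refcnt' and 'desc'.
 */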
/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;

static int __init ubiblock_set_param(const char *val,
				     const struct kernel_param *kp)
{
	int i, ret;
	size_t len;
	struct ubiblock_param *param;
	char buf[UBIBLOCK_PARAM_LEN];
	char *pbuf = &buf[0];
	char *tokens[UBIBLOCK_PARAM_COUNT];

	if (!val)
		return -EINVAL;

	len = strnlen(val, UBIBLOCK_PARAM_LEN);
	if (len == 0) {
		pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
		return 0;
	}

	if (len == UBIBLOCK_PARAM_LEN) {
		pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
		       val, UBIBLOCK_PARAM_LEN);
		return -EINVAL;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	/* Don't overflow the fixed-size parameter array */
	if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
		pr_err("UBI: block: more than %d ubiblock devices specified\n",
		       UBIBLOCK_MAX_DEVICES);
		return -EINVAL;
	}

	param = &ubiblock_param[ubiblock_devs];
	if (tokens[1]) {
		/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
		ret = kstrtoint(tokens[0], 10, &param->ubi_num);
		if (ret < 0)
			return -EINVAL;

		/* Second param can be a number or a name */
		ret = kstrtoint(tokens[1], 10, &param->vol_id);
		if (ret < 0) {
			param->vol_id = -1;
			strcpy(param->name, tokens[1]);
		}

	} else {
		/* One parameter: must be device path */
		strcpy(param->name, tokens[0]);
		param->ubi_num = -1;
		param->vol_id = -1;
	}

	ubiblock_devs++;

	return 0;
}

static const struct kernel_param_ops ubiblock_param_ops = {
	.set    = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
			"Multiple \"block\" parameters may be specified.\n"
			"UBI volumes may be specified by their number, name, or path to the device node.\n"
			"Examples:\n"
			"Using the UBI volume path:\n"
			"ubi.block=/dev/ubi0_0\n"
			"Using the UBI device, and the volume name:\n"
			"ubi.block=0,rootfs\n"
			"Using both UBI device number and UBI volume number:\n"
			"ubi.block=0,0\n");

static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
	struct ubiblock *dev;

	list_for_each_entry(dev, &ubiblock_devices, list)
		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
			return dev;
	return NULL;
}

static int ubiblock_read(struct ubiblock_pdu *pdu)
{
	int ret, leb, offset, bytes_left, to_read;
	u64 pos;
	struct request *req = blk_mq_rq_from_pdu(pdu);
	struct ubiblock *dev = req->q->queuedata;

	to_read = blk_rq_bytes(req);
	pos = blk_rq_pos(req) << 9;

	/* Get LEB:offset address to read from */
	offset = do_div(pos, dev->leb_size);
	leb = pos;
	bytes_left = to_read;

	while (bytes_left) {
		/*
		 * We can only read one LEB at a time. Therefore if the read
		 * length is larger than one LEB size, we split the operation.
		 */
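		/*
		 * Example (illustrative): with a 126976-byte LEB, a 4096-byte
		 * read at offset 126000 of LEB 0 is split into 976 bytes from
		 * LEB 0 and 3120 bytes from the start of LEB 1.
		 */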
		if (offset + to_read > dev->leb_size)
			to_read = dev->leb_size - offset;

		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
		if (ret < 0)
			return ret;

		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;
	}
	return 0;
}

static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
	struct ubiblock *dev = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		/*
		 * The volume is already open, just increase the reference
		 * counter.
		 */
		goto out_done;
	}

	/*
	 * We want users to be aware they should only mount us as read-only.
	 * It's just a paranoid check, as write requests will get rejected
	 * in any case.
	 */
	if (mode & FMODE_WRITE) {
		ret = -EPERM;
		goto out_unlock;
	}

	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
	if (IS_ERR(dev->desc)) {
		dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d\n",
			dev->ubi_num, dev->vol_id);
		ret = PTR_ERR(dev->desc);
		dev->desc = NULL;
		goto out_unlock;
	}

out_done:
	dev->refcnt++;
	mutex_unlock(&dev->dev_mutex);
	return 0;

out_unlock:
	mutex_unlock(&dev->dev_mutex);
	return ret;
}

static void ubiblock_release(struct gendisk *gd, fmode_t mode)
{
	struct ubiblock *dev = gd->private_data;

	mutex_lock(&dev->dev_mutex);
	dev->refcnt--;
	if (dev->refcnt == 0) {
		ubi_close_volume(dev->desc);
		dev->desc = NULL;
	}
	mutex_unlock(&dev->dev_mutex);
}

static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* Some tools might require this information */
	geo->heads = 1;
	geo->cylinders = 1;
	geo->sectors = get_capacity(bdev->bd_disk);
	geo->start = 0;
	return 0;
}

static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo	= ubiblock_getgeo,
};

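/*
 * Work item handler: runs in workqueue (process) context, maps the
 * request's pages into the scatterlist and serves the read via UBI.
 */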
static void ubiblock_do_work(struct work_struct *work)
{
	int ret;
	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
	struct request *req = blk_mq_rq_from_pdu(pdu);

	blk_mq_start_request(req);

	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	ret = ubiblock_read(pdu);
	rq_flush_dcache_pages(req);

	blk_mq_end_request(req, ret);
}

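/*
 * Only REQ_OP_READ is queued for service; any other operation (writes
 * included) is failed right away, which keeps the device read-only at
 * the request level.
 */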
static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
			     const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct ubiblock *dev = hctx->queue->queuedata;
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	switch (req_op(req)) {
	case REQ_OP_READ:
		ubi_sgl_init(&pdu->usgl);
		queue_work(dev->wq, &pdu->work);
		return BLK_MQ_RQ_QUEUE_OK;
	default:
		return BLK_MQ_RQ_QUEUE_ERROR;
	}
}

static int ubiblock_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
	INIT_WORK(&pdu->work, ubiblock_do_work);

	return 0;
}

static const struct blk_mq_ops ubiblock_mq_ops = {
	.queue_rq       = ubiblock_queue_rq,
	.init_request	= ubiblock_init_request,
};

static DEFINE_IDR(ubiblock_minor_idr);

int ubiblock_create(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	struct gendisk *gd;
	u64 disk_capacity = vi->used_bytes >> 9;
	int ret;

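	/*
	 * On 32-bit systems sector_t may be only 32 bits wide; refuse
	 * volumes whose size in 512-byte sectors doesn't fit in it.
	 */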
	if ((sector_t)disk_capacity != disk_capacity)
		return -EFBIG;
	/* Check that the volume isn't already handled */
	mutex_lock(&devices_mutex);
	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
		mutex_unlock(&devices_mutex);
		return -EEXIST;
	}
	mutex_unlock(&devices_mutex);

	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	mutex_init(&dev->dev_mutex);

	dev->ubi_num = vi->ubi_num;
	dev->vol_id = vi->vol_id;
	dev->leb_size = vi->usable_leb_size;

	/* Initialize the gendisk of this ubiblock device */
	gd = alloc_disk(1);
	if (!gd) {
		pr_err("UBI: block: alloc_disk failed\n");
		ret = -ENODEV;
		goto out_free_dev;
	}

	gd->fops = &ubiblock_ops;
	gd->major = ubiblock_major;
	gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
	if (gd->first_minor < 0) {
		dev_err(disk_to_dev(gd),
			"block: dynamic minor allocation failed\n");
		ret = -ENODEV;
		goto out_put_disk;
	}
	gd->private_data = dev;
	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
	set_capacity(gd, disk_capacity);
	dev->gd = gd;

	dev->tag_set.ops = &ubiblock_mq_ops;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
	dev->tag_set.driver_data = dev;
	dev->tag_set.nr_hw_queues = 1;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret) {
		dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed\n");
		goto out_remove_minor;
	}

	dev->rq = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(dev->rq)) {
		dev_err(disk_to_dev(gd), "blk_mq_init_queue failed\n");
		ret = PTR_ERR(dev->rq);
		goto out_free_tags;
	}
	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);

	dev->rq->queuedata = dev;
	dev->gd->queue = dev->rq;

	/*
	 * Create one workqueue per volume (per registered block device).
	 * Remember workqueues are cheap, they're not threads.
	 */
	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
	if (!dev->wq) {
		ret = -ENOMEM;
		goto out_free_queue;
	}

	mutex_lock(&devices_mutex);
	list_add_tail(&dev->list, &ubiblock_devices);
	mutex_unlock(&devices_mutex);

	/* Must be the last step: anyone can call file ops from now on */
	add_disk(dev->gd);
	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)\n",
		 dev->ubi_num, dev->vol_id, vi->name);
	return 0;

out_free_queue:
	blk_cleanup_queue(dev->rq);
out_free_tags:
	blk_mq_free_tag_set(&dev->tag_set);
out_remove_minor:
	idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_put_disk:
	put_disk(dev->gd);
out_free_dev:
	kfree(dev);

	return ret;
}

static void ubiblock_cleanup(struct ubiblock *dev)
{
	/* Stop new requests from arriving */
	del_gendisk(dev->gd);
	/* Flush pending work */
	destroy_workqueue(dev->wq);
	/* Finally destroy the blk queue */
	blk_cleanup_queue(dev->rq);
	blk_mq_free_tag_set(&dev->tag_set);
	dev_info(disk_to_dev(dev->gd), "released\n");
	idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
	put_disk(dev->gd);
}

int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		mutex_unlock(&dev->dev_mutex);
		mutex_unlock(&devices_mutex);
		return -EBUSY;
	}

	/* Remove from device list */
	list_del(&dev->list);
	mutex_unlock(&devices_mutex);

	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	kfree(dev);
	return 0;
}

static int ubiblock_resize(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	u64 disk_capacity = vi->used_bytes >> 9;

	/*
	 * Need to lock the device list until we stop using the device,
	 * otherwise the device struct might get released in
	 * 'ubiblock_remove()'.
	 */
	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}
	if ((sector_t)disk_capacity != disk_capacity) {
		mutex_unlock(&devices_mutex);
		dev_warn(disk_to_dev(dev->gd), "the volume is too big (%d LEBs), cannot resize\n",
			 vi->size);
		return -EFBIG;
	}

	mutex_lock(&dev->dev_mutex);

	if (get_capacity(dev->gd) != disk_capacity) {
		set_capacity(dev->gd, disk_capacity);
		dev_info(disk_to_dev(dev->gd), "resized to %lld bytes\n",
			 vi->used_bytes);
	}
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);
	return 0;
}

static int ubiblock_notify(struct notifier_block *nb,
			 unsigned long notification_type, void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;

	switch (notification_type) {
	case UBI_VOLUME_ADDED:
		/*
		 * We want to enforce explicit block device creation for
		 * volumes, so when a volume is added we do nothing.
		 */
		break;
	case UBI_VOLUME_REMOVED:
		ubiblock_remove(&nt->vi);
		break;
	case UBI_VOLUME_RESIZED:
		ubiblock_resize(&nt->vi);
		break;
	case UBI_VOLUME_UPDATED:
		/*
		 * If the volume is static, a content update might mean the
		 * size (i.e. used_bytes) was also changed.
		 */
		if (nt->vi.vol_type == UBI_STATIC_VOLUME)
			ubiblock_resize(&nt->vi);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};

static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
	if (ubi_num == -1)
		/* No ubi num, name must be a vol device path */
		return ubi_open_volume_path(name, UBI_READONLY);
	else if (vol_id == -1)
		/* No vol_id, must be vol_name */
		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
	else
		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}

static void __init ubiblock_create_from_param(void)
{
	int i, ret = 0;
	struct ubiblock_param *p;
	struct ubi_volume_desc *desc;
	struct ubi_volume_info vi;

	/*
	 * If there is an error creating one of the ubiblocks, continue on to
	 * create the following ubiblocks. This helps in a circumstance where
	 * the kernel command-line specifies multiple block devices and some
	 * may be broken, but we still want the working ones to come up.
	 */
	for (i = 0; i < ubiblock_devs; i++) {
		p = &ubiblock_param[i];

		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
		if (IS_ERR(desc)) {
			pr_err("UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
			       p->ubi_num, p->vol_id, PTR_ERR(desc));
			continue;
		}

		ubi_get_volume_info(desc, &vi);
		ubi_close_volume(desc);

		ret = ubiblock_create(&vi);
		if (ret) {
			pr_err("UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
			       vi.name, p->ubi_num, p->vol_id, ret);
			continue;
		}
	}
}

static void ubiblock_remove_all(void)
{
	struct ubiblock *next;
	struct ubiblock *dev;

	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
		/* The module is being forcefully removed */
		WARN_ON(dev->desc);
		/* Remove from device list */
		list_del(&dev->list);
		ubiblock_cleanup(dev);
		kfree(dev);
	}
}

int __init ubiblock_init(void)
{
	int ret;

	ubiblock_major = register_blkdev(0, "ubiblock");
	if (ubiblock_major < 0)
		return ubiblock_major;

	/*
	 * Attach block devices from the 'block=' module parameter.
	 * Even if one block device in the param list fails to come up,
	 * still allow the module to load and leave any others up.
	 */
	ubiblock_create_from_param();

	/*
	 * Block devices are only created upon user requests, so we ignore
	 * existing volumes.
	 */
	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
	if (ret)
		goto err_unreg;
	return 0;

err_unreg:
	unregister_blkdev(ubiblock_major, "ubiblock");
	ubiblock_remove_all();
	return ret;
}

void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}