// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Ezequiel Garcia
 * Copyright (c) 2011 Free Electrons
 *
 * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
 *   Copyright (c) International Business Machines Corp., 2006
 *   Copyright (c) Nokia Corporation, 2007
 *   Authors: Artem Bityutskiy, Frank Haverkamp
 */

/*
 * Read-only block devices on top of UBI volumes
 *
 * A simple implementation to allow a block device to be layered on top of a
 * UBI volume. The implementation is provided by creating a static 1-to-1
 * mapping between the block device and the UBI volume.
 *
 * The addressed byte is obtained from the addressed block sector, which is
 * mapped linearly into the corresponding LEB:
 *
 *   LEB number = addressed byte / LEB size
 *
 * This feature is compiled into the UBI core, and adds a 'block' parameter
 * to allow early creation of block devices on top of UBI volumes. Runtime
 * block creation/removal for UBI volumes is provided through two UBI ioctls:
 * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
 */
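
/*
 * As an illustration of the linear mapping (numbers are hypothetical):
 * with a 128 KiB (131072-byte) LEB, the byte at linear position 300000
 * lives in LEB 2 at offset 37856, since 300000 = 2 * 131072 + 37856.
 *
 * A minimal user-space sketch of runtime block device creation, assuming
 * a volume character device at /dev/ubi0_0 (error handling omitted):
 *
 *   #include <fcntl.h>
 *   #include <sys/ioctl.h>
 *   #include <mtd/ubi-user.h>
 *
 *   int fd = open("/dev/ubi0_0", O_RDONLY);
 *   struct ubi_blkcreate_req req = { 0 };
 *   ioctl(fd, UBI_IOCVOLCRBLK, &req);   creates /dev/ubiblock0_0
 *   ioctl(fd, UBI_IOCVOLRMBLK);         removes it again
 */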

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>
#include <asm/div64.h>

#include "ubi-media.h"
#include "ubi.h"

/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

struct ubiblock_param {
	int ubi_num;
	int vol_id;
	char name[UBIBLOCK_PARAM_LEN+1];
};

struct ubiblock_pdu {
	struct work_struct work;
	struct ubi_sgl usgl;
};

/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* UBI block devices specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

struct ubiblock {
	struct ubi_volume_desc *desc;
	int ubi_num;
	int vol_id;
	int refcnt;
	int leb_size;

	struct gendisk *gd;
	struct request_queue *rq;

	struct workqueue_struct *wq;

	struct mutex dev_mutex;
	struct list_head list;
	struct blk_mq_tag_set tag_set;
};

/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_IDR(ubiblock_minor_idr);
/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;

static int __init ubiblock_set_param(const char *val,
				     const struct kernel_param *kp)
{
	int i, ret;
	size_t len;
	struct ubiblock_param *param;
	char buf[UBIBLOCK_PARAM_LEN];
	char *pbuf = &buf[0];
	char *tokens[UBIBLOCK_PARAM_COUNT];

	if (!val)
		return -EINVAL;

	len = strnlen(val, UBIBLOCK_PARAM_LEN);
	if (len == 0) {
		pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
		return 0;
	}

	if (len == UBIBLOCK_PARAM_LEN) {
		pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
		       val, UBIBLOCK_PARAM_LEN);
		return -EINVAL;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	/* Don't overflow the ubiblock_param array */
	if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
		pr_err("UBI: block: more than %d ubiblock devices specified\n",
		       UBIBLOCK_MAX_DEVICES);
		return -EINVAL;
	}

	param = &ubiblock_param[ubiblock_devs];
	if (tokens[1]) {
		/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
		ret = kstrtoint(tokens[0], 10, &param->ubi_num);
		if (ret < 0)
			return -EINVAL;

		/* Second param can be a number or a name */
		ret = kstrtoint(tokens[1], 10, &param->vol_id);
		if (ret < 0) {
			param->vol_id = -1;
			strcpy(param->name, tokens[1]);
		}

	} else {
		/* One parameter: must be device path */
		strcpy(param->name, tokens[0]);
		param->ubi_num = -1;
		param->vol_id = -1;
	}

	ubiblock_devs++;

	return 0;
}

static const struct kernel_param_ops ubiblock_param_ops = {
	.set    = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
			"Multiple \"block\" parameters may be specified.\n"
			"UBI volumes may be specified by their number, name, or path to the device node.\n"
			"Examples\n"
			"Using the UBI volume path:\n"
			"ubi.block=/dev/ubi0_0\n"
			"Using the UBI device, and the volume name:\n"
			"ubi.block=0,rootfs\n"
			"Using both UBI device number and UBI volume number:\n"
			"ubi.block=0,0\n");
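
/*
 * For instance (the volume names here are hypothetical), a kernel command
 * line that attaches mtd0 as ubi0 and creates two block devices could read:
 *
 *   ubi.mtd=0 ubi.block=0,rootfs ubi.block=0,data root=/dev/ubiblock0_0
 */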

static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
	struct ubiblock *dev;

	list_for_each_entry(dev, &ubiblock_devices, list)
		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
			return dev;
	return NULL;
}

static int ubiblock_read(struct ubiblock_pdu *pdu)
{
	int ret, leb, offset, bytes_left, to_read;
	u64 pos;
	struct request *req = blk_mq_rq_from_pdu(pdu);
	struct ubiblock *dev = req->q->queuedata;

	to_read = blk_rq_bytes(req);
	pos = blk_rq_pos(req) << 9;

	/* Get LEB:offset address to read from */
	offset = do_div(pos, dev->leb_size);
	leb = pos;
	bytes_left = to_read;

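	/*
	 * Example (illustrative numbers): with a 128 KiB LEB, an 8 KiB read
	 * that starts 2 KiB before the end of LEB 2 is split into a 2 KiB
	 * read from the tail of LEB 2 and a 6 KiB read from the head of
	 * LEB 3.
	 */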
	while (bytes_left) {
		/*
		 * We can only read one LEB at a time. Therefore if the read
		 * length is larger than one LEB size, we split the operation.
		 */
		if (offset + to_read > dev->leb_size)
			to_read = dev->leb_size - offset;

		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
		if (ret < 0)
			return ret;

		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;
	}
	return 0;
}

static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
	struct ubiblock *dev = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		/*
		 * The volume is already open, just increase the reference
		 * counter.
		 */
		goto out_done;
	}

	/*
	 * We want users to be aware they should only mount us as read-only.
	 * It's just a paranoid check, as write requests will get rejected
	 * in any case.
	 */
	if (mode & FMODE_WRITE) {
		ret = -EROFS;
		goto out_unlock;
	}

	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
	if (IS_ERR(dev->desc)) {
		dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
			dev->ubi_num, dev->vol_id);
		ret = PTR_ERR(dev->desc);
		dev->desc = NULL;
		goto out_unlock;
	}

out_done:
	dev->refcnt++;
	mutex_unlock(&dev->dev_mutex);
	return 0;

out_unlock:
	mutex_unlock(&dev->dev_mutex);
	return ret;
}

static void ubiblock_release(struct gendisk *gd, fmode_t mode)
{
	struct ubiblock *dev = gd->private_data;

	mutex_lock(&dev->dev_mutex);
	dev->refcnt--;
	if (dev->refcnt == 0) {
		ubi_close_volume(dev->desc);
		dev->desc = NULL;
	}
	mutex_unlock(&dev->dev_mutex);
}

static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* Some tools might require this information */
	geo->heads = 1;
	geo->cylinders = 1;
	geo->sectors = get_capacity(bdev->bd_disk);
	geo->start = 0;
	return 0;
}

static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo	= ubiblock_getgeo,
};

static void ubiblock_do_work(struct work_struct *work)
{
	int ret;
	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
	struct request *req = blk_mq_rq_from_pdu(pdu);
	struct req_iterator iter;
	struct bio_vec bvec;

	blk_mq_start_request(req);

	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	ret = ubiblock_read(pdu);

	rq_for_each_segment(bvec, req, iter)
		flush_dcache_page(bvec.bv_page);

	blk_mq_end_request(req, errno_to_blk_status(ret));
}

static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
			     const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct ubiblock *dev = hctx->queue->queuedata;
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	switch (req_op(req)) {
	case REQ_OP_READ:
		ubi_sgl_init(&pdu->usgl);
		queue_work(dev->wq, &pdu->work);
		return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}

static int ubiblock_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
	INIT_WORK(&pdu->work, ubiblock_do_work);

	return 0;
}

static const struct blk_mq_ops ubiblock_mq_ops = {
	.queue_rq       = ubiblock_queue_rq,
	.init_request	= ubiblock_init_request,
};

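/*
 * Convert the volume size in bytes into a capacity in 512-byte sectors.
 * For example (illustrative numbers): used_bytes = 1048676 gives a
 * capacity of 2048 sectors, with the trailing 100 bytes ignored and
 * warned about.
 */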
static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
{
	u64 size = vi->used_bytes >> 9;

	if (vi->used_bytes % 512)
		pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
			vi->used_bytes - (size << 9));

	if ((sector_t)size != size)
		return -EFBIG;

	*disk_capacity = size;

	return 0;
}

int ubiblock_create(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	struct gendisk *gd;
	u64 disk_capacity;
	int ret;

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret)
		return ret;

	/* Check that the volume isn't already handled */
	mutex_lock(&devices_mutex);
	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	mutex_init(&dev->dev_mutex);

	dev->ubi_num = vi->ubi_num;
	dev->vol_id = vi->vol_id;
	dev->leb_size = vi->usable_leb_size;

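	/*
	 * cmd_size makes blk-mq allocate one struct ubiblock_pdu, holding
	 * the scatterlist and the work item, alongside each request; see
	 * ubiblock_init_request() and blk_mq_rq_to_pdu().
	 */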
	dev->tag_set.ops = &ubiblock_mq_ops;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
	dev->tag_set.driver_data = dev;
	dev->tag_set.nr_hw_queues = 1;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret) {
		/* dev->gd is not set up yet, so dev_err() cannot be used here */
		pr_err("UBI: block: blk_mq_alloc_tag_set failed for ubi%d_%d\n",
		       dev->ubi_num, dev->vol_id);
		goto out_free_dev;
	}

	/* Initialize the gendisk of this ubiblock device */
	gd = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(gd)) {
		ret = PTR_ERR(gd);
		goto out_free_tags;
	}

	gd->fops = &ubiblock_ops;
	gd->major = ubiblock_major;
	gd->minors = 1;
	gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
	if (gd->first_minor < 0) {
		dev_err(disk_to_dev(gd),
			"block: dynamic minor allocation failed");
		ret = -ENODEV;
		goto out_cleanup_disk;
	}
	gd->flags |= GENHD_FL_NO_PART;
	gd->private_data = dev;
	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
	set_capacity(gd, disk_capacity);
	dev->gd = gd;

	dev->rq = gd->queue;
	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);

	/*
	 * Create one workqueue per volume (per registered block device).
	 * Remember workqueues are cheap, they're not threads.
	 */
	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
	if (!dev->wq) {
		ret = -ENOMEM;
		goto out_remove_minor;
	}

	list_add_tail(&dev->list, &ubiblock_devices);

	/* Must be the last step: anyone can call file ops from now on */
	ret = add_disk(dev->gd);
	if (ret)
		goto out_destroy_wq;

	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
		 dev->ubi_num, dev->vol_id, vi->name);
	mutex_unlock(&devices_mutex);
	return 0;

out_destroy_wq:
	list_del(&dev->list);
	destroy_workqueue(dev->wq);
out_remove_minor:
	idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
	/* dev->gd may not be set yet on this path, so use the local gd */
	put_disk(gd);
out_free_tags:
	blk_mq_free_tag_set(&dev->tag_set);
out_free_dev:
	kfree(dev);
out_unlock:
	mutex_unlock(&devices_mutex);

	return ret;
}

static void ubiblock_cleanup(struct ubiblock *dev)
{
	/* Stop new requests from arriving */
	del_gendisk(dev->gd);
	/* Flush pending work */
	destroy_workqueue(dev->wq);
	dev_info(disk_to_dev(dev->gd), "released");
	/* Drop the minor before put_disk(), which may free the gendisk */
	idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
	put_disk(dev->gd);
	blk_mq_free_tag_set(&dev->tag_set);
}

int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	int ret;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		ret = -ENODEV;
		goto out_unlock;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		ret = -EBUSY;
		goto out_unlock_dev;
	}

	/* Remove from device list */
	list_del(&dev->list);
	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);

	kfree(dev);
	return 0;

out_unlock_dev:
	mutex_unlock(&dev->dev_mutex);
out_unlock:
	mutex_unlock(&devices_mutex);
	return ret;
}

static int ubiblock_resize(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	u64 disk_capacity;
	int ret;

	/*
	 * Need to lock the device list until we stop using the device,
	 * otherwise the device struct might get released in
	 * 'ubiblock_remove()'.
	 */
	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret) {
		if (ret == -EFBIG) {
			dev_warn(disk_to_dev(dev->gd),
				 "the volume is too big (%d LEBs), cannot resize",
				 vi->size);
		}
		/* Only drop the list lock once we are done using 'dev' */
		mutex_unlock(&devices_mutex);
		return ret;
	}

	mutex_lock(&dev->dev_mutex);

	if (get_capacity(dev->gd) != disk_capacity) {
		set_capacity(dev->gd, disk_capacity);
		dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
			 vi->used_bytes);
	}
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);
	return 0;
}

static int ubiblock_notify(struct notifier_block *nb,
			 unsigned long notification_type, void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;

	switch (notification_type) {
	case UBI_VOLUME_ADDED:
		/*
		 * We want to enforce explicit block device creation for
		 * volumes, so when a volume is added we do nothing.
		 */
		break;
	case UBI_VOLUME_REMOVED:
		ubiblock_remove(&nt->vi);
		break;
	case UBI_VOLUME_RESIZED:
		ubiblock_resize(&nt->vi);
		break;
	case UBI_VOLUME_UPDATED:
		/*
		 * If the volume is static, a content update might mean the
		 * size (i.e. used_bytes) was also changed.
		 */
		if (nt->vi.vol_type == UBI_STATIC_VOLUME)
			ubiblock_resize(&nt->vi);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};

static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
	if (ubi_num == -1)
		/* No ubi num, name must be a vol device path */
		return ubi_open_volume_path(name, UBI_READONLY);
	else if (vol_id == -1)
		/* No vol_id, must be vol_name */
		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
	else
		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}
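
/*
 * Illustrative mapping of 'block=' forms to the calls above (the device
 * numbers and volume names are examples only):
 *
 *   block=/dev/ubi0_0  ->  ubi_open_volume_path("/dev/ubi0_0", ...)
 *   block=0,rootfs     ->  ubi_open_volume_nm(0, "rootfs", ...)
 *   block=0,0          ->  ubi_open_volume(0, 0, ...)
 */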

static void __init ubiblock_create_from_param(void)
{
	int i, ret = 0;
	struct ubiblock_param *p;
	struct ubi_volume_desc *desc;
	struct ubi_volume_info vi;

	/*
	 * If there is an error creating one of the ubiblocks, continue on to
	 * create the following ubiblocks. This helps in a circumstance where
	 * the kernel command-line specifies multiple block devices and some
	 * may be broken, but we still want the working ones to come up.
	 */
	for (i = 0; i < ubiblock_devs; i++) {
		p = &ubiblock_param[i];

		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
		if (IS_ERR(desc)) {
			pr_err("UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
			       p->ubi_num, p->vol_id, PTR_ERR(desc));
			continue;
		}

		ubi_get_volume_info(desc, &vi);
		ubi_close_volume(desc);

		ret = ubiblock_create(&vi);
		if (ret) {
			pr_err("UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
			       vi.name, p->ubi_num, p->vol_id, ret);
			continue;
		}
	}
}

static void ubiblock_remove_all(void)
{
	struct ubiblock *next;
	struct ubiblock *dev;

	mutex_lock(&devices_mutex);
	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
		/* The module is being forcefully removed */
		WARN_ON(dev->desc);
		/* Remove from device list */
		list_del(&dev->list);
		ubiblock_cleanup(dev);
		kfree(dev);
	}
	mutex_unlock(&devices_mutex);
}

int __init ubiblock_init(void)
{
	int ret;

	ubiblock_major = register_blkdev(0, "ubiblock");
	if (ubiblock_major < 0)
		return ubiblock_major;

	/*
	 * Attach block devices from 'block=' module param.
	 * Even if one block device in the param list fails to come up,
	 * still allow the module to load and leave any others up.
	 */
	ubiblock_create_from_param();

	/*
	 * Block devices are only created upon user requests, so we ignore
	 * existing volumes.
	 */
	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
	if (ret)
		goto err_unreg;
	return 0;

err_unreg:
	unregister_blkdev(ubiblock_major, "ubiblock");
	ubiblock_remove_all();
	return ret;
}

void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}
709