xref: /linux/drivers/mtd/mtd_blkdevs.c (revision a5c4300389bb33ade2515c082709217f0614cf15)
/*
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

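/*
 * A device's lifetime is governed by its kref: the final kref_put()
 * ends up here, tearing down the request queue and gendisk and freeing
 * the structure.  blktrans_ref_mutex serialises this against the
 * lookup in blktrans_dev_get(), so disk->private_data can be
 * dereferenced safely there.
 */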
void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	dev->disk->private_data = NULL;
	blk_cleanup_queue(dev->rq);
	put_disk(dev->disk);
	list_del(&dev->list);
	kfree(dev);
}

static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
	struct mtd_blktrans_dev *dev;

	mutex_lock(&blktrans_ref_mutex);
	dev = disk->private_data;

	if (!dev)
		goto unlock;
	kref_get(&dev->ref);
unlock:
	mutex_unlock(&blktrans_ref_mutex);
	return dev;
}

void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	mutex_lock(&blktrans_ref_mutex);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&blktrans_ref_mutex);
}

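/*
 * Translate one block-layer request segment into readsect()/writesect()
 * calls.  blk_rq_pos() counts 512-byte sectors, so "<< 9" converts it
 * to a byte offset and ">> tr->blkshift" to translation-layer blocks:
 * with blksize 2048 (blkshift 11), sector 8 is byte 4096, i.e. block 2.
 */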
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	buf = req->buffer;

	if (!blk_fs_request(req))
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return -EIO;

	if (blk_discard_rq(req))
		return tr->discard(dev, block, nsect);

	switch (rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return -EIO;
		rq_flush_dcache_pages(req);
		return 0;
	case WRITE:
		if (!tr->writesect)
			return -EIO;

		rq_flush_dcache_pages(req);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return -EIO;
		return 0;
	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}

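/*
 * Per-device worker thread.  Requests are fetched and completed under
 * queue_lock, but the actual (possibly sleeping) MTD I/O runs with the
 * queue lock dropped and only dev->lock held.  The request_fn below,
 * mtd_blktrans_request(), never does I/O itself; it merely wakes this
 * thread.
 */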
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_dev *dev = arg;
	struct request_queue *rq = dev->rq;
	struct request *req = NULL;

	spin_lock_irq(rq->queue_lock);

	while (!kthread_should_stop()) {
		int res;

		if (!req && !(req = blk_fetch_request(rq))) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}

static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_dev *dev;
	struct request *req = NULL;

	dev = rq->queuedata;

	if (!dev)
		while ((req = blk_fetch_request(rq)) != NULL)
			__blk_end_request_all(req, -ENODEV);
	else
		wake_up_process(dev->thread);
}

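/*
 * open/release keep the device pinned while userspace holds it: a
 * successful open takes an extra reference, dropped again by the
 * matching release, on top of the short-lived reference that
 * blktrans_dev_get() takes just to dereference disk->private_data
 * safely.
 */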
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret;

	if (!dev)
		return -ERESTARTSYS;

	mutex_lock(&dev->lock);

	if (!dev->mtd) {
		ret = -ENXIO;
		goto unlock;
	}

	ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0;

	/* Take another reference on the device so it won't go away
	   until the last release */
	if (!ret)
		kref_get(&dev->ref);
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	/* Release one reference; we're sure it's not the last one here */
	kref_put(&dev->ref, blktrans_dev_release);

	if (!dev->mtd)
		goto unlock;

	ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	switch (cmd) {
	case BLKFLSBUF:
		ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
		break;
	default:
		ret = -ENOTTY;
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static const struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.locked_ioctl	= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

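/*
 * Link a new device into the translation layer's device list, which is
 * kept sorted by devnum.  A devnum of -1 means "pick the first free
 * slot".  With part_bits set, disks are named like SCSI disks ("%sa"
 * through "%szz", hence the 27 * 26 limit below); otherwise the name is
 * simply "%s%d", e.g. "mtdblock0".
 */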
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	mutex_lock(&blktrans_ref_mutex);
	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			mutex_unlock(&blktrans_ref_mutex);
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26)) {
		mutex_unlock(&blktrans_ref_mutex);
		goto error1;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_unlock(&blktrans_ref_mutex);

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	/* Create gendisk */
	ret = -ENOMEM;
	gd = alloc_disk(1 << tr->part_bits);

	if (!gd)
		goto error2;

	new->disk = gd;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	if (tr->part_bits) {
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	} else {
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);
	}

	set_capacity(gd, (new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

	if (!new->rq)
		goto error3;

	new->rq->queuedata = new;
	blk_queue_logical_block_size(new->rq, tr->blksize);

	if (tr->discard)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
					new->rq);

	gd->queue = new->rq;

	__get_mtd_device(new->mtd);
	__module_get(tr->owner);

	/* Create processing thread */
	/* TODO: workqueue ? */
	new->thread = kthread_run(mtd_blktrans_thread, new,
			"%s%d", tr->name, new->mtd->index);
	if (IS_ERR(new->thread)) {
		ret = PTR_ERR(new->thread);
		goto error4;
	}
	gd->driverfs_dev = &new->mtd->dev;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					new->disk_attributes);
		WARN_ON(ret);
	}
	return 0;
error4:
	module_put(tr->owner);
	__put_mtd_device(new->mtd);
	blk_cleanup_queue(new->rq);
error3:
	put_disk(new->disk);
error2:
	list_del(&new->list);
error1:
	kfree(new);
	return ret;
}

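/*
 * Teardown mirrors setup: remove the disk so no new opens or requests
 * arrive, stop the worker thread, fail anything still queued (the NULL
 * queuedata makes mtd_blktrans_request() end requests with -ENODEV),
 * detach from the MTD device, and finally drop the initial reference.
 * Open file handles keep the structure itself alive until their
 * release.
 */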
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
						old->disk_attributes);

	/* Stop the thread */
	kthread_stop(old->thread);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	blk_start_queue(old->rq);
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* Ask the trans driver to release the mtd device */
	mutex_lock(&old->lock);
	if (old->open && old->tr->release) {
		old->tr->release(old);
		old->open = 0;
	}

	__put_mtd_device(old->mtd);
	module_put(old->tr->owner);

	/* From this point on, we don't touch the mtd anymore */
	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}

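/*
 * MTD core notifier hooks: the core invokes these for every MTD device
 * that appears or disappears, and the event is fanned out to each
 * registered translation layer.
 */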
static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

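/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * read-only translation layer, modelled loosely on mtdblock_ro.  The
 * "myflash" names and the major number are hypothetical; a real driver
 * would pick a registered major and fill in whichever callbacks it
 * supports.
 *
 *	static int myflash_readsect(struct mtd_blktrans_dev *dev,
 *				    unsigned long block, char *buf)
 *	{
 *		size_t retlen;
 *
 *		if (dev->mtd->read(dev->mtd, (loff_t)block * 512, 512,
 *				   &retlen, buf))
 *			return 1;
 *		return 0;
 *	}
 *
 *	static void myflash_add_mtd(struct mtd_blktrans_ops *tr,
 *				    struct mtd_info *mtd)
 *	{
 *		struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (!dev)
 *			return;
 *
 *		dev->mtd = mtd;
 *		dev->devnum = mtd->index;
 *		dev->size = mtd->size >> 9;	(in tr->blksize units)
 *		dev->tr = tr;
 *		dev->readonly = 1;
 *		(on failure, add_mtd_blktrans_dev() frees dev itself)
 *		add_mtd_blktrans_dev(dev);
 *	}
 *
 *	static void myflash_remove_dev(struct mtd_blktrans_dev *dev)
 *	{
 *		(dev is freed by the final blktrans_dev_put())
 *		del_mtd_blktrans_dev(dev);
 *	}
 *
 *	static struct mtd_blktrans_ops myflash_tr = {
 *		.name		= "myflash",
 *		.major		= 240,		(hypothetical major)
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= myflash_readsect,
 *		.add_mtd	= myflash_add_mtd,
 *		.remove_dev	= myflash_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * module_init() would then call register_mtd_blktrans(&myflash_tr) and
 * module_exit() deregister_mtd_blktrans(&myflash_tr).  With no
 * writesect, add_mtd_blktrans_dev() marks the disk read-only anyway;
 * setting dev->readonly just makes the intent explicit.
 */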
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent problems with link/init ordering. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);

	mutex_unlock(&mtd_table_mutex);
	return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	unregister_blkdev(tr->major, tr->name);
	mutex_unlock(&mtd_table_mutex);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");