// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);

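/*
 * Final kref release callback: frees the per-device resources (gendisk,
 * blk-mq tag set, list entry and the device itself) once the last
 * reference taken via kref_get()/blktrans_dev_put() is dropped.
 */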
static void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	put_disk(dev->disk);
	blk_mq_free_tag_set(dev->tag_set);
	kfree(dev->tag_set);
	list_del(&dev->list);
	kfree(dev);
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	kref_put(&dev->ref, blktrans_dev_release);
}


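/*
 * Translate one block layer request into calls to the translation
 * layer's flush()/discard()/readsect()/writesect() hooks.  The starting
 * sector is converted from 512-byte units into the layer's block size
 * via tr->blkshift.  Called with dev->lock held (see mtd_blktrans_work()).
 */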
static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	switch (req_op(req)) {
	case REQ_OP_FLUSH:
		if (tr->flush(dev))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_DISCARD:
		if (tr->discard(dev, block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->readsect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));

		rq_for_each_segment(bvec, req, iter)
			flush_dcache_page(bvec.bv_page);
		return BLK_STS_OK;
	case REQ_OP_WRITE:
		if (!tr->writesect)
			return BLK_STS_IOERR;

		rq_for_each_segment(bvec, req, iter)
			flush_dcache_page(bvec.bv_page);

		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->writesect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}

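/*
 * Exported helper for a translation layer's background() hook: simply
 * returns the device's bg_stop flag so long-running background work can
 * poll whether it should yield.
 */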
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
	return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

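/*
 * Pop the next queued request off dev->rq_list, if any, and mark it as
 * started.  Caller must hold dev->queue_lock.
 */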
static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
{
	struct request *rq;

	rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
	if (rq) {
		list_del_init(&rq->queuelist);
		blk_mq_start_request(rq);
		return rq;
	}

	return NULL;
}

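/*
 * Main request-processing loop.  Drains dev->rq_list, dropping
 * queue_lock and taking dev->lock around each transfer, and runs the
 * layer's optional background() hook once per idle period.  Entered and
 * exited with dev->queue_lock held, as the annotations below indicate.
 */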
static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
	__releases(&dev->queue_lock)
	__acquires(&dev->queue_lock)
{
	struct mtd_blktrans_ops *tr = dev->tr;
	struct request *req = NULL;
	int background_done = 0;

	while (1) {
		blk_status_t res;

		dev->bg_stop = false;
		if (!req && !(req = mtd_next_request(dev))) {
			if (tr->background && !background_done) {
				spin_unlock_irq(&dev->queue_lock);
				mutex_lock(&dev->lock);
				tr->background(dev);
				mutex_unlock(&dev->lock);
				spin_lock_irq(&dev->queue_lock);
				/*
				 * Do background processing just once per idle
				 * period.
				 */
				background_done = !dev->bg_stop;
				continue;
			}
			break;
		}

		spin_unlock_irq(&dev->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
			__blk_mq_end_request(req, res);
			req = NULL;
		}

		background_done = 0;
		cond_resched();
		spin_lock_irq(&dev->queue_lock);
	}
}

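/*
 * blk-mq ->queue_rq() handler: add the request to dev->rq_list and
 * process it synchronously via mtd_blktrans_work().  The tag set is
 * allocated with BLK_MQ_F_BLOCKING, so sleeping in this context is
 * permitted.
 */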
static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct mtd_blktrans_dev *dev;

	dev = hctx->queue->queuedata;
	if (!dev) {
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	spin_lock_irq(&dev->queue_lock);
	list_add_tail(&bd->rq->queuelist, &dev->rq_list);
	mtd_blktrans_work(dev);
	spin_unlock_irq(&dev->queue_lock);

	return BLK_STS_OK;
}

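/*
 * Block device ->open(): take a reference on the blktrans device and, on
 * first open, pin the translation layer module, call its optional open()
 * hook and grab a reference on the underlying MTD device.
 */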
static int blktrans_open(struct gendisk *disk, blk_mode_t mode)
{
	struct mtd_blktrans_dev *dev = disk->private_data;
	int ret = 0;

	kref_get(&dev->ref);

	mutex_lock(&dev->lock);

	if (dev->open)
		goto unlock;

	__module_get(dev->tr->owner);

	if (!dev->mtd)
		goto unlock;

	if (dev->tr->open) {
		ret = dev->tr->open(dev);
		if (ret)
			goto error_put;
	}

	ret = __get_mtd_device(dev->mtd);
	if (ret)
		goto error_release;
	dev->writable = mode & BLK_OPEN_WRITE;

unlock:
	dev->open++;
	mutex_unlock(&dev->lock);
	return ret;

error_release:
	if (dev->tr->release)
		dev->tr->release(dev);
error_put:
	module_put(dev->tr->owner);
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

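/*
 * Block device ->release(): undo blktrans_open() on last close, calling
 * the layer's release() hook and dropping the MTD and module references.
 */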
static void blktrans_release(struct gendisk *disk)
{
	struct mtd_blktrans_dev *dev = disk->private_data;

	mutex_lock(&dev->lock);

	if (--dev->open)
		goto unlock;

	module_put(dev->tr->owner);

	if (dev->mtd) {
		if (dev->tr->release)
			dev->tr->release(dev);
		__put_mtd_device(dev->mtd);
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
}

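/*
 * Block device ->getgeo(): forward to the translation layer's getgeo()
 * hook if it provides one, guarding against an MTD device that has
 * already gone away.
 */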
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
	int ret = -ENXIO;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
	mutex_unlock(&dev->lock);
	return ret;
}

static const struct block_device_operations mtd_block_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.getgeo		= blktrans_getgeo,
};

static const struct blk_mq_ops mtd_mq_ops = {
	.queue_rq	= mtd_queue_rq,
};

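/*
 * Register a new blktrans device: pick a free device number (or honour
 * the requested one), set up the blk-mq tag set and gendisk, name the
 * disk, and publish it with device_add_disk().  Must be called with
 * mtd_table_mutex held; translation layers typically invoke this from
 * their add_mtd() hook.
 */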
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	struct queue_limits lim = { };
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	lockdep_assert_held(&mtd_table_mutex);

	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26))
		return ret;

	list_add_tail(&new->list, &tr->devs);
 added:

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	ret = -ENOMEM;
	new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
	if (!new->tag_set)
		goto out_list_del;

	ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
			BLK_MQ_F_BLOCKING);
	if (ret)
		goto out_kfree_tag_set;

	lim.logical_block_size = tr->blksize;
	if (tr->discard)
		lim.max_hw_discard_sectors = UINT_MAX;
	if (tr->flush)
		lim.features |= BLK_FEAT_WRITE_CACHE;

	/* Create gendisk */
	gd = blk_mq_alloc_disk(new->tag_set, &lim, new);
	if (IS_ERR(gd)) {
		ret = PTR_ERR(gd);
		goto out_free_tag_set;
	}

	new->disk = gd;
	new->rq = new->disk->queue;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->minors = 1 << tr->part_bits;
	gd->fops = &mtd_block_ops;

	if (tr->part_bits) {
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	} else {
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);
		gd->flags |= GENHD_FL_NO_PART;
	}

	set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	INIT_LIST_HEAD(&new->rq_list);
	gd->queue = new->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	ret = device_add_disk(&new->mtd->dev, gd, NULL);
	if (ret)
		goto out_cleanup_disk;

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					new->disk_attributes);
		WARN_ON(ret);
	}
	return 0;

out_cleanup_disk:
	put_disk(new->disk);
out_free_tag_set:
	blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:
	kfree(new->tag_set);
out_list_del:
	list_del(&new->list);
	return ret;
}

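/*
 * Tear down a blktrans device: detach it from the block layer, flush any
 * in-flight requests, close it through the layer's release() hook if it
 * is still open, and drop the list's reference.  Also expects
 * mtd_table_mutex to be held.
 */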
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;
	unsigned int memflags;

	lockdep_assert_held(&mtd_table_mutex);

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
						old->disk_attributes);

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* freeze+quiesce queue to ensure all requests are flushed */
	memflags = blk_mq_freeze_queue(old->rq);
	blk_mq_quiesce_queue(old->rq);
	blk_mq_unquiesce_queue(old->rq);
	blk_mq_unfreeze_queue(old->rq, memflags);

	/* If the device is currently open, tell the trans driver to close it,
	   then put the mtd device, and don't touch it again */
	mutex_lock(&old->lock);
	if (old->open) {
		if (old->tr->release)
			old->tr->release(old);
		__put_mtd_device(old->mtd);
	}

	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}

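/*
 * MTD notifier callback: an MTD device is going away, so ask every
 * registered translation layer to remove the blktrans devices built on
 * top of it.
 */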
static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

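/*
 * MTD notifier callback: a new MTD device has appeared; offer it to every
 * registered translation layer (skipping absent devices and UBI volumes).
 */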
static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT || mtd->type == MTD_UBIVOLUME)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

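/*
 * Register a translation layer.  The layer supplies its name, block size,
 * major number (0 for a dynamically allocated one) and callbacks; every
 * existing and future MTD device is then offered to its add_mtd() hook.
 *
 * Minimal usage sketch for a hypothetical translation layer (the "foo"
 * names below are illustrative, not taken from an in-tree driver):
 *
 *	static struct mtd_blktrans_ops foo_tr = {
 *		.name		= "fooblk",
 *		.major		= 0,		// let the block layer pick one
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= foo_readsect,
 *		.writesect	= foo_writesect,
 *		.add_mtd	= foo_add_mtd,	   // calls add_mtd_blktrans_dev()
 *		.remove_dev	= foo_remove_dev,  // calls del_mtd_blktrans_dev()
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	ret = register_mtd_blktrans(&foo_tr);
 */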
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from tripping
	   us up. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	ret = register_blkdev(tr->major, tr->name);
	if (ret < 0) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		return ret;
	}

	if (ret)
		tr->major = ret;

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);

	mutex_lock(&mtd_table_mutex);
	list_add(&tr->list, &blktrans_majors);
	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT && mtd->type != MTD_UBIVOLUME)
			tr->add_mtd(tr, mtd);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}

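/*
 * Unregister a translation layer: remove all of its devices, drop it
 * from the list of active majors and release its block major number.
 */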
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	mutex_unlock(&mtd_table_mutex);
	unregister_blkdev(tr->major, tr->name);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");