/*
 * block2mtd.c - create an mtd from a block device
 *
 * Copyright (C) 2001,2002	Simon Evans <spse@secret.org.uk>
 * Copyright (C) 2004-2006	Joern Engel <joern@wh.fh-wedel.de>
 *
 * Licence: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*
 * When the first attempt at device initialization fails, we may need to
 * wait a little bit and retry. This timeout, by default 3 seconds, gives
 * the device time to start up. Required on BCM2708 and a few other chipsets.
 */
#define MTD_DEFAULT_TIMEOUT	3

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/major.h>

/* Info for the block device */
struct block2mtd_dev {
	struct list_head list;
	struct block_device *blkdev;
	struct mtd_info mtd;
	struct mutex write_mutex;
};


/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);

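/*
 * Read one page of the block device through its page cache.  The page is
 * returned uptodate with an elevated refcount, or as an ERR_PTR on I/O error.
 */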
static struct page *page_read(struct address_space *mapping, int index)
{
	return read_mapping_page(mapping, index, NULL);
}

/*
 * Erase a region of the device by filling its pages with 0xff.  Pages that
 * already read back as all-ones are left untouched so they are not
 * needlessly marked dirty.
 */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
	struct page *page;
	int index = to >> PAGE_SHIFT;	// page index
	int pages = len >> PAGE_SHIFT;
	u_long *p;
	u_long *max;

	while (pages) {
		page = page_read(mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);

		max = page_address(page) + PAGE_SIZE;
		for (p = page_address(page); p < max; p++)
			if (*p != -1UL) {
				lock_page(page);
				memset(page_address(page), 0xff, PAGE_SIZE);
				set_page_dirty(page);
				unlock_page(page);
				balance_dirty_pages_ratelimited(mapping);
				break;
			}

		put_page(page);
		pages--;
		index++;
	}
	return 0;
}
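
/* MTD erase callback: take the write lock and erase the requested range */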
static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct block2mtd_dev *dev = mtd->priv;
	size_t from = instr->addr;
	size_t len = instr->len;
	int err;

	mutex_lock(&dev->write_mutex);
	err = _block2mtd_erase(dev, from, len);
	mutex_unlock(&dev->write_mutex);
	if (err)
		pr_err("erase failed err = %d\n", err);

	return err;
}

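/* MTD read callback: copy data out of the block device's page cache */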
static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	struct page *page;
	int index = from >> PAGE_SHIFT;
	int offset = from & (PAGE_SIZE-1);
	int cpylen;

	while (len) {
		if ((offset + len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;	// this page
		len = len - cpylen;

		page = page_read(dev->blkdev->bd_inode->i_mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(buf, page_address(page) + offset, cpylen);
		put_page(page);

		if (retlen)
			*retlen += cpylen;
		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}

/* write data to the underlying device */
static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
		loff_t to, size_t len, size_t *retlen)
{
	struct page *page;
	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
	int index = to >> PAGE_SHIFT;	// page index
	int offset = to & ~PAGE_MASK;	// page offset
	int cpylen;

	while (len) {
		if ((offset + len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;			// this page
		len = len - cpylen;

		page = page_read(mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (memcmp(page_address(page) + offset, buf, cpylen)) {
			lock_page(page);
			memcpy(page_address(page) + offset, buf, cpylen);
			set_page_dirty(page);
			unlock_page(page);
			balance_dirty_pages_ratelimited(mapping);
		}
		put_page(page);

		if (retlen)
			*retlen += cpylen;

		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}

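/* MTD write callback: serialize writers and stage the data in the page cache */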
static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	int err;

	mutex_lock(&dev->write_mutex);
	err = _block2mtd_write(dev, buf, to, len, retlen);
	mutex_unlock(&dev->write_mutex);
	if (err > 0)
		err = 0;
	return err;
}

/* sync the device - write back any dirty pages for the underlying block device */
static void block2mtd_sync(struct mtd_info *mtd)
{
	struct block2mtd_dev *dev = mtd->priv;
	sync_blockdev(dev->blkdev);
	return;
}

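/* Drop any cached pages for the block device, release it and free the private data */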
static void block2mtd_free_device(struct block2mtd_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->mtd.name);

	if (dev->blkdev) {
		invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
					0, -1);
		blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	kfree(dev);
}

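/*
 * Open the block device named by devname, sanity-check it and register an
 * MTD device on top of it.  Returns the new device on success, NULL on error.
 */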
static struct block2mtd_dev *add_device(char *devname, int erase_size,
		int timeout)
{
#ifndef MODULE
	int i;
#endif
	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
	struct block_device *bdev;
	struct block2mtd_dev *dev;
	char *name;

	if (!devname)
		return NULL;

	dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	/* Get a handle on the device */
	bdev = blkdev_get_by_path(devname, mode, dev);

#ifndef MODULE
	/*
	 * We might not have the root device mounted at this point.
	 * Try to resolve the device name by other means.
	 */
	for (i = 0; IS_ERR(bdev) && i <= timeout; i++) {
		dev_t devt;

		if (i)
			/*
			 * Calling wait_for_device_probe in the first loop
			 * was not enough, sleep for a bit in subsequent
			 * go-arounds.
			 */
			msleep(1000);
		wait_for_device_probe();

		devt = name_to_dev_t(devname);
		if (!devt)
			continue;
		bdev = blkdev_get_by_dev(devt, mode, dev);
	}
#endif

	if (IS_ERR(bdev)) {
		pr_err("error: cannot open device %s\n", devname);
		goto err_free_block2mtd;
	}
	dev->blkdev = bdev;

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		pr_err("attempting to use an MTD device as a block device\n");
		goto err_free_block2mtd;
	}

	if ((long)dev->blkdev->bd_inode->i_size % erase_size) {
		pr_err("erasesize must be a divisor of device size\n");
		goto err_free_block2mtd;
	}

	mutex_init(&dev->write_mutex);

	/* Set up the MTD structure */
	/* make the name contain the block device name */
	name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
	if (!name)
		goto err_destroy_mutex;

	dev->mtd.name = name;

	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
	dev->mtd.erasesize = erase_size;
	dev->mtd.writesize = 1;
	dev->mtd.writebufsize = PAGE_SIZE;
	dev->mtd.type = MTD_RAM;
	dev->mtd.flags = MTD_CAP_RAM;
	dev->mtd._erase = block2mtd_erase;
	dev->mtd._write = block2mtd_write;
	dev->mtd._sync = block2mtd_sync;
	dev->mtd._read = block2mtd_read;
	dev->mtd.priv = dev;
	dev->mtd.owner = THIS_MODULE;

	if (mtd_device_register(&dev->mtd, NULL, 0)) {
		/* Device didn't get added, so free the entry */
		goto err_destroy_mutex;
	}

	list_add(&dev->list, &blkmtd_device_list);
	pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
		dev->mtd.index,
		dev->mtd.name + strlen("block2mtd: "),
		dev->mtd.erasesize >> 10, dev->mtd.erasesize);
	return dev;

err_destroy_mutex:
	mutex_destroy(&dev->write_mutex);
err_free_block2mtd:
	block2mtd_free_device(dev);
	return NULL;
}


/* This function works like regular strtoul.  In addition, it
 * allows some suffixes for a more human-readable number format:
 * ki, Ki, kiB, KiB	- multiply result by 1024
 * Mi, MiB		- multiply result by 1024^2
 * Gi, GiB		- multiply result by 1024^3
 */
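/* For example, "64KiB" and "64ki" both parse to 65536; "4MiB" parses to 4194304. */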
static int ustrtoul(const char *cp, char **endp, unsigned int base)
{
	unsigned long result = simple_strtoul(cp, endp, base);
	switch (**endp) {
	case 'G':
		result *= 1024;
		/* fall through */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'K':
	case 'k':
		result *= 1024;
		/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
		if ((*endp)[1] == 'i') {
			if ((*endp)[2] == 'B')
				(*endp) += 3;
			else
				(*endp) += 2;
		}
	}
	return result;
}

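/* Parse a size argument; anything left over after the number and suffix is an error */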
static int parse_num(size_t *num, const char *token)
{
	char *endp;
	size_t n;

	n = (size_t) ustrtoul(token, &endp, 0);
	if (*endp)
		return -EINVAL;

	*num = n;
	return 0;
}

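/* Strip the final newline, if any, from the parameter string */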
static inline void kill_final_newline(char *str)
{
	char *newline = strrchr(str, '\n');
	if (newline && !newline[1])
		*newline = 0;
}


#ifndef MODULE
static int block2mtd_init_called = 0;
/* 80 for device, 12 for erase size */
static char block2mtd_paramline[80 + 12];
#endif

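/*
 * Parse one "<dev>[,<erasesize>]" parameter string and create the
 * corresponding block2mtd device.
 */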
static int block2mtd_setup2(const char *val)
{
	/* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
	char buf[80 + 12 + 80 + 8];
	char *str = buf;
	char *token[2];
	char *name;
	size_t erase_size = PAGE_SIZE;
	unsigned long timeout = MTD_DEFAULT_TIMEOUT;
	int i, ret;

	if (strnlen(val, sizeof(buf)) >= sizeof(buf)) {
		pr_err("parameter too long\n");
		return 0;
	}

	strcpy(str, val);
	kill_final_newline(str);

	for (i = 0; i < 2; i++)
		token[i] = strsep(&str, ",");

	if (str) {
		pr_err("too many arguments\n");
		return 0;
	}

	if (!token[0]) {
		pr_err("no argument\n");
		return 0;
	}

	name = token[0];
	if (strlen(name) + 1 > 80) {
		pr_err("device name too long\n");
		return 0;
	}

	if (token[1]) {
		ret = parse_num(&erase_size, token[1]);
		if (ret) {
			pr_err("illegal erase size\n");
			return 0;
		}
	}

	add_device(name, erase_size, timeout);

	return 0;
}


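/* Handler for the block2mtd= boot/module parameter */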
static int block2mtd_setup(const char *val, const struct kernel_param *kp)
{
#ifdef MODULE
	return block2mtd_setup2(val);
#else
	/* If more parameters are later passed in via
	   /sys/module/block2mtd/parameters/block2mtd
	   and block2mtd_init() has already been called,
	   we can parse the argument now. */

	if (block2mtd_init_called)
		return block2mtd_setup2(val);

	/* During the early boot stage, we only save the parameters
	   here. We must parse them later: if the parameter was passed
	   on the kernel boot command line, block2mtd_setup() is
	   called so early that it is not yet possible to resolve
	   the device (even kmalloc() fails). Defer that work to
	   block2mtd_setup2(). */

	strlcpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));

	return 0;
#endif
}


module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");

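/*
 * Example (the device path is purely illustrative):
 *   block2mtd=/dev/loop0,64KiB
 * maps /dev/loop0 as an MTD device with a 64KiB erase size; with no erase
 * size given, PAGE_SIZE is used.
 */

/*
 * Module init: when built in, parse any parameter string that
 * block2mtd_setup() saved during early boot.
 */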
static int __init block2mtd_init(void)
{
	int ret = 0;

#ifndef MODULE
	if (strlen(block2mtd_paramline))
		ret = block2mtd_setup2(block2mtd_paramline);
	block2mtd_init_called = 1;
#endif

	return ret;
}

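/* Module exit: sync, unregister and free every device we created */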
static void block2mtd_exit(void)
{
	struct list_head *pos, *next;

	/* Remove the MTD devices */
	list_for_each_safe(pos, next, &blkmtd_device_list) {
		struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
		block2mtd_sync(&dev->mtd);
		mtd_device_unregister(&dev->mtd);
		mutex_destroy(&dev->write_mutex);
		pr_info("mtd%d: [%s] removed\n",
			dev->mtd.index,
			dev->mtd.name + strlen("block2mtd: "));
		list_del(&dev->list);
		block2mtd_free_device(dev);
	}
}

late_initcall(block2mtd_init);
module_exit(block2mtd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joern Engel <joern@lazybastard.org>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");