// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Direct MTD block device access
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2000-2003 Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/mutex.h>
#include <linux/major.h>


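/*
 * Per-device state: @count is the open count, and the cache_* fields
 * describe a single cached erase block being assembled for writeback.
 */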
struct mtdblk_dev {
	struct mtd_blktrans_dev mbd;
	int count;
	struct mutex cache_mutex;
	unsigned char *cache_data;
	unsigned long cache_offset;
	unsigned int cache_size;
	enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
};

/*
 * Cache stuff...
 *
 * Since typical flash erasable sectors are much larger than what Linux's
 * buffer cache can handle, we must implement read-modify-write on flash
 * sectors for each block write request.  To avoid over-erasing flash
 * sectors and to speed things up, we locally cache a whole flash sector
 * while it is being written to, until a different sector is required.
 */
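
/*
 * Illustrative numbers (an assumption for the sake of example, not taken
 * from this driver): with a 64 KiB erase block and the 512-byte sectors
 * exposed by the block layer, a write-through scheme would erase and
 * reprogram the full 64 KiB for every 512-byte sector written.  The
 * cache instead lets up to 128 adjacent sector writes share a single
 * erase/program cycle, flushed by write_cached_data() below.
 */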
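/*
 * erase_write() - erase the flash region [pos, pos + len) and program it
 * with the contents of @buf.  Returns 0 on success, -EIO on a short
 * write, or the error from mtd_erase()/mtd_write().
 */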
static int erase_write(struct mtd_info *mtd, unsigned long pos,
		       unsigned int len, const char *buf)
{
	struct erase_info erase;
	size_t retlen;
	int ret;

	/*
	 * First, let's erase the flash block.
	 */
	erase.addr = pos;
	erase.len = len;

	ret = mtd_erase(mtd, &erase);
	if (ret) {
		pr_warn("mtdblock: erase of region [0x%lx, 0x%x] on \"%s\" failed\n",
			pos, len, mtd->name);
		return ret;
	}

	/*
	 * Next, write the data to flash.
	 */

	ret = mtd_write(mtd, pos, len, &retlen, buf);
	if (ret)
		return ret;
	if (retlen != len)
		return -EIO;
	return 0;
}


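/*
 * write_cached_data() - flush the cache to flash if it holds dirty data.
 * Note that on success (or on -EIO from a bad block) the cache is marked
 * empty rather than clean; see the comment in the body.
 */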
static int write_cached_data(struct mtdblk_dev *mtdblk)
{
	struct mtd_info *mtd = mtdblk->mbd.mtd;
	int ret;

	if (mtdblk->cache_state != STATE_DIRTY)
		return 0;

	pr_debug("mtdblock: writing cached data for \"%s\" at 0x%lx, size 0x%x\n",
		 mtd->name, mtdblk->cache_offset, mtdblk->cache_size);

	ret = erase_write(mtd, mtdblk->cache_offset,
			  mtdblk->cache_size, mtdblk->cache_data);

	/*
	 * Here we could arguably set the cache state to STATE_CLEAN.
	 * However this could lead to inconsistency since we will not
	 * be notified if this content is altered on the flash by other
	 * means.  Let's declare it empty and leave buffering tasks to
	 * the buffer cache instead.
	 *
	 * If this cache_offset points to a bad block, data cannot be
	 * written to the device. Clear cache_state to avoid writing to
	 * bad blocks repeatedly.
	 */
	if (ret == 0 || ret == -EIO)
		mtdblk->cache_state = STATE_EMPTY;
	return ret;
}


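/*
 * do_cached_write() - write @len bytes at device offset @pos.  Whole
 * aligned sectors go straight to erase_write(); partial sectors are
 * assembled in the cache, flushing it first if it holds dirty data for
 * a different sector.
 */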
static int do_cached_write(struct mtdblk_dev *mtdblk, unsigned long pos,
			   int len, const char *buf)
{
	struct mtd_info *mtd = mtdblk->mbd.mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
		 mtd->name, pos, len);

	if (!sect_size)
		return mtd_write(mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos / sect_size) * sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;

		if (size > len)
			size = len;

		if (size == sect_size) {
			/*
			 * We are covering a whole sector.  Thus there is no
			 * need to bother with the cache while it may still be
			 * useful for other partial writes.
			 */
			ret = erase_write(mtd, pos, size, buf);
			if (ret)
				return ret;
		} else {
			/* Partial sector: need to use the cache */

			if (mtdblk->cache_state == STATE_DIRTY &&
			    mtdblk->cache_offset != sect_start) {
				ret = write_cached_data(mtdblk);
				if (ret)
					return ret;
			}

			if (mtdblk->cache_state == STATE_EMPTY ||
			    mtdblk->cache_offset != sect_start) {
				/* fill the cache with the current sector */
				mtdblk->cache_state = STATE_EMPTY;
				ret = mtd_read(mtd, sect_start, sect_size,
					       &retlen, mtdblk->cache_data);
				if (ret)
					return ret;
				if (retlen != sect_size)
					return -EIO;

				mtdblk->cache_offset = sect_start;
				mtdblk->cache_size = sect_size;
				mtdblk->cache_state = STATE_CLEAN;
			}

			/* write data to our local cache */
			memcpy(mtdblk->cache_data + offset, buf, size);
			mtdblk->cache_state = STATE_DIRTY;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}


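/*
 * do_cached_read() - read @len bytes at device offset @pos, serving the
 * request from the cache when it covers the sector, otherwise reading
 * from flash directly.
 */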
static int do_cached_read(struct mtdblk_dev *mtdblk, unsigned long pos,
			  int len, char *buf)
{
	struct mtd_info *mtd = mtdblk->mbd.mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
		 mtd->name, pos, len);

	if (!sect_size)
		return mtd_read(mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos / sect_size) * sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;

		if (size > len)
			size = len;

		/*
		 * Check if the requested data is already cached: read it
		 * from our internal cache if it contains what we want,
		 * otherwise read the data directly from flash.
		 */
		if (mtdblk->cache_state != STATE_EMPTY &&
		    mtdblk->cache_offset == sect_start) {
			memcpy(buf, mtdblk->cache_data + offset, size);
		} else {
			ret = mtd_read(mtd, pos, size, &retlen, buf);
			if (ret)
				return ret;
			if (retlen != size)
				return -EIO;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}

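/*
 * The blktrans layer works in 512-byte sectors (mtdblock_tr.blksize
 * below), so sector numbers are converted to byte offsets with
 * block << 9.
 */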
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
			     unsigned long block, char *buf)
{
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);

	return do_cached_read(mtdblk, block << 9, 512, buf);
}

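/*
 * The erase-block-sized cache buffer is allocated lazily, on the first
 * write; read-only users never trigger the vmalloc().
 */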
static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
			      unsigned long block, char *buf)
{
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);

	if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
		mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
		if (!mtdblk->cache_data)
			return -EINTR;
		/* -EINTR is not really correct, but it is the best match
		 * documented in man 2 write for all cases.  We could also
		 * return -EAGAIN sometimes, but why bother?
		 */
	}
	return do_cached_write(mtdblk, block << 9, 512, buf);
}

static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
	struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

	pr_debug("mtdblock_open\n");

	if (mtdblk->count) {
		mtdblk->count++;
		return 0;
	}

	if (mtd_type_is_nand(mbd->mtd))
		pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
			mbd->tr->name, mbd->mtd->name);

	/* OK, it's not open. Create cache info for it */
	mtdblk->count = 1;
	mutex_init(&mtdblk->cache_mutex);
	mtdblk->cache_state = STATE_EMPTY;
	if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
		mtdblk->cache_size = mbd->mtd->erasesize;
		mtdblk->cache_data = NULL;
	}

	pr_debug("ok\n");

	return 0;
}

static void mtdblock_release(struct mtd_blktrans_dev *mbd)
{
	struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

	pr_debug("mtdblock_release\n");

	mutex_lock(&mtdblk->cache_mutex);
	write_cached_data(mtdblk);
	mutex_unlock(&mtdblk->cache_mutex);

	if (!--mtdblk->count) {
		/*
		 * It was the last usage. Free the cache, but only sync if
		 * opened for writing.
		 */
		if (mbd->file_mode & FMODE_WRITE)
			mtd_sync(mbd->mtd);
		vfree(mtdblk->cache_data);
	}

	pr_debug("ok\n");
}

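/*
 * Invoked by the blktrans layer when the block layer requests a flush:
 * push out any dirty cached sector, then sync the underlying MTD device.
 */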
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
	int ret;

	mutex_lock(&mtdblk->cache_mutex);
	ret = write_cached_data(mtdblk);
	mutex_unlock(&mtdblk->cache_mutex);
	mtd_sync(dev->mtd);
	return ret;
}

static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;

	dev->mbd.mtd = mtd;
	dev->mbd.devnum = mtd->index;

	dev->mbd.size = mtd->size >> 9;
	dev->mbd.tr = tr;

	if (!(mtd->flags & MTD_WRITEABLE))
		dev->mbd.readonly = 1;

	if (add_mtd_blktrans_dev(&dev->mbd))
		kfree(dev);
}

static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
}

static struct mtd_blktrans_ops mtdblock_tr = {
	.name		= "mtdblock",
	.major		= MTD_BLOCK_MAJOR,
	.part_bits	= 0,
	.blksize	= 512,
	.open		= mtdblock_open,
	.flush		= mtdblock_flush,
	.release	= mtdblock_release,
	.readsect	= mtdblock_readsect,
	.writesect	= mtdblock_writesect,
	.add_mtd	= mtdblock_add_mtd,
	.remove_dev	= mtdblock_remove_dev,
	.owner		= THIS_MODULE,
};
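
/*
 * module_mtd_blktrans() expands to the module_init()/module_exit() pair
 * that registers and deregisters mtdblock_tr with the blktrans layer.
 */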
module_mtd_blktrans(mtdblock_tr);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net> et al.");
MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");