// SPDX-License-Identifier: GPL-2.0

#define dev_fmt(fmt) "mtdoops-pstore: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pstore_blk.h>
#include <linux/mtd/mtd.h>
#include <linux/bitops.h>

static struct mtdpstore_context {
	int index;
	struct pstore_blk_config info;
	struct pstore_device_info dev;
	struct mtd_info *mtd;
	unsigned long *rmmap;		/* removed bit map */
	unsigned long *usedmap;		/* used bit map */
	/*
	 * Used for panic write.
	 * Since bad blocks cannot be checked in the panic case, keep this
	 * status cached beforehand so that panic_write does not fail.
	 */
	unsigned long *badmap;		/* bad block bit map */
} oops_cxt;

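/*
 * Check whether the eraseblock containing @off is bad, caching a positive
 * answer in the badmap so the panic path can reuse it without calling into
 * the MTD driver.
 */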
static int mtdpstore_block_isbad(struct mtdpstore_context *cxt, loff_t off)
{
	int ret;
	struct mtd_info *mtd = cxt->mtd;
	u64 blknum;

	off = ALIGN_DOWN(off, mtd->erasesize);
	blknum = div_u64(off, mtd->erasesize);

	if (test_bit(blknum, cxt->badmap))
		return true;
	ret = mtd_block_isbad(mtd, off);
	if (ret < 0) {
		dev_err(&mtd->dev, "mtd_block_isbad failed, aborting\n");
		return ret;
	} else if (ret > 0) {
		set_bit(blknum, cxt->badmap);
		return true;
	}
	return false;
}

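/* Panic-safe variant: consult only the cached badmap, never the driver. */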
static inline int mtdpstore_panic_block_isbad(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u64 blknum;

	off = ALIGN_DOWN(off, mtd->erasesize);
	blknum = div_u64(off, mtd->erasesize);
	return test_bit(blknum, cxt->badmap);
}

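/*
 * Each kmsg_size-sized zone has one bit in usedmap. The helpers below set or
 * clear the bit of a single zone, or clear the bits of every zone in the
 * eraseblock containing @off.
 */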
static inline void mtdpstore_mark_used(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u64 zonenum = div_u64(off, cxt->info.kmsg_size);

	dev_dbg(&mtd->dev, "mark zone %llu used\n", zonenum);
	set_bit(zonenum, cxt->usedmap);
}

static inline void mtdpstore_mark_unused(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u64 zonenum = div_u64(off, cxt->info.kmsg_size);

	dev_dbg(&mtd->dev, "mark zone %llu unused\n", zonenum);
	clear_bit(zonenum, cxt->usedmap);
}

static inline void mtdpstore_block_mark_unused(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
	u64 zonenum;

	off = ALIGN_DOWN(off, mtd->erasesize);
	zonenum = div_u64(off, cxt->info.kmsg_size);
	while (zonecnt > 0) {
		dev_dbg(&mtd->dev, "mark zone %llu unused\n", zonenum);
		clear_bit(zonenum, cxt->usedmap);
		zonenum++;
		zonecnt--;
	}
}

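/*
 * A zone inside a block marked bad is reported as "used" so that writers
 * skip it; mtdpstore_block_is_used() is true if any zone of the block is
 * still in use.
 */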
static inline int mtdpstore_is_used(struct mtdpstore_context *cxt, loff_t off)
{
	u64 zonenum = div_u64(off, cxt->info.kmsg_size);
	u64 blknum = div_u64(off, cxt->mtd->erasesize);

	if (test_bit(blknum, cxt->badmap))
		return true;
	return test_bit(zonenum, cxt->usedmap);
}

static int mtdpstore_block_is_used(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
	u64 zonenum;

	off = ALIGN_DOWN(off, mtd->erasesize);
	zonenum = div_u64(off, cxt->info.kmsg_size);
	while (zonecnt > 0) {
		if (test_bit(zonenum, cxt->usedmap))
			return true;
		zonenum++;
		zonecnt--;
	}
	return false;
}

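/*
 * A zone is treated as empty when its first bytes (at most writesize / 4)
 * still read back as 0xFF, i.e. the flash is in the erased state.
 */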
static int mtdpstore_is_empty(struct mtdpstore_context *cxt, char *buf,
		size_t size)
{
	struct mtd_info *mtd = cxt->mtd;
	size_t sz;
	int i;

	sz = min_t(uint32_t, size, mtd->writesize / 4);
	for (i = 0; i < sz; i++) {
		if (buf[i] != (char)0xFF)
			return false;
	}
	return true;
}

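/*
 * rmmap tracks zones whose records have been removed from pstore but whose
 * eraseblock could not be erased yet because it still holds valid logs; such
 * blocks are erased lazily in mtdpstore_flush_removed().
 */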
static void mtdpstore_mark_removed(struct mtdpstore_context *cxt, loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u64 zonenum = div_u64(off, cxt->info.kmsg_size);

	dev_dbg(&mtd->dev, "mark zone %llu removed\n", zonenum);
	set_bit(zonenum, cxt->rmmap);
}

static void mtdpstore_block_clear_removed(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
	u64 zonenum;

	off = ALIGN_DOWN(off, mtd->erasesize);
	zonenum = div_u64(off, cxt->info.kmsg_size);
	while (zonecnt > 0) {
		clear_bit(zonenum, cxt->rmmap);
		zonenum++;
		zonecnt--;
	}
}

static int mtdpstore_block_is_removed(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
	u64 zonenum;

	off = ALIGN_DOWN(off, mtd->erasesize);
	zonenum = div_u64(off, cxt->info.kmsg_size);
	while (zonecnt > 0) {
		if (test_bit(zonenum, cxt->rmmap))
			return true;
		zonenum++;
		zonecnt--;
	}
	return false;
}

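/*
 * Erase the whole eraseblock containing @off and, on success, clear its
 * removed bits.
 */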
static int mtdpstore_erase_do(struct mtdpstore_context *cxt, loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	struct erase_info erase;
	int ret;

	off = ALIGN_DOWN(off, cxt->mtd->erasesize);
	dev_dbg(&mtd->dev, "try to erase off 0x%llx\n", off);
	erase.len = cxt->mtd->erasesize;
	erase.addr = off;
	ret = mtd_erase(cxt->mtd, &erase);
	if (!ret)
		mtdpstore_block_clear_removed(cxt, off);
	else
		dev_err(&mtd->dev, "erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
		       (unsigned long long)erase.addr,
		       (unsigned long long)erase.len, cxt->info.device);
	return ret;
}

/*
 * Called when a record file is removed.
 *
 * To avoid excessive erasing, a block is erased only when it is entirely
 * unused. If the block still contains valid logs, the erase is done lazily
 * by mtdpstore_flush_removed() at unregister time.
 */
static ssize_t mtdpstore_erase(size_t size, loff_t off)
{
	struct mtdpstore_context *cxt = &oops_cxt;

	if (mtdpstore_block_isbad(cxt, off))
		return -EIO;

	mtdpstore_mark_unused(cxt, off);

	/* If the block still has valid data, mtdpstore erases it lazily */
	if (likely(mtdpstore_block_is_used(cxt, off))) {
		mtdpstore_mark_removed(cxt, off);
		return 0;
	}

	/* all zones are unused, erase it */
	return mtdpstore_erase_do(cxt, off);
}

/*
 * What does "security" mean for mtdpstore?
 * Since nothing can be erased in the panic case, at least one zone must stay
 * writable at all times; otherwise panic_write will fail.
 * If a zone is already used, the write operation returns -ENOMSG and
 * pstore/blk keeps trying zones one by one until it finds an empty one. So
 * the next zone does not have to be empty, but at least one zone must be.
 */
static int mtdpstore_security(struct mtdpstore_context *cxt, loff_t off)
{
	int ret = 0, i;
	struct mtd_info *mtd = cxt->mtd;
	u32 zonenum = (u32)div_u64(off, cxt->info.kmsg_size);
	u32 zonecnt = (u32)div_u64(cxt->mtd->size, cxt->info.kmsg_size);
	u32 blkcnt = (u32)div_u64(cxt->mtd->size, cxt->mtd->erasesize);
	u32 erasesize = cxt->mtd->erasesize;

	for (i = 0; i < zonecnt; i++) {
		u32 num = (zonenum + i) % zonecnt;

		/* found an empty zone */
		if (!test_bit(num, cxt->usedmap))
			return 0;
	}

	/* If no zone is empty, we have no choice but to erase a block */
	while (blkcnt--) {
		div64_u64_rem(off + erasesize, cxt->mtd->size, (u64 *)&off);

		if (mtdpstore_block_isbad(cxt, off))
			continue;

		ret = mtdpstore_erase_do(cxt, off);
		if (!ret) {
			mtdpstore_block_mark_unused(cxt, off);
			break;
		}
	}

	if (ret)
		dev_err(&mtd->dev, "all blocks bad!\n");
	dev_dbg(&mtd->dev, "end security\n");
	return ret;
}

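/*
 * Write one kmsg record. -ENOMSG tells pstore/blk to try the next zone when
 * the block is bad or the zone is already occupied; on success the zone is
 * marked used and mtdpstore_security() keeps at least one zone free for a
 * later panic write.
 */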
static ssize_t mtdpstore_write(const char *buf, size_t size, loff_t off)
{
	struct mtdpstore_context *cxt = &oops_cxt;
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen;
	int ret;

	if (mtdpstore_block_isbad(cxt, off))
		return -ENOMSG;

	/* zone is used, please try next one */
	if (mtdpstore_is_used(cxt, off))
		return -ENOMSG;

	dev_dbg(&mtd->dev, "try to write off 0x%llx size %zu\n", off, size);
	ret = mtd_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
	if (ret < 0 || retlen != size) {
		dev_err(&mtd->dev, "write failure at %lld (%zu of %zu written), err %d\n",
				off, retlen, size, ret);
		return -EIO;
	}
	mtdpstore_mark_used(cxt, off);

	mtdpstore_security(cxt, off);
	return retlen;
}

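/* Anything other than a corrected bitflip or an ECC error is a real I/O error. */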
static inline bool mtdpstore_is_io_error(int ret)
{
	return ret < 0 && !mtd_is_bitflip(ret) && !mtd_is_eccerr(ret);
}

/*
 * Every zone is read here, because pstore/blk reads the zones one by one
 * when it recovers the records.
 */
static ssize_t mtdpstore_read(char *buf, size_t size, loff_t off)
{
	struct mtdpstore_context *cxt = &oops_cxt;
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen, done;
	int ret;

	if (mtdpstore_block_isbad(cxt, off))
		return -ENOMSG;

	dev_dbg(&mtd->dev, "try to read off 0x%llx size %zu\n", off, size);
	for (done = 0, retlen = 0; done < size; done += retlen) {
		retlen = 0;

		ret = mtd_read(cxt->mtd, off + done, size - done, &retlen,
				(u_char *)buf + done);
		if (mtdpstore_is_io_error(ret)) {
			dev_err(&mtd->dev, "read failure at %lld (%zu of %zu read), err %d\n",
					off + done, retlen, size - done, ret);
			/* the zone may be broken, try next one */
			return -ENOMSG;
		}

		/*
		 * ECC error. The impact on the log data is usually small and
		 * the data may still be readable and understandable, so
		 * mtdpstore hands over whatever it got and lets the user
		 * judge whether the data is valid or not.
		 */
		if (mtd_is_eccerr(ret)) {
			dev_err(&mtd->dev, "ecc error at %lld (%zu of %zu read), err %d\n",
					off + done, retlen, size - done, ret);
			/* the driver may not set retlen on an ECC error */
			retlen = retlen == 0 ? size - done : retlen;
		}
	}

	if (mtdpstore_is_empty(cxt, buf, size))
		mtdpstore_mark_unused(cxt, off);
	else
		mtdpstore_mark_used(cxt, off);

	mtdpstore_security(cxt, off);
	return retlen;
}

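/*
 * Panic-path variant of mtdpstore_write(): it uses mtd_panic_write() and only
 * the cached badmap, and it never erases anything here.
 */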
static ssize_t mtdpstore_panic_write(const char *buf, size_t size, loff_t off)
{
	struct mtdpstore_context *cxt = &oops_cxt;
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen;
	int ret;

	if (mtdpstore_panic_block_isbad(cxt, off))
		return -ENOMSG;

	/* zone is used, please try next one */
	if (mtdpstore_is_used(cxt, off))
		return -ENOMSG;

	ret = mtd_panic_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
	if (ret < 0 || size != retlen) {
		dev_err(&mtd->dev, "panic write failure at %lld (%zu of %zu written), err %d\n",
				off, retlen, size, ret);
		return -EIO;
	}
	mtdpstore_mark_used(cxt, off);

	return retlen;
}

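/*
 * MTD notifier "add" callback: match the device configured for pstore/blk
 * (by name here, or by the index parsed in mtdpstore_init()), validate its
 * geometry against kmsg_size, allocate the bitmaps and register with
 * pstore/blk.
 */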
static void mtdpstore_notify_add(struct mtd_info *mtd)
{
	int ret;
	struct mtdpstore_context *cxt = &oops_cxt;
	struct pstore_blk_config *info = &cxt->info;
	unsigned long longcnt;

	if (!strcmp(mtd->name, info->device))
		cxt->index = mtd->index;

	if (mtd->index != cxt->index || cxt->index < 0)
		return;

	dev_dbg(&mtd->dev, "found matching MTD device %s\n", mtd->name);

	if (mtd->size < info->kmsg_size * 2) {
		dev_err(&mtd->dev, "MTD partition %d not big enough\n",
				mtd->index);
		return;
	}
	/*
	 * kmsg_size must be aligned to 4096 bytes, which is a limit imposed
	 * by pstore/blk. The default kmsg_size is 64KB. If kmsg_size were
	 * larger than erasesize, errors would occur, since mtdpstore is
	 * designed around that assumption.
	 */
	if (mtd->erasesize < info->kmsg_size) {
		dev_err(&mtd->dev, "eraseblock size of MTD partition %d too small\n",
				mtd->index);
		return;
	}
	if (unlikely(info->kmsg_size % mtd->writesize)) {
		dev_err(&mtd->dev, "record size %lu KB must align to write size %d KB\n",
				info->kmsg_size / 1024,
				mtd->writesize / 1024);
		return;
	}

	longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size));
	cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
	cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);

	longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
	cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);

	cxt->dev.total_size = mtd->size;
	/* only dmesg is supported right now */
	cxt->dev.flags = PSTORE_FLAGS_DMESG;
	cxt->dev.read = mtdpstore_read;
	cxt->dev.write = mtdpstore_write;
	cxt->dev.erase = mtdpstore_erase;
	cxt->dev.panic_write = mtdpstore_panic_write;

	ret = register_pstore_device(&cxt->dev);
	if (ret) {
		dev_err(&mtd->dev, "mtd%d register to psblk failed\n",
				mtd->index);
		return;
	}
	cxt->mtd = mtd;
	dev_info(&mtd->dev, "Attached to MTD device %d\n", mtd->index);
}

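/*
 * Rewrite one eraseblock that contains removed records: cache the whole block
 * in RAM, erase it, then write back only the zones that are still marked
 * used.
 */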
static int mtdpstore_flush_removed_do(struct mtdpstore_context *cxt,
		loff_t off, size_t size)
{
	struct mtd_info *mtd = cxt->mtd;
	u_char *buf;
	int ret;
	size_t retlen;
	size_t done = 0;
	struct erase_info erase;

	buf = kmalloc(mtd->erasesize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* 1st. read to cache */
	ret = mtd_read(mtd, off, mtd->erasesize, &retlen, buf);
	if (mtdpstore_is_io_error(ret))
		goto free;

	/* 2nd. erase block */
	erase.len = mtd->erasesize;
	erase.addr = off;
	ret = mtd_erase(mtd, &erase);
	if (ret)
		goto free;

	/* 3rd. write back */
	while (size) {
		unsigned int zonesize = cxt->info.kmsg_size;

		/* the zone still holds valid data, write it back from the cache */
		if (mtdpstore_is_used(cxt, off)) {
			ret = mtd_write(mtd, off, zonesize, &retlen, buf + done);
			if (ret)
				dev_err(&mtd->dev, "write failure at %lld (%zu of %u written), err %d\n",
						off, retlen, zonesize, ret);
		}

		off += zonesize;
		done += zonesize;
		size -= min_t(unsigned int, zonesize, size);
	}

free:
	kfree(buf);
	return ret;
}

/*
 * What does mtdpstore_flush_removed() do?
 * When the user removes a log file from the pstore filesystem, mtdpstore must
 * make sure the record really is gone from flash. If the whole block is no
 * longer used, simply erasing it is enough. If the block still contains valid
 * logs, however, all mtdpstore can do is erase the block and write the valid
 * logs back.
 */
static int mtdpstore_flush_removed(struct mtdpstore_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	int ret;
	loff_t off;
	u32 blkcnt = (u32)div_u64(mtd->size, mtd->erasesize);

	for (off = 0; blkcnt > 0; blkcnt--, off += mtd->erasesize) {
		ret = mtdpstore_block_isbad(cxt, off);
		if (ret)
			continue;

		ret = mtdpstore_block_is_removed(cxt, off);
		if (!ret)
			continue;

		ret = mtdpstore_flush_removed_do(cxt, off, mtd->erasesize);
		if (ret)
			return ret;
	}
	return 0;
}

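/*
 * MTD notifier "remove" callback: flush any pending lazy erases, unregister
 * from pstore/blk and free the bitmaps.
 */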
static void mtdpstore_notify_remove(struct mtd_info *mtd)
{
	struct mtdpstore_context *cxt = &oops_cxt;

	if (mtd->index != cxt->index || cxt->index < 0)
		return;

	mtdpstore_flush_removed(cxt);

	unregister_pstore_device(&cxt->dev);
	kfree(cxt->badmap);
	kfree(cxt->usedmap);
	kfree(cxt->rmmap);
	cxt->mtd = NULL;
	cxt->index = -1;
}

static struct mtd_notifier mtdpstore_notifier = {
	.add	= mtdpstore_notify_add,
	.remove	= mtdpstore_notify_remove,
};

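/*
 * Fetch the pstore/blk configuration, require a device name and a non-zero
 * kmsg_size, try to parse the device as an MTD index (otherwise it is matched
 * by name in mtdpstore_notify_add()) and register the MTD notifier.
 */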
static int __init mtdpstore_init(void)
{
	int ret;
	struct mtdpstore_context *cxt = &oops_cxt;
	struct pstore_blk_config *info = &cxt->info;

	ret = pstore_blk_get_config(info);
	if (unlikely(ret))
		return ret;

	if (strlen(info->device) == 0) {
		pr_err("mtd device must be supplied (device name is empty)\n");
		return -EINVAL;
	}
	if (!info->kmsg_size) {
		pr_err("no backend enabled (kmsg_size is 0)\n");
		return -EINVAL;
	}

	/* Setup the MTD device to use */
	ret = kstrtoint((char *)info->device, 0, &cxt->index);
	if (ret)
		cxt->index = -1;

	register_mtd_user(&mtdpstore_notifier);
	return 0;
}
module_init(mtdpstore_init);

static void __exit mtdpstore_exit(void)
{
	unregister_mtd_user(&mtdpstore_notifier);
}
module_exit(mtdpstore_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>");
MODULE_DESCRIPTION("MTD backend for pstore/blk");