xref: /linux/drivers/mtd/mtdoops.c (revision 6ee738610f41b59733f63718f0bdbcba7d3a3f12)
/*
 * MTD Oops/Panic logger
 *
 * Copyright (C) 2007 Nokia Corporation. All rights reserved.
 *
 * Author: Richard Purdie <rpurdie@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>

#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
#define OOPS_PAGE_SIZE 4096
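
/*
 * On-flash record layout, as used by the read/write paths below: each
 * OOPS_PAGE_SIZE record starts with a 32-bit sequence counter at offset 0
 * and MTDOOPS_KERNMSG_MAGIC at offset 4, followed by the captured console
 * text. Unused space is left as 0xff, so a record whose first word reads
 * 0xffffffff is an erased, writable page. Roughly (illustrative sketch
 * only, no such struct exists in this file):
 *
 *	struct mtdoops_record {
 *		u32  count;			// sequence counter
 *		u32  magic;			// MTDOOPS_KERNMSG_MAGIC
 *		char text[OOPS_PAGE_SIZE - 8];	// log text, 0xff padded
 *	};
 */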

static struct mtdoops_context {
	int mtd_index;
	struct work_struct work_erase;
	struct work_struct work_write;
	struct mtd_info *mtd;
	int oops_pages;
	int nextpage;
	int nextcount;
	char *name;

	void *oops_buf;

	/* writecount and the clearing of ready are protected by writecount_lock */
	spinlock_t writecount_lock;
	int ready;
	int writecount;
} oops_cxt;

static void mtdoops_erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
	wake_up(wait_q);
}

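/*
 * Erase one eraseblock synchronously: mtd->erase() is asynchronous, so we
 * queue ourselves on a local waitqueue, hand it to the driver through
 * erase_info->priv, and sleep until mtdoops_erase_callback() wakes us.
 */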
static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
{
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	int ret;

	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = mtdoops_erase_callback;
	erase.addr = offset;
	erase.len = mtd->erasesize;
	erase.priv = (u_long)&wait_q;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = mtd->erase(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] "
				    "on \"%s\" failed\n",
			(unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name);
		return ret;
	}

	schedule();  /* Wait for erase to finish. */
	remove_wait_queue(&wait_q, &wait);

	return 0;
}

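/*
 * Advance to the next record slot and peek at its first word: 0xffffffff
 * means the page is already erased and can be written immediately;
 * anything else (or a failed read) means the block must be erased first,
 * so the erase work is scheduled instead of setting ready.
 */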
static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen;
	u32 count;
	int ret;

	cxt->nextpage++;
	if (cxt->nextpage >= cxt->oops_pages)
		cxt->nextpage = 0;
	cxt->nextcount++;
	if (cxt->nextcount == 0xffffffff)
		cxt->nextcount = 0;

	ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
			&retlen, (u_char *) &count);
	if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
		printk(KERN_ERR "mtdoops: Read failure at %d (%zu of 4 read)"
				", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
				retlen, ret);
		schedule_work(&cxt->work_erase);
		return;
	}

	/* See if we need to erase the next block */
	if (count != 0xffffffff) {
		schedule_work(&cxt->work_erase);
		return;
	}

	printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
			cxt->nextpage, cxt->nextcount);
	cxt->ready = 1;
}

/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_erase);
	struct mtd_info *mtd = cxt->mtd;
	int i = 0, j, ret, mod;

	/* We were unregistered */
	if (!mtd)
		return;

	/* If nextpage is mid-eraseblock, advance it to the next block boundary */
	mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize;
	if (mod != 0) {
		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
	}

	/* Skip bad blocks, wrapping around; give up once every block has been tried */
	while (mtd->block_isbad) {
		ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
		if (!ret)
			break;
		if (ret < 0) {
			printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
			return;
		}
badblock:
		printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
				cxt->nextpage * OOPS_PAGE_SIZE);
		i++;
		cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
		if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) {
			printk(KERN_ERR "mtdoops: All blocks bad!\n");
			return;
		}
	}

	/* Retry the erase up to three times */
	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
		ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);

	if (ret >= 0) {
		printk(KERN_DEBUG "mtdoops: Ready %d, %d\n", cxt->nextpage, cxt->nextcount);
		cxt->ready = 1;
		return;
	}

	/* Erase failed hard: mark the block bad and move on to the next one */
	if (mtd->block_markbad && (ret == -EIO)) {
		ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
		if (ret < 0) {
			printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
			return;
		}
	}
	goto badblock;
}

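/*
 * Flush the accumulated buffer to flash as one OOPS_PAGE_SIZE record. On
 * the panic path this must go through mtd->panic_write(), which is meant
 * to work without sleeping or relying on interrupts; otherwise the normal
 * mtd->write() is used from process context.
 */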
static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen;
	int ret;

	/* Pad the remainder of the record with 0xff (erased flash) */
	if (cxt->writecount < OOPS_PAGE_SIZE)
		memset(cxt->oops_buf + cxt->writecount, 0xff,
					OOPS_PAGE_SIZE - cxt->writecount);

	if (panic)
		ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
					OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
	else
		ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
					OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);

	cxt->writecount = 0;

	if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
		printk(KERN_ERR "mtdoops: Write failure at %d (%zu of %d written), err %d.\n",
			cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);

	mtdoops_inc_counter(cxt);
}

static void mtdoops_workfunc_write(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_write);

	mtdoops_write(cxt, 0);
}

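/*
 * Scan every record on the device for the highest sequence counter, so
 * logging resumes where it left off after a reboot. The comparisons
 * against 0x40000000/0xc0000000 treat the 32-bit counter as circular:
 * once the counter has wrapped, a numerically small count is still
 * preferred over a very large one.
 */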
static void find_next_position(struct mtdoops_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	int ret, page, maxpos = 0;
	u32 count[2], maxcount = 0xffffffff;
	size_t retlen;

	for (page = 0; page < cxt->oops_pages; page++) {
		ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]);
		if ((retlen != 8) || ((ret < 0) && (ret != -EUCLEAN))) {
			printk(KERN_ERR "mtdoops: Read failure at %d (%zu of 8 read)"
				", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
			continue;
		}

		if (count[1] != MTDOOPS_KERNMSG_MAGIC)
			continue;
		if (count[0] == 0xffffffff)
			continue;
		if (maxcount == 0xffffffff) {
			maxcount = count[0];
			maxpos = page;
		} else if ((count[0] < 0x40000000) && (maxcount > 0xc0000000)) {
			maxcount = count[0];
			maxpos = page;
		} else if ((count[0] > maxcount) && (count[0] < 0xc0000000)) {
			maxcount = count[0];
			maxpos = page;
		} else if ((count[0] > maxcount) && (count[0] > 0xc0000000)
					&& (maxcount > 0x80000000)) {
			maxcount = count[0];
			maxpos = page;
		}
	}
	/* No valid records found: start from scratch at page 0 */
	if (maxcount == 0xffffffff) {
		cxt->nextpage = 0;
		cxt->nextcount = 1;
		schedule_work(&cxt->work_erase);
		return;
	}

	cxt->nextpage = maxpos;
	cxt->nextcount = maxcount;

	mtdoops_inc_counter(cxt);
}

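/*
 * MTD add notifier: attach to the partition selected either by index from
 * the command line or by the name captured in console setup, after
 * checking that the partition spans at least two eraseblocks and that an
 * eraseblock can hold at least one record.
 */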
static void mtdoops_notify_add(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;

	if (cxt->name && !strcmp(mtd->name, cxt->name))
		cxt->mtd_index = mtd->index;

	if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
		return;

	if (mtd->size < (mtd->erasesize * 2)) {
		printk(KERN_ERR "MTD partition %d not big enough for mtdoops\n",
				mtd->index);
		return;
	}

	if (mtd->erasesize < OOPS_PAGE_SIZE) {
		printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
				mtd->index);
		return;
	}

	cxt->mtd = mtd;
	if (mtd->size > INT_MAX)
		cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE;
	else
		cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE;

	find_next_position(cxt);

	printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}

static void mtdoops_notify_remove(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;

	if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
		return;

	cxt->mtd = NULL;
	flush_scheduled_work();
}

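/*
 * Commit the current record. Clearing ready under writecount_lock fences
 * off mtdoops_console_write(); the flash write itself then goes through
 * panic_write() if we are in interrupt context (i.e. about to panic), or
 * is deferred to the write workqueue otherwise.
 */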
static void mtdoops_console_sync(void)
{
	struct mtdoops_context *cxt = &oops_cxt;
	struct mtd_info *mtd = cxt->mtd;
	unsigned long flags;

	if (!cxt->ready || !mtd || cxt->writecount == 0)
		return;

	/*
	 * Once ready is 0 and we've held the lock, no further writes to the
	 * buffer will happen.
	 */
	spin_lock_irqsave(&cxt->writecount_lock, flags);
	if (!cxt->ready) {
		spin_unlock_irqrestore(&cxt->writecount_lock, flags);
		return;
	}
	cxt->ready = 0;
	spin_unlock_irqrestore(&cxt->writecount_lock, flags);

	if (mtd->panic_write && in_interrupt())
		/* Interrupt context, we're going to panic so try and log */
		mtdoops_write(cxt, 1);
	else
		schedule_work(&cxt->work_write);
}

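/*
 * Console write hook. Outside of an oops this only triggers a sync;
 * during an oops it appends the message text to oops_buf, stamping the
 * sequence counter and magic into the first 8 bytes of a fresh record,
 * and forces a flush once the record fills up.
 */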
static void
mtdoops_console_write(struct console *co, const char *s, unsigned int count)
{
	struct mtdoops_context *cxt = co->data;
	struct mtd_info *mtd = cxt->mtd;
	unsigned long flags;

	if (!oops_in_progress) {
		mtdoops_console_sync();
		return;
	}

	if (!cxt->ready || !mtd)
		return;

	/* Locking on writecount ensures sequential writes to the buffer */
	spin_lock_irqsave(&cxt->writecount_lock, flags);

	/* Check ready status didn't change whilst waiting for the lock */
	if (!cxt->ready) {
		spin_unlock_irqrestore(&cxt->writecount_lock, flags);
		return;
	}

	if (cxt->writecount == 0) {
		u32 *stamp = cxt->oops_buf;
		*stamp++ = cxt->nextcount;
		*stamp = MTDOOPS_KERNMSG_MAGIC;
		cxt->writecount = 8;
	}

	/* Clamp the copy so the record never overruns one page */
	if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
		count = OOPS_PAGE_SIZE - cxt->writecount;

	memcpy(cxt->oops_buf + cxt->writecount, s, count);
	cxt->writecount += count;

	spin_unlock_irqrestore(&cxt->writecount_lock, flags);

	if (cxt->writecount == OOPS_PAGE_SIZE)
		mtdoops_console_sync();
}

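/*
 * Parse the console= argument. The target partition can be selected by
 * number (e.g. console=ttyMTD4, where co->index carries the trailing
 * digits) or, as far as the options string goes, by partition name, which
 * is later matched against mtd->name in mtdoops_notify_add().
 */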
static int __init mtdoops_console_setup(struct console *co, char *options)
{
	struct mtdoops_context *cxt = co->data;

	if (cxt->mtd_index != -1 || cxt->name)
		return -EBUSY;
	if (options) {
		cxt->name = kstrdup(options, GFP_KERNEL);
		return 0;
	}
	if (co->index == -1)
		return -EINVAL;

	cxt->mtd_index = co->index;
	return 0;
}

static struct mtd_notifier mtdoops_notifier = {
	.add	= mtdoops_notify_add,
	.remove	= mtdoops_notify_remove,
};

static struct console mtdoops_console = {
	.name		= "ttyMTD",
	.write		= mtdoops_console_write,
	.setup		= mtdoops_console_setup,
	.unblank	= mtdoops_console_sync,
	.index		= -1,
	.data		= &oops_cxt,
};

static int __init mtdoops_console_init(void)
{
	struct mtdoops_context *cxt = &oops_cxt;

	cxt->mtd_index = -1;
	cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
	spin_lock_init(&cxt->writecount_lock);

	if (!cxt->oops_buf) {
		printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
		return -ENOMEM;
	}

	INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
	INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);

	register_console(&mtdoops_console);
	register_mtd_user(&mtdoops_notifier);
	return 0;
}

static void __exit mtdoops_console_exit(void)
{
	struct mtdoops_context *cxt = &oops_cxt;

	unregister_mtd_user(&mtdoops_notifier);
	unregister_console(&mtdoops_console);
	kfree(cxt->name);
	vfree(cxt->oops_buf);
}


subsys_initcall(mtdoops_console_init);
module_exit(mtdoops_console_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");