// SPDX-License-Identifier: GPL-2.0-only
/*
 * MTD Oops/Panic logger
 *
 * Copyright © 2007 Nokia Corporation. All rights reserved.
 *
 * Author: Richard Purdie <rpurdie@openedhand.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/kmsg_dump.h>

/* Maximum MTD partition size */
#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)

#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
#define MTDOOPS_HEADER_SIZE   8

static unsigned long record_size = 4096;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
		"record size for MTD OOPS pages in bytes (default 4096)");

static char mtddev[80];
module_param_string(mtddev, mtddev, 80, 0400);
MODULE_PARM_DESC(mtddev,
		"name or index number of the MTD device to use");

static int dump_oops = 1;
module_param(dump_oops, int, 0600);
MODULE_PARM_DESC(dump_oops,
		"set to 1 to dump oopses, 0 to only dump panics (default 1)");

static struct mtdoops_context {
	struct kmsg_dumper dump;

	int mtd_index;
	struct work_struct work_erase;
	struct work_struct work_write;
	struct mtd_info *mtd;
	int oops_pages;
	int nextpage;
	int nextcount;
	unsigned long *oops_page_used;

	unsigned long oops_buf_busy;
	void *oops_buf;
} oops_cxt;

static void mark_page_used(struct mtdoops_context *cxt, int page)
{
	set_bit(page, cxt->oops_page_used);
}

static void mark_page_unused(struct mtdoops_context *cxt, int page)
{
	clear_bit(page, cxt->oops_page_used);
}

static int page_is_used(struct mtdoops_context *cxt, int page)
{
	return test_bit(page, cxt->oops_page_used);
}

static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
	u32 start_page = start_page_offset / record_size;
	u32 erase_pages = mtd->erasesize / record_size;
	struct erase_info erase;
	int ret;
	int page;

	erase.addr = offset;
	erase.len = mtd->erasesize;

	ret = mtd_erase(mtd, &erase);
	if (ret) {
		printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
		       (unsigned long long)erase.addr,
		       (unsigned long long)erase.len, mtddev);
		return ret;
	}

	/* Mark pages as unused */
	for (page = start_page; page < start_page + erase_pages; page++)
		mark_page_unused(cxt, page);

	return 0;
}

static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
	cxt->nextpage++;
	if (cxt->nextpage >= cxt->oops_pages)
		cxt->nextpage = 0;
	cxt->nextcount++;
	if (cxt->nextcount == 0xffffffff)
		cxt->nextcount = 0;

	if (page_is_used(cxt, cxt->nextpage)) {
		schedule_work(&cxt->work_erase);
		return;
	}

	printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
	       cxt->nextpage, cxt->nextcount);
}
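/*
 * Each record written to flash begins with an MTDOOPS_HEADER_SIZE byte
 * header: a 32-bit sequence counter followed by MTDOOPS_KERNMSG_MAGIC,
 * with the remainder of the record_size bytes holding the kmsg dump
 * text. A sketch of how a post-mortem reader might view a record (this
 * struct is hypothetical, for illustration only, and not part of the
 * driver):
 *
 *	struct mtdoops_rec_hdr {
 *		u32 count;	// cxt->nextcount at write time
 *		u32 magic;	// MTDOOPS_KERNMSG_MAGIC (0x5d005d00)
 *	};
 */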
/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_erase);
	struct mtd_info *mtd = cxt->mtd;
	int i = 0, j, ret, mod;

	/* We were unregistered */
	if (!mtd)
		return;

	mod = (cxt->nextpage * record_size) % mtd->erasesize;
	if (mod != 0) {
		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
	}

	while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
badblock:
		printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
		       cxt->nextpage * record_size);
		i++;
		cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
		if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
			printk(KERN_ERR "mtdoops: all blocks bad!\n");
			return;
		}
	}

	if (ret < 0) {
		printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n");
		return;
	}

	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
		ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);

	if (ret >= 0) {
		printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
		       cxt->nextpage, cxt->nextcount);
		return;
	}

	if (ret == -EIO) {
		ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
		if (ret < 0 && ret != -EOPNOTSUPP) {
			printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
			return;
		}
	}
	goto badblock;
}

static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen;
	u32 *hdr;
	int ret;

	if (test_and_set_bit(0, &cxt->oops_buf_busy))
		return;

	/* Add mtdoops header to the buffer */
	hdr = cxt->oops_buf;
	hdr[0] = cxt->nextcount;
	hdr[1] = MTDOOPS_KERNMSG_MAGIC;

	if (panic) {
		ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
				      record_size, &retlen, cxt->oops_buf);
		if (ret == -EOPNOTSUPP) {
			printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
			goto out;
		}
	} else
		ret = mtd_write(mtd, cxt->nextpage * record_size,
				record_size, &retlen, cxt->oops_buf);

	if (retlen != record_size || ret < 0)
		printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
		       cxt->nextpage * record_size, retlen, record_size, ret);
	mark_page_used(cxt, cxt->nextpage);
	memset(cxt->oops_buf, 0xff, record_size);

	mtdoops_inc_counter(cxt);
out:
	clear_bit(0, &cxt->oops_buf_busy);
}

static void mtdoops_workfunc_write(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_write);

	mtdoops_write(cxt, 0);
}
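/*
 * The sequence counter wraps at 0xfffffffe (0xffffffff is reserved to
 * mean "erased flash"), so find_next_position() below cannot simply
 * pick the numerically largest count: just after a wrap, a small count
 * is newer than a huge one. The quadrant comparisons against
 * 0x40000000, 0x80000000 and 0xc0000000 implement a modular
 * "is newer than" test that stays correct across the wrap point.
 */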
static void find_next_position(struct mtdoops_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	int ret, page, maxpos = 0;
	u32 count[2], maxcount = 0xffffffff;
	size_t retlen;

	for (page = 0; page < cxt->oops_pages; page++) {
		if (mtd_block_isbad(mtd, page * record_size))
			continue;
		/* Assume the page is used */
		mark_page_used(cxt, page);
		ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
			       &retlen, (u_char *)&count[0]);
		if (retlen != MTDOOPS_HEADER_SIZE ||
				(ret < 0 && !mtd_is_bitflip(ret))) {
			printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
			       page * record_size, retlen,
			       MTDOOPS_HEADER_SIZE, ret);
			continue;
		}

		if (count[0] == 0xffffffff && count[1] == 0xffffffff)
			mark_page_unused(cxt, page);
		if (count[0] == 0xffffffff || count[1] != MTDOOPS_KERNMSG_MAGIC)
			continue;
		if (maxcount == 0xffffffff) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] < 0xc0000000) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] > 0xc0000000
					&& maxcount > 0x80000000) {
			maxcount = count[0];
			maxpos = page;
		}
	}
	if (maxcount == 0xffffffff) {
		cxt->nextpage = cxt->oops_pages - 1;
		cxt->nextcount = 0;
	} else {
		cxt->nextpage = maxpos;
		cxt->nextcount = maxcount;
	}

	mtdoops_inc_counter(cxt);
}

static void mtdoops_do_dump(struct kmsg_dumper *dumper,
			    enum kmsg_dump_reason reason)
{
	struct mtdoops_context *cxt = container_of(dumper,
			struct mtdoops_context, dump);
	struct kmsg_dump_iter iter;

	/* Only dump oopses if dump_oops is set */
	if (reason == KMSG_DUMP_OOPS && !dump_oops)
		return;

	kmsg_dump_rewind(&iter);

	if (test_and_set_bit(0, &cxt->oops_buf_busy))
		return;
	kmsg_dump_get_buffer(&iter, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
			     record_size - MTDOOPS_HEADER_SIZE, NULL);
	clear_bit(0, &cxt->oops_buf_busy);

	if (reason != KMSG_DUMP_OOPS) {
		/* Panics must be written immediately */
		mtdoops_write(cxt, 1);
	} else {
		/* For other cases, schedule work to write it "nicely" */
		schedule_work(&cxt->work_write);
	}
}

static void mtdoops_notify_add(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;
	u64 mtdoops_pages = div_u64(mtd->size, record_size);
	int err;

	if (!strcmp(mtd->name, mtddev))
		cxt->mtd_index = mtd->index;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	if (mtd->size < mtd->erasesize * 2) {
		printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
		       mtd->index);
		return;
	}
	if (mtd->erasesize < record_size) {
		printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
		       mtd->index);
		return;
	}
	if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
		printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
		       mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
		return;
	}

	/* oops_page_used is a bit field */
	cxt->oops_page_used =
		vmalloc(array_size(sizeof(unsigned long),
				   DIV_ROUND_UP(mtdoops_pages,
						BITS_PER_LONG)));
	if (!cxt->oops_page_used) {
		printk(KERN_ERR "mtdoops: could not allocate page array\n");
		return;
	}

	cxt->dump.max_reason = KMSG_DUMP_OOPS;
	cxt->dump.dump = mtdoops_do_dump;
	err = kmsg_dump_register(&cxt->dump);
	if (err) {
		printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
		vfree(cxt->oops_page_used);
		cxt->oops_page_used = NULL;
		return;
	}

	cxt->mtd = mtd;
	cxt->oops_pages = (int)mtd->size / record_size;
	find_next_position(cxt);
	printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}

static void mtdoops_notify_remove(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	if (kmsg_dump_unregister(&cxt->dump) < 0)
		printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");

	cxt->mtd = NULL;
	flush_work(&cxt->work_erase);
	flush_work(&cxt->work_write);
}

static struct mtd_notifier mtdoops_notifier = {
	.add	= mtdoops_notify_add,
	.remove	= mtdoops_notify_remove,
};
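/*
 * register_mtd_user() (called from mtdoops_init() below) replays the
 * ->add callback for every MTD device already registered and then for
 * each device added later, so mtdoops attaches to its target partition
 * regardless of whether the module loads before or after the partition
 * appears.
 */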
static int __init mtdoops_init(void)
{
	struct mtdoops_context *cxt = &oops_cxt;
	int mtd_index;
	char *endp;

	if (strlen(mtddev) == 0) {
		printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
		return -EINVAL;
	}
	if ((record_size & 4095) != 0) {
		printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
		return -EINVAL;
	}
	if (record_size < 4096) {
		printk(KERN_ERR "mtdoops: record_size must be at least 4096 bytes\n");
		return -EINVAL;
	}

	/* Setup the MTD device to use */
	cxt->mtd_index = -1;
	mtd_index = simple_strtoul(mtddev, &endp, 0);
	if (*endp == '\0')
		cxt->mtd_index = mtd_index;

	cxt->oops_buf = vmalloc(record_size);
	if (!cxt->oops_buf) {
		printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
		return -ENOMEM;
	}
	memset(cxt->oops_buf, 0xff, record_size);
	cxt->oops_buf_busy = 0;

	INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
	INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);

	register_mtd_user(&mtdoops_notifier);
	return 0;
}

static void __exit mtdoops_exit(void)
{
	struct mtdoops_context *cxt = &oops_cxt;

	unregister_mtd_user(&mtdoops_notifier);
	vfree(cxt->oops_buf);
	vfree(cxt->oops_page_used);
}

module_init(mtdoops_init);
module_exit(mtdoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");
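/*
 * Example usage (the partition name/number below is an assumption;
 * substitute your own log partition):
 *
 *	modprobe mtdoops mtddev=mtd4 record_size=4096
 *
 * or, when built in, on the kernel command line:
 *
 *	mtdoops.mtddev=oops_log mtdoops.record_size=4096
 *
 * After a crash, records can be read back from the raw MTD device
 * (e.g. /dev/mtd4) and located by the 0x5d005d00 magic in each header.
 */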