// SPDX-License-Identifier: GPL-2.0-only
/*
 * RAM Oops/Panic logger
 *
 * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
 * Copyright (C) 2011 Kees Cook <keescook@chromium.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/pstore.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mm.h>

#include "internal.h"
#include "ram_internal.h"

#define RAMOOPS_KERNMSG_HDR "===="
#define MIN_MEM_SIZE 4096UL

static ulong record_size = MIN_MEM_SIZE;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
		"size of each dump done on oops/panic");

static ulong ramoops_console_size = MIN_MEM_SIZE;
module_param_named(console_size, ramoops_console_size, ulong, 0400);
MODULE_PARM_DESC(console_size, "size of kernel console log");

static ulong ramoops_ftrace_size = MIN_MEM_SIZE;
module_param_named(ftrace_size, ramoops_ftrace_size, ulong, 0400);
MODULE_PARM_DESC(ftrace_size, "size of ftrace log");

static ulong ramoops_pmsg_size = MIN_MEM_SIZE;
module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400);
MODULE_PARM_DESC(pmsg_size, "size of user space message log");

static unsigned long long mem_address;
module_param_hw(mem_address, ullong, other, 0400);
MODULE_PARM_DESC(mem_address,
		"start of reserved RAM used to store oops/panic logs");

static ulong mem_size;
module_param(mem_size, ulong, 0400);
MODULE_PARM_DESC(mem_size,
		"size of reserved RAM used to store oops/panic logs");

static unsigned int mem_type;
module_param(mem_type, uint, 0400);
MODULE_PARM_DESC(mem_type,
		"memory type: 0=write-combined (default), 1=unbuffered, 2=cached");

static int ramoops_max_reason = -1;
module_param_named(max_reason, ramoops_max_reason, int, 0400);
MODULE_PARM_DESC(max_reason,
		"maximum reason for kmsg dump (default 2: Oops and Panic)");

static int ramoops_ecc;
module_param_named(ecc, ramoops_ecc, int, 0400);
MODULE_PARM_DESC(ecc,
		"if non-zero, the option enables ECC support and specifies "
		"ECC buffer size in bytes (1 is a special value, means 16 "
		"bytes ECC)");

static int ramoops_dump_oops = -1;
module_param_named(dump_oops, ramoops_dump_oops, int, 0400);
MODULE_PARM_DESC(dump_oops,
		"(deprecated: use max_reason instead) set to 1 to dump oopses & panics, 0 to only dump panics");

struct ramoops_context {
	struct persistent_ram_zone **dprzs;	/* Oops dump zones */
	struct persistent_ram_zone *cprz;	/* Console zone */
	struct persistent_ram_zone **fprzs;	/* Ftrace zones */
	struct persistent_ram_zone *mprz;	/* PMSG zone */
	phys_addr_t phys_addr;
	unsigned long size;
	unsigned int memtype;
	size_t record_size;
	size_t console_size;
	size_t ftrace_size;
	size_t pmsg_size;
	u32 flags;
	struct persistent_ram_ecc_info ecc_info;
	unsigned int max_dump_cnt;
	unsigned int dump_write_cnt;
	/* The _read_cnt members must be cleared in ramoops_pstore_open() */
	unsigned int dump_read_cnt;
	unsigned int console_read_cnt;
	unsigned int max_ftrace_cnt;
	unsigned int ftrace_read_cnt;
	unsigned int pmsg_read_cnt;
	struct pstore_info pstore;
};

static struct platform_device *dummy;
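
/*
 * Illustrative use of the module parameters above (values are placeholders,
 * not recommendations): when ramoops is built in, the parameters can be set
 * on the kernel command line, e.g.
 *
 *	ramoops.mem_address=0x8000000 ramoops.mem_size=0x100000
 *	ramoops.record_size=0x4000 ramoops.console_size=0x4000 ramoops.ecc=1
 *
 * The same parameter names can be passed to modprobe/insmod when ramoops is
 * built as a module.
 */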

static int ramoops_pstore_open(struct pstore_info *psi)
{
	struct ramoops_context *cxt = psi->data;

	cxt->dump_read_cnt = 0;
	cxt->console_read_cnt = 0;
	cxt->ftrace_read_cnt = 0;
	cxt->pmsg_read_cnt = 0;
	return 0;
}

static struct persistent_ram_zone *
ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
		     struct pstore_record *record)
{
	struct persistent_ram_zone *prz;

	/* Give up if we never existed or have hit the end. */
	if (!przs)
		return NULL;

	prz = przs[id];
	if (!prz)
		return NULL;

	/* Update old/shadowed buffer. */
	if (prz->type == PSTORE_TYPE_DMESG)
		persistent_ram_save_old(prz);

	if (!persistent_ram_old_size(prz))
		return NULL;

	record->type = prz->type;
	record->id = id;

	return prz;
}

static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
				 bool *compressed)
{
	char data_type;
	int header_length = 0;

	if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
		   (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
		   &header_length) == 3) {
		time->tv_nsec *= 1000;
		if (data_type == 'C')
			*compressed = true;
		else
			*compressed = false;
	} else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
			  (time64_t *)&time->tv_sec, &time->tv_nsec,
			  &header_length) == 2) {
		time->tv_nsec *= 1000;
		*compressed = false;
	} else {
		time->tv_sec = 0;
		time->tv_nsec = 0;
		*compressed = false;
	}
	return header_length;
}

static bool prz_ok(struct persistent_ram_zone *prz)
{
	return !!prz && !!(persistent_ram_old_size(prz) +
			   persistent_ram_ecc_string(prz, NULL, 0));
}

static ssize_t ramoops_pstore_read(struct pstore_record *record)
{
	ssize_t size = 0;
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz = NULL;
	int header_length = 0;
	bool free_prz = false;

	/*
	 * Ramoops headers provide time stamps for PSTORE_TYPE_DMESG, but
	 * PSTORE_TYPE_CONSOLE and PSTORE_TYPE_FTRACE don't currently have
	 * valid time stamps, so the record time is initialized to zero.
	 */
	record->time.tv_sec = 0;
	record->time.tv_nsec = 0;
	record->compressed = false;

	/* Find the next valid persistent_ram_zone for DMESG */
	while (cxt->dump_read_cnt < cxt->max_dump_cnt && !prz) {
		prz = ramoops_get_next_prz(cxt->dprzs, cxt->dump_read_cnt++,
					   record);
		if (!prz_ok(prz))
			continue;
		header_length = ramoops_read_kmsg_hdr(persistent_ram_old(prz),
						      &record->time,
						      &record->compressed);
		/* Clear and skip this DMESG record if it has no valid header */
		if (!header_length) {
			persistent_ram_free_old(prz);
			persistent_ram_zap(prz);
			prz = NULL;
		}
	}

	if (!prz_ok(prz) && !cxt->console_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->cprz, 0 /* single */, record);

	if (!prz_ok(prz) && !cxt->pmsg_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->mprz, 0 /* single */, record);

	/* ftrace is last since it may want to dynamically allocate memory. */
	if (!prz_ok(prz)) {
		if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU) &&
		    !cxt->ftrace_read_cnt++) {
			prz = ramoops_get_next_prz(cxt->fprzs, 0 /* single */,
						   record);
		} else {
			/*
			 * Build a new dummy record which combines all the
			 * per-cpu records including metadata and ecc info.
			 */
			struct persistent_ram_zone *tmp_prz, *prz_next;

			tmp_prz = kzalloc(sizeof(struct persistent_ram_zone),
					  GFP_KERNEL);
			if (!tmp_prz)
				return -ENOMEM;
			prz = tmp_prz;
			free_prz = true;

			while (cxt->ftrace_read_cnt < cxt->max_ftrace_cnt) {
				prz_next = ramoops_get_next_prz(cxt->fprzs,
						cxt->ftrace_read_cnt++, record);

				if (!prz_ok(prz_next))
					continue;

				tmp_prz->ecc_info = prz_next->ecc_info;
				tmp_prz->corrected_bytes +=
						prz_next->corrected_bytes;
				tmp_prz->bad_blocks += prz_next->bad_blocks;

				size = pstore_ftrace_combine_log(
						&tmp_prz->old_log,
						&tmp_prz->old_log_size,
						prz_next->old_log,
						prz_next->old_log_size);
				if (size)
					goto out;
			}
			record->id = 0;
		}
	}

	if (!prz_ok(prz)) {
		size = 0;
		goto out;
	}

	size = persistent_ram_old_size(prz) - header_length;

	/* ECC correction notice */
	record->ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);

	record->buf = kvzalloc(size + record->ecc_notice_size + 1, GFP_KERNEL);
	if (record->buf == NULL) {
		size = -ENOMEM;
		goto out;
	}

	memcpy(record->buf, (char *)persistent_ram_old(prz) + header_length,
	       size);

	persistent_ram_ecc_string(prz, record->buf + size,
				  record->ecc_notice_size + 1);

out:
	if (free_prz) {
		kvfree(prz->old_log);
		kfree(prz);
	}

	return size;
}

static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz,
				     struct pstore_record *record)
{
	char hdr[36]; /* "===="(4), %lld(20), "."(1), %06lu(6), "-%c\n"(3) */
	size_t len;

	len = scnprintf(hdr, sizeof(hdr),
			RAMOOPS_KERNMSG_HDR "%lld.%06lu-%c\n",
			(time64_t)record->time.tv_sec,
			record->time.tv_nsec / 1000,
			record->compressed ? 'C' : 'D');
	persistent_ram_write(prz, hdr, len);

	return len;
}
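
/*
 * Example of the header this emits at the start of each dmesg zone
 * (illustrative timestamp): "====1700000000.123456-D\n". The trailing
 * character is 'C' for compressed records and 'D' for uncompressed ones;
 * ramoops_read_kmsg_hdr() above parses the same layout, and also accepts
 * the older form without the "-%c" suffix.
 */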

static int notrace ramoops_pstore_write(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;
	size_t size, hlen;

	if (record->type == PSTORE_TYPE_CONSOLE) {
		if (!cxt->cprz)
			return -ENOMEM;
		persistent_ram_write(cxt->cprz, record->buf, record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_FTRACE) {
		int zonenum;

		if (!cxt->fprzs)
			return -ENOMEM;
		/*
		 * Choose the zone based on whether we're using per-cpu
		 * buffers.
		 */
		if (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
			zonenum = smp_processor_id();
		else
			zonenum = 0;

		persistent_ram_write(cxt->fprzs[zonenum], record->buf,
				     record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_PMSG) {
		pr_warn_ratelimited("PMSG shouldn't call %s\n", __func__);
		return -EINVAL;
	}

	if (record->type != PSTORE_TYPE_DMESG)
		return -EINVAL;

	/*
	 * We could filter on record->reason here if we wanted to (which
	 * would duplicate what happened before the "max_reason" setting
	 * was added), but that would defeat the purpose of a system
	 * changing printk.always_kmsg_dump, so instead log everything that
	 * the kmsg dumper sends us, since it should be doing the filtering
	 * based on the combination of printk.always_kmsg_dump and our
	 * requested "max_reason".
	 */

	/*
	 * Explicitly only take the first part of any new crash.
	 * If our buffer is larger than kmsg_bytes, this can never happen,
	 * and if our buffer is smaller than kmsg_bytes, we don't want the
	 * report split across multiple records.
	 */
	if (record->part != 1)
		return -ENOSPC;

	if (!cxt->dprzs)
		return -ENOSPC;

	prz = cxt->dprzs[cxt->dump_write_cnt];

	/*
	 * Since this is a new crash dump, we need to reset the buffer in
	 * case it still has an old dump present. Without this, the new dump
	 * will get appended, which would seriously confuse anything trying
	 * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
	 * expects to find a dump header at the beginning of buffer data, so
	 * we must reset the buffer values, in order to ensure that the
	 * header will be written to the beginning of the buffer.
	 */
	persistent_ram_zap(prz);

	/* Build header and append record contents. */
	hlen = ramoops_write_kmsg_hdr(prz, record);
	if (!hlen)
		return -ENOMEM;

	size = record->size;
	if (size + hlen > prz->buffer_size)
		size = prz->buffer_size - hlen;
	persistent_ram_write(prz, record->buf, size);

	cxt->dump_write_cnt = (cxt->dump_write_cnt + 1) % cxt->max_dump_cnt;

	return 0;
}

static int notrace ramoops_pstore_write_user(struct pstore_record *record,
					     const char __user *buf)
{
	if (record->type == PSTORE_TYPE_PMSG) {
		struct ramoops_context *cxt = record->psi->data;

		if (!cxt->mprz)
			return -ENOMEM;
		return persistent_ram_write_user(cxt->mprz, buf, record->size);
	}

	return -EINVAL;
}

static int ramoops_pstore_erase(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;

	switch (record->type) {
	case PSTORE_TYPE_DMESG:
		if (record->id >= cxt->max_dump_cnt)
			return -EINVAL;
		prz = cxt->dprzs[record->id];
		break;
	case PSTORE_TYPE_CONSOLE:
		prz = cxt->cprz;
		break;
	case PSTORE_TYPE_FTRACE:
		if (record->id >= cxt->max_ftrace_cnt)
			return -EINVAL;
		prz = cxt->fprzs[record->id];
		break;
	case PSTORE_TYPE_PMSG:
		prz = cxt->mprz;
		break;
	default:
		return -EINVAL;
	}

	persistent_ram_free_old(prz);
	persistent_ram_zap(prz);

	return 0;
}

static struct ramoops_context oops_cxt = {
	.pstore = {
		.owner		= THIS_MODULE,
		.name		= "ramoops",
		.open		= ramoops_pstore_open,
		.read		= ramoops_pstore_read,
		.write		= ramoops_pstore_write,
		.write_user	= ramoops_pstore_write_user,
		.erase		= ramoops_pstore_erase,
	},
};

static void ramoops_free_przs(struct ramoops_context *cxt)
{
	int i;

	/* Free pmsg PRZ */
	persistent_ram_free(&cxt->mprz);

	/* Free console PRZ */
	persistent_ram_free(&cxt->cprz);

	/* Free dump PRZs */
	if (cxt->dprzs) {
		for (i = 0; i < cxt->max_dump_cnt; i++)
			persistent_ram_free(&cxt->dprzs[i]);

		kfree(cxt->dprzs);
		cxt->dprzs = NULL;
		cxt->max_dump_cnt = 0;
	}

	/* Free ftrace PRZs */
	if (cxt->fprzs) {
		for (i = 0; i < cxt->max_ftrace_cnt; i++)
			persistent_ram_free(&cxt->fprzs[i]);
		kfree(cxt->fprzs);
		cxt->fprzs = NULL;
		cxt->max_ftrace_cnt = 0;
	}
}

static int ramoops_init_przs(const char *name,
			     struct device *dev, struct ramoops_context *cxt,
			     struct persistent_ram_zone ***przs,
			     phys_addr_t *paddr, size_t mem_sz,
			     ssize_t record_size,
			     unsigned int *cnt, u32 sig, u32 flags)
{
	int err = -ENOMEM;
	int i;
	size_t zone_sz;
	struct persistent_ram_zone **prz_ar;

	/* Allocate nothing for 0 mem_sz or 0 record_size. */
	if (mem_sz == 0 || record_size == 0) {
		*cnt = 0;
		return 0;
	}

	/*
	 * If we have a negative record size, calculate it based on
	 * mem_sz / *cnt. If we have a positive record size, calculate
	 * cnt from mem_sz / record_size.
	 */
	if (record_size < 0) {
		if (*cnt == 0)
			return 0;
		record_size = mem_sz / *cnt;
		if (record_size == 0) {
			dev_err(dev, "%s record size == 0 (%zu / %u)\n",
				name, mem_sz, *cnt);
			goto fail;
		}
	} else {
		*cnt = mem_sz / record_size;
		if (*cnt == 0) {
			dev_err(dev, "%s record count == 0 (%zu / %zu)\n",
				name, mem_sz, record_size);
			goto fail;
		}
	}

	if (*paddr + mem_sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name,
			mem_sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		goto fail;
	}

	zone_sz = mem_sz / *cnt;
	zone_sz = ALIGN_DOWN(zone_sz, 2);
	if (!zone_sz) {
		dev_err(dev, "%s zone size == 0\n", name);
		goto fail;
	}

	prz_ar = kcalloc(*cnt, sizeof(**przs), GFP_KERNEL);
	if (!prz_ar)
		goto fail;

	for (i = 0; i < *cnt; i++) {
		char *label;

		if (*cnt == 1)
			label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
		else
			label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
					  name, i, *cnt - 1);
		prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
					       &cxt->ecc_info,
					       cxt->memtype, flags, label);
		kfree(label);
		if (IS_ERR(prz_ar[i])) {
			err = PTR_ERR(prz_ar[i]);
			dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
				name, record_size,
				(unsigned long long)*paddr, err);

			while (i > 0) {
				i--;
				persistent_ram_free(&prz_ar[i]);
			}
			kfree(prz_ar);
			prz_ar = NULL;
			goto fail;
		}
		*paddr += zone_sz;
		prz_ar[i]->type = pstore_name_to_type(name);
	}

	*przs = prz_ar;
	return 0;

fail:
	*cnt = 0;
	return err;
}
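
/*
 * Worked example (illustrative numbers only): ramoops_probe() below calls
 * ramoops_init_przs() with a positive record_size for the "dmesg" area, so
 * a 64 KiB dump region with record_size = 16 KiB yields *cnt = 4 zones of
 * 16 KiB each. For the "ftrace" area it passes record_size = -1 with *cnt
 * preset to 1 (or nr_cpu_ids for per-cpu buffers), so the region is instead
 * divided evenly across that many zones.
 */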

static int ramoops_init_prz(const char *name,
			    struct device *dev, struct ramoops_context *cxt,
			    struct persistent_ram_zone **prz,
			    phys_addr_t *paddr, size_t sz, u32 sig)
{
	char *label;

	if (!sz)
		return 0;

	if (*paddr + sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name, sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		return -ENOMEM;
	}

	label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
				  cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
	kfree(label);
	if (IS_ERR(*prz)) {
		int err = PTR_ERR(*prz);

		dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
			name, sz, (unsigned long long)*paddr, err);
		return err;
	}

	*paddr += sz;
	(*prz)->type = pstore_name_to_type(name);

	return 0;
}

/* Read a u32 from a dt property and make sure it's safe for an int. */
static int ramoops_parse_dt_u32(struct platform_device *pdev,
				const char *propname,
				u32 default_value, u32 *value)
{
	u32 val32 = 0;
	int ret;

	ret = of_property_read_u32(pdev->dev.of_node, propname, &val32);
	if (ret == -EINVAL) {
		/* field is missing, use default value. */
		val32 = default_value;
	} else if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse property %s: %d\n",
			propname, ret);
		return ret;
	}

	/* Sanity check our results. */
	if (val32 > INT_MAX) {
		dev_err(&pdev->dev, "%s %u > INT_MAX\n", propname, val32);
		return -EOVERFLOW;
	}

	*value = val32;
	return 0;
}

static int ramoops_parse_dt(struct platform_device *pdev,
			    struct ramoops_platform_data *pdata)
{
	struct device_node *of_node = pdev->dev.of_node;
	struct device_node *parent_node;
	struct resource *res;
	u32 value;
	int ret;

	dev_dbg(&pdev->dev, "using Device Tree\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"failed to locate DT /reserved-memory resource\n");
		return -EINVAL;
	}

	pdata->mem_size = resource_size(res);
	pdata->mem_address = res->start;
	/*
	 * Setting "unbuffered" is deprecated and will be ignored if
	 * "mem-type" is also specified.
	 */
	pdata->mem_type = of_property_read_bool(of_node, "unbuffered");
	/*
	 * Setting "no-dump-oops" is deprecated and will be ignored if
	 * "max-reason" is also specified.
	 */
	if (of_property_read_bool(of_node, "no-dump-oops"))
		pdata->max_reason = KMSG_DUMP_PANIC;
	else
		pdata->max_reason = KMSG_DUMP_OOPS;

#define parse_u32(name, field, default_value) {				\
		ret = ramoops_parse_dt_u32(pdev, name, default_value,	\
					   &value);			\
		if (ret < 0)						\
			return ret;					\
		field = value;						\
	}

	parse_u32("mem-type", pdata->mem_type, pdata->mem_type);
	parse_u32("record-size", pdata->record_size, 0);
	parse_u32("console-size", pdata->console_size, 0);
	parse_u32("ftrace-size", pdata->ftrace_size, 0);
	parse_u32("pmsg-size", pdata->pmsg_size, 0);
	parse_u32("ecc-size", pdata->ecc_info.ecc_size, 0);
	parse_u32("flags", pdata->flags, 0);
	parse_u32("max-reason", pdata->max_reason, pdata->max_reason);

#undef parse_u32

	/*
	 * Some old Chromebooks relied on the kernel setting the
	 * console_size and pmsg_size to the record size since that's
	 * what the downstream kernel did. These same Chromebooks had
	 * "ramoops" straight under the root node, which doesn't follow
	 * the current upstream bindings (though it was arguably
	 * acceptable under a prior version of the bindings). Let's make
	 * those old Chromebooks work by detecting that we're not a
	 * child of "reserved-memory" and mimicking the expected
	 * behavior.
	 */
	parent_node = of_get_parent(of_node);
	if (!of_node_name_eq(parent_node, "reserved-memory") &&
	    !pdata->console_size && !pdata->ftrace_size &&
	    !pdata->pmsg_size && !pdata->ecc_info.ecc_size) {
		pdata->console_size = pdata->record_size;
		pdata->pmsg_size = pdata->record_size;
	}
	of_node_put(parent_node);

	return 0;
}
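
/*
 * A minimal sketch of the device tree node this parser expects under the
 * upstream binding (addresses and sizes are placeholders; the property
 * names match those read in ramoops_parse_dt() above):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		ramoops@8f000000 {
 *			compatible = "ramoops";
 *			reg = <0x8f000000 0x100000>;
 *			record-size = <0x4000>;
 *			console-size = <0x4000>;
 *			pmsg-size = <0x4000>;
 *		};
 *	};
 */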

static int ramoops_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ramoops_platform_data *pdata = dev->platform_data;
	struct ramoops_platform_data pdata_local;
	struct ramoops_context *cxt = &oops_cxt;
	size_t dump_mem_sz;
	phys_addr_t paddr;
	int err = -EINVAL;

	/*
	 * Only a single ramoops area allowed at a time, so fail extra
	 * probes.
	 */
	if (cxt->max_dump_cnt) {
		pr_err("already initialized\n");
		goto fail_out;
	}

	if (dev_of_node(dev) && !pdata) {
		pdata = &pdata_local;
		memset(pdata, 0, sizeof(*pdata));

		err = ramoops_parse_dt(pdev, pdata);
		if (err < 0)
			goto fail_out;
	}

	/* Make sure we didn't get a bogus platform data pointer. */
	if (!pdata) {
		pr_err("NULL platform data\n");
		err = -EINVAL;
		goto fail_out;
	}

	if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size &&
			!pdata->ftrace_size && !pdata->pmsg_size)) {
		pr_err("The memory size and the record/console size must be non-zero\n");
		err = -EINVAL;
		goto fail_out;
	}

	if (pdata->record_size && !is_power_of_2(pdata->record_size))
		pdata->record_size = rounddown_pow_of_two(pdata->record_size);
	if (pdata->console_size && !is_power_of_2(pdata->console_size))
		pdata->console_size = rounddown_pow_of_two(pdata->console_size);
	if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size))
		pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
	if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size))
		pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size);

	cxt->size = pdata->mem_size;
	cxt->phys_addr = pdata->mem_address;
	cxt->memtype = pdata->mem_type;
	cxt->record_size = pdata->record_size;
	cxt->console_size = pdata->console_size;
	cxt->ftrace_size = pdata->ftrace_size;
	cxt->pmsg_size = pdata->pmsg_size;
	cxt->flags = pdata->flags;
	cxt->ecc_info = pdata->ecc_info;

	paddr = cxt->phys_addr;

	dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
			- cxt->pmsg_size;
	err = ramoops_init_przs("dmesg", dev, cxt, &cxt->dprzs, &paddr,
				dump_mem_sz, cxt->record_size,
				&cxt->max_dump_cnt, 0, 0);
	if (err)
		goto fail_init;

	err = ramoops_init_prz("console", dev, cxt, &cxt->cprz, &paddr,
			       cxt->console_size, 0);
	if (err)
		goto fail_init;

	err = ramoops_init_prz("pmsg", dev, cxt, &cxt->mprz, &paddr,
			       cxt->pmsg_size, 0);
	if (err)
		goto fail_init;

	cxt->max_ftrace_cnt = (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
				? nr_cpu_ids
				: 1;
	err = ramoops_init_przs("ftrace", dev, cxt, &cxt->fprzs, &paddr,
				cxt->ftrace_size, -1,
				&cxt->max_ftrace_cnt, LINUX_VERSION_CODE,
				(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
					? PRZ_FLAG_NO_LOCK : 0);
	if (err)
		goto fail_init;

	cxt->pstore.data = cxt;
	/*
	 * Prepare frontend flags based on which areas are initialized.
	 * For ramoops_init_przs() cases, the "max count" variable tells
	 * if there are regions present. For ramoops_init_prz() cases,
	 * the single region size is how to check.
	 */
	cxt->pstore.flags = 0;
	if (cxt->max_dump_cnt) {
		cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
		cxt->pstore.max_reason = pdata->max_reason;
	}
	if (cxt->console_size)
		cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
	if (cxt->max_ftrace_cnt)
		cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
	if (cxt->pmsg_size)
		cxt->pstore.flags |= PSTORE_FLAGS_PMSG;

	/*
	 * Since bufsize is only used for dmesg crash dumps, it
	 * must match the size of the dprz record (after PRZ header
	 * and ECC bytes have been accounted for).
	 */
	if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
		cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
		cxt->pstore.buf = kvzalloc(cxt->pstore.bufsize, GFP_KERNEL);
		if (!cxt->pstore.buf) {
			pr_err("cannot allocate pstore crash dump buffer\n");
			err = -ENOMEM;
			goto fail_clear;
		}
	}

	err = pstore_register(&cxt->pstore);
	if (err) {
		pr_err("registering with pstore failed\n");
		goto fail_buf;
	}

	/*
	 * Update the module parameter variables as well so they are visible
	 * through /sys/module/ramoops/parameters/
	 */
	mem_size = pdata->mem_size;
	mem_address = pdata->mem_address;
	record_size = pdata->record_size;
	ramoops_max_reason = pdata->max_reason;
	ramoops_console_size = pdata->console_size;
	ramoops_pmsg_size = pdata->pmsg_size;
	ramoops_ftrace_size = pdata->ftrace_size;

	pr_info("using 0x%lx@0x%llx, ecc: %d\n",
		cxt->size, (unsigned long long)cxt->phys_addr,
		cxt->ecc_info.ecc_size);

	return 0;

fail_buf:
	kvfree(cxt->pstore.buf);
fail_clear:
	cxt->pstore.bufsize = 0;
fail_init:
	ramoops_free_przs(cxt);
fail_out:
	return err;
}

static void ramoops_remove(struct platform_device *pdev)
{
	struct ramoops_context *cxt = &oops_cxt;

	pstore_unregister(&cxt->pstore);

	kvfree(cxt->pstore.buf);
	cxt->pstore.bufsize = 0;

	ramoops_free_przs(cxt);
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "ramoops" },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver ramoops_driver = {
	.probe		= ramoops_probe,
	.remove_new	= ramoops_remove,
	.driver		= {
		.name		= "ramoops",
		.of_match_table	= dt_match,
	},
};

static inline void ramoops_unregister_dummy(void)
{
	platform_device_unregister(dummy);
	dummy = NULL;
}

static void __init ramoops_register_dummy(void)
{
	struct ramoops_platform_data pdata;

	/*
	 * Prepare a dummy platform data structure to carry the module
	 * parameters. If mem_size isn't set, then there are no module
	 * parameters, and we can skip this.
	 */
	if (!mem_size)
		return;

	pr_info("using module parameters\n");

	memset(&pdata, 0, sizeof(pdata));
	pdata.mem_size = mem_size;
	pdata.mem_address = mem_address;
	pdata.mem_type = mem_type;
	pdata.record_size = record_size;
	pdata.console_size = ramoops_console_size;
	pdata.ftrace_size = ramoops_ftrace_size;
	pdata.pmsg_size = ramoops_pmsg_size;
	/* If "max_reason" is set, its value has priority over "dump_oops". */
	if (ramoops_max_reason >= 0)
		pdata.max_reason = ramoops_max_reason;
	/* Otherwise, if "dump_oops" is set, parse it into "max_reason". */
	else if (ramoops_dump_oops != -1)
		pdata.max_reason = ramoops_dump_oops ? KMSG_DUMP_OOPS
						     : KMSG_DUMP_PANIC;
	/* And if neither is explicitly set, use the default. */
	else
		pdata.max_reason = KMSG_DUMP_OOPS;
	pdata.flags = RAMOOPS_FLAG_FTRACE_PER_CPU;

	/*
	 * For backwards compatibility ramoops.ecc=1 means 16 bytes ECC
	 * (using 1 byte for ECC isn't much of use anyway).
	 */
	pdata.ecc_info.ecc_size = ramoops_ecc == 1 ? 16 : ramoops_ecc;

	dummy = platform_device_register_data(NULL, "ramoops", -1,
			&pdata, sizeof(pdata));
	if (IS_ERR(dummy)) {
		pr_info("could not create platform device: %ld\n",
			PTR_ERR(dummy));
		dummy = NULL;
	}
}

static int __init ramoops_init(void)
{
	int ret;

	ramoops_register_dummy();
	ret = platform_driver_register(&ramoops_driver);
	if (ret != 0)
		ramoops_unregister_dummy();

	return ret;
}
postcore_initcall(ramoops_init);

static void __exit ramoops_exit(void)
{
	platform_driver_unregister(&ramoops_driver);
	ramoops_unregister_dummy();
}
module_exit(ramoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");