// SPDX-License-Identifier: GPL-2.0-only
/*
 * RAM Oops/Panic logger
 *
 * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
 * Copyright (C) 2011 Kees Cook <keescook@chromium.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/pstore.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/pstore_ram.h>
#include <linux/of.h>
#include <linux/of_address.h>

#define RAMOOPS_KERNMSG_HDR "===="
#define MIN_MEM_SIZE 4096UL

static ulong record_size = MIN_MEM_SIZE;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
		"size of each dump done on oops/panic");

static ulong ramoops_console_size = MIN_MEM_SIZE;
module_param_named(console_size, ramoops_console_size, ulong, 0400);
MODULE_PARM_DESC(console_size, "size of kernel console log");

static ulong ramoops_ftrace_size = MIN_MEM_SIZE;
module_param_named(ftrace_size, ramoops_ftrace_size, ulong, 0400);
MODULE_PARM_DESC(ftrace_size, "size of ftrace log");

static ulong ramoops_pmsg_size = MIN_MEM_SIZE;
module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400);
MODULE_PARM_DESC(pmsg_size, "size of user space message log");

static unsigned long long mem_address;
module_param_hw(mem_address, ullong, other, 0400);
MODULE_PARM_DESC(mem_address,
		"start of reserved RAM used to store oops/panic logs");

static ulong mem_size;
module_param(mem_size, ulong, 0400);
MODULE_PARM_DESC(mem_size,
		"size of reserved RAM used to store oops/panic logs");

static unsigned int mem_type;
module_param(mem_type, uint, 0600);
MODULE_PARM_DESC(mem_type,
		"set to 1 to try to use unbuffered memory (default 0)");

static int dump_oops = 1;
module_param(dump_oops, int, 0600);
MODULE_PARM_DESC(dump_oops,
		"set to 1 to dump oopses, 0 to only dump panics (default 1)");

static int ramoops_ecc;
module_param_named(ecc, ramoops_ecc, int, 0600);
MODULE_PARM_DESC(ramoops_ecc,
		"if non-zero, the option enables ECC support and specifies "
		"ECC buffer size in bytes (1 is a special value, means 16 "
		"bytes ECC)");

struct ramoops_context {
	struct persistent_ram_zone **dprzs;	/* Oops dump zones */
	struct persistent_ram_zone *cprz;	/* Console zone */
	struct persistent_ram_zone **fprzs;	/* Ftrace zones */
	struct persistent_ram_zone *mprz;	/* PMSG zone */
	phys_addr_t phys_addr;
	unsigned long size;
	unsigned int memtype;
	size_t record_size;
	size_t console_size;
	size_t ftrace_size;
	size_t pmsg_size;
	int dump_oops;
	u32 flags;
	struct persistent_ram_ecc_info ecc_info;
	unsigned int max_dump_cnt;
	unsigned int dump_write_cnt;
	/* The _read_cnt members must be cleared in ramoops_pstore_open() */
	unsigned int dump_read_cnt;
	unsigned int console_read_cnt;
	unsigned int max_ftrace_cnt;
	unsigned int ftrace_read_cnt;
	unsigned int pmsg_read_cnt;
	struct pstore_info pstore;
};

static struct platform_device *dummy;

static int ramoops_pstore_open(struct pstore_info *psi)
{
	struct ramoops_context *cxt = psi->data;

	cxt->dump_read_cnt = 0;
	cxt->console_read_cnt = 0;
	cxt->ftrace_read_cnt = 0;
	cxt->pmsg_read_cnt = 0;
	return 0;
}

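/*
 * Pick the zone at @id out of @przs (if the array and slot exist),
 * refresh the "old" (shadowed) contents for dump zones, and fill in the
 * record's type and id from the zone. Returns NULL when the zone is
 * missing or holds no old data.
 */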
static struct persistent_ram_zone *
ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
		     struct pstore_record *record)
{
	struct persistent_ram_zone *prz;

	/* Give up if we never existed or have hit the end. */
	if (!przs)
		return NULL;

	prz = przs[id];
	if (!prz)
		return NULL;

	/* Update old/shadowed buffer. */
	if (prz->type == PSTORE_TYPE_DMESG)
		persistent_ram_save_old(prz);

	if (!persistent_ram_old_size(prz))
		return NULL;

	record->type = prz->type;
	record->id = id;

	return prz;
}

static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
				 bool *compressed)
{
	char data_type;
	int header_length = 0;

	if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
		   (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
		   &header_length) == 3) {
		time->tv_nsec *= 1000;
		if (data_type == 'C')
			*compressed = true;
		else
			*compressed = false;
	} else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
			  (time64_t *)&time->tv_sec, &time->tv_nsec,
			  &header_length) == 2) {
		time->tv_nsec *= 1000;
		*compressed = false;
	} else {
		time->tv_sec = 0;
		time->tv_nsec = 0;
		*compressed = false;
	}
	return header_length;
}

static bool prz_ok(struct persistent_ram_zone *prz)
{
	return !!prz && !!(persistent_ram_old_size(prz) +
			   persistent_ram_ecc_string(prz, NULL, 0));
}

static ssize_t ftrace_log_combine(struct persistent_ram_zone *dest,
				  struct persistent_ram_zone *src)
{
	size_t dest_size, src_size, total, dest_off, src_off;
	size_t dest_idx = 0, src_idx = 0, merged_idx = 0;
	void *merged_buf;
	struct pstore_ftrace_record *drec, *srec, *mrec;
	size_t record_size = sizeof(struct pstore_ftrace_record);

	dest_off = dest->old_log_size % record_size;
	dest_size = dest->old_log_size - dest_off;

	src_off = src->old_log_size % record_size;
	src_size = src->old_log_size - src_off;

	total = dest_size + src_size;
	merged_buf = kmalloc(total, GFP_KERNEL);
	if (!merged_buf)
		return -ENOMEM;

	drec = (struct pstore_ftrace_record *)(dest->old_log + dest_off);
	srec = (struct pstore_ftrace_record *)(src->old_log + src_off);
	mrec = (struct pstore_ftrace_record *)(merged_buf);

	while (dest_size > 0 && src_size > 0) {
		if (pstore_ftrace_read_timestamp(&drec[dest_idx]) <
		    pstore_ftrace_read_timestamp(&srec[src_idx])) {
			mrec[merged_idx++] = drec[dest_idx++];
			dest_size -= record_size;
		} else {
			mrec[merged_idx++] = srec[src_idx++];
			src_size -= record_size;
		}
	}

	while (dest_size > 0) {
		mrec[merged_idx++] = drec[dest_idx++];
		dest_size -= record_size;
	}

	while (src_size > 0) {
		mrec[merged_idx++] = srec[src_idx++];
		src_size -= record_size;
	}

	kfree(dest->old_log);
	dest->old_log = merged_buf;
	dest->old_log_size = total;

	return 0;
}

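/*
 * pstore calls ->read() repeatedly until it returns 0. Records are
 * handed back in a fixed order: the dmesg dump zones first, then the
 * console zone, then the pmsg zone, and ftrace last. When per-CPU
 * ftrace buffers are enabled, they are merged by timestamp into a
 * temporary zone before being returned as a single record.
 */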
static ssize_t ramoops_pstore_read(struct pstore_record *record)
{
	ssize_t size = 0;
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz = NULL;
	int header_length = 0;
	bool free_prz = false;

	/*
	 * Ramoops headers provide time stamps for PSTORE_TYPE_DMESG, but
	 * PSTORE_TYPE_CONSOLE and PSTORE_TYPE_FTRACE don't currently have
	 * valid time stamps, so the timestamp is initialized to zero.
	 */
	record->time.tv_sec = 0;
	record->time.tv_nsec = 0;
	record->compressed = false;

	/* Find the next valid persistent_ram_zone for DMESG */
	while (cxt->dump_read_cnt < cxt->max_dump_cnt && !prz) {
		prz = ramoops_get_next_prz(cxt->dprzs, cxt->dump_read_cnt++,
					   record);
		if (!prz_ok(prz))
			continue;
		header_length = ramoops_read_kmsg_hdr(persistent_ram_old(prz),
						      &record->time,
						      &record->compressed);
		/* Clear and skip this DMESG record if it has no valid header */
		if (!header_length) {
			persistent_ram_free_old(prz);
			persistent_ram_zap(prz);
			prz = NULL;
		}
	}

	if (!prz_ok(prz) && !cxt->console_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->cprz, 0 /* single */, record);

	if (!prz_ok(prz) && !cxt->pmsg_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->mprz, 0 /* single */, record);

	/* ftrace is last since it may want to dynamically allocate memory. */
	if (!prz_ok(prz)) {
		if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU) &&
		    !cxt->ftrace_read_cnt++) {
			prz = ramoops_get_next_prz(cxt->fprzs, 0 /* single */,
						   record);
		} else {
			/*
			 * Build a new dummy record which combines all the
			 * per-cpu records including metadata and ecc info.
			 */
			struct persistent_ram_zone *tmp_prz, *prz_next;

			tmp_prz = kzalloc(sizeof(struct persistent_ram_zone),
					  GFP_KERNEL);
			if (!tmp_prz)
				return -ENOMEM;
			prz = tmp_prz;
			free_prz = true;

			while (cxt->ftrace_read_cnt < cxt->max_ftrace_cnt) {
				prz_next = ramoops_get_next_prz(cxt->fprzs,
						cxt->ftrace_read_cnt++, record);

				if (!prz_ok(prz_next))
					continue;

				tmp_prz->ecc_info = prz_next->ecc_info;
				tmp_prz->corrected_bytes +=
						prz_next->corrected_bytes;
				tmp_prz->bad_blocks += prz_next->bad_blocks;
				size = ftrace_log_combine(tmp_prz, prz_next);
				if (size)
					goto out;
			}
			record->id = 0;
		}
	}

	if (!prz_ok(prz)) {
		size = 0;
		goto out;
	}

	size = persistent_ram_old_size(prz) - header_length;

	/* ECC correction notice */
	record->ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);

	record->buf = kmalloc(size + record->ecc_notice_size + 1, GFP_KERNEL);
	if (record->buf == NULL) {
		size = -ENOMEM;
		goto out;
	}

	memcpy(record->buf, (char *)persistent_ram_old(prz) + header_length,
	       size);

	persistent_ram_ecc_string(prz, record->buf + size,
				  record->ecc_notice_size + 1);

out:
	if (free_prz) {
		kfree(prz->old_log);
		kfree(prz);
	}

	return size;
}

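/*
 * The dmesg header records the crash timestamp and whether the payload
 * is compressed, e.g. "====1610071512.913029-C\n" ('C' for compressed,
 * 'D' for plain text; the timestamp value here is illustrative).
 * ramoops_read_kmsg_hdr() parses this same layout back out when records
 * are read.
 */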
static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz,
				     struct pstore_record *record)
{
	char hdr[36]; /* "===="(4), %lld(20), "."(1), %06lu(6), "-%c\n"(3) */
	size_t len;

	len = scnprintf(hdr, sizeof(hdr),
		RAMOOPS_KERNMSG_HDR "%lld.%06lu-%c\n",
		(time64_t)record->time.tv_sec,
		record->time.tv_nsec / 1000,
		record->compressed ? 'C' : 'D');
	persistent_ram_write(prz, hdr, len);

	return len;
}

static int notrace ramoops_pstore_write(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;
	size_t size, hlen;

	if (record->type == PSTORE_TYPE_CONSOLE) {
		if (!cxt->cprz)
			return -ENOMEM;
		persistent_ram_write(cxt->cprz, record->buf, record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_FTRACE) {
		int zonenum;

		if (!cxt->fprzs)
			return -ENOMEM;
		/*
		 * Choose the zone based on whether we're using per-cpu
		 * buffers.
		 */
		if (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
			zonenum = smp_processor_id();
		else
			zonenum = 0;

		persistent_ram_write(cxt->fprzs[zonenum], record->buf,
				     record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_PMSG) {
		pr_warn_ratelimited("PMSG shouldn't call %s\n", __func__);
		return -EINVAL;
	}

	if (record->type != PSTORE_TYPE_DMESG)
		return -EINVAL;

	/*
	 * Out of the various dmesg dump types, ramoops is currently designed
	 * to only store crash logs, rather than storing general kernel logs.
	 */
	if (record->reason != KMSG_DUMP_OOPS &&
	    record->reason != KMSG_DUMP_PANIC)
		return -EINVAL;

	/* Skip Oopses when configured to do so. */
	if (record->reason == KMSG_DUMP_OOPS && !cxt->dump_oops)
		return -EINVAL;

	/*
	 * Explicitly only take the first part of any new crash.
	 * If our buffer is larger than kmsg_bytes, this can never happen,
	 * and if our buffer is smaller than kmsg_bytes, we don't want the
	 * report split across multiple records.
	 */
	if (record->part != 1)
		return -ENOSPC;

	if (!cxt->dprzs)
		return -ENOSPC;

	prz = cxt->dprzs[cxt->dump_write_cnt];

	/*
	 * Since this is a new crash dump, we need to reset the buffer in
	 * case it still has an old dump present. Without this, the new dump
	 * will get appended, which would seriously confuse anything trying
	 * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
	 * expects to find a dump header at the beginning of buffer data, so
	 * we must reset the buffer values, in order to ensure that the
	 * header will be written to the beginning of the buffer.
	 */
	persistent_ram_zap(prz);

	/* Build header and append record contents. */
	hlen = ramoops_write_kmsg_hdr(prz, record);
	if (!hlen)
		return -ENOMEM;

	size = record->size;
	if (size + hlen > prz->buffer_size)
		size = prz->buffer_size - hlen;
	persistent_ram_write(prz, record->buf, size);

	cxt->dump_write_cnt = (cxt->dump_write_cnt + 1) % cxt->max_dump_cnt;

	return 0;
}

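/*
 * PMSG data arrives from user space (e.g. writes to /dev/pmsg0) and is
 * copied straight into the pmsg zone; all other record types must go
 * through ramoops_pstore_write() above.
 */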
static int notrace ramoops_pstore_write_user(struct pstore_record *record,
					     const char __user *buf)
{
	if (record->type == PSTORE_TYPE_PMSG) {
		struct ramoops_context *cxt = record->psi->data;

		if (!cxt->mprz)
			return -ENOMEM;
		return persistent_ram_write_user(cxt->mprz, buf, record->size);
	}

	return -EINVAL;
}

static int ramoops_pstore_erase(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;

	switch (record->type) {
	case PSTORE_TYPE_DMESG:
		if (record->id >= cxt->max_dump_cnt)
			return -EINVAL;
		prz = cxt->dprzs[record->id];
		break;
	case PSTORE_TYPE_CONSOLE:
		prz = cxt->cprz;
		break;
	case PSTORE_TYPE_FTRACE:
		if (record->id >= cxt->max_ftrace_cnt)
			return -EINVAL;
		prz = cxt->fprzs[record->id];
		break;
	case PSTORE_TYPE_PMSG:
		prz = cxt->mprz;
		break;
	default:
		return -EINVAL;
	}

	persistent_ram_free_old(prz);
	persistent_ram_zap(prz);

	return 0;
}

static struct ramoops_context oops_cxt = {
	.pstore = {
		.owner		= THIS_MODULE,
		.name		= "ramoops",
		.open		= ramoops_pstore_open,
		.read		= ramoops_pstore_read,
		.write		= ramoops_pstore_write,
		.write_user	= ramoops_pstore_write_user,
		.erase		= ramoops_pstore_erase,
	},
};

static void ramoops_free_przs(struct ramoops_context *cxt)
{
	int i;

	/* Free dump PRZs */
	if (cxt->dprzs) {
		for (i = 0; i < cxt->max_dump_cnt; i++)
			persistent_ram_free(cxt->dprzs[i]);

		kfree(cxt->dprzs);
		cxt->max_dump_cnt = 0;
	}

	/* Free ftrace PRZs */
	if (cxt->fprzs) {
		for (i = 0; i < cxt->max_ftrace_cnt; i++)
			persistent_ram_free(cxt->fprzs[i]);
		kfree(cxt->fprzs);
		cxt->max_ftrace_cnt = 0;
	}
}

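/*
 * Carve @mem_sz bytes starting at *@paddr into an array of equally
 * sized persistent RAM zones. With a positive @record_size the zone
 * count is derived from it (e.g. 64 KiB of dmesg space with a 16 KiB
 * record size yields four dump zones); with a negative @record_size
 * the caller-supplied *@cnt sets the zone size instead, which is how
 * the per-CPU ftrace buffers are sized.
 */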
static int ramoops_init_przs(const char *name,
			     struct device *dev, struct ramoops_context *cxt,
			     struct persistent_ram_zone ***przs,
			     phys_addr_t *paddr, size_t mem_sz,
			     ssize_t record_size,
			     unsigned int *cnt, u32 sig, u32 flags)
{
	int err = -ENOMEM;
	int i;
	size_t zone_sz;
	struct persistent_ram_zone **prz_ar;

	/* Allocate nothing for 0 mem_sz or 0 record_size. */
	if (mem_sz == 0 || record_size == 0) {
		*cnt = 0;
		return 0;
	}

	/*
	 * If we have a negative record size, calculate it based on
	 * mem_sz / *cnt. If we have a positive record size, calculate
	 * cnt from mem_sz / record_size.
	 */
	if (record_size < 0) {
		if (*cnt == 0)
			return 0;
		record_size = mem_sz / *cnt;
		if (record_size == 0) {
			dev_err(dev, "%s record size == 0 (%zu / %u)\n",
				name, mem_sz, *cnt);
			goto fail;
		}
	} else {
		*cnt = mem_sz / record_size;
		if (*cnt == 0) {
			dev_err(dev, "%s record count == 0 (%zu / %zu)\n",
				name, mem_sz, record_size);
			goto fail;
		}
	}

	if (*paddr + mem_sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name,
			mem_sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		goto fail;
	}

	zone_sz = mem_sz / *cnt;
	if (!zone_sz) {
		dev_err(dev, "%s zone size == 0\n", name);
		goto fail;
	}

	prz_ar = kcalloc(*cnt, sizeof(**przs), GFP_KERNEL);
	if (!prz_ar)
		goto fail;

	for (i = 0; i < *cnt; i++) {
		char *label;

		if (*cnt == 1)
			label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
		else
			label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
					  name, i, *cnt - 1);
		prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
					       &cxt->ecc_info,
					       cxt->memtype, flags, label);
		kfree(label);
		if (IS_ERR(prz_ar[i])) {
			err = PTR_ERR(prz_ar[i]);
			dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
				name, record_size,
				(unsigned long long)*paddr, err);

			while (i > 0) {
				i--;
				persistent_ram_free(prz_ar[i]);
			}
			kfree(prz_ar);
			goto fail;
		}
		*paddr += zone_sz;
		prz_ar[i]->type = pstore_name_to_type(name);
	}

	*przs = prz_ar;
	return 0;

fail:
	*cnt = 0;
	return err;
}

static int ramoops_init_prz(const char *name,
			    struct device *dev, struct ramoops_context *cxt,
			    struct persistent_ram_zone **prz,
			    phys_addr_t *paddr, size_t sz, u32 sig)
{
	char *label;

	if (!sz)
		return 0;

	if (*paddr + sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name, sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		return -ENOMEM;
	}

	label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
				  cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
	kfree(label);
	if (IS_ERR(*prz)) {
		int err = PTR_ERR(*prz);

		dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
			name, sz, (unsigned long long)*paddr, err);
		return err;
	}

	*paddr += sz;
	(*prz)->type = pstore_name_to_type(name);

	return 0;
}

static int ramoops_parse_dt_size(struct platform_device *pdev,
				 const char *propname, u32 *value)
{
	u32 val32 = 0;
	int ret;

	ret = of_property_read_u32(pdev->dev.of_node, propname, &val32);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(&pdev->dev, "failed to parse property %s: %d\n",
			propname, ret);
		return ret;
	}

	if (val32 > INT_MAX) {
		dev_err(&pdev->dev, "%s %u > INT_MAX\n", propname, val32);
		return -EOVERFLOW;
	}

	*value = val32;
	return 0;
}

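/*
 * A minimal sketch of the expected device tree usage (the address,
 * sizes and node name below are illustrative only):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		ramoops@8f000000 {
 *			compatible = "ramoops";
 *			reg = <0x8f000000 0x100000>;
 *			record-size = <0x4000>;
 *			console-size = <0x4000>;
 *		};
 *	};
 *
 * The "reg" range ends up as mem_address/mem_size via the platform
 * resource; the remaining properties are read below.
 */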
static int ramoops_parse_dt(struct platform_device *pdev,
			    struct ramoops_platform_data *pdata)
{
	struct device_node *of_node = pdev->dev.of_node;
	struct device_node *parent_node;
	struct resource *res;
	u32 value;
	int ret;

	dev_dbg(&pdev->dev, "using Device Tree\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"failed to locate DT /reserved-memory resource\n");
		return -EINVAL;
	}

	pdata->mem_size = resource_size(res);
	pdata->mem_address = res->start;
	pdata->mem_type = of_property_read_bool(of_node, "unbuffered");
	pdata->dump_oops = !of_property_read_bool(of_node, "no-dump-oops");

#define parse_size(name, field) {				\
		ret = ramoops_parse_dt_size(pdev, name, &value);	\
		if (ret < 0)					\
			return ret;				\
		field = value;					\
	}

	parse_size("record-size", pdata->record_size);
	parse_size("console-size", pdata->console_size);
	parse_size("ftrace-size", pdata->ftrace_size);
	parse_size("pmsg-size", pdata->pmsg_size);
	parse_size("ecc-size", pdata->ecc_info.ecc_size);
	parse_size("flags", pdata->flags);

#undef parse_size

	/*
	 * Some old Chromebooks relied on the kernel setting the
	 * console_size and pmsg_size to the record size since that's
	 * what the downstream kernel did. These same Chromebooks had
	 * "ramoops" straight under the root node, which doesn't match
	 * the current upstream bindings (though it was arguably
	 * acceptable under a prior version of the bindings). Let's make
	 * those old Chromebooks work by detecting that we're not a
	 * child of "reserved-memory" and mimicking the expected
	 * behavior.
	 */
	parent_node = of_get_parent(of_node);
	if (!of_node_name_eq(parent_node, "reserved-memory") &&
	    !pdata->console_size && !pdata->ftrace_size &&
	    !pdata->pmsg_size && !pdata->ecc_info.ecc_size) {
		pdata->console_size = pdata->record_size;
		pdata->pmsg_size = pdata->record_size;
	}
	of_node_put(parent_node);

	return 0;
}

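/*
 * The reserved region is handed out in a fixed order: whatever is left
 * after subtracting the console, ftrace and pmsg sizes goes to the
 * dmesg dump zones, and then the console, ftrace and pmsg zones are
 * placed one after another, each advancing paddr as it is claimed.
 */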
static int ramoops_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ramoops_platform_data *pdata = dev->platform_data;
	struct ramoops_platform_data pdata_local;
	struct ramoops_context *cxt = &oops_cxt;
	size_t dump_mem_sz;
	phys_addr_t paddr;
	int err = -EINVAL;

	/*
	 * Only a single ramoops area is allowed at a time, so fail extra
	 * probes.
	 */
	if (cxt->max_dump_cnt) {
		pr_err("already initialized\n");
		goto fail_out;
	}

	if (dev_of_node(dev) && !pdata) {
		pdata = &pdata_local;
		memset(pdata, 0, sizeof(*pdata));

		err = ramoops_parse_dt(pdev, pdata);
		if (err < 0)
			goto fail_out;
	}

	/* Make sure we didn't get a bogus platform data pointer. */
	if (!pdata) {
		pr_err("NULL platform data\n");
		goto fail_out;
	}

	if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size &&
			!pdata->ftrace_size && !pdata->pmsg_size)) {
		pr_err("The memory size and the record/console size must be non-zero\n");
		goto fail_out;
	}

	if (pdata->record_size && !is_power_of_2(pdata->record_size))
		pdata->record_size = rounddown_pow_of_two(pdata->record_size);
	if (pdata->console_size && !is_power_of_2(pdata->console_size))
		pdata->console_size = rounddown_pow_of_two(pdata->console_size);
	if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size))
		pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
	if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size))
		pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size);

	cxt->size = pdata->mem_size;
	cxt->phys_addr = pdata->mem_address;
	cxt->memtype = pdata->mem_type;
	cxt->record_size = pdata->record_size;
	cxt->console_size = pdata->console_size;
	cxt->ftrace_size = pdata->ftrace_size;
	cxt->pmsg_size = pdata->pmsg_size;
	cxt->dump_oops = pdata->dump_oops;
	cxt->flags = pdata->flags;
	cxt->ecc_info = pdata->ecc_info;

	paddr = cxt->phys_addr;

	dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
			- cxt->pmsg_size;
	err = ramoops_init_przs("dmesg", dev, cxt, &cxt->dprzs, &paddr,
				dump_mem_sz, cxt->record_size,
				&cxt->max_dump_cnt, 0, 0);
	if (err)
		goto fail_out;

	err = ramoops_init_prz("console", dev, cxt, &cxt->cprz, &paddr,
			       cxt->console_size, 0);
	if (err)
		goto fail_init_cprz;

	cxt->max_ftrace_cnt = (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
				? nr_cpu_ids
				: 1;
	err = ramoops_init_przs("ftrace", dev, cxt, &cxt->fprzs, &paddr,
				cxt->ftrace_size, -1,
				&cxt->max_ftrace_cnt, LINUX_VERSION_CODE,
				(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
					? PRZ_FLAG_NO_LOCK : 0);
	if (err)
		goto fail_init_fprz;

	err = ramoops_init_prz("pmsg", dev, cxt, &cxt->mprz, &paddr,
			       cxt->pmsg_size, 0);
	if (err)
		goto fail_init_mprz;

	cxt->pstore.data = cxt;
	/*
	 * Prepare frontend flags based on which areas are initialized.
	 * For ramoops_init_przs() cases, the "max count" variable tells
	 * if there are regions present. For ramoops_init_prz() cases,
	 * the single region size is how to check.
	 */
	cxt->pstore.flags = 0;
	if (cxt->max_dump_cnt)
		cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
	if (cxt->console_size)
		cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
	if (cxt->max_ftrace_cnt)
		cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
	if (cxt->pmsg_size)
		cxt->pstore.flags |= PSTORE_FLAGS_PMSG;

	/*
	 * Since bufsize is only used for dmesg crash dumps, it
	 * must match the size of the dprz record (after PRZ header
	 * and ECC bytes have been accounted for).
	 */
	if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
		cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
		cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
		if (!cxt->pstore.buf) {
			pr_err("cannot allocate pstore crash dump buffer\n");
			err = -ENOMEM;
			goto fail_clear;
		}
	}

	err = pstore_register(&cxt->pstore);
	if (err) {
		pr_err("registering with pstore failed\n");
		goto fail_buf;
	}

	/*
	 * Update the module parameter variables as well so they are visible
	 * through /sys/module/ramoops/parameters/
	 */
	mem_size = pdata->mem_size;
	mem_address = pdata->mem_address;
	record_size = pdata->record_size;
	dump_oops = pdata->dump_oops;
	ramoops_console_size = pdata->console_size;
	ramoops_pmsg_size = pdata->pmsg_size;
	ramoops_ftrace_size = pdata->ftrace_size;

	pr_info("using 0x%lx@0x%llx, ecc: %d\n",
		cxt->size, (unsigned long long)cxt->phys_addr,
		cxt->ecc_info.ecc_size);

	return 0;

fail_buf:
	kfree(cxt->pstore.buf);
fail_clear:
	cxt->pstore.bufsize = 0;
	persistent_ram_free(cxt->mprz);
fail_init_mprz:
fail_init_fprz:
	persistent_ram_free(cxt->cprz);
fail_init_cprz:
	ramoops_free_przs(cxt);
fail_out:
	return err;
}

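/*
 * Tear down in roughly the reverse order of probe: detach from pstore
 * first, then release the crash dump buffer and the persistent RAM
 * zones.
 */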
static int ramoops_remove(struct platform_device *pdev)
{
	struct ramoops_context *cxt = &oops_cxt;

	pstore_unregister(&cxt->pstore);

	kfree(cxt->pstore.buf);
	cxt->pstore.bufsize = 0;

	persistent_ram_free(cxt->mprz);
	persistent_ram_free(cxt->cprz);
	ramoops_free_przs(cxt);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "ramoops" },
	{}
};

static struct platform_driver ramoops_driver = {
	.probe		= ramoops_probe,
	.remove		= ramoops_remove,
	.driver		= {
		.name		= "ramoops",
		.of_match_table	= dt_match,
	},
};

static inline void ramoops_unregister_dummy(void)
{
	platform_device_unregister(dummy);
	dummy = NULL;
}

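/*
 * Example boot-time configuration via module parameters (the values
 * here are illustrative only):
 *
 *	ramoops.mem_address=0x8f000000 ramoops.mem_size=0x100000 \
 *	ramoops.record_size=0x4000 ramoops.console_size=0x4000 ramoops.ecc=1
 *
 * When mem_size is set this way, ramoops_register_dummy() packs the
 * parameters into a platform data structure and registers a "ramoops"
 * platform device so the normal probe path picks them up.
 */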
static void __init ramoops_register_dummy(void)
{
	struct ramoops_platform_data pdata;

	/*
	 * Prepare a dummy platform data structure to carry the module
	 * parameters. If mem_size isn't set, then there are no module
	 * parameters, and we can skip this.
	 */
	if (!mem_size)
		return;

	pr_info("using module parameters\n");

	memset(&pdata, 0, sizeof(pdata));
	pdata.mem_size = mem_size;
	pdata.mem_address = mem_address;
	pdata.mem_type = mem_type;
	pdata.record_size = record_size;
	pdata.console_size = ramoops_console_size;
	pdata.ftrace_size = ramoops_ftrace_size;
	pdata.pmsg_size = ramoops_pmsg_size;
	pdata.dump_oops = dump_oops;
	pdata.flags = RAMOOPS_FLAG_FTRACE_PER_CPU;

	/*
	 * For backwards compatibility, ramoops.ecc=1 means 16 bytes of ECC
	 * (using 1 byte for ECC isn't of much use anyway).
	 */
	pdata.ecc_info.ecc_size = ramoops_ecc == 1 ? 16 : ramoops_ecc;

	dummy = platform_device_register_data(NULL, "ramoops", -1,
			&pdata, sizeof(pdata));
	if (IS_ERR(dummy)) {
		pr_info("could not create platform device: %ld\n",
			PTR_ERR(dummy));
		dummy = NULL;
		ramoops_unregister_dummy();
	}
}

static int __init ramoops_init(void)
{
	int ret;

	ramoops_register_dummy();
	ret = platform_driver_register(&ramoops_driver);
	if (ret != 0)
		ramoops_unregister_dummy();

	return ret;
}
postcore_initcall(ramoops_init);

static void __exit ramoops_exit(void)
{
	platform_driver_unregister(&ramoops_driver);
	ramoops_unregister_dummy();
}
module_exit(ramoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");