// SPDX-License-Identifier: GPL-2.0-only
/*
 * RAM Oops/Panic logger
 *
 * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
 * Copyright (C) 2011 Kees Cook <keescook@chromium.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/pstore.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/pstore_ram.h>
#include <linux/of.h>
#include <linux/of_address.h>

#define RAMOOPS_KERNMSG_HDR "===="
#define MIN_MEM_SIZE 4096UL

static ulong record_size = MIN_MEM_SIZE;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
		"size of each dump done on oops/panic");

static ulong ramoops_console_size = MIN_MEM_SIZE;
module_param_named(console_size, ramoops_console_size, ulong, 0400);
MODULE_PARM_DESC(console_size, "size of kernel console log");

static ulong ramoops_ftrace_size = MIN_MEM_SIZE;
module_param_named(ftrace_size, ramoops_ftrace_size, ulong, 0400);
MODULE_PARM_DESC(ftrace_size, "size of ftrace log");

static ulong ramoops_pmsg_size = MIN_MEM_SIZE;
module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400);
MODULE_PARM_DESC(pmsg_size, "size of user space message log");

static unsigned long long mem_address;
module_param_hw(mem_address, ullong, other, 0400);
MODULE_PARM_DESC(mem_address,
		"start of reserved RAM used to store oops/panic logs");

static ulong mem_size;
module_param(mem_size, ulong, 0400);
MODULE_PARM_DESC(mem_size,
		"size of reserved RAM used to store oops/panic logs");

static unsigned int mem_type;
module_param(mem_type, uint, 0600);
MODULE_PARM_DESC(mem_type,
		"set to 1 to try to use unbuffered memory (default 0)");

static int dump_oops = 1;
module_param(dump_oops, int, 0600);
MODULE_PARM_DESC(dump_oops,
		"set to 1 to dump oopses, 0 to only dump panics (default 1)");

static int ramoops_ecc;
module_param_named(ecc, ramoops_ecc, int, 0600);
MODULE_PARM_DESC(ramoops_ecc,
		"if non-zero, the option enables ECC support and specifies "
		"ECC buffer size in bytes (1 is a special value, means 16 "
		"bytes ECC)");

struct ramoops_context {
	struct persistent_ram_zone **dprzs;	/* Oops dump zones */
	struct persistent_ram_zone *cprz;	/* Console zone */
	struct persistent_ram_zone **fprzs;	/* Ftrace zones */
	struct persistent_ram_zone *mprz;	/* PMSG zone */
	phys_addr_t phys_addr;
	unsigned long size;
	unsigned int memtype;
	size_t record_size;
	size_t console_size;
	size_t ftrace_size;
	size_t pmsg_size;
	int dump_oops;
	u32 flags;
	struct persistent_ram_ecc_info ecc_info;
	unsigned int max_dump_cnt;
	unsigned int dump_write_cnt;
	/* The _read_cnt fields need to be cleared on ramoops_pstore_open() */
	unsigned int dump_read_cnt;
	unsigned int console_read_cnt;
	unsigned int max_ftrace_cnt;
	unsigned int ftrace_read_cnt;
	unsigned int pmsg_read_cnt;
	struct pstore_info pstore;
};

static struct platform_device *dummy;

static int ramoops_pstore_open(struct pstore_info *psi)
{
	struct ramoops_context *cxt = psi->data;

	cxt->dump_read_cnt = 0;
	cxt->console_read_cnt = 0;
	cxt->ftrace_read_cnt = 0;
	cxt->pmsg_read_cnt = 0;
	return 0;
}

static struct persistent_ram_zone *
ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
		     struct pstore_record *record)
{
	struct persistent_ram_zone *prz;

	/* Give up if we never existed or have hit the end. */
	if (!przs)
		return NULL;

	prz = przs[id];
	if (!prz)
		return NULL;

	/* Update old/shadowed buffer. */
	if (prz->type == PSTORE_TYPE_DMESG)
		persistent_ram_save_old(prz);

	if (!persistent_ram_old_size(prz))
		return NULL;

	record->type = prz->type;
	record->id = id;

	return prz;
}

static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
				 bool *compressed)
{
	char data_type;
	int header_length = 0;

	if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
		   (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
		   &header_length) == 3) {
		if (data_type == 'C')
			*compressed = true;
		else
			*compressed = false;
	} else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
			  (time64_t *)&time->tv_sec, &time->tv_nsec,
			  &header_length) == 2) {
		*compressed = false;
	} else {
		time->tv_sec = 0;
		time->tv_nsec = 0;
		*compressed = false;
	}
	return header_length;
}

static bool prz_ok(struct persistent_ram_zone *prz)
{
	return !!prz && !!(persistent_ram_old_size(prz) +
			   persistent_ram_ecc_string(prz, NULL, 0));
}

static ssize_t ftrace_log_combine(struct persistent_ram_zone *dest,
				  struct persistent_ram_zone *src)
{
	size_t dest_size, src_size, total, dest_off, src_off;
	size_t dest_idx = 0, src_idx = 0, merged_idx = 0;
	void *merged_buf;
	struct pstore_ftrace_record *drec, *srec, *mrec;
	size_t record_size = sizeof(struct pstore_ftrace_record);

	dest_off = dest->old_log_size % record_size;
	dest_size = dest->old_log_size - dest_off;

	src_off = src->old_log_size % record_size;
	src_size = src->old_log_size - src_off;

	total = dest_size + src_size;
	merged_buf = kmalloc(total, GFP_KERNEL);
	if (!merged_buf)
		return -ENOMEM;

	drec = (struct pstore_ftrace_record *)(dest->old_log + dest_off);
	srec = (struct pstore_ftrace_record *)(src->old_log + src_off);
	mrec = (struct pstore_ftrace_record *)(merged_buf);

	while (dest_size > 0 && src_size > 0) {
		if (pstore_ftrace_read_timestamp(&drec[dest_idx]) <
		    pstore_ftrace_read_timestamp(&srec[src_idx])) {
			mrec[merged_idx++] = drec[dest_idx++];
			dest_size -= record_size;
		} else {
			mrec[merged_idx++] = srec[src_idx++];
			src_size -= record_size;
		}
	}

	while (dest_size > 0) {
		mrec[merged_idx++] = drec[dest_idx++];
		dest_size -= record_size;
	}

	while (src_size > 0) {
		mrec[merged_idx++] = srec[src_idx++];
		src_size -= record_size;
	}

	kfree(dest->old_log);
	dest->old_log = merged_buf;
	dest->old_log_size = total;

	return 0;
}

static ssize_t ramoops_pstore_read(struct pstore_record *record)
{
	ssize_t size = 0;
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz = NULL;
	int header_length = 0;
	bool free_prz = false;

	/*
	 * Ramoops headers provide time stamps for PSTORE_TYPE_DMESG, but
	 * PSTORE_TYPE_CONSOLE and PSTORE_TYPE_FTRACE don't currently have
	 * valid time stamps, so it is initialized to zero.
	 */
	record->time.tv_sec = 0;
	record->time.tv_nsec = 0;
	record->compressed = false;

	/* Find the next valid persistent_ram_zone for DMESG */
	while (cxt->dump_read_cnt < cxt->max_dump_cnt && !prz) {
		prz = ramoops_get_next_prz(cxt->dprzs, cxt->dump_read_cnt++,
					   record);
		if (!prz_ok(prz))
			continue;
		header_length = ramoops_read_kmsg_hdr(persistent_ram_old(prz),
						      &record->time,
						      &record->compressed);
		/* Clear and skip this DMESG record if it has no valid header */
		if (!header_length) {
			persistent_ram_free_old(prz);
			persistent_ram_zap(prz);
			prz = NULL;
		}
	}

	if (!prz_ok(prz) && !cxt->console_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->cprz, 0 /* single */, record);

	if (!prz_ok(prz) && !cxt->pmsg_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->mprz, 0 /* single */, record);

	/* ftrace is last since it may want to dynamically allocate memory. */
	if (!prz_ok(prz)) {
		if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU) &&
		    !cxt->ftrace_read_cnt++) {
			prz = ramoops_get_next_prz(cxt->fprzs, 0 /* single */,
						   record);
		} else {
			/*
			 * Build a new dummy record which combines all the
			 * per-cpu records including metadata and ecc info.
			 */
			struct persistent_ram_zone *tmp_prz, *prz_next;

			tmp_prz = kzalloc(sizeof(struct persistent_ram_zone),
					  GFP_KERNEL);
			if (!tmp_prz)
				return -ENOMEM;
			prz = tmp_prz;
			free_prz = true;

			while (cxt->ftrace_read_cnt < cxt->max_ftrace_cnt) {
				prz_next = ramoops_get_next_prz(cxt->fprzs,
						cxt->ftrace_read_cnt++, record);

				if (!prz_ok(prz_next))
					continue;

				tmp_prz->ecc_info = prz_next->ecc_info;
				tmp_prz->corrected_bytes +=
						prz_next->corrected_bytes;
				tmp_prz->bad_blocks += prz_next->bad_blocks;
				size = ftrace_log_combine(tmp_prz, prz_next);
				if (size)
					goto out;
			}
			record->id = 0;
		}
	}

	if (!prz_ok(prz)) {
		size = 0;
		goto out;
	}

	size = persistent_ram_old_size(prz) - header_length;

	/* ECC correction notice */
	record->ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);

	record->buf = kmalloc(size + record->ecc_notice_size + 1, GFP_KERNEL);
	if (record->buf == NULL) {
		size = -ENOMEM;
		goto out;
	}

	memcpy(record->buf, (char *)persistent_ram_old(prz) + header_length,
	       size);

	persistent_ram_ecc_string(prz, record->buf + size,
				  record->ecc_notice_size + 1);

out:
	if (free_prz) {
		kfree(prz->old_log);
		kfree(prz);
	}

	return size;
}

static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz,
				     struct pstore_record *record)
{
	char hdr[36]; /* "===="(4), %lld(20), "."(1), %06lu(6), "-%c\n"(3) */
	size_t len;

	len = scnprintf(hdr, sizeof(hdr),
			RAMOOPS_KERNMSG_HDR "%lld.%06lu-%c\n",
			(time64_t)record->time.tv_sec,
			record->time.tv_nsec / 1000,
			record->compressed ? 'C' : 'D');
	persistent_ram_write(prz, hdr, len);

	return len;
}

static int notrace ramoops_pstore_write(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;
	size_t size, hlen;

	if (record->type == PSTORE_TYPE_CONSOLE) {
		if (!cxt->cprz)
			return -ENOMEM;
		persistent_ram_write(cxt->cprz, record->buf, record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_FTRACE) {
		int zonenum;

		if (!cxt->fprzs)
			return -ENOMEM;
		/*
		 * Choose the zone based on whether we're using per-cpu
		 * buffers.
		 */
		if (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
			zonenum = smp_processor_id();
		else
			zonenum = 0;

		persistent_ram_write(cxt->fprzs[zonenum], record->buf,
				     record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_PMSG) {
		pr_warn_ratelimited("PMSG shouldn't call %s\n", __func__);
		return -EINVAL;
	}

	if (record->type != PSTORE_TYPE_DMESG)
		return -EINVAL;

	/*
	 * Out of the various dmesg dump types, ramoops is currently designed
	 * to only store crash logs, rather than storing general kernel logs.
	 */
	if (record->reason != KMSG_DUMP_OOPS &&
	    record->reason != KMSG_DUMP_PANIC)
		return -EINVAL;

	/* Skip Oopses when configured to do so. */
	if (record->reason == KMSG_DUMP_OOPS && !cxt->dump_oops)
		return -EINVAL;

	/*
	 * Explicitly only take the first part of any new crash.
	 * If our buffer is larger than kmsg_bytes, this can never happen,
	 * and if our buffer is smaller than kmsg_bytes, we don't want the
	 * report split across multiple records.
	 */
	if (record->part != 1)
		return -ENOSPC;

	if (!cxt->dprzs)
		return -ENOSPC;

	prz = cxt->dprzs[cxt->dump_write_cnt];

	/* Build header and append record contents. */
	hlen = ramoops_write_kmsg_hdr(prz, record);
	if (!hlen)
		return -ENOMEM;

	size = record->size;
	if (size + hlen > prz->buffer_size)
		size = prz->buffer_size - hlen;
	persistent_ram_write(prz, record->buf, size);

	cxt->dump_write_cnt = (cxt->dump_write_cnt + 1) % cxt->max_dump_cnt;

	return 0;
}

static int notrace ramoops_pstore_write_user(struct pstore_record *record,
					     const char __user *buf)
{
	if (record->type == PSTORE_TYPE_PMSG) {
		struct ramoops_context *cxt = record->psi->data;

		if (!cxt->mprz)
			return -ENOMEM;
		return persistent_ram_write_user(cxt->mprz, buf, record->size);
	}

	return -EINVAL;
}

static int ramoops_pstore_erase(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;

	switch (record->type) {
	case PSTORE_TYPE_DMESG:
		if (record->id >= cxt->max_dump_cnt)
			return -EINVAL;
		prz = cxt->dprzs[record->id];
		break;
	case PSTORE_TYPE_CONSOLE:
		prz = cxt->cprz;
		break;
	case PSTORE_TYPE_FTRACE:
		if (record->id >= cxt->max_ftrace_cnt)
			return -EINVAL;
		prz = cxt->fprzs[record->id];
		break;
	case PSTORE_TYPE_PMSG:
		prz = cxt->mprz;
		break;
	default:
		return -EINVAL;
	}

	persistent_ram_free_old(prz);
	persistent_ram_zap(prz);

	return 0;
}

static struct ramoops_context oops_cxt = {
	.pstore = {
		.owner	= THIS_MODULE,
		.name	= "ramoops",
		.open	= ramoops_pstore_open,
		.read	= ramoops_pstore_read,
		.write	= ramoops_pstore_write,
		.write_user	= ramoops_pstore_write_user,
		.erase	= ramoops_pstore_erase,
	},
};

static void ramoops_free_przs(struct ramoops_context *cxt)
{
	int i;

	/* Free dump PRZs */
	if (cxt->dprzs) {
		for (i = 0; i < cxt->max_dump_cnt; i++)
			persistent_ram_free(cxt->dprzs[i]);

		kfree(cxt->dprzs);
		cxt->max_dump_cnt = 0;
	}

	/* Free ftrace PRZs */
	if (cxt->fprzs) {
		for (i = 0; i < cxt->max_ftrace_cnt; i++)
			persistent_ram_free(cxt->fprzs[i]);
		kfree(cxt->fprzs);
		cxt->max_ftrace_cnt = 0;
	}
}

static int ramoops_init_przs(const char *name,
			     struct device *dev, struct ramoops_context *cxt,
			     struct persistent_ram_zone ***przs,
			     phys_addr_t *paddr, size_t mem_sz,
			     ssize_t record_size,
			     unsigned int *cnt, u32 sig, u32 flags)
{
	int err = -ENOMEM;
	int i;
	size_t zone_sz;
	struct persistent_ram_zone **prz_ar;

	/* Allocate nothing for 0 mem_sz or 0 record_size. */
	if (mem_sz == 0 || record_size == 0) {
		*cnt = 0;
		return 0;
	}

	/*
	 * If we have a negative record size, calculate it based on
	 * mem_sz / *cnt. If we have a positive record size, calculate
	 * cnt from mem_sz / record_size.
	 */
	if (record_size < 0) {
		if (*cnt == 0)
			return 0;
		record_size = mem_sz / *cnt;
		if (record_size == 0) {
			dev_err(dev, "%s record size == 0 (%zu / %u)\n",
				name, mem_sz, *cnt);
			goto fail;
		}
	} else {
		*cnt = mem_sz / record_size;
		if (*cnt == 0) {
			dev_err(dev, "%s record count == 0 (%zu / %zu)\n",
				name, mem_sz, record_size);
			goto fail;
		}
	}

	if (*paddr + mem_sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name,
			mem_sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		goto fail;
	}

	zone_sz = mem_sz / *cnt;
	if (!zone_sz) {
		dev_err(dev, "%s zone size == 0\n", name);
		goto fail;
	}

	prz_ar = kcalloc(*cnt, sizeof(**przs), GFP_KERNEL);
	if (!prz_ar)
		goto fail;

	for (i = 0; i < *cnt; i++) {
		char *label;

		if (*cnt == 1)
			label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
		else
			label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
					  name, i, *cnt - 1);
		prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
					       &cxt->ecc_info,
					       cxt->memtype, flags, label);
		if (IS_ERR(prz_ar[i])) {
			err = PTR_ERR(prz_ar[i]);
			dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
				name, record_size,
				(unsigned long long)*paddr, err);

			while (i > 0) {
				i--;
				persistent_ram_free(prz_ar[i]);
			}
			kfree(prz_ar);
			goto fail;
		}
		*paddr += zone_sz;
		prz_ar[i]->type = pstore_name_to_type(name);
	}

	*przs = prz_ar;
	return 0;

fail:
	*cnt = 0;
	return err;
}

static int ramoops_init_prz(const char *name,
			    struct device *dev, struct ramoops_context *cxt,
			    struct persistent_ram_zone **prz,
			    phys_addr_t *paddr, size_t sz, u32 sig)
{
	char *label;

	if (!sz)
		return 0;

	if (*paddr + sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name, sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		return -ENOMEM;
	}

	label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
				  cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
	if (IS_ERR(*prz)) {
		int err = PTR_ERR(*prz);

		dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
			name, sz, (unsigned long long)*paddr, err);
		return err;
	}

	*paddr += sz;
	(*prz)->type = pstore_name_to_type(name);

	return 0;
}

static int ramoops_parse_dt_size(struct platform_device *pdev,
				 const char *propname, u32 *value)
{
	u32 val32 = 0;
	int ret;

	ret = of_property_read_u32(pdev->dev.of_node, propname, &val32);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(&pdev->dev, "failed to parse property %s: %d\n",
			propname, ret);
		return ret;
	}

	if (val32 > INT_MAX) {
		dev_err(&pdev->dev, "%s %u > INT_MAX\n", propname, val32);
		return -EOVERFLOW;
	}

	*value = val32;
	return 0;
}

static int ramoops_parse_dt(struct platform_device *pdev,
			    struct ramoops_platform_data *pdata)
{
	struct device_node *of_node = pdev->dev.of_node;
	struct device_node *parent_node;
	struct resource *res;
	u32 value;
	int ret;

	dev_dbg(&pdev->dev, "using Device Tree\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"failed to locate DT /reserved-memory resource\n");
		return -EINVAL;
	}

	pdata->mem_size = resource_size(res);
	pdata->mem_address = res->start;
	pdata->mem_type = of_property_read_bool(of_node, "unbuffered");
	pdata->dump_oops = !of_property_read_bool(of_node, "no-dump-oops");

#define parse_size(name, field) { \
	ret = ramoops_parse_dt_size(pdev, name, &value); \
	if (ret < 0) \
		return ret; \
	field = value; \
}

	parse_size("record-size", pdata->record_size);
	parse_size("console-size", pdata->console_size);
	parse_size("ftrace-size", pdata->ftrace_size);
	parse_size("pmsg-size", pdata->pmsg_size);
	parse_size("ecc-size", pdata->ecc_info.ecc_size);
	parse_size("flags", pdata->flags);

#undef parse_size

	/*
	 * Some old Chromebooks relied on the kernel setting the
	 * console_size and pmsg_size to the record size since that's
	 * what the downstream kernel did. These same Chromebooks had
	 * "ramoops" straight under the root node which isn't
	 * according to the current upstream bindings (though it was
	 * arguably acceptable under a prior version of the bindings).
	 * Let's make those old Chromebooks work by detecting that
	 * we're not a child of "reserved-memory" and mimicking the
	 * expected behavior.
	 */
	parent_node = of_get_parent(of_node);
	if (!of_node_name_eq(parent_node, "reserved-memory") &&
	    !pdata->console_size && !pdata->ftrace_size &&
	    !pdata->pmsg_size && !pdata->ecc_info.ecc_size) {
		pdata->console_size = pdata->record_size;
		pdata->pmsg_size = pdata->record_size;
	}
	of_node_put(parent_node);

	return 0;
}

static int ramoops_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ramoops_platform_data *pdata = dev->platform_data;
	struct ramoops_platform_data pdata_local;
	struct ramoops_context *cxt = &oops_cxt;
	size_t dump_mem_sz;
	phys_addr_t paddr;
	int err = -EINVAL;

	/*
	 * Only a single ramoops area is allowed at a time, so fail extra
	 * probes.
	 */
	if (cxt->max_dump_cnt) {
		pr_err("already initialized\n");
		goto fail_out;
	}

	if (dev_of_node(dev) && !pdata) {
		pdata = &pdata_local;
		memset(pdata, 0, sizeof(*pdata));

		err = ramoops_parse_dt(pdev, pdata);
		if (err < 0)
			goto fail_out;
	}

	/* Make sure we didn't get a bogus platform data pointer. */
	if (!pdata) {
		pr_err("NULL platform data\n");
		goto fail_out;
	}

	if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size &&
			!pdata->ftrace_size && !pdata->pmsg_size)) {
		pr_err("The memory size and the record/console size must be "
			"non-zero\n");
		goto fail_out;
	}

	if (pdata->record_size && !is_power_of_2(pdata->record_size))
		pdata->record_size = rounddown_pow_of_two(pdata->record_size);
	if (pdata->console_size && !is_power_of_2(pdata->console_size))
		pdata->console_size = rounddown_pow_of_two(pdata->console_size);
	if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size))
		pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
	if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size))
		pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size);

	cxt->size = pdata->mem_size;
	cxt->phys_addr = pdata->mem_address;
	cxt->memtype = pdata->mem_type;
	cxt->record_size = pdata->record_size;
	cxt->console_size = pdata->console_size;
	cxt->ftrace_size = pdata->ftrace_size;
	cxt->pmsg_size = pdata->pmsg_size;
	cxt->dump_oops = pdata->dump_oops;
	cxt->flags = pdata->flags;
	cxt->ecc_info = pdata->ecc_info;

	paddr = cxt->phys_addr;

	dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
			- cxt->pmsg_size;
	err = ramoops_init_przs("dmesg", dev, cxt, &cxt->dprzs, &paddr,
				dump_mem_sz, cxt->record_size,
				&cxt->max_dump_cnt, 0, 0);
	if (err)
		goto fail_out;

	err = ramoops_init_prz("console", dev, cxt, &cxt->cprz, &paddr,
			       cxt->console_size, 0);
	if (err)
		goto fail_init_cprz;

	cxt->max_ftrace_cnt = (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
				? nr_cpu_ids
				: 1;
	err = ramoops_init_przs("ftrace", dev, cxt, &cxt->fprzs, &paddr,
				cxt->ftrace_size, -1,
				&cxt->max_ftrace_cnt, LINUX_VERSION_CODE,
				(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
					? PRZ_FLAG_NO_LOCK : 0);
	if (err)
		goto fail_init_fprz;

	err = ramoops_init_prz("pmsg", dev, cxt, &cxt->mprz, &paddr,
				cxt->pmsg_size, 0);
	if (err)
		goto fail_init_mprz;

	cxt->pstore.data = cxt;
	/*
	 * Prepare frontend flags based on which areas are initialized.
	 * For ramoops_init_przs() cases, the "max count" variable tells
	 * if there are regions present. For ramoops_init_prz() cases,
	 * the single region size is how to check.
	 */
	cxt->pstore.flags = 0;
	if (cxt->max_dump_cnt)
		cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
	if (cxt->console_size)
		cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
	if (cxt->max_ftrace_cnt)
		cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
	if (cxt->pmsg_size)
		cxt->pstore.flags |= PSTORE_FLAGS_PMSG;

	/*
	 * Since bufsize is only used for dmesg crash dumps, it
	 * must match the size of the dprz record (after PRZ header
	 * and ECC bytes have been accounted for).
	 */
	if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
		cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
		cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
		if (!cxt->pstore.buf) {
			pr_err("cannot allocate pstore crash dump buffer\n");
			err = -ENOMEM;
			goto fail_clear;
		}
	}

	err = pstore_register(&cxt->pstore);
	if (err) {
		pr_err("registering with pstore failed\n");
		goto fail_buf;
	}

	/*
	 * Update the module parameter variables as well so they are visible
	 * through /sys/module/ramoops/parameters/
	 */
	mem_size = pdata->mem_size;
	mem_address = pdata->mem_address;
	record_size = pdata->record_size;
	dump_oops = pdata->dump_oops;
	ramoops_console_size = pdata->console_size;
	ramoops_pmsg_size = pdata->pmsg_size;
	ramoops_ftrace_size = pdata->ftrace_size;

	pr_info("using 0x%lx@0x%llx, ecc: %d\n",
		cxt->size, (unsigned long long)cxt->phys_addr,
		cxt->ecc_info.ecc_size);

	return 0;

fail_buf:
	kfree(cxt->pstore.buf);
fail_clear:
	cxt->pstore.bufsize = 0;
	persistent_ram_free(cxt->mprz);
fail_init_mprz:
fail_init_fprz:
	persistent_ram_free(cxt->cprz);
fail_init_cprz:
	ramoops_free_przs(cxt);
fail_out:
	return err;
}

static int ramoops_remove(struct platform_device *pdev)
{
	struct ramoops_context *cxt = &oops_cxt;

	pstore_unregister(&cxt->pstore);

	kfree(cxt->pstore.buf);
	cxt->pstore.bufsize = 0;

	persistent_ram_free(cxt->mprz);
	persistent_ram_free(cxt->cprz);
	ramoops_free_przs(cxt);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "ramoops" },
	{}
};

static struct platform_driver ramoops_driver = {
	.probe		= ramoops_probe,
	.remove		= ramoops_remove,
	.driver		= {
		.name		= "ramoops",
		.of_match_table	= dt_match,
	},
};

static inline void ramoops_unregister_dummy(void)
{
	platform_device_unregister(dummy);
	dummy = NULL;
}

static void __init ramoops_register_dummy(void)
{
	struct ramoops_platform_data pdata;

	/*
	 * Prepare a dummy platform data structure to carry the module
	 * parameters. If mem_size isn't set, then there are no module
	 * parameters, and we can skip this.
	 */
	if (!mem_size)
		return;

	pr_info("using module parameters\n");

	memset(&pdata, 0, sizeof(pdata));
	pdata.mem_size = mem_size;
	pdata.mem_address = mem_address;
	pdata.mem_type = mem_type;
	pdata.record_size = record_size;
	pdata.console_size = ramoops_console_size;
	pdata.ftrace_size = ramoops_ftrace_size;
	pdata.pmsg_size = ramoops_pmsg_size;
	pdata.dump_oops = dump_oops;
	pdata.flags = RAMOOPS_FLAG_FTRACE_PER_CPU;

	/*
	 * For backwards compatibility, ramoops.ecc=1 means 16 bytes of ECC
	 * (using 1 byte for ECC isn't of much use anyway).
	 */
	pdata.ecc_info.ecc_size = ramoops_ecc == 1 ? 16 : ramoops_ecc;

	dummy = platform_device_register_data(NULL, "ramoops", -1,
			&pdata, sizeof(pdata));
	if (IS_ERR(dummy)) {
		pr_info("could not create platform device: %ld\n",
			PTR_ERR(dummy));
		dummy = NULL;
		ramoops_unregister_dummy();
	}
}

static int __init ramoops_init(void)
{
	int ret;

	ramoops_register_dummy();
	ret = platform_driver_register(&ramoops_driver);
	if (ret != 0)
		ramoops_unregister_dummy();

	return ret;
}
postcore_initcall(ramoops_init);

static void __exit ramoops_exit(void)
{
	platform_driver_unregister(&ramoops_driver);
	ramoops_unregister_dummy();
}
module_exit(ramoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");
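
/*
 * Usage sketch: the simplest way to exercise this backend is through the
 * module parameters declared at the top of this file. The address and sizes
 * below are illustrative placeholders only and must point at RAM that the
 * platform genuinely sets aside; record_size/console_size are rounded down
 * to powers of two by ramoops_probe(), and ecc=1 selects 16 bytes of ECC as
 * noted above:
 *
 *   ramoops.mem_address=0x8000000 ramoops.mem_size=0x100000 \
 *   ramoops.record_size=0x4000 ramoops.console_size=0x4000 ramoops.ecc=1
 *
 * After the next crash and reboot, captured records appear in the mounted
 * pstore filesystem, e.g. /sys/fs/pstore/dmesg-ramoops-0. Device Tree users
 * instead describe a "ramoops"-compatible node under /reserved-memory with
 * the record-size/console-size/ftrace-size/pmsg-size properties parsed by
 * ramoops_parse_dt() above.
 */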