// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Firmware Assisted dump: A robust mechanism to get reliable kernel crash
 * dump with assistance from firmware. This approach does not use kexec;
 * instead, firmware assists in booting the kdump kernel while preserving
 * memory contents. Much of the code here has been adapted from the phyp
 * assisted dump implementation written by Linas Vepstas and Manish Ahuja.
 *
 * Copyright 2011 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "fadump: " fmt

#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/cma.h>
#include <linux/hugetlb.h>

#include <asm/debugfs.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
#include <asm/setup.h>
#include <asm/interrupt.h>

/*
 * The CPU that acquired the lock to trigger the fadump crash should
 * wait for other CPUs to enter.
 *
 * The timeout is in milliseconds.
 */
#define CRASH_TIMEOUT		500

static struct fw_dump fw_dump;

static void __init fadump_reserve_crash_area(u64 base);

struct kobject *fadump_kobj;

#ifndef CONFIG_PRESERVE_FA_DUMP

static atomic_t cpus_in_fadump;
static DEFINE_MUTEX(fadump_mutex);

struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };

#define RESERVED_RNGS_SZ	16384 /* 16K - 128 entries */
#define RESERVED_RNGS_CNT	(RESERVED_RNGS_SZ / \
				 sizeof(struct fadump_memory_range))
static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs,
						   RESERVED_RNGS_SZ, 0,
						   RESERVED_RNGS_CNT, true };

static void __init early_init_dt_scan_reserved_ranges(unsigned long node);

#ifdef CONFIG_CMA
static struct cma *fadump_cma;

/*
 * fadump_cma_init() - Initialize CMA area from fadump reserved memory
 *
 * The total size of fadump reserved memory covers the boot memory size,
 * CPU state data size, HPTE region size and metadata. Only the area
 * equivalent to the boot memory size is handed over for CMA use; the
 * remaining portion of the fadump reservation is not given to CMA and
 * those pages stay reserved. The boot memory size is aligned per CMA
 * requirements to satisfy the cma_init_reserved_mem() call. Even if that
 * call fails, the memory reservation is still in place, so fadump can
 * carry on without CMA.
 */
int __init fadump_cma_init(void)
{
	unsigned long long base, size;
	int rc;

	if (!fw_dump.fadump_enabled)
		return 0;

	/*
	 * Do not use CMA if user has provided fadump=nocma kernel parameter.
	 * Return 1 to continue with fadump old behaviour.
	 */
	if (fw_dump.nocma)
		return 1;

	base = fw_dump.reserve_dump_area_start;
	size = fw_dump.boot_memory_size;

	if (!size)
		return 0;

	rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma);
	if (rc) {
		pr_err("Failed to init cma area for firmware-assisted dump, %d\n", rc);
		/*
		 * Though the CMA init has failed, we still have the memory
		 * reservation with us. The reserved memory will be
		 * blocked from production system usage. Hence return 1,
		 * so that we can continue with fadump.
		 */
		return 1;
	}

	/* CMA area for fadump is now initialized successfully. */
	pr_info("Initialized 0x%lx bytes cma area at %ldMB from 0x%lx "
		"bytes of memory reserved for firmware-assisted dump\n",
		cma_get_size(fadump_cma),
		(unsigned long)cma_get_base(fadump_cma) >> 20,
		fw_dump.reserve_dump_area_size);
	return 1;
}
#else
static int __init fadump_cma_init(void) { return 1; }
#endif /* CONFIG_CMA */
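/*
 * Note on the scan below: early_init_dt_scan_fw_dump() is used as an
 * of_scan_flat_dt() callback, so returning a non-zero value stops the
 * flat device-tree walk. The root node (depth 0) carries the
 * "reserved-ranges" property, while the fadump configuration itself
 * hangs off "rtas" (pseries) or "ibm,opal" (powernv).
 */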
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
				      int depth, void *data)
{
	if (depth == 0) {
		early_init_dt_scan_reserved_ranges(node);
		return 0;
	}

	if (depth != 1)
		return 0;

	if (strcmp(uname, "rtas") == 0) {
		rtas_fadump_dt_scan(&fw_dump, node);
		return 1;
	}

	if (strcmp(uname, "ibm,opal") == 0) {
		opal_fadump_dt_scan(&fw_dump, node);
		return 1;
	}

	return 0;
}

/*
 * If fadump is registered, check whether the given memory range falls
 * within the boot memory area or the reserved memory area.
 */
int is_fadump_memory_area(u64 addr, unsigned long size)
{
	u64 d_start, d_end;

	if (!fw_dump.dump_registered)
		return 0;

	if (!size)
		return 0;

	d_start = fw_dump.reserve_dump_area_start;
	d_end = d_start + fw_dump.reserve_dump_area_size;
	if (((addr + size) > d_start) && (addr <= d_end))
		return 1;

	return (addr <= fw_dump.boot_mem_top);
}

int should_fadump_crash(void)
{
	if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr)
		return 0;
	return 1;
}

int is_fadump_active(void)
{
	return fw_dump.dump_active;
}

/*
 * Returns true if there are no holes in the memory area between d_start
 * and d_end, false otherwise.
 */
static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end)
{
	phys_addr_t reg_start, reg_end;
	bool ret = false;
	u64 i, start, end;

	for_each_mem_range(i, &reg_start, &reg_end) {
		start = max_t(u64, d_start, reg_start);
		end = min_t(u64, d_end, reg_end);
		if (d_start < end) {
			/* Memory hole from d_start to start */
			if (start > d_start)
				break;

			if (end == d_end) {
				ret = true;
				break;
			}

			d_start = end + 1;
		}
	}

	return ret;
}

/*
 * Returns true if there are no holes in the boot memory area,
 * false otherwise.
 */
bool is_fadump_boot_mem_contiguous(void)
{
	unsigned long d_start, d_end;
	bool ret = false;
	int i;

	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		d_start = fw_dump.boot_mem_addr[i];
		d_end = d_start + fw_dump.boot_mem_sz[i];

		ret = is_fadump_mem_area_contiguous(d_start, d_end);
		if (!ret)
			break;
	}

	return ret;
}

/*
 * Returns true if there are no holes in the reserved memory area,
 * false otherwise.
 */
bool is_fadump_reserved_mem_contiguous(void)
{
	u64 d_start, d_end;

	d_start = fw_dump.reserve_dump_area_start;
	d_end = d_start + fw_dump.reserve_dump_area_size;
	return is_fadump_mem_area_contiguous(d_start, d_end);
}
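/*
 * Illustration for the contiguity checks above (hypothetical layout):
 * with memory blocks [0, 1GB) and [2GB, 4GB), a check on [0, 3GB) fails
 * because of the [1GB, 2GB) hole, while a check on [2GB, 3GB) passes as
 * that span lies entirely within the second block.
 */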
/* Print firmware-assisted dump configuration details for debugging. */
static void fadump_show_config(void)
{
	int i;

	pr_debug("Support for firmware-assisted dump (fadump): %s\n",
		 (fw_dump.fadump_supported ? "present" : "no support"));

	if (!fw_dump.fadump_supported)
		return;

	pr_debug("Fadump enabled    : %s\n",
		 (fw_dump.fadump_enabled ? "yes" : "no"));
	pr_debug("Dump Active       : %s\n",
		 (fw_dump.dump_active ? "yes" : "no"));
	pr_debug("Dump section sizes:\n");
	pr_debug("    CPU state data size: %lx\n", fw_dump.cpu_state_data_size);
	pr_debug("    HPTE region size   : %lx\n", fw_dump.hpte_region_size);
	pr_debug("    Boot memory size   : %lx\n", fw_dump.boot_memory_size);
	pr_debug("    Boot memory top    : %llx\n", fw_dump.boot_mem_top);
	pr_debug("Boot memory regions cnt: %llx\n", fw_dump.boot_mem_regs_cnt);
	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		pr_debug("[%03d] base = %llx, size = %llx\n", i,
			 fw_dump.boot_mem_addr[i], fw_dump.boot_mem_sz[i]);
	}
}

/**
 * fadump_calculate_reserve_size(): reserve variable boot area 5% of System RAM
 *
 * Function to find the largest memory size we need to reserve during early
 * boot process. This will be the size of the memory that is required for a
 * kernel to boot successfully.
 *
 * This function has been taken from phyp-assisted dump feature implementation.
 *
 * Returns the larger of 256MB or 5% of system RAM, rounded down to
 * multiples of 256MB.
 *
 * TODO: Come up with a better approach to find out a more accurate memory
 * size that is required for a kernel to boot successfully.
 */
static __init u64 fadump_calculate_reserve_size(void)
{
	u64 base, size, bootmem_min;
	int ret;

	if (fw_dump.reserve_bootvar)
		pr_warn("'fadump_reserve_mem=' parameter is deprecated in favor of 'crashkernel=' parameter.\n");

	/*
	 * Check if the size is specified through crashkernel= cmdline
	 * option. If yes, then use that but ignore base as fadump reserves
	 * memory at a predefined offset.
	 */
	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&size, &base);
	if (ret == 0 && size > 0) {
		unsigned long max_size;

		if (fw_dump.reserve_bootvar)
			pr_info("Using 'crashkernel=' parameter for memory reservation.\n");

		fw_dump.reserve_bootvar = (unsigned long)size;

		/*
		 * Adjust if the boot memory size specified is above
		 * the upper limit.
		 */
		max_size = memblock_phys_mem_size() / MAX_BOOT_MEM_RATIO;
		if (fw_dump.reserve_bootvar > max_size) {
			fw_dump.reserve_bootvar = max_size;
			pr_info("Adjusted boot memory size to %luMB\n",
				(fw_dump.reserve_bootvar >> 20));
		}

		return fw_dump.reserve_bootvar;
	} else if (fw_dump.reserve_bootvar) {
		/*
		 * 'fadump_reserve_mem=' is being used to reserve memory
		 * for firmware-assisted dump.
		 */
		return fw_dump.reserve_bootvar;
	}

	/* divide by 20 to get 5% of value */
	size = memblock_phys_mem_size() / 20;

	/* round it down to multiples of 256MB */
	size = size & ~0x0FFFFFFFUL;

	/* Truncate to memory_limit. We don't want to over-reserve memory. */
	if (memory_limit && size > memory_limit)
		size = memory_limit;

	bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
	return (size > bootmem_min ? size : bootmem_min);
}
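/*
 * Worked example for the sizing above (illustrative numbers): on a 64GB
 * system, memblock_phys_mem_size() / 20 is ~3.2GB (0xCCCCCCCC bytes);
 * masking with ~0x0FFFFFFFUL rounds that down to a 256MB multiple,
 * giving 3GB (0xC0000000). The result is then raised to the platform's
 * bootmem_min if it falls short.
 */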
/*
 * Calculate the total memory size required to be reserved for
 * firmware-assisted dump registration.
 */
static unsigned long get_fadump_area_size(void)
{
	unsigned long size = 0;

	size += fw_dump.cpu_state_data_size;
	size += fw_dump.hpte_region_size;
	size += fw_dump.boot_memory_size;
	size += sizeof(struct fadump_crash_info_header);
	size += sizeof(struct elfhdr); /* ELF core header.*/
	size += sizeof(struct elf_phdr); /* place holder for cpu notes */
	/* Program headers for crash memory regions. */
	size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2);

	size = PAGE_ALIGN(size);

	/* This is to hold kernel metadata on platforms that support it */
	size += (fw_dump.ops->fadump_get_metadata_size ?
		 fw_dump.ops->fadump_get_metadata_size() : 0);
	return size;
}

static int __init add_boot_mem_region(unsigned long rstart,
				      unsigned long rsize)
{
	int i = fw_dump.boot_mem_regs_cnt++;

	if (fw_dump.boot_mem_regs_cnt > FADUMP_MAX_MEM_REGS) {
		fw_dump.boot_mem_regs_cnt = FADUMP_MAX_MEM_REGS;
		return 0;
	}

	pr_debug("Added boot memory range[%d] [%#016lx-%#016lx)\n",
		 i, rstart, (rstart + rsize));
	fw_dump.boot_mem_addr[i] = rstart;
	fw_dump.boot_mem_sz[i] = rsize;
	return 1;
}

/*
 * Firmware usually has a hard limit on the data it can copy per region.
 * Honour that by splitting a memory range into multiple regions.
 */
static int __init add_boot_mem_regions(unsigned long mstart,
				       unsigned long msize)
{
	unsigned long rstart, rsize, max_size;
	int ret = 1;

	rstart = mstart;
	max_size = fw_dump.max_copy_size ? fw_dump.max_copy_size : msize;
	while (msize) {
		if (msize > max_size)
			rsize = max_size;
		else
			rsize = msize;

		ret = add_boot_mem_region(rstart, rsize);
		if (!ret)
			break;

		msize -= rsize;
		rstart += rsize;
	}

	return ret;
}

static int __init fadump_get_boot_mem_regions(void)
{
	unsigned long size, cur_size, hole_size, last_end;
	unsigned long mem_size = fw_dump.boot_memory_size;
	phys_addr_t reg_start, reg_end;
	int ret = 1;
	u64 i;

	fw_dump.boot_mem_regs_cnt = 0;

	last_end = 0;
	hole_size = 0;
	cur_size = 0;
	for_each_mem_range(i, &reg_start, &reg_end) {
		size = reg_end - reg_start;
		hole_size += (reg_start - last_end);

		if ((cur_size + size) >= mem_size) {
			size = (mem_size - cur_size);
			ret = add_boot_mem_regions(reg_start, size);
			break;
		}

		mem_size -= size;
		cur_size += size;
		ret = add_boot_mem_regions(reg_start, size);
		if (!ret)
			break;

		last_end = reg_end;
	}
	fw_dump.boot_mem_top = PAGE_ALIGN(fw_dump.boot_memory_size + hole_size);

	return ret;
}
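/*
 * Illustration (hypothetical layout): with boot_memory_size = 1GB,
 * fw_dump.max_copy_size = 256MB and a single memory block [0, 16GB),
 * fadump_get_boot_mem_regions() takes the first 1GB and
 * add_boot_mem_regions() splits it into four 256MB boot memory regions
 * to honour the firmware copy limit; boot_mem_top ends up at 1GB as
 * there are no holes.
 */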
/*
 * Returns true if the given range overlaps with any of the reserved
 * memory ranges starting at index *idx, updating *idx to the index of
 * the overlapping memory range. Returns false otherwise.
 */
static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx)
{
	bool ret = false;
	int i;

	for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
		u64 rbase = reserved_mrange_info.mem_ranges[i].base;
		u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;

		if (end <= rbase)
			break;

		if ((end > rbase) && (base < rend)) {
			*idx = i;
			ret = true;
			break;
		}
	}

	return ret;
}

/*
 * Locate a suitable memory area to reserve memory for FADump. While at it,
 * lookup reserved-ranges & avoid overlap with them, as they are used by F/W.
 */
static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
{
	struct fadump_memory_range *mrngs;
	phys_addr_t mstart, mend;
	int idx = 0;
	u64 i, ret = 0;

	mrngs = reserved_mrange_info.mem_ranges;
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&mstart, &mend, NULL) {
		pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
			 i, mstart, mend, base);

		if (mstart > base)
			base = PAGE_ALIGN(mstart);

		while ((mend > base) && ((mend - base) >= size)) {
			if (!overlaps_reserved_ranges(base, base + size, &idx)) {
				ret = base;
				goto out;
			}

			base = mrngs[idx].base + mrngs[idx].size;
			base = PAGE_ALIGN(base);
		}
	}

out:
	return ret;
}

int __init fadump_reserve_mem(void)
{
	u64 base, size, mem_boundary, bootmem_min;
	int ret = 1;

	if (!fw_dump.fadump_enabled)
		return 0;

	if (!fw_dump.fadump_supported) {
		pr_info("Firmware-Assisted Dump is not supported on this hardware\n");
		goto error_out;
	}

	/*
	 * Initialize the boot memory size.
	 * If a dump is active, the size was already calculated during
	 * the first kernel boot.
	 */
	if (!fw_dump.dump_active) {
		fw_dump.boot_memory_size =
			PAGE_ALIGN(fadump_calculate_reserve_size());
#ifdef CONFIG_CMA
		if (!fw_dump.nocma) {
			fw_dump.boot_memory_size =
				ALIGN(fw_dump.boot_memory_size,
				      FADUMP_CMA_ALIGNMENT);
		}
#endif

		bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
		if (fw_dump.boot_memory_size < bootmem_min) {
			pr_err("Can't enable fadump with boot memory size (0x%lx) less than 0x%llx\n",
			       fw_dump.boot_memory_size, bootmem_min);
			goto error_out;
		}

		if (!fadump_get_boot_mem_regions()) {
			pr_err("Too many holes in boot memory area to enable fadump\n");
			goto error_out;
		}
	}

	/*
	 * Calculate the memory boundary.
	 * If memory_limit is less than actual memory boundary then reserve
	 * the memory for fadump beyond the memory_limit and adjust the
	 * memory_limit accordingly, so that the running kernel can run with
	 * specified memory_limit.
	 */
	if (memory_limit && memory_limit < memblock_end_of_DRAM()) {
		size = get_fadump_area_size();
		if ((memory_limit + size) < memblock_end_of_DRAM())
			memory_limit += size;
		else
			memory_limit = memblock_end_of_DRAM();
		printk(KERN_INFO "Adjusted memory_limit for firmware-assisted dump, now %#016llx\n",
		       memory_limit);
	}
	if (memory_limit)
		mem_boundary = memory_limit;
	else
		mem_boundary = memblock_end_of_DRAM();

	base = fw_dump.boot_mem_top;
	size = get_fadump_area_size();
	fw_dump.reserve_dump_area_size = size;
	if (fw_dump.dump_active) {
		pr_info("Firmware-assisted dump is active.\n");

#ifdef CONFIG_HUGETLB_PAGE
		/*
		 * FADump capture kernel doesn't care much about hugepages.
		 * In fact, handling hugepages in capture kernel is asking for
		 * trouble. So, disable HugeTLB support when fadump is active.
		 */
		hugetlb_disabled = true;
#endif
		/*
		 * If the last boot has crashed then reserve all the memory
		 * above boot memory size so that we don't touch it until
		 * the dump is written to disk by the userspace tool. This
		 * memory can be released for general use by invalidating
		 * fadump.
		 */
		fadump_reserve_crash_area(base);

		pr_debug("fadumphdr_addr = %#016lx\n", fw_dump.fadumphdr_addr);
		pr_debug("Reserve dump area start address: 0x%lx\n",
			 fw_dump.reserve_dump_area_start);
	} else {
		/*
		 * Reserve memory at an offset closer to bottom of the RAM to
		 * minimize the impact of memory hot-remove operation.
		 */
		base = fadump_locate_reserve_mem(base, size);

		if (!base || (base + size > mem_boundary)) {
			pr_err("Failed to find memory chunk for reservation!\n");
			goto error_out;
		}
		fw_dump.reserve_dump_area_start = base;

		/*
		 * Calculate the kernel metadata address and register it with
		 * f/w if the platform supports.
		 */
		if (fw_dump.ops->fadump_setup_metadata &&
		    (fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0))
			goto error_out;

		if (memblock_reserve(base, size)) {
			pr_err("Failed to reserve memory!\n");
			goto error_out;
		}

		pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n",
			(size >> 20), base, (memblock_phys_mem_size() >> 20));

		ret = fadump_cma_init();
	}

	return ret;
error_out:
	fw_dump.fadump_enabled = 0;
	return 0;
}
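/*
 * Kernel command line usage (illustrative examples):
 *   fadump=on crashkernel=4G - enable fadump; the reservation size is
 *                              taken from crashkernel= when supplied
 *   fadump=nocma             - enable fadump without the CMA backing
 *   fadump=off               - keep fadump disabled
 */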
605 */ 606 fadump_reserve_crash_area(base); 607 608 pr_debug("fadumphdr_addr = %#016lx\n", fw_dump.fadumphdr_addr); 609 pr_debug("Reserve dump area start address: 0x%lx\n", 610 fw_dump.reserve_dump_area_start); 611 } else { 612 /* 613 * Reserve memory at an offset closer to bottom of the RAM to 614 * minimize the impact of memory hot-remove operation. 615 */ 616 base = fadump_locate_reserve_mem(base, size); 617 618 if (!base || (base + size > mem_boundary)) { 619 pr_err("Failed to find memory chunk for reservation!\n"); 620 goto error_out; 621 } 622 fw_dump.reserve_dump_area_start = base; 623 624 /* 625 * Calculate the kernel metadata address and register it with 626 * f/w if the platform supports. 627 */ 628 if (fw_dump.ops->fadump_setup_metadata && 629 (fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0)) 630 goto error_out; 631 632 if (memblock_reserve(base, size)) { 633 pr_err("Failed to reserve memory!\n"); 634 goto error_out; 635 } 636 637 pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n", 638 (size >> 20), base, (memblock_phys_mem_size() >> 20)); 639 640 ret = fadump_cma_init(); 641 } 642 643 return ret; 644 error_out: 645 fw_dump.fadump_enabled = 0; 646 return 0; 647 } 648 649 /* Look for fadump= cmdline option. */ 650 static int __init early_fadump_param(char *p) 651 { 652 if (!p) 653 return 1; 654 655 if (strncmp(p, "on", 2) == 0) 656 fw_dump.fadump_enabled = 1; 657 else if (strncmp(p, "off", 3) == 0) 658 fw_dump.fadump_enabled = 0; 659 else if (strncmp(p, "nocma", 5) == 0) { 660 fw_dump.fadump_enabled = 1; 661 fw_dump.nocma = 1; 662 } 663 664 return 0; 665 } 666 early_param("fadump", early_fadump_param); 667 668 /* 669 * Look for fadump_reserve_mem= cmdline option 670 * TODO: Remove references to 'fadump_reserve_mem=' parameter, 671 * the sooner 'crashkernel=' parameter is accustomed to. 672 */ 673 static int __init early_fadump_reserve_mem(char *p) 674 { 675 if (p) 676 fw_dump.reserve_bootvar = memparse(p, &p); 677 return 0; 678 } 679 early_param("fadump_reserve_mem", early_fadump_reserve_mem); 680 681 void crash_fadump(struct pt_regs *regs, const char *str) 682 { 683 unsigned int msecs; 684 struct fadump_crash_info_header *fdh = NULL; 685 int old_cpu, this_cpu; 686 /* Do not include first CPU */ 687 unsigned int ncpus = num_online_cpus() - 1; 688 689 if (!should_fadump_crash()) 690 return; 691 692 /* 693 * old_cpu == -1 means this is the first CPU which has come here, 694 * go ahead and trigger fadump. 695 * 696 * old_cpu != -1 means some other CPU has already on it's way 697 * to trigger fadump, just keep looping here. 698 */ 699 this_cpu = smp_processor_id(); 700 old_cpu = cmpxchg(&crashing_cpu, -1, this_cpu); 701 702 if (old_cpu != -1) { 703 atomic_inc(&cpus_in_fadump); 704 705 /* 706 * We can't loop here indefinitely. Wait as long as fadump 707 * is in force. If we race with fadump un-registration this 708 * loop will break and then we go down to normal panic path 709 * and reboot. If fadump is in force the first crashing 710 * cpu will definitely trigger fadump. 711 */ 712 while (fw_dump.dump_registered) 713 cpu_relax(); 714 return; 715 } 716 717 fdh = __va(fw_dump.fadumphdr_addr); 718 fdh->crashing_cpu = crashing_cpu; 719 crash_save_vmcoreinfo(); 720 721 if (regs) 722 fdh->regs = *regs; 723 else 724 ppc_save_regs(&fdh->regs); 725 726 fdh->online_mask = *cpu_online_mask; 727 728 /* 729 * If we came in via system reset, wait a while for the secondary 730 * CPUs to enter. 
731 */ 732 if (TRAP(&(fdh->regs)) == INTERRUPT_SYSTEM_RESET) { 733 msecs = CRASH_TIMEOUT; 734 while ((atomic_read(&cpus_in_fadump) < ncpus) && (--msecs > 0)) 735 mdelay(1); 736 } 737 738 fw_dump.ops->fadump_trigger(fdh, str); 739 } 740 741 u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs) 742 { 743 struct elf_prstatus prstatus; 744 745 memset(&prstatus, 0, sizeof(prstatus)); 746 /* 747 * FIXME: How do i get PID? Do I really need it? 748 * prstatus.pr_pid = ???? 749 */ 750 elf_core_copy_kernel_regs(&prstatus.pr_reg, regs); 751 buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS, 752 &prstatus, sizeof(prstatus)); 753 return buf; 754 } 755 756 void fadump_update_elfcore_header(char *bufp) 757 { 758 struct elf_phdr *phdr; 759 760 bufp += sizeof(struct elfhdr); 761 762 /* First note is a place holder for cpu notes info. */ 763 phdr = (struct elf_phdr *)bufp; 764 765 if (phdr->p_type == PT_NOTE) { 766 phdr->p_paddr = __pa(fw_dump.cpu_notes_buf_vaddr); 767 phdr->p_offset = phdr->p_paddr; 768 phdr->p_filesz = fw_dump.cpu_notes_buf_size; 769 phdr->p_memsz = fw_dump.cpu_notes_buf_size; 770 } 771 return; 772 } 773 774 static void *fadump_alloc_buffer(unsigned long size) 775 { 776 unsigned long count, i; 777 struct page *page; 778 void *vaddr; 779 780 vaddr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); 781 if (!vaddr) 782 return NULL; 783 784 count = PAGE_ALIGN(size) / PAGE_SIZE; 785 page = virt_to_page(vaddr); 786 for (i = 0; i < count; i++) 787 mark_page_reserved(page + i); 788 return vaddr; 789 } 790 791 static void fadump_free_buffer(unsigned long vaddr, unsigned long size) 792 { 793 free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL); 794 } 795 796 s32 fadump_setup_cpu_notes_buf(u32 num_cpus) 797 { 798 /* Allocate buffer to hold cpu crash notes. */ 799 fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t); 800 fw_dump.cpu_notes_buf_size = PAGE_ALIGN(fw_dump.cpu_notes_buf_size); 801 fw_dump.cpu_notes_buf_vaddr = 802 (unsigned long)fadump_alloc_buffer(fw_dump.cpu_notes_buf_size); 803 if (!fw_dump.cpu_notes_buf_vaddr) { 804 pr_err("Failed to allocate %ld bytes for CPU notes buffer\n", 805 fw_dump.cpu_notes_buf_size); 806 return -ENOMEM; 807 } 808 809 pr_debug("Allocated buffer for cpu notes of size %ld at 0x%lx\n", 810 fw_dump.cpu_notes_buf_size, 811 fw_dump.cpu_notes_buf_vaddr); 812 return 0; 813 } 814 815 void fadump_free_cpu_notes_buf(void) 816 { 817 if (!fw_dump.cpu_notes_buf_vaddr) 818 return; 819 820 fadump_free_buffer(fw_dump.cpu_notes_buf_vaddr, 821 fw_dump.cpu_notes_buf_size); 822 fw_dump.cpu_notes_buf_vaddr = 0; 823 fw_dump.cpu_notes_buf_size = 0; 824 } 825 826 static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info) 827 { 828 if (mrange_info->is_static) { 829 mrange_info->mem_range_cnt = 0; 830 return; 831 } 832 833 kfree(mrange_info->mem_ranges); 834 memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0, 835 (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ)); 836 } 837 838 /* 839 * Allocate or reallocate mem_ranges array in incremental units 840 * of PAGE_SIZE. 
static int fadump_exclude_reserved_area(u64 start, u64 end)
{
	u64 ra_start, ra_end;
	int ret = 0;

	ra_start = fw_dump.reserve_dump_area_start;
	ra_end = ra_start + fw_dump.reserve_dump_area_size;

	if ((ra_start < end) && (ra_end > start)) {
		if ((start < ra_start) && (end > ra_end)) {
			/* Reserved area fully contained: add both sides */
			ret = fadump_add_mem_range(&crash_mrange_info,
						   start, ra_start);
			if (ret)
				return ret;

			ret = fadump_add_mem_range(&crash_mrange_info,
						   ra_end, end);
		} else if (start < ra_start) {
			/* Keep only the part below the reserved area */
			ret = fadump_add_mem_range(&crash_mrange_info,
						   start, ra_start);
		} else if (ra_end < end) {
			/* Keep only the part above the reserved area */
			ret = fadump_add_mem_range(&crash_mrange_info,
						   ra_end, end);
		}
	} else
		ret = fadump_add_mem_range(&crash_mrange_info, start, end);

	return ret;
}
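/*
 * fadump_init_elfcore_header() below seeds the fixed part of the vmcore
 * ELF header: e_phnum starts at zero and is incremented by
 * fadump_create_elfcore_headers() as each program header is appended.
 */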
static int fadump_init_elfcore_header(char *bufp)
{
	struct elfhdr *elf;

	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
#if defined(_CALL_ELF)
	elf->e_flags = _CALL_ELF;
#else
	elf->e_flags = 0;
#endif
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = 0;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;

	return 0;
}

/*
 * Traverse through memblock structure and setup crash memory ranges. These
 * ranges will be used to create PT_LOAD program headers in elfcore header.
 */
static int fadump_setup_crash_memory_ranges(void)
{
	u64 i, start, end;
	int ret;

	pr_debug("Setup crash memory ranges.\n");
	crash_mrange_info.mem_range_cnt = 0;

	/*
	 * Boot memory region(s) registered with firmware are moved to
	 * a different location at the time of crash. Create separate program
	 * header(s) for these memory chunk(s) with the correct offset.
	 */
	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		start = fw_dump.boot_mem_addr[i];
		end = start + fw_dump.boot_mem_sz[i];
		ret = fadump_add_mem_range(&crash_mrange_info, start, end);
		if (ret)
			return ret;
	}

	for_each_mem_range(i, &start, &end) {
		/*
		 * skip the memory chunk that is already added
		 * (0 through boot_memory_top).
		 */
		if (start < fw_dump.boot_mem_top) {
			if (end > fw_dump.boot_mem_top)
				start = fw_dump.boot_mem_top;
			else
				continue;
		}

		/* add this range excluding the reserved dump area. */
		ret = fadump_exclude_reserved_area(start, end);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * If the given physical address falls within the boot memory region then
 * return the relocated address that points to the dump region reserved
 * for saving initial boot memory contents.
 */
static inline unsigned long fadump_relocate(unsigned long paddr)
{
	unsigned long raddr, rstart, rend, rlast, hole_size;
	int i;

	hole_size = 0;
	rlast = 0;
	raddr = paddr;
	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		rstart = fw_dump.boot_mem_addr[i];
		rend = rstart + fw_dump.boot_mem_sz[i];
		hole_size += (rstart - rlast);

		if (paddr >= rstart && paddr < rend) {
			raddr += fw_dump.boot_mem_dest_addr - hole_size;
			break;
		}

		rlast = rend;
	}

	pr_debug("vmcoreinfo: paddr = 0x%lx, raddr = 0x%lx\n", paddr, raddr);
	return raddr;
}
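/*
 * Illustration (hypothetical values): with a single boot memory region
 * [0x0, 0x10000000) and boot_mem_dest_addr = 0x40000000, a paddr of
 * 0x1000 relocates to 0x40001000; hole_size stays 0 since the region
 * starts at address zero.
 */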
static int fadump_create_elfcore_headers(char *bufp)
{
	unsigned long long raddr, offset;
	struct elf_phdr *phdr;
	struct elfhdr *elf;
	int i, j;

	fadump_init_elfcore_header(bufp);
	elf = (struct elfhdr *)bufp;
	bufp += sizeof(struct elfhdr);

	/*
	 * setup ELF PT_NOTE, place holder for cpu notes info. The notes info
	 * will be populated during second kernel boot after crash. Hence
	 * this PT_NOTE will always be the first elf note.
	 *
	 * NOTE: Any new ELF note addition should be placed after this note.
	 */
	phdr = (struct elf_phdr *)bufp;
	bufp += sizeof(struct elf_phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_flags = 0;
	phdr->p_vaddr = 0;
	phdr->p_align = 0;

	phdr->p_offset = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = 0;
	phdr->p_memsz = 0;

	(elf->e_phnum)++;

	/* setup ELF PT_NOTE for vmcoreinfo */
	phdr = (struct elf_phdr *)bufp;
	bufp += sizeof(struct elf_phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_flags = 0;
	phdr->p_vaddr = 0;
	phdr->p_align = 0;

	phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
	phdr->p_offset = phdr->p_paddr;
	phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE;

	/* Increment number of program headers. */
	(elf->e_phnum)++;

	/* setup PT_LOAD sections. */
	j = 0;
	offset = 0;
	raddr = fw_dump.boot_mem_addr[0];
	for (i = 0; i < crash_mrange_info.mem_range_cnt; i++) {
		u64 mbase, msize;

		mbase = crash_mrange_info.mem_ranges[i].base;
		msize = crash_mrange_info.mem_ranges[i].size;
		if (!msize)
			continue;

		phdr = (struct elf_phdr *)bufp;
		bufp += sizeof(struct elf_phdr);
		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= mbase;

		if (mbase == raddr) {
			/*
			 * The entire real memory region will be moved by
			 * firmware to the specified destination_address.
			 * Hence set the correct offset.
			 */
			phdr->p_offset = fw_dump.boot_mem_dest_addr + offset;
			if (j < (fw_dump.boot_mem_regs_cnt - 1)) {
				offset += fw_dump.boot_mem_sz[j];
				raddr = fw_dump.boot_mem_addr[++j];
			}
		}

		phdr->p_paddr = mbase;
		phdr->p_vaddr = (unsigned long)__va(mbase);
		phdr->p_filesz = msize;
		phdr->p_memsz = msize;
		phdr->p_align = 0;

		/* Increment number of program headers. */
		(elf->e_phnum)++;
	}
	return 0;
}
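/*
 * Resulting program header layout of the generated vmcore (sketch):
 *   [0]    PT_NOTE - CPU state notes, filled in by the capture kernel
 *   [1]    PT_NOTE - vmcoreinfo
 *   [2..n] PT_LOAD - one per crash memory range, with boot memory
 *          regions pointing at their firmware-relocated copies
 */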
static unsigned long init_fadump_header(unsigned long addr)
{
	struct fadump_crash_info_header *fdh;

	if (!addr)
		return 0;

	fdh = __va(addr);
	addr += sizeof(struct fadump_crash_info_header);

	memset(fdh, 0, sizeof(struct fadump_crash_info_header));
	fdh->magic_number = FADUMP_CRASH_INFO_MAGIC;
	fdh->elfcorehdr_addr = addr;
	/* We will set the crashing cpu id in crash_fadump() during crash. */
	fdh->crashing_cpu = FADUMP_CPU_UNKNOWN;

	return addr;
}

static int register_fadump(void)
{
	unsigned long addr;
	void *vaddr;
	int ret;

	/*
	 * If no memory is reserved then we cannot register for firmware-
	 * assisted dump.
	 */
	if (!fw_dump.reserve_dump_area_size)
		return -ENODEV;

	ret = fadump_setup_crash_memory_ranges();
	if (ret)
		return ret;

	addr = fw_dump.fadumphdr_addr;

	/* Initialize fadump crash info header. */
	addr = init_fadump_header(addr);
	vaddr = __va(addr);

	pr_debug("Creating ELF core headers at %#016lx\n", addr);
	fadump_create_elfcore_headers(vaddr);

	/* register the future kernel dump with firmware. */
	pr_debug("Registering for firmware-assisted kernel dump...\n");
	return fw_dump.ops->fadump_register(&fw_dump);
}

void fadump_cleanup(void)
{
	if (!fw_dump.fadump_supported)
		return;

	/* Invalidate the registration only if dump is active. */
	if (fw_dump.dump_active) {
		pr_debug("Invalidating firmware-assisted dump registration\n");
		fw_dump.ops->fadump_invalidate(&fw_dump);
	} else if (fw_dump.dump_registered) {
		/* Un-register Firmware-assisted dump if it was registered. */
		fw_dump.ops->fadump_unregister(&fw_dump);
		fadump_free_mem_ranges(&crash_mrange_info);
	}

	if (fw_dump.ops->fadump_cleanup)
		fw_dump.ops->fadump_cleanup(&fw_dump);
}

static void fadump_free_reserved_memory(unsigned long start_pfn,
					unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long time_limit = jiffies + HZ;

	pr_info("freeing reserved memory (0x%llx - 0x%llx)\n",
		PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		free_reserved_page(pfn_to_page(pfn));

		if (time_after(jiffies, time_limit)) {
			cond_resched();
			time_limit = jiffies + HZ;
		}
	}
}

/*
 * Skip memory holes and free memory that was actually reserved.
 */
static void fadump_release_reserved_area(u64 start, u64 end)
{
	unsigned long reg_spfn, reg_epfn;
	u64 tstart, tend, spfn, epfn;
	int i;

	spfn = PHYS_PFN(start);
	epfn = PHYS_PFN(end);

	for_each_mem_pfn_range(i, MAX_NUMNODES, &reg_spfn, &reg_epfn, NULL) {
		tstart = max_t(u64, spfn, reg_spfn);
		tend   = min_t(u64, epfn, reg_epfn);

		if (tstart < tend) {
			fadump_free_reserved_memory(tstart, tend);

			if (tend == epfn)
				break;

			spfn = tend;
		}
	}
}

/*
 * Sort the mem ranges in-place and merge adjacent ranges
 * to minimize the memory ranges count.
 */
static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
{
	struct fadump_memory_range *mem_ranges;
	struct fadump_memory_range tmp_range;
	u64 base, size;
	int i, j, idx;

	if (!mrange_info->mem_range_cnt)
		return;

	/* Sort the memory ranges (simple selection sort by base address) */
	mem_ranges = mrange_info->mem_ranges;
	for (i = 0; i < mrange_info->mem_range_cnt; i++) {
		idx = i;
		for (j = (i + 1); j < mrange_info->mem_range_cnt; j++) {
			if (mem_ranges[idx].base > mem_ranges[j].base)
				idx = j;
		}
		if (idx != i) {
			tmp_range = mem_ranges[idx];
			mem_ranges[idx] = mem_ranges[i];
			mem_ranges[i] = tmp_range;
		}
	}

	/* Merge adjacent reserved ranges */
	idx = 0;
	for (i = 1; i < mrange_info->mem_range_cnt; i++) {
		base = mem_ranges[i-1].base;
		size = mem_ranges[i-1].size;
		if (mem_ranges[i].base == (base + size))
			mem_ranges[idx].size += mem_ranges[i].size;
		else {
			idx++;
			if (i == idx)
				continue;

			mem_ranges[idx] = mem_ranges[i];
		}
	}
	mrange_info->mem_range_cnt = idx + 1;
}
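/*
 * Illustration (hypothetical values): ranges {[0x3000, 0x4000),
 * [0x1000, 0x2000), [0x2000, 0x3000)} are first sorted by base address
 * and then merged into the single range [0x1000, 0x4000).
 */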
/*
 * Scan reserved-ranges to consider them while reserving/releasing
 * memory for FADump.
 */
static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
{
	const __be32 *prop;
	int len, ret = -1;
	unsigned long i;

	/* reserved-ranges already scanned */
	if (reserved_mrange_info.mem_range_cnt != 0)
		return;

	prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
	if (!prop)
		return;

	/*
	 * Each reserved range is an (address, size) pair, 2 cells each,
	 * totalling 4 cells per range. For example, the property value
	 * <0x0 0x10000000 0x0 0x01000000> describes one 16MB range
	 * starting at 0x10000000.
	 */
	for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
		u64 base, size;

		base = of_read_number(prop + (i * 4) + 0, 2);
		size = of_read_number(prop + (i * 4) + 2, 2);

		if (size) {
			ret = fadump_add_mem_range(&reserved_mrange_info,
						   base, base + size);
			if (ret < 0) {
				pr_warn("some reserved ranges are ignored!\n");
				break;
			}
		}
	}

	/* Compact reserved ranges */
	sort_and_merge_mem_ranges(&reserved_mrange_info);
}

/*
 * Release the memory that was reserved during early boot to preserve the
 * crashed kernel's memory contents except the reserved dump area (permanent
 * reservation) and reserved ranges used by F/W. The released memory will
 * be available for general use.
 */
static void fadump_release_memory(u64 begin, u64 end)
{
	u64 ra_start, ra_end, tstart;
	int i, ret;

	ra_start = fw_dump.reserve_dump_area_start;
	ra_end = ra_start + fw_dump.reserve_dump_area_size;

	/*
	 * If the reserved ranges array limit is hit, overwrite the last
	 * reserved memory range with the reserved dump area to ensure it is
	 * excluded from the memory being released (reused for the next
	 * FADump registration).
	 */
	if (reserved_mrange_info.mem_range_cnt ==
	    reserved_mrange_info.max_mem_ranges)
		reserved_mrange_info.mem_range_cnt--;

	ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
	if (ret != 0)
		return;

	/* Get the reserved ranges list in order first. */
	sort_and_merge_mem_ranges(&reserved_mrange_info);

	/* Exclude reserved ranges and release remaining memory */
	tstart = begin;
	for (i = 0; i < reserved_mrange_info.mem_range_cnt; i++) {
		ra_start = reserved_mrange_info.mem_ranges[i].base;
		ra_end = ra_start + reserved_mrange_info.mem_ranges[i].size;

		if (tstart >= ra_end)
			continue;

		if (tstart < ra_start)
			fadump_release_reserved_area(tstart, ra_start);
		tstart = ra_end;
	}

	if (tstart < end)
		fadump_release_reserved_area(tstart, end);
}

static void fadump_invalidate_release_mem(void)
{
	mutex_lock(&fadump_mutex);
	if (!fw_dump.dump_active) {
		mutex_unlock(&fadump_mutex);
		return;
	}

	fadump_cleanup();
	mutex_unlock(&fadump_mutex);

	fadump_release_memory(fw_dump.boot_mem_top, memblock_end_of_DRAM());
	fadump_free_cpu_notes_buf();

	/*
	 * Setup kernel metadata and initialize the kernel dump
	 * memory structure for FADump re-registration.
	 */
	if (fw_dump.ops->fadump_setup_metadata &&
	    (fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0))
		pr_warn("Failed to setup kernel metadata!\n");
	fw_dump.ops->fadump_init_mem_struct(&fw_dump);
}
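/*
 * Sysfs handler to release the dump memory once userspace has saved the
 * vmcore. Typical usage (illustrative):
 *   echo 1 > /sys/kernel/fadump/release_mem
 */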
1439 */ 1440 #ifdef CONFIG_PROC_VMCORE 1441 vmcore_cleanup(); 1442 #endif 1443 fadump_invalidate_release_mem(); 1444 1445 } else 1446 return -EINVAL; 1447 return count; 1448 } 1449 1450 /* Release the reserved memory and disable the FADump */ 1451 static void unregister_fadump(void) 1452 { 1453 fadump_cleanup(); 1454 fadump_release_memory(fw_dump.reserve_dump_area_start, 1455 fw_dump.reserve_dump_area_size); 1456 fw_dump.fadump_enabled = 0; 1457 kobject_put(fadump_kobj); 1458 } 1459 1460 static ssize_t enabled_show(struct kobject *kobj, 1461 struct kobj_attribute *attr, 1462 char *buf) 1463 { 1464 return sprintf(buf, "%d\n", fw_dump.fadump_enabled); 1465 } 1466 1467 static ssize_t mem_reserved_show(struct kobject *kobj, 1468 struct kobj_attribute *attr, 1469 char *buf) 1470 { 1471 return sprintf(buf, "%ld\n", fw_dump.reserve_dump_area_size); 1472 } 1473 1474 static ssize_t registered_show(struct kobject *kobj, 1475 struct kobj_attribute *attr, 1476 char *buf) 1477 { 1478 return sprintf(buf, "%d\n", fw_dump.dump_registered); 1479 } 1480 1481 static ssize_t registered_store(struct kobject *kobj, 1482 struct kobj_attribute *attr, 1483 const char *buf, size_t count) 1484 { 1485 int ret = 0; 1486 int input = -1; 1487 1488 if (!fw_dump.fadump_enabled || fw_dump.dump_active) 1489 return -EPERM; 1490 1491 if (kstrtoint(buf, 0, &input)) 1492 return -EINVAL; 1493 1494 mutex_lock(&fadump_mutex); 1495 1496 switch (input) { 1497 case 0: 1498 if (fw_dump.dump_registered == 0) { 1499 goto unlock_out; 1500 } 1501 1502 /* Un-register Firmware-assisted dump */ 1503 pr_debug("Un-register firmware-assisted dump\n"); 1504 fw_dump.ops->fadump_unregister(&fw_dump); 1505 break; 1506 case 1: 1507 if (fw_dump.dump_registered == 1) { 1508 /* Un-register Firmware-assisted dump */ 1509 fw_dump.ops->fadump_unregister(&fw_dump); 1510 } 1511 /* Register Firmware-assisted dump */ 1512 ret = register_fadump(); 1513 break; 1514 default: 1515 ret = -EINVAL; 1516 break; 1517 } 1518 1519 unlock_out: 1520 mutex_unlock(&fadump_mutex); 1521 return ret < 0 ? 
static int fadump_region_show(struct seq_file *m, void *private)
{
	if (!fw_dump.fadump_enabled)
		return 0;

	mutex_lock(&fadump_mutex);
	fw_dump.ops->fadump_region_show(&fw_dump, m);
	mutex_unlock(&fadump_mutex);
	return 0;
}

static struct kobj_attribute release_attr = __ATTR_WO(release_mem);
static struct kobj_attribute enable_attr = __ATTR_RO(enabled);
static struct kobj_attribute register_attr = __ATTR_RW(registered);
static struct kobj_attribute mem_reserved_attr = __ATTR_RO(mem_reserved);

static struct attribute *fadump_attrs[] = {
	&enable_attr.attr,
	&register_attr.attr,
	&mem_reserved_attr.attr,
	NULL,
};

ATTRIBUTE_GROUPS(fadump);

DEFINE_SHOW_ATTRIBUTE(fadump_region);

static void fadump_init_files(void)
{
	int rc = 0;

	fadump_kobj = kobject_create_and_add("fadump", kernel_kobj);
	if (!fadump_kobj) {
		pr_err("failed to create fadump kobject\n");
		return;
	}

	debugfs_create_file("fadump_region", 0444, powerpc_debugfs_root, NULL,
			    &fadump_region_fops);

	if (fw_dump.dump_active) {
		rc = sysfs_create_file(fadump_kobj, &release_attr.attr);
		if (rc)
			pr_err("unable to create release_mem sysfs file (%d)\n",
			       rc);
	}

	rc = sysfs_create_groups(fadump_kobj, fadump_groups);
	if (rc) {
		pr_err("sysfs group creation failed (%d), unregistering FADump\n",
		       rc);
		unregister_fadump();
		return;
	}

	/*
	 * The FADump sysfs entries have moved from kernel_kobj to
	 * fadump_kobj; create symlinks at the old location to maintain
	 * backward compatibility:
	 *
	 *  - fadump_enabled -> fadump/enabled
	 *  - fadump_registered -> fadump/registered
	 *  - fadump_release_mem -> fadump/release_mem
	 */
	rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
						  "enabled", "fadump_enabled");
	if (rc) {
		pr_err("unable to create fadump_enabled symlink (%d)\n", rc);
		return;
	}

	rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
						  "registered",
						  "fadump_registered");
	if (rc) {
		pr_err("unable to create fadump_registered symlink (%d)\n", rc);
		sysfs_remove_link(kernel_kobj, "fadump_enabled");
		return;
	}

	if (fw_dump.dump_active) {
		rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj,
							  fadump_kobj,
							  "release_mem",
							  "fadump_release_mem");
		if (rc)
			pr_err("unable to create fadump_release_mem symlink (%d)\n",
			       rc);
	}
}

/*
 * Prepare for firmware-assisted dump.
 */
int __init setup_fadump(void)
{
	if (!fw_dump.fadump_supported)
		return 0;

	fadump_init_files();
	fadump_show_config();

	if (!fw_dump.fadump_enabled)
		return 1;

	/*
	 * If dump data is available then see if it is valid and prepare for
	 * saving it to the disk.
	 */
	if (fw_dump.dump_active) {
		/*
		 * If dump processing fails then invalidate the registration
		 * and release memory before proceeding for re-registration.
		 */
		if (fw_dump.ops->fadump_process(&fw_dump) < 0)
			fadump_invalidate_release_mem();
	}
	/* Initialize the kernel dump memory structure for FADump registration. */
	else if (fw_dump.reserve_dump_area_size)
		fw_dump.ops->fadump_init_mem_struct(&fw_dump);

	return 1;
}
subsys_initcall(setup_fadump);
#else /* !CONFIG_PRESERVE_FA_DUMP */

/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
				      int depth, void *data)
{
	if ((depth != 1) || (strcmp(uname, "ibm,opal") != 0))
		return 0;

	opal_fadump_dt_scan(&fw_dump, node);
	return 1;
}

/*
 * When a dump is active but PRESERVE_FA_DUMP is enabled on the kernel,
 * just preserve the crash data. A subsequent memory-preserving kernel
 * boot is expected to process this crash data.
 */
int __init fadump_reserve_mem(void)
{
	if (fw_dump.dump_active) {
		/*
		 * If the last boot has crashed then reserve all the memory
		 * above boot memory to preserve crash data.
		 */
		pr_info("Preserving crash data for processing in next boot.\n");
		fadump_reserve_crash_area(fw_dump.boot_mem_top);
	} else
		pr_debug("FADump-aware kernel.\n");

	return 1;
}
#endif /* CONFIG_PRESERVE_FA_DUMP */

/* Preserve everything above the base address */
static void __init fadump_reserve_crash_area(u64 base)
{
	u64 i, mstart, mend, msize;

	for_each_mem_range(i, &mstart, &mend) {
		msize = mend - mstart;

		if ((mstart + msize) < base)
			continue;

		if (mstart < base) {
			msize -= (base - mstart);
			mstart = base;
		}

		pr_info("Reserving %lluMB of memory at %#016llx for preserving crash data\n",
			(msize >> 20), mstart);
		memblock_reserve(mstart, msize);
	}
}

unsigned long __init arch_reserved_kernel_pages(void)
{
	return memblock_reserved_size() / PAGE_SIZE;
}