1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * fs/proc/kcore.c kernel ELF core dumper 4 * 5 * Modelled on fs/exec.c:aout_core_dump() 6 * Jeremy Fitzhardinge <jeremy@sw.oz.au> 7 * ELF version written by David Howells <David.Howells@nexor.co.uk> 8 * Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com> 9 * Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com> 10 * Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com> 11 */ 12 13 #include <linux/crash_core.h> 14 #include <linux/mm.h> 15 #include <linux/proc_fs.h> 16 #include <linux/kcore.h> 17 #include <linux/user.h> 18 #include <linux/capability.h> 19 #include <linux/elf.h> 20 #include <linux/elfcore.h> 21 #include <linux/vmalloc.h> 22 #include <linux/highmem.h> 23 #include <linux/printk.h> 24 #include <linux/memblock.h> 25 #include <linux/init.h> 26 #include <linux/slab.h> 27 #include <linux/uio.h> 28 #include <asm/io.h> 29 #include <linux/list.h> 30 #include <linux/ioport.h> 31 #include <linux/memory.h> 32 #include <linux/sched/task.h> 33 #include <linux/security.h> 34 #include <asm/sections.h> 35 #include "internal.h" 36 37 #define CORE_STR "CORE" 38 39 #ifndef ELF_CORE_EFLAGS 40 #define ELF_CORE_EFLAGS 0 41 #endif 42 43 static struct proc_dir_entry *proc_root_kcore; 44 45 46 #ifndef kc_vaddr_to_offset 47 #define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET) 48 #endif 49 #ifndef kc_offset_to_vaddr 50 #define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET) 51 #endif 52 53 static LIST_HEAD(kclist_head); 54 static DECLARE_RWSEM(kclist_lock); 55 static int kcore_need_update = 1; 56 57 /* 58 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error 59 * Same as oldmem_pfn_is_ram in vmcore 60 */ 61 static int (*mem_pfn_is_ram)(unsigned long pfn); 62 63 int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn)) 64 { 65 if (mem_pfn_is_ram) 66 return -EBUSY; 67 mem_pfn_is_ram = fn; 68 return 0; 69 } 70 71 static int pfn_is_ram(unsigned long pfn) 72 { 73 if (mem_pfn_is_ram) 74 return mem_pfn_is_ram(pfn); 75 else 76 return 1; 77 } 78 79 /* This doesn't grab kclist_lock, so it should only be used at init time. */ 80 void __init kclist_add(struct kcore_list *new, void *addr, size_t size, 81 int type) 82 { 83 new->addr = (unsigned long)addr; 84 new->size = size; 85 new->type = type; 86 87 list_add_tail(&new->list, &kclist_head); 88 } 89 90 static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len, 91 size_t *data_offset) 92 { 93 size_t try, size; 94 struct kcore_list *m; 95 96 *nphdr = 1; /* PT_NOTE */ 97 size = 0; 98 99 list_for_each_entry(m, &kclist_head, list) { 100 try = kc_vaddr_to_offset((size_t)m->addr + m->size); 101 if (try > size) 102 size = try; 103 *nphdr = *nphdr + 1; 104 } 105 106 *phdrs_len = *nphdr * sizeof(struct elf_phdr); 107 *notes_len = (4 * sizeof(struct elf_note) + 108 3 * ALIGN(sizeof(CORE_STR), 4) + 109 VMCOREINFO_NOTE_NAME_BYTES + 110 ALIGN(sizeof(struct elf_prstatus), 4) + 111 ALIGN(sizeof(struct elf_prpsinfo), 4) + 112 ALIGN(arch_task_struct_size, 4) + 113 ALIGN(vmcoreinfo_size, 4)); 114 *data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len + 115 *notes_len); 116 return *data_offset + size; 117 } 118 119 #ifdef CONFIG_HIGHMEM 120 /* 121 * If no highmem, we can assume [0...max_low_pfn) continuous range of memory 122 * because memory hole is not as big as !HIGHMEM case. 123 * (HIGHMEM is special because part of memory is _invisible_ from the kernel.) 124 */ 125 static int kcore_ram_list(struct list_head *head) 126 { 127 struct kcore_list *ent; 128 129 ent = kmalloc(sizeof(*ent), GFP_KERNEL); 130 if (!ent) 131 return -ENOMEM; 132 ent->addr = (unsigned long)__va(0); 133 ent->size = max_low_pfn << PAGE_SHIFT; 134 ent->type = KCORE_RAM; 135 list_add(&ent->list, head); 136 return 0; 137 } 138 139 #else /* !CONFIG_HIGHMEM */ 140 141 #ifdef CONFIG_SPARSEMEM_VMEMMAP 142 /* calculate vmemmap's address from given system ram pfn and register it */ 143 static int 144 get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) 145 { 146 unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT; 147 unsigned long nr_pages = ent->size >> PAGE_SHIFT; 148 unsigned long start, end; 149 struct kcore_list *vmm, *tmp; 150 151 152 start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK; 153 end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1; 154 end = PAGE_ALIGN(end); 155 /* overlap check (because we have to align page */ 156 list_for_each_entry(tmp, head, list) { 157 if (tmp->type != KCORE_VMEMMAP) 158 continue; 159 if (start < tmp->addr + tmp->size) 160 if (end > tmp->addr) 161 end = tmp->addr; 162 } 163 if (start < end) { 164 vmm = kmalloc(sizeof(*vmm), GFP_KERNEL); 165 if (!vmm) 166 return 0; 167 vmm->addr = start; 168 vmm->size = end - start; 169 vmm->type = KCORE_VMEMMAP; 170 list_add_tail(&vmm->list, head); 171 } 172 return 1; 173 174 } 175 #else 176 static int 177 get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) 178 { 179 return 1; 180 } 181 182 #endif 183 184 static int 185 kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg) 186 { 187 struct list_head *head = (struct list_head *)arg; 188 struct kcore_list *ent; 189 struct page *p; 190 191 if (!pfn_valid(pfn)) 192 return 1; 193 194 p = pfn_to_page(pfn); 195 196 ent = kmalloc(sizeof(*ent), GFP_KERNEL); 197 if (!ent) 198 return -ENOMEM; 199 ent->addr = (unsigned long)page_to_virt(p); 200 ent->size = nr_pages << PAGE_SHIFT; 201 202 if (!virt_addr_valid((void *)ent->addr)) 203 goto free_out; 204 205 /* cut not-mapped area. ....from ppc-32 code. */ 206 if (ULONG_MAX - ent->addr < ent->size) 207 ent->size = ULONG_MAX - ent->addr; 208 209 /* 210 * We've already checked virt_addr_valid so we know this address 211 * is a valid pointer, therefore we can check against it to determine 212 * if we need to trim 213 */ 214 if (VMALLOC_START > ent->addr) { 215 if (VMALLOC_START - ent->addr < ent->size) 216 ent->size = VMALLOC_START - ent->addr; 217 } 218 219 ent->type = KCORE_RAM; 220 list_add_tail(&ent->list, head); 221 222 if (!get_sparsemem_vmemmap_info(ent, head)) { 223 list_del(&ent->list); 224 goto free_out; 225 } 226 227 return 0; 228 free_out: 229 kfree(ent); 230 return 1; 231 } 232 233 static int kcore_ram_list(struct list_head *list) 234 { 235 int nid, ret; 236 unsigned long end_pfn; 237 238 /* Not inialized....update now */ 239 /* find out "max pfn" */ 240 end_pfn = 0; 241 for_each_node_state(nid, N_MEMORY) { 242 unsigned long node_end; 243 node_end = node_end_pfn(nid); 244 if (end_pfn < node_end) 245 end_pfn = node_end; 246 } 247 /* scan 0 to max_pfn */ 248 ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private); 249 if (ret) 250 return -ENOMEM; 251 return 0; 252 } 253 #endif /* CONFIG_HIGHMEM */ 254 255 static int kcore_update_ram(void) 256 { 257 LIST_HEAD(list); 258 LIST_HEAD(garbage); 259 int nphdr; 260 size_t phdrs_len, notes_len, data_offset; 261 struct kcore_list *tmp, *pos; 262 int ret = 0; 263 264 down_write(&kclist_lock); 265 if (!xchg(&kcore_need_update, 0)) 266 goto out; 267 268 ret = kcore_ram_list(&list); 269 if (ret) { 270 /* Couldn't get the RAM list, try again next time. */ 271 WRITE_ONCE(kcore_need_update, 1); 272 list_splice_tail(&list, &garbage); 273 goto out; 274 } 275 276 list_for_each_entry_safe(pos, tmp, &kclist_head, list) { 277 if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP) 278 list_move(&pos->list, &garbage); 279 } 280 list_splice_tail(&list, &kclist_head); 281 282 proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, ¬es_len, 283 &data_offset); 284 285 out: 286 up_write(&kclist_lock); 287 list_for_each_entry_safe(pos, tmp, &garbage, list) { 288 list_del(&pos->list); 289 kfree(pos); 290 } 291 return ret; 292 } 293 294 static void append_kcore_note(char *notes, size_t *i, const char *name, 295 unsigned int type, const void *desc, 296 size_t descsz) 297 { 298 struct elf_note *note = (struct elf_note *)¬es[*i]; 299 300 note->n_namesz = strlen(name) + 1; 301 note->n_descsz = descsz; 302 note->n_type = type; 303 *i += sizeof(*note); 304 memcpy(¬es[*i], name, note->n_namesz); 305 *i = ALIGN(*i + note->n_namesz, 4); 306 memcpy(¬es[*i], desc, descsz); 307 *i = ALIGN(*i + descsz, 4); 308 } 309 310 static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) 311 { 312 loff_t *fpos = &iocb->ki_pos; 313 size_t phdrs_offset, notes_offset, data_offset; 314 size_t page_offline_frozen = 1; 315 size_t phdrs_len, notes_len; 316 struct kcore_list *m; 317 size_t tsz; 318 int nphdr; 319 unsigned long start; 320 size_t buflen = iov_iter_count(iter); 321 size_t orig_buflen = buflen; 322 int ret = 0; 323 324 down_read(&kclist_lock); 325 /* 326 * Don't race against drivers that set PageOffline() and expect no 327 * further page access. 328 */ 329 page_offline_freeze(); 330 331 get_kcore_size(&nphdr, &phdrs_len, ¬es_len, &data_offset); 332 phdrs_offset = sizeof(struct elfhdr); 333 notes_offset = phdrs_offset + phdrs_len; 334 335 /* ELF file header. */ 336 if (buflen && *fpos < sizeof(struct elfhdr)) { 337 struct elfhdr ehdr = { 338 .e_ident = { 339 [EI_MAG0] = ELFMAG0, 340 [EI_MAG1] = ELFMAG1, 341 [EI_MAG2] = ELFMAG2, 342 [EI_MAG3] = ELFMAG3, 343 [EI_CLASS] = ELF_CLASS, 344 [EI_DATA] = ELF_DATA, 345 [EI_VERSION] = EV_CURRENT, 346 [EI_OSABI] = ELF_OSABI, 347 }, 348 .e_type = ET_CORE, 349 .e_machine = ELF_ARCH, 350 .e_version = EV_CURRENT, 351 .e_phoff = sizeof(struct elfhdr), 352 .e_flags = ELF_CORE_EFLAGS, 353 .e_ehsize = sizeof(struct elfhdr), 354 .e_phentsize = sizeof(struct elf_phdr), 355 .e_phnum = nphdr, 356 }; 357 358 tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos); 359 if (copy_to_iter((char *)&ehdr + *fpos, tsz, iter) != tsz) { 360 ret = -EFAULT; 361 goto out; 362 } 363 364 buflen -= tsz; 365 *fpos += tsz; 366 } 367 368 /* ELF program headers. */ 369 if (buflen && *fpos < phdrs_offset + phdrs_len) { 370 struct elf_phdr *phdrs, *phdr; 371 372 phdrs = kzalloc(phdrs_len, GFP_KERNEL); 373 if (!phdrs) { 374 ret = -ENOMEM; 375 goto out; 376 } 377 378 phdrs[0].p_type = PT_NOTE; 379 phdrs[0].p_offset = notes_offset; 380 phdrs[0].p_filesz = notes_len; 381 382 phdr = &phdrs[1]; 383 list_for_each_entry(m, &kclist_head, list) { 384 phdr->p_type = PT_LOAD; 385 phdr->p_flags = PF_R | PF_W | PF_X; 386 phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset; 387 phdr->p_vaddr = (size_t)m->addr; 388 if (m->type == KCORE_RAM) 389 phdr->p_paddr = __pa(m->addr); 390 else if (m->type == KCORE_TEXT) 391 phdr->p_paddr = __pa_symbol(m->addr); 392 else 393 phdr->p_paddr = (elf_addr_t)-1; 394 phdr->p_filesz = phdr->p_memsz = m->size; 395 phdr->p_align = PAGE_SIZE; 396 phdr++; 397 } 398 399 tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos); 400 if (copy_to_iter((char *)phdrs + *fpos - phdrs_offset, tsz, 401 iter) != tsz) { 402 kfree(phdrs); 403 ret = -EFAULT; 404 goto out; 405 } 406 kfree(phdrs); 407 408 buflen -= tsz; 409 *fpos += tsz; 410 } 411 412 /* ELF note segment. */ 413 if (buflen && *fpos < notes_offset + notes_len) { 414 struct elf_prstatus prstatus = {}; 415 struct elf_prpsinfo prpsinfo = { 416 .pr_sname = 'R', 417 .pr_fname = "vmlinux", 418 }; 419 char *notes; 420 size_t i = 0; 421 422 strscpy(prpsinfo.pr_psargs, saved_command_line, 423 sizeof(prpsinfo.pr_psargs)); 424 425 notes = kzalloc(notes_len, GFP_KERNEL); 426 if (!notes) { 427 ret = -ENOMEM; 428 goto out; 429 } 430 431 append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus, 432 sizeof(prstatus)); 433 append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo, 434 sizeof(prpsinfo)); 435 append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current, 436 arch_task_struct_size); 437 /* 438 * vmcoreinfo_size is mostly constant after init time, but it 439 * can be changed by crash_save_vmcoreinfo(). Racing here with a 440 * panic on another CPU before the machine goes down is insanely 441 * unlikely, but it's better to not leave potential buffer 442 * overflows lying around, regardless. 443 */ 444 append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0, 445 vmcoreinfo_data, 446 min(vmcoreinfo_size, notes_len - i)); 447 448 tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos); 449 if (copy_to_iter(notes + *fpos - notes_offset, tsz, iter) != tsz) { 450 kfree(notes); 451 ret = -EFAULT; 452 goto out; 453 } 454 kfree(notes); 455 456 buflen -= tsz; 457 *fpos += tsz; 458 } 459 460 /* 461 * Check to see if our file offset matches with any of 462 * the addresses in the elf_phdr on our list. 463 */ 464 start = kc_offset_to_vaddr(*fpos - data_offset); 465 if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen) 466 tsz = buflen; 467 468 m = NULL; 469 while (buflen) { 470 struct page *page; 471 unsigned long pfn; 472 473 /* 474 * If this is the first iteration or the address is not within 475 * the previous entry, search for a matching entry. 476 */ 477 if (!m || start < m->addr || start >= m->addr + m->size) { 478 struct kcore_list *iter; 479 480 m = NULL; 481 list_for_each_entry(iter, &kclist_head, list) { 482 if (start >= iter->addr && 483 start < iter->addr + iter->size) { 484 m = iter; 485 break; 486 } 487 } 488 } 489 490 if (page_offline_frozen++ % MAX_ORDER_NR_PAGES == 0) { 491 page_offline_thaw(); 492 cond_resched(); 493 page_offline_freeze(); 494 } 495 496 if (!m) { 497 if (iov_iter_zero(tsz, iter) != tsz) { 498 ret = -EFAULT; 499 goto out; 500 } 501 goto skip; 502 } 503 504 switch (m->type) { 505 case KCORE_VMALLOC: 506 { 507 const char *src = (char *)start; 508 size_t read = 0, left = tsz; 509 510 /* 511 * vmalloc uses spinlocks, so we optimistically try to 512 * read memory. If this fails, fault pages in and try 513 * again until we are done. 514 */ 515 while (true) { 516 read += vread_iter(iter, src, left); 517 if (read == tsz) 518 break; 519 520 src += read; 521 left -= read; 522 523 if (fault_in_iov_iter_writeable(iter, left)) { 524 ret = -EFAULT; 525 goto out; 526 } 527 } 528 break; 529 } 530 case KCORE_USER: 531 /* User page is handled prior to normal kernel page: */ 532 if (copy_to_iter((char *)start, tsz, iter) != tsz) { 533 ret = -EFAULT; 534 goto out; 535 } 536 break; 537 case KCORE_RAM: 538 pfn = __pa(start) >> PAGE_SHIFT; 539 page = pfn_to_online_page(pfn); 540 541 /* 542 * Don't read offline sections, logically offline pages 543 * (e.g., inflated in a balloon), hwpoisoned pages, 544 * and explicitly excluded physical ranges. 545 */ 546 if (!page || PageOffline(page) || 547 is_page_hwpoison(page) || !pfn_is_ram(pfn)) { 548 if (iov_iter_zero(tsz, iter) != tsz) { 549 ret = -EFAULT; 550 goto out; 551 } 552 break; 553 } 554 fallthrough; 555 case KCORE_VMEMMAP: 556 case KCORE_TEXT: 557 /* 558 * We use _copy_to_iter() to bypass usermode hardening 559 * which would otherwise prevent this operation. 560 */ 561 if (_copy_to_iter((char *)start, tsz, iter) != tsz) { 562 ret = -EFAULT; 563 goto out; 564 } 565 break; 566 default: 567 pr_warn_once("Unhandled KCORE type: %d\n", m->type); 568 if (iov_iter_zero(tsz, iter) != tsz) { 569 ret = -EFAULT; 570 goto out; 571 } 572 } 573 skip: 574 buflen -= tsz; 575 *fpos += tsz; 576 start += tsz; 577 tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen); 578 } 579 580 out: 581 page_offline_thaw(); 582 up_read(&kclist_lock); 583 if (ret) 584 return ret; 585 return orig_buflen - buflen; 586 } 587 588 static int open_kcore(struct inode *inode, struct file *filp) 589 { 590 int ret = security_locked_down(LOCKDOWN_KCORE); 591 592 if (!capable(CAP_SYS_RAWIO)) 593 return -EPERM; 594 595 if (ret) 596 return ret; 597 598 if (kcore_need_update) 599 kcore_update_ram(); 600 if (i_size_read(inode) != proc_root_kcore->size) { 601 inode_lock(inode); 602 i_size_write(inode, proc_root_kcore->size); 603 inode_unlock(inode); 604 } 605 return 0; 606 } 607 608 static const struct proc_ops kcore_proc_ops = { 609 .proc_read_iter = read_kcore_iter, 610 .proc_open = open_kcore, 611 .proc_lseek = default_llseek, 612 }; 613 614 /* just remember that we have to update kcore */ 615 static int __meminit kcore_callback(struct notifier_block *self, 616 unsigned long action, void *arg) 617 { 618 switch (action) { 619 case MEM_ONLINE: 620 case MEM_OFFLINE: 621 kcore_need_update = 1; 622 break; 623 } 624 return NOTIFY_OK; 625 } 626 627 628 static struct kcore_list kcore_vmalloc; 629 630 #ifdef CONFIG_ARCH_PROC_KCORE_TEXT 631 static struct kcore_list kcore_text; 632 /* 633 * If defined, special segment is used for mapping kernel text instead of 634 * direct-map area. We need to create special TEXT section. 635 */ 636 static void __init proc_kcore_text_init(void) 637 { 638 kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT); 639 } 640 #else 641 static void __init proc_kcore_text_init(void) 642 { 643 } 644 #endif 645 646 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR) 647 /* 648 * MODULES_VADDR has no intersection with VMALLOC_ADDR. 649 */ 650 static struct kcore_list kcore_modules; 651 static void __init add_modules_range(void) 652 { 653 if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) { 654 kclist_add(&kcore_modules, (void *)MODULES_VADDR, 655 MODULES_END - MODULES_VADDR, KCORE_VMALLOC); 656 } 657 } 658 #else 659 static void __init add_modules_range(void) 660 { 661 } 662 #endif 663 664 static int __init proc_kcore_init(void) 665 { 666 proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops); 667 if (!proc_root_kcore) { 668 pr_err("couldn't create /proc/kcore\n"); 669 return 0; /* Always returns 0. */ 670 } 671 /* Store text area if it's special */ 672 proc_kcore_text_init(); 673 /* Store vmalloc area */ 674 kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 675 VMALLOC_END - VMALLOC_START, KCORE_VMALLOC); 676 add_modules_range(); 677 /* Store direct-map area from physical memory map */ 678 kcore_update_ram(); 679 hotplug_memory_notifier(kcore_callback, DEFAULT_CALLBACK_PRI); 680 681 return 0; 682 } 683 fs_initcall(proc_kcore_init); 684