/*
 * Memory preserving reboot related code.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>

#include <asm/uaccess.h>

/* Pre-allocated bounce page used when copying "oldmem" data to user space. */
static void *kdump_buf_page;

/* Stores the physical address of the ELF header of the crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;

static inline bool is_crashed_pfn_valid(unsigned long pfn)
{
#ifndef CONFIG_X86_PAE
	/*
	 * A non-PAE kdump kernel executed from a PAE one would crop the high
	 * pte bits and poke unwanted space, counting again from address 0;
	 * we don't want that. The pte must fit into an unsigned long. In
	 * effect the test checks that the high 12 bits are zero (the pfn
	 * will be shifted left by PAGE_SHIFT).
	 */
	return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn;
#else
	return true;
#endif
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on @pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel; we stitch up a pte, similar to kmap_atomic().
 *
 * Calling copy_to_user() in atomic context is not desirable, so the data
 * is first copied to a pre-allocated kernel page and then copied to user
 * space in non-atomic context.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	if (!is_crashed_pfn_valid(pfn))
		return -EFAULT;

	vaddr = kmap_atomic_pfn(pfn, KM_PTE0);

	if (!userbuf) {
		memcpy(buf, (vaddr + offset), csize);
		kunmap_atomic(vaddr, KM_PTE0);
	} else {
		if (!kdump_buf_page) {
			printk(KERN_WARNING "Kdump: Kdump buffer page not"
				" allocated\n");
			kunmap_atomic(vaddr, KM_PTE0);
			return -EFAULT;
		}
		copy_page(kdump_buf_page, vaddr);
		kunmap_atomic(vaddr, KM_PTE0);
		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
			return -EFAULT;
	}

	return csize;
}

static int __init kdump_buf_page_init(void)
{
	int ret = 0;

	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!kdump_buf_page) {
		printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
			" page\n");
		ret = -ENOMEM;
	}

	return ret;
}
arch_initcall(kdump_buf_page_init);
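
/*
 * Illustrative sketch only (kept out of the build with #if 0): how a dump
 * reader such as /proc/vmcore might drive copy_oldmem_page() for a request
 * that spans page boundaries. example_read_oldmem() is a hypothetical
 * helper name used for illustration; the real consumer of this interface
 * lives in fs/proc/vmcore.c.
 */
#if 0
static ssize_t example_read_oldmem(char *buf, size_t count, u64 *ppos,
				   int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		/* Never ask copy_oldmem_page() to cross a page boundary. */
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;

		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}
#endif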