/*
 * Memory preserving reboot related code.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>

#include <asm/uaccess.h>

static void *kdump_buf_page;

static inline bool is_crashed_pfn_valid(unsigned long pfn)
{
#ifndef CONFIG_X86_PAE
	/*
	 * A non-PAE kdump kernel executed from a PAE one will crop the high
	 * pte bits and poke unwanted space, counting again from address 0;
	 * we don't want that. The pte must fit into unsigned long. In fact
	 * the test checks that the high 12 bits of the pfn are zero (the
	 * pfn will be shifted left by PAGE_SHIFT).
	 */
	return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn;
#else
	return true;
#endif
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 *
 * Calling copy_to_user() in atomic context is not desirable, so we first
 * copy the data to a pre-allocated kernel page and then copy it to user
 * space in non-atomic context.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	if (!is_crashed_pfn_valid(pfn))
		return -EFAULT;

	vaddr = kmap_atomic_pfn(pfn);

	if (!userbuf) {
		memcpy(buf, (vaddr + offset), csize);
		kunmap_atomic(vaddr);
	} else {
		if (!kdump_buf_page) {
			printk(KERN_WARNING "Kdump: Kdump buffer page not allocated\n");
			kunmap_atomic(vaddr);
			return -EFAULT;
		}
		copy_page(kdump_buf_page, vaddr);
		kunmap_atomic(vaddr);
		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
			return -EFAULT;
	}

	return csize;
}

static int __init kdump_buf_page_init(void)
{
	int ret = 0;

	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!kdump_buf_page) {
		printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer page\n");
		ret = -ENOMEM;
	}

	return ret;
}
arch_initcall(kdump_buf_page_init);
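
/*
 * Illustrative sketch, not part of this file: one plausible shape for a
 * dump reader that consumes copy_oldmem_page(). The real consumers live
 * elsewhere (e.g. read_from_oldmem() in fs/proc/vmcore.c); the helper
 * below is hypothetical and only shows how a byte-granular read is split
 * at page boundaries, since copy_oldmem_page() handles at most one page
 * per call. Compiled out deliberately.
 */
#if 0
static ssize_t oldmem_read_sketch(char *buf, size_t count, u64 *ppos,
				  int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	/* Translate the byte position into a page frame plus in-page offset. */
	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		/* Clamp each chunk to the end of the current page. */
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* userbuf selects the copy_to_user() bounce path above. */
		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;

		*ppos += tmp;
		count -= tmp;
		buf += tmp;
		read += tmp;
		++pfn;
		offset = 0;	/* Subsequent pages are read from their start. */
	} while (count);

	return read;
}
#endif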