// SPDX-License-Identifier: GPL-2.0
/*
 * Memory preserving reboot related code.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>

#include <linux/uaccess.h>

static void *kdump_buf_page;

static inline bool is_crashed_pfn_valid(unsigned long pfn)
{
#ifndef CONFIG_X86_PAE
	/*
	 * A non-PAE kdump kernel executed from a PAE one would crop the
	 * high pte bits and poke unwanted space, counting again from
	 * address 0; we don't want that. The pte must fit into an
	 * unsigned long. In effect the test checks that the high 12 bits
	 * of the pfn are zero (the pfn will be shifted left by PAGE_SHIFT).
	 */
	return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn;
#else
	return true;
#endif
}
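
/*
 * A worked instance of the check above (illustrative note, not from the
 * original source): on non-PAE x86-32, PAGE_SHIFT is 12 and unsigned long
 * is 32 bits wide, so pfn << PAGE_SHIFT only survives intact when
 * pfn < (1UL << 20), i.e. when the high 12 bits of the 32-bit pfn are
 * zero. Any larger pfn would lose its top bits in the shift and alias a
 * low address -- the "counting again from address 0" failure described
 * above.
 */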
/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 *
 * Calling copy_to_user() in atomic context is not desirable. Hence we
 * first copy the data to a pre-allocated kernel page and then copy it to
 * user space in non-atomic context.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	if (!is_crashed_pfn_valid(pfn))
		return -EFAULT;

	vaddr = kmap_atomic_pfn(pfn);

	if (!userbuf) {
		memcpy(buf, (vaddr + offset), csize);
		kunmap_atomic(vaddr);
	} else {
		if (!kdump_buf_page) {
			printk(KERN_WARNING "Kdump: Kdump buffer page not"
				" allocated\n");
			kunmap_atomic(vaddr);
			return -EFAULT;
		}
		copy_page(kdump_buf_page, vaddr);
		kunmap_atomic(vaddr);
		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
			return -EFAULT;
	}

	return csize;
}

static int __init kdump_buf_page_init(void)
{
	int ret = 0;

	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!kdump_buf_page) {
		printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
			" page\n");
		ret = -ENOMEM;
	}

	return ret;
}
arch_initcall(kdump_buf_page_init);
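
/*
 * Illustrative only -- a minimal sketch of how a consumer such as the
 * vmcore reader (fs/proc/vmcore.c) might walk "oldmem" one page at a
 * time through copy_oldmem_page(). The helper name and exact shape
 * below are assumptions for illustration, not part of this file.
 * nr_bytes is clamped so a single call never crosses a page boundary,
 * matching copy_oldmem_page()'s one-page contract:
 *
 *	static ssize_t read_from_oldmem(char *buf, size_t count,
 *					u64 *ppos, int userbuf)
 *	{
 *		unsigned long pfn = (unsigned long)(*ppos / PAGE_SIZE);
 *		unsigned long offset = (unsigned long)(*ppos % PAGE_SIZE);
 *		ssize_t read = 0, tmp;
 *		size_t nr_bytes;
 *
 *		if (!count)
 *			return 0;
 *
 *		do {
 *			nr_bytes = min(count, (size_t)PAGE_SIZE - offset);
 *			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
 *					       offset, userbuf);
 *			if (tmp < 0)
 *				return tmp;
 *			*ppos += nr_bytes;
 *			buf += nr_bytes;
 *			count -= nr_bytes;
 *			read += nr_bytes;
 *			++pfn;
 *			offset = 0;
 *		} while (count);
 *
 *		return read;
 *	}
 */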