/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#endif

# define KEXEC_CONTROL_PAGE_SIZE	4096
# define KEXEC_CONTROL_CODE_MAX_SIZE	2048

#ifndef __ASSEMBLY__

#include <linux/string.h>
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/ptrace.h>

struct kimage;

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
 * i.e. the highest page that is mapped directly into kernel memory and
 * for which kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from 64 bit kernels. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64

extern unsigned long kexec_va_control_page;
extern unsigned long kexec_pa_table_page;
extern unsigned long kexec_pa_swap_page;
#endif

/*
 * This function is responsible for capturing the register state if we come
 * in via panic; otherwise it just fixes up ss and sp, since we come in via
 * a kernel-mode exception that already supplied oldregs.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
	} else {
#ifdef CONFIG_X86_32
		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("movl %%ds, %%eax;" : "=a"(newregs->ds));
		asm volatile("movl %%es, %%eax;" : "=a"(newregs->es));
		asm volatile("pushfl; popl %0" : "=m"(newregs->flags));
#else
		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("pushfq; popq %0" : "=m"(newregs->flags));
#endif
		newregs->ip = _THIS_IP_;
	}
}
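
/*
 * An illustrative caller (a sketch of how the generic crash path in
 * kernel/crash_core.c uses this helper; regs is the exception frame,
 * or NULL when we arrive via a direct panic):
 *
 *	struct pt_regs fixed_regs;
 *
 *	crash_setup_regs(&fixed_regs, regs);
 *	crash_save_vmcoreinfo();
 *	machine_crash_shutdown(&fixed_regs);
 */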

/*
 * Prototype of the relocation trampoline implemented in
 * arch/x86/kernel/relocate_kernel_{32,64}.S; it runs from the control
 * page, installs the new kernel's pages and jumps to it.
 */
#ifdef CONFIG_X86_32
typedef asmlinkage unsigned long
relocate_kernel_fn(unsigned long indirection_page,
		   unsigned long control_page,
		   unsigned long start_address,
		   unsigned int has_pae,
		   unsigned int preserve_context);
#else
typedef unsigned long
relocate_kernel_fn(unsigned long indirection_page,
		   unsigned long pa_control_page,
		   unsigned long start_address,
		   unsigned int preserve_context,
		   unsigned int host_mem_enc_active);
#endif
extern relocate_kernel_fn relocate_kernel;
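
/*
 * An illustrative 64-bit invocation, matching the typedef above. This is
 * a sketch only: the real call site, machine_kexec() in
 * arch/x86/kernel/machine_kexec_64.c, first copies the relocation code
 * into the control page and invokes that copy rather than this symbol,
 * and control_page below is a hypothetical local variable:
 *
 *	image->start = relocate_kernel((unsigned long)image->head,
 *				       virt_to_phys(control_page),
 *				       image->start,
 *				       image->preserve_context,
 *				       cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT));
 */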
#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
	/* Page-table pages set up for the transition; see machine_kexec_32.c. */
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd0;
	pmd_t *pmd1;
#endif
	pte_t *pte0;
	pte_t *pte1;
};
#else
struct kimage_arch {
	/*
	 * This is a kimage control page, as it must not overlap with either
	 * source or destination address ranges.
	 */
	pgd_t *pgd;
	/*
	 * The virtual mapping of the control code page itself is used only
	 * during the transition, while the current kernel's pages are all
	 * in place. Thus the intermediate page table pages used to map it
	 * are not control pages, but instead just normal pages obtained
	 * with get_zeroed_page(). They have to be tracked (below) so that
	 * they can be freed.
	 */
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
};
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_64
/*
 * The number and order of elements in this structure must match those in
 * arch/x86/purgatory/entry64.S. If you change one, make the corresponding
 * change in purgatory too.
 */
struct kexec_entry64_regs {
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rbx;
	uint64_t rsp;
	uint64_t rbp;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rip;
};

/*
 * Defining each arch hook to its own name tells the generic kexec code
 * (include/linux/kexec.h) not to fall back to its default implementation.
 */
extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
				       gfp_t gfp);
#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages

extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages

void arch_kexec_protect_crashkres(void);
#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres

void arch_kexec_unprotect_crashkres(void);
#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres

#ifdef CONFIG_KEXEC_FILE
struct purgatory_info;
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add

int arch_kimage_file_post_load_cleanup(struct kimage *image);
#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
#endif
#endif

extern void kdump_nmi_shootdown_cpus(void);

#ifdef CONFIG_CRASH_HOTPLUG
void arch_crash_handle_hotplug_event(struct kimage *image, void *arg);
#define arch_crash_handle_hotplug_event arch_crash_handle_hotplug_event

int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags);
#define arch_crash_hotplug_support arch_crash_hotplug_support

unsigned int arch_crash_get_elfcorehdr_size(void);
#define crash_get_elfcorehdr_size arch_crash_get_elfcorehdr_size
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */