#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# define VA_PGD			3
# define PA_PTE_0		4
# define VA_PTE_0		5
# define PA_PTE_1		6
# define VA_PTE_1		7
# define PA_SWAP_PAGE		8
# ifdef CONFIG_X86_PAE
#  define PA_PMD_0		9
#  define VA_PMD_0		10
#  define PA_PMD_1		11
#  define VA_PMD_1		12
#  define PAGES_NR		13
# else
#  define PAGES_NR		9
# endif
#else
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# define VA_PGD			3
# define PA_PUD_0		4
# define VA_PUD_0		5
# define PA_PMD_0		6
# define VA_PMD_0		7
# define PA_PTE_0		8
# define VA_PTE_0		9
# define PA_PUD_1		10
# define VA_PUD_1		11
# define PA_PMD_1		12
# define VA_PMD_1		13
# define PA_PTE_1		14
# define VA_PTE_1		15
# define PA_TABLE_PAGE		16
# define PAGES_NR		17
#endif

#ifdef CONFIG_X86_32
# define KEXEC_CONTROL_CODE_MAX_SIZE	2048
#endif

#ifndef __ASSEMBLY__

#include <linux/string.h>

#include <asm/page.h>
#include <asm/ptrace.h>

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
 * i.e. the highest page that is mapped directly into kernel memory, so
 * that kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE	4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from a 64-bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (0xFFFFFFFFFFUL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (0xFFFFFFFFFFUL)

/* Allocate one page for the pdp and the second for the code */
# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif

/*
 * The CPU does not save ss and sp on the stack if execution was already
 * running in kernel mode at the time of the NMI.  This code fixes that up.
 */
static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
				      struct pt_regs *oldregs)
{
#ifdef CONFIG_X86_32
	newregs->sp = (unsigned long)&(oldregs->sp);
	asm volatile("xorl %%eax, %%eax\n\t"
		     "movw %%ss, %%ax\n\t"
		     :"=a"(newregs->ss));
#endif
}
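/*
 * Illustrative sketch, not part of this header: why &oldregs->sp above
 * yields the interrupted stack pointer on 32-bit.  A same-privilege
 * exception pushes only flags/cs/ip, so the sp/ss slots of the on-stack
 * struct pt_regs were never written by hardware; the interrupted esp
 * pointed exactly at the word where the sp member now sits.  The
 * function name below is hypothetical.
 */
#if 0
static inline unsigned long demo_interrupted_sp(struct pt_regs *oldregs)
{
	/* The address of the (unwritten) sp slot is the old stack pointer. */
	return (unsigned long)&oldregs->sp;
}
#endif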
/*
 * This function captures the register state if we enter via panic;
 * otherwise it just fixes up ss and sp when we enter via a kernel-mode
 * exception.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
		crash_fixup_ss_esp(newregs, oldregs);
	} else {
#ifdef CONFIG_X86_32
		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
		asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
		asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
		asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
#else
		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
		asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
#endif
		newregs->ip = (unsigned long)current_text_addr();
	}
}

#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae,
		unsigned int preserve_context);
#else
NORET_TYPE void
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address) ATTRIB_NORET;
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */
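/*
 * Usage sketch, not part of this header: how an x86 crash path such as
 * native_machine_crash_shutdown() in arch/x86/kernel/crash.c typically
 * uses crash_setup_regs().  The function and variable names below are
 * hypothetical; only crash_setup_regs() and crash_save_cpu() come from
 * the kernel proper.
 */
#if 0
static void example_crash_shutdown(struct pt_regs *regs)
{
	struct pt_regs fixed_regs;

	/*
	 * If we arrived via an exception, 'regs' holds the interrupted
	 * context and crash_setup_regs() copies it, fixing up ss/sp on
	 * 32-bit.  If called from panic() with regs == NULL, it snapshots
	 * the current register state instead.
	 */
	crash_setup_regs(&fixed_regs, regs);
	crash_save_cpu(&fixed_regs, smp_processor_id());
}
#endif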