kexec.h (3eb66e91a25497065c5322b1268cbc3953642227) kexec.h (3c88c692c28746473791276f8b42d2c989d6cbe6)
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_X86_KEXEC_H
3#define _ASM_X86_KEXEC_H
4
5#ifdef CONFIG_X86_32
6# define PA_CONTROL_PAGE 0
7# define VA_CONTROL_PAGE 1
8# define PA_PGD 2

--- 57 unchanged lines hidden (view full) ---

66# define KEXEC_ARCH KEXEC_ARCH_X86_64
67#endif
68
69/* Memory to backup during crash kdump */
70#define KEXEC_BACKUP_SRC_START (0UL)
71#define KEXEC_BACKUP_SRC_END (640 * 1024UL - 1) /* 640K */
72
73/*
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_X86_KEXEC_H
3#define _ASM_X86_KEXEC_H
4
5#ifdef CONFIG_X86_32
6# define PA_CONTROL_PAGE 0
7# define VA_CONTROL_PAGE 1
8# define PA_PGD 2

--- 57 unchanged lines hidden (view full) ---

66# define KEXEC_ARCH KEXEC_ARCH_X86_64
67#endif
68
69/* Memory to backup during crash kdump */
70#define KEXEC_BACKUP_SRC_START (0UL)
71#define KEXEC_BACKUP_SRC_END (640 * 1024UL - 1) /* 640K */
72
73/*
/*
 * CPU does not save ss and sp on stack if execution is already
 * running in kernel mode at the time of NMI occurrence. This code
 * fixes it.
 */
static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
				      struct pt_regs *oldregs)
{
#ifdef CONFIG_X86_32
	/*
	 * On 32-bit, a kernel-to-kernel exception frame omits the sp/ss
	 * slots, so oldregs->sp/ss hold garbage.  The stack pointer at
	 * the time of the exception is the address just past the pushed
	 * registers, i.e. the address of the (unused) sp slot itself.
	 */
	newregs->sp = (unsigned long)&(oldregs->sp);
	/*
	 * Read the live %ss segment selector into newregs->ss.  eax is
	 * zeroed first so the upper 16 bits of the stored value are 0
	 * (movw only writes ax).
	 */
	asm volatile("xorl %%eax, %%eax\n\t"
		     "movw %%ss, %%ax\n\t"
		     :"=a"(newregs->ss));
#endif
}
88
89/*
90 * This function is responsible for capturing register states if coming
91 * via panic otherwise just fix up the ss and sp if coming via kernel
92 * mode exception.
93 */
94static inline void crash_setup_regs(struct pt_regs *newregs,
95 struct pt_regs *oldregs)
96{
97 if (oldregs) {
98 memcpy(newregs, oldregs, sizeof(*newregs));
74 * This function is responsible for capturing register states if coming
75 * via panic otherwise just fix up the ss and sp if coming via kernel
76 * mode exception.
77 */
78static inline void crash_setup_regs(struct pt_regs *newregs,
79 struct pt_regs *oldregs)
80{
81 if (oldregs) {
82 memcpy(newregs, oldregs, sizeof(*newregs));
99 crash_fixup_ss_esp(newregs, oldregs);
100 } else {
101#ifdef CONFIG_X86_32
102 asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
103 asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
104 asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
105 asm volatile("movl %%esi,%0" : "=m"(newregs->si));
106 asm volatile("movl %%edi,%0" : "=m"(newregs->di));
107 asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));

--- 122 unchanged lines hidden ---
83 } else {
84#ifdef CONFIG_X86_32
85 asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
86 asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
87 asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
88 asm volatile("movl %%esi,%0" : "=m"(newregs->si));
89 asm volatile("movl %%edi,%0" : "=m"(newregs->di));
90 asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));

--- 122 unchanged lines hidden ---