xref: /linux/arch/arm64/include/asm/kexec.h (revision 6a4aee277740d04ac0fd54cfa17cc28261932ddc)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 */

#ifndef _ARM64_KEXEC_H
#define _ARM64_KEXEC_H

/* Maximum physical address we can use pages from */

#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)

/* Maximum address we can reach in physical address mode */

#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)

/* Maximum address we can use for the control code buffer */

#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)

#define KEXEC_CONTROL_PAGE_SIZE 4096

#define KEXEC_ARCH KEXEC_ARCH_AARCH64

#ifndef __ASSEMBLY__

/**
 * crash_setup_regs() - save registers for the panic kernel
 *
 * @newregs: registers are saved here
 * @oldregs: registers to be saved (may be %NULL)
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
	} else {
		u64 tmp1, tmp2;

		/*
		 * No pt_regs were passed in: snapshot the live CPU state
		 * instead.  Store the general purpose registers, sp and pc,
		 * and reassemble a PSTATE value from CurrentEL, SPSel, DAIF
		 * and NZCV.
		 */
		__asm__ __volatile__ (
			"stp	 x0,   x1, [%2, #16 *  0]\n"
			"stp	 x2,   x3, [%2, #16 *  1]\n"
			"stp	 x4,   x5, [%2, #16 *  2]\n"
			"stp	 x6,   x7, [%2, #16 *  3]\n"
			"stp	 x8,   x9, [%2, #16 *  4]\n"
			"stp	x10,  x11, [%2, #16 *  5]\n"
			"stp	x12,  x13, [%2, #16 *  6]\n"
			"stp	x14,  x15, [%2, #16 *  7]\n"
			"stp	x16,  x17, [%2, #16 *  8]\n"
			"stp	x18,  x19, [%2, #16 *  9]\n"
			"stp	x20,  x21, [%2, #16 * 10]\n"
			"stp	x22,  x23, [%2, #16 * 11]\n"
			"stp	x24,  x25, [%2, #16 * 12]\n"
			"stp	x26,  x27, [%2, #16 * 13]\n"
			"stp	x28,  x29, [%2, #16 * 14]\n"
			"mov	 %0,  sp\n"
			"stp	x30,  %0,  [%2, #16 * 15]\n"

			"/* faked current PSTATE */\n"
			"mrs	 %0, CurrentEL\n"
			"mrs	 %1, SPSEL\n"
			"orr	 %0, %0, %1\n"
			"mrs	 %1, DAIF\n"
			"orr	 %0, %0, %1\n"
			"mrs	 %1, NZCV\n"
			"orr	 %0, %0, %1\n"
			/* pc */
			"adr	 %1, 1f\n"
		"1:\n"
			"stp	 %1, %0,   [%2, #16 * 16]\n"
			: "=&r" (tmp1), "=&r" (tmp2)
			: "r" (newregs)
			: "memory"
		);
	}
}
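
/*
 * Usage sketch (illustrative only, not part of this header): the generic
 * kexec/crash core captures the register state once, right before handing
 * control to the crash kernel.  @oldregs may be NULL, in which case the
 * inline assembly above snapshots the live CPU state.  With a hypothetical
 * caller name, the call site looks roughly like:
 *
 *	void example_crash_path(struct pt_regs *regs)
 *	{
 *		struct pt_regs fixed_regs;
 *
 *		crash_setup_regs(&fixed_regs, regs);
 *		machine_crash_shutdown(&fixed_regs);
 *	}
 */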

/*
 * Hooks used by hibernation so that the loaded crash dump kernel image
 * survives suspend/resume.
 */
#if defined(CONFIG_CRASH_DUMP) && defined(CONFIG_HIBERNATION)
extern bool crash_is_nosave(unsigned long pfn);
extern void crash_prepare_suspend(void);
extern void crash_post_resume(void);

void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#define crash_free_reserved_phys_range crash_free_reserved_phys_range
#else
static inline bool crash_is_nosave(unsigned long pfn) { return false; }
static inline void crash_prepare_suspend(void) {}
static inline void crash_post_resume(void) {}
#endif

struct kimage;

#if defined(CONFIG_KEXEC_CORE)
/*
 * Branch to @entry with the MMU off, passing @arg0..@arg2 in x0..x2.
 * @el2_switch requests that the jump be performed from EL2 when the
 * kernel was booted at EL2.
 */
void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
		      unsigned long arg0, unsigned long arg1,
		      unsigned long arg2);

int machine_kexec_post_load(struct kimage *image);
#define machine_kexec_post_load machine_kexec_post_load
#endif
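
/*
 * Minimal usage sketch (illustrative only; the real reboot path lives in
 * arch/arm64/kernel/machine_kexec.c and differs in detail).  The entry code
 * is branched to with the MMU off and its arguments in x0-x2, e.g.:
 *
 *	cpu_soft_restart(el2_switch, kimage->arch.kern_reloc,
 *			 arg0, arg1, arg2);
 *
 * where @el2_switch asks for the jump to be made from EL2 when the kernel
 * was booted at EL2, and @kern_reloc points at the copied relocation code.
 */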

#define ARCH_HAS_KIMAGE_ARCH

struct kimage_arch {
	/* virtual and physical address of the copied device tree blob */
	void *dtb;
	phys_addr_t dtb_mem;
	/* physical address of the relocation code */
	phys_addr_t kern_reloc;
	/* physical address of the temporary EL2 vectors */
	phys_addr_t el2_vectors;
	/* page tables and mapping parameters used while relocating */
	phys_addr_t ttbr0;
	phys_addr_t ttbr1;
	phys_addr_t zero_page;
	unsigned long phys_offset;
	unsigned long t0sz;
};
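
/*
 * Sketch of how the load-time code is expected to fill this in (illustrative
 * only; the real code lives in arch/arm64/kernel/machine_kexec_file.c and
 * machine_kexec.c).  For example, after placing the device tree with
 * kexec_add_buffer(), the loader records both addresses:
 *
 *	image->arch.dtb = dtb;
 *	image->arch.dtb_mem = kbuf.mem;
 *
 * The remaining fields describe the temporary page tables prepared at
 * machine_kexec_post_load() time for relocating the new kernel.
 */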

#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_image_ops;

int arch_kimage_file_post_load_cleanup(struct kimage *image);
#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup

extern int load_other_segments(struct kimage *image,
		unsigned long kernel_load_addr, unsigned long kernel_size,
		char *initrd, unsigned long initrd_len,
		char *cmdline);
#endif
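
/*
 * Usage sketch (illustrative only): kexec_image_ops is the arm64 Image
 * loader; arch/arm64/kernel/machine_kexec_file.c registers it roughly as:
 *
 *	const struct kexec_file_ops * const kexec_file_loaders[] = {
 *		&kexec_image_ops,
 *		NULL
 *	};
 *
 * Its load method is expected to place the kernel image and then call
 * load_other_segments() to add the initrd, DTB and command line.
 */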

#endif /* __ASSEMBLY__ */

#endif /* _ARM64_KEXEC_H */