// SPDX-License-Identifier: GPL-2.0-only
/*
 * machine_kexec.c for kexec
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/libfdt.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

/* 0x100000 ~ 0x200000 is safe */
#define KEXEC_CONTROL_CODE	TO_CACHE(0x100000UL)
#define KEXEC_CMDLINE_ADDR	TO_CACHE(0x108000UL)
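/*
 * Note: KEXEC_CMDLINE_ADDR sits 0x8000 bytes (32 KiB) into the safe
 * window, which presumably leaves enough room for the relocated reboot
 * code copied to KEXEC_CONTROL_CODE below.
 */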

static unsigned long reboot_code_buffer;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;

#ifdef CONFIG_SMP
static void (*relocated_kexec_smp_wait)(void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
#endif

static unsigned long efi_boot;
static unsigned long cmdline_ptr;
static unsigned long systable_ptr;
static unsigned long start_addr;
static unsigned long first_ind_entry;

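/* Dump the layout of the kimage being loaded, for kexec debugging. */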
static void kexec_image_info(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("kexec kimage info:\n");
	pr_debug("\ttype: %d\n", kimage->type);
	pr_debug("\tstart: %lx\n", kimage->start);
	pr_debug("\thead: %lx\n", kimage->head);
	pr_debug("\tnr_segments: %lu\n", kimage->nr_segments);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("\t segment[%lu]: %016lx - %016lx", i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz);
		pr_debug("\t\t0x%lx bytes, %lu pages\n",
			(unsigned long)kimage->segment[i].memsz,
			(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
	}
}

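/*
 * Stash the firmware arguments, stage the new kernel's command line in
 * the safe area, and copy the relocation stub to the control page.
 */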
int machine_kexec_prepare(struct kimage *kimage)
{
	int i;
	char *bootloader = "kexec";
	void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR;

	kexec_image_info(kimage);

	kimage->arch.efi_boot = fw_arg0;
	kimage->arch.systable_ptr = fw_arg2;

	if (kimage->file_mode == 1) {
		/*
		 * kimage->cmdline_buf will be released in kexec_file_load, so copy
		 * to the KEXEC_CMDLINE_ADDR safe area.
		 */
		memcpy((void *)KEXEC_CMDLINE_ADDR, (void *)kimage->arch.cmdline_ptr,
		       strlen((char *)kimage->arch.cmdline_ptr) + 1);
		kimage->arch.cmdline_ptr = (unsigned long)KEXEC_CMDLINE_ADDR;
	} else {
		/* Find the command line */
		for (i = 0; i < kimage->nr_segments; i++) {
			if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
				if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
					kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
				break;
			}
		}

		if (!kimage->arch.cmdline_ptr) {
			pr_err("Command line not included in the provided image\n");
			return -EINVAL;
		}
	}

	/* kexec/kdump needs a safe page to save reboot_code_buffer */
	kimage->control_code_page = virt_to_page((void *)KEXEC_CONTROL_CODE);

	reboot_code_buffer = (unsigned long)page_address(kimage->control_code_page);
	memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);

#ifdef CONFIG_SMP
	/* All secondary CPUs may now jump to the kexec_smp_wait cycle */
	relocated_kexec_smp_wait = reboot_code_buffer + (void *)(kexec_smp_wait - relocate_new_kernel);
#endif

	return 0;
}

void machine_kexec_cleanup(struct kimage *kimage)
{
}

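/*
 * Final reboot path, entered by every CPU: secondary CPUs park in the
 * relocated wait loop, while the boot CPU jumps to the relocated
 * relocate_new_kernel stub with the new kernel's arguments.
 */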
void kexec_reboot(void)
{
	do_kexec_t do_kexec = NULL;

	/*
	 * We know we were online, and there will be no incoming IPIs at
	 * this point. Mark online again before rebooting so that the crash
	 * analysis tool will see us correctly.
	 */
	set_cpu_online(smp_processor_id(), true);

	/* Ensure remote CPUs observe that we're online before rebooting. */
	smp_mb__after_atomic();

	/*
	 * Make sure we get correct instructions written by the
	 * machine_kexec_prepare() CPU.
	 */
	__asm__ __volatile__ ("\tibar 0\n"::);

#ifdef CONFIG_SMP
	/* All secondary CPUs go to kexec_smp_wait */
	if (smp_processor_id() > 0) {
		relocated_kexec_smp_wait(NULL);
		BUG();
	}
#endif

	do_kexec = (void *)reboot_code_buffer;
	do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);

	BUG();
}

#ifdef CONFIG_SMP
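/*
 * Park a secondary CPU for a normal kexec: mark it offline, then spin
 * with interrupts off until the boot CPU signals kexec_ready_to_reboot.
 */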
static void kexec_shutdown_secondary(void *regs)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	/* We won't be sent IPIs any more. */
	set_cpu_online(cpu, false);

	local_irq_disable();
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	kexec_reboot();
}

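/*
 * Park a secondary CPU on the crash path: additionally save its
 * registers with crash_save_cpu() so they appear in the vmcore.
 */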
static void crash_shutdown_secondary(void *passed_regs)
{
	int cpu = smp_processor_id();
	struct pt_regs *regs = passed_regs;

	/*
	 * If we are passed registers, use those. Otherwise get the
	 * regs from the last interrupt, which should be correct, as
	 * we are in an interrupt. But if the regs are not there,
	 * pull them from the top of the stack. They are probably
	 * wrong, but we need something to keep from crashing again.
	 */
	if (!regs)
		regs = get_irq_regs();
	if (!regs)
		regs = task_pt_regs(current);

	if (!cpu_online(cpu))
		return;

	/* We won't be sent IPIs any more. */
	set_cpu_online(cpu, false);

	local_irq_disable();
	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
		crash_save_cpu(regs, cpu);
	cpumask_set_cpu(cpu, &cpus_in_crash);

	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	kexec_reboot();
}

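/*
 * Stop all other CPUs on the panic path and wait (up to ~10 seconds)
 * for them to check in on cpus_in_crash.
 */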
void crash_smp_send_stop(void)
{
	unsigned int ncpus;
	unsigned long timeout;
	static int cpus_stopped;

	/*
	 * This function can be called twice in the panic path, but obviously
	 * we should execute this only once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	/* Excluding the panic CPU */
	ncpus = num_online_cpus() - 1;

	smp_call_function(crash_shutdown_secondary, NULL, 0);
	smp_wmb();

	/*
	 * The crash CPU sends an IPI and waits for other CPUs to
	 * respond. Delay of at least 10 seconds.
	 */
	timeout = MSEC_PER_SEC * 10;
	pr_emerg("Sending IPI to other cpus...\n");
	while ((cpumask_weight(&cpus_in_crash) < ncpus) && timeout--) {
		mdelay(1);
		cpu_relax();
	}
}
#endif /* defined(CONFIG_SMP) */

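/*
 * Prepare for a normal kexec: every possible CPU is brought online
 * first, so that each one can later park itself in the relocated
 * kexec_smp_wait loop.
 */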
void machine_shutdown(void)
{
#ifdef CONFIG_SMP
	int cpu;

	/* All CPUs go to reboot_code_buffer */
	for_each_possible_cpu(cpu)
		if (!cpu_online(cpu))
			cpu_device_up(get_cpu_device(cpu));

	smp_call_function(kexec_shutdown_secondary, NULL, 0);
#endif
}

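/*
 * Crash-time shutdown: save the panicking CPU's registers, stop all
 * other CPUs, then record this CPU in cpus_in_crash.
 */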
void machine_crash_shutdown(struct pt_regs *regs)
{
	int crashing_cpu;

	local_irq_disable();

	crashing_cpu = smp_processor_id();
	crash_save_cpu(regs, crashing_cpu);

#ifdef CONFIG_SMP
	crash_smp_send_stop();
#endif
	cpumask_set_cpu(crashing_cpu, &cpus_in_crash);

	pr_info("Starting crashdump kernel...\n");
}

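/*
 * Transfer control to the new kernel: translate the kimage head and
 * indirection entries from physical to XKPRANGE virtual addresses,
 * take this CPU offline, release the secondaries, and jump to the
 * relocated reboot code.
 */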
void machine_kexec(struct kimage *image)
{
	unsigned long entry, *ptr;
	struct kimage_arch *internal = &image->arch;

	efi_boot = internal->efi_boot;
	cmdline_ptr = internal->cmdline_ptr;
	systable_ptr = internal->systable_ptr;

	start_addr = (unsigned long)phys_to_virt(image->start);

	first_ind_entry = (image->type == KEXEC_TYPE_DEFAULT) ?
		(unsigned long)phys_to_virt(image->head & PAGE_MASK) : 0;

	/*
	 * The generic kexec code builds a page list with physical
	 * addresses. They are directly accessible through XKPRANGE,
	 * hence the phys_to_virt() call.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
	       phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}

	/* Mark offline before disabling local irq. */
	set_cpu_online(smp_processor_id(), false);

	/* We do not want to be bothered. */
	local_irq_disable();

	pr_notice("EFI boot flag: 0x%lx\n", efi_boot);
	pr_notice("Command line addr: 0x%lx\n", cmdline_ptr);
	pr_notice("Command line string: %s\n", (char *)cmdline_ptr);
	pr_notice("System table addr: 0x%lx\n", systable_ptr);
	pr_notice("We will call new kernel at 0x%lx\n", start_addr);
	pr_notice("Bye ...\n");

	/* Make reboot code buffer available to the boot CPU. */
	flush_cache_all();

#ifdef CONFIG_SMP
	atomic_set(&kexec_ready_to_reboot, 1);
#endif

	kexec_reboot();
}