// SPDX-License-Identifier: GPL-2.0-only
/*
 * machine_kexec.c for kexec
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/libfdt.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

/* 0x100000 ~ 0x200000 is safe */
#define KEXEC_CONTROL_CODE	TO_CACHE(0x100000UL)
#define KEXEC_CMDLINE_ADDR	TO_CACHE(0x108000UL)
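/*
 * Note: KEXEC_CONTROL_CODE receives the copy of the relocate_new_kernel()
 * stub and KEXEC_CMDLINE_ADDR the saved command line; TO_CACHE() maps these
 * physical addresses through the cached direct-mapped window, so both can
 * be accessed with ordinary loads and stores.
 */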
static unsigned long reboot_code_buffer;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;

#ifdef CONFIG_SMP
static void (*relocated_kexec_smp_wait)(void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
#endif
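/*
 * relocated_kexec_smp_wait points at the copy of kexec_smp_wait() inside
 * the reboot code buffer; secondary CPUs spin on kexec_ready_to_reboot
 * and jump there once the boot CPU raises the flag in machine_kexec().
 */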
static unsigned long efi_boot;
static unsigned long cmdline_ptr;
static unsigned long systable_ptr;
static unsigned long start_addr;
static unsigned long first_ind_entry;
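/*
 * kexec_reboot() takes no arguments, so machine_kexec() stages the new
 * kernel's entry parameters in the globals above before any CPU jumps
 * into the relocated code.
 */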
int machine_kexec_prepare(struct kimage *kimage)
{
	int i;
	char *bootloader = "kexec";
	void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR;
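	/*
	 * fw_arg0 (EFI boot flag) and fw_arg2 (system table pointer) are the
	 * firmware arguments saved at kernel entry; record them so the new
	 * kernel can be entered with the same environment.
	 */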
	kimage->arch.efi_boot = fw_arg0;
	kimage->arch.systable_ptr = fw_arg2;

	if (kimage->file_mode == 1) {
		/*
		 * kimage->cmdline_buf will be released in kexec_file_load(),
		 * so copy the command line to the KEXEC_CMDLINE_ADDR safe
		 * area first.
		 */
		memcpy((void *)KEXEC_CMDLINE_ADDR, (void *)kimage->arch.cmdline_ptr,
		       strlen((char *)kimage->arch.cmdline_ptr) + 1);
		kimage->arch.cmdline_ptr = (unsigned long)KEXEC_CMDLINE_ADDR;
	} else {
		/*
		 * Find the command line: user-space kexec is expected to pass
		 * it in a dedicated segment prefixed with the bootloader name
		 * ("kexec").
		 */
		for (i = 0; i < kimage->nr_segments; i++) {
			if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
				if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
					kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
				break;
			}
		}

		if (!kimage->arch.cmdline_ptr) {
			pr_err("Command line not included in the provided image\n");
			return -EINVAL;
		}
	}

	/* kexec/kdump need a safe page to save reboot_code_buffer */
	kimage->control_code_page = virt_to_page((void *)KEXEC_CONTROL_CODE);

	reboot_code_buffer = (unsigned long)page_address(kimage->control_code_page);
	memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);
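	/*
	 * relocate_new_kernel and kexec_smp_wait come from relocate_kernel.S
	 * and are written to be position-independent, so the copy in the
	 * reboot code buffer (and the offset arithmetic below) stays valid.
	 */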
#ifdef CONFIG_SMP
	/* All secondary CPUs may now jump to the kexec_smp_wait loop */
	relocated_kexec_smp_wait = reboot_code_buffer + (void *)(kexec_smp_wait - relocate_new_kernel);
#endif

	return 0;
}

void machine_kexec_cleanup(struct kimage *kimage)
{
}

void kexec_reboot(void)
{
	do_kexec_t do_kexec = NULL;

	/*
	 * We know we were online, and there will be no incoming IPIs at
	 * this point. Mark online again before rebooting so that the crash
	 * analysis tool will see us correctly.
	 */
	set_cpu_online(smp_processor_id(), true);

	/* Ensure remote CPUs observe that we're online before rebooting. */
	smp_mb__after_atomic();

	/*
	 * Make sure we get correct instructions written by the
	 * machine_kexec_prepare() CPU.
	 */
	__asm__ __volatile__ ("\tibar 0\n"::);

#ifdef CONFIG_SMP
	/* All secondary CPUs go to kexec_smp_wait */
	if (smp_processor_id() > 0) {
		relocated_kexec_smp_wait(NULL);
		BUG();
	}
#endif
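	/*
	 * Boot CPU: enter the copied relocate_new_kernel() stub. The
	 * do_kexec_t argument order must match what the stub expects:
	 * (efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry).
	 */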
	do_kexec = (void *)reboot_code_buffer;
	do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);

	BUG();
}

#ifdef CONFIG_SMP
static void kexec_shutdown_secondary(void *regs)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	/* We won't be sent IPIs any more. */
	set_cpu_online(cpu, false);

	local_irq_disable();
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();
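	/* The boot CPU has staged the relocated code; park this CPU there. */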
	kexec_reboot();
}

static void crash_shutdown_secondary(void *passed_regs)
{
	int cpu = smp_processor_id();
	struct pt_regs *regs = passed_regs;

	/*
	 * If we are passed registers, use those. Otherwise get the
	 * regs from the last interrupt, which should be correct, as
	 * we are in an interrupt. But if the regs are not there,
	 * pull them from the top of the stack. They are probably
	 * wrong, but we need something to keep from crashing again.
	 */
	if (!regs)
		regs = get_irq_regs();
	if (!regs)
		regs = task_pt_regs(current);

	if (!cpu_online(cpu))
		return;

	/* We won't be sent IPIs any more. */
	set_cpu_online(cpu, false);

	local_irq_disable();
	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
		crash_save_cpu(regs, cpu);
	cpumask_set_cpu(cpu, &cpus_in_crash);
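	/* Wait for the crashing CPU to stage the relocated reboot code. */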
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	kexec_reboot();
}

void crash_smp_send_stop(void)
{
	unsigned int ncpus;
	unsigned long timeout;
	static int cpus_stopped;

	/*
	 * This function may be called twice on the panic path, but it must
	 * only run once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	/* Excluding the panic cpu */
	ncpus = num_online_cpus() - 1;
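	/*
	 * Fire the shutdown IPI without waiting (wait = 0): the handlers
	 * never return, they spin until kexec_ready_to_reboot is set and
	 * then reboot.
	 */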
	smp_call_function(crash_shutdown_secondary, NULL, 0);
	smp_wmb();

	/*
	 * The crash CPU sends an IPI and waits for the other CPUs to
	 * respond; give them up to 10 seconds to check in.
	 */
	timeout = MSEC_PER_SEC * 10;
	pr_emerg("Sending IPI to other cpus...\n");
	while ((cpumask_weight(&cpus_in_crash) < ncpus) && timeout--) {
		mdelay(1);
		cpu_relax();
	}
}
#endif /* defined(CONFIG_SMP) */

void machine_shutdown(void)
{
#ifdef CONFIG_SMP
	int cpu;
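	/*
	 * Note: offline CPUs are brought up below so that every possible CPU
	 * can run kexec_shutdown_secondary() and park in the relocated wait
	 * loop, instead of being left behind in the old kernel.
	 */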
	/* All CPUs go to reboot_code_buffer */
	for_each_possible_cpu(cpu)
		if (!cpu_online(cpu))
			cpu_device_up(get_cpu_device(cpu));

	smp_call_function(kexec_shutdown_secondary, NULL, 0);
#endif
}

void machine_crash_shutdown(struct pt_regs *regs)
{
	int crashing_cpu;

	local_irq_disable();

	crashing_cpu = smp_processor_id();
	crash_save_cpu(regs, crashing_cpu);

#ifdef CONFIG_SMP
	crash_smp_send_stop();
#endif
	machine_kexec_mask_interrupts();
	cpumask_set_cpu(crashing_cpu, &cpus_in_crash);

	pr_info("Starting crashdump kernel...\n");
}

void machine_kexec(struct kimage *image)
{
	unsigned long entry, *ptr;
	struct kimage_arch *internal = &image->arch;

	efi_boot = internal->efi_boot;
	cmdline_ptr = internal->cmdline_ptr;
	systable_ptr = internal->systable_ptr;

	start_addr = (unsigned long)phys_to_virt(image->start);

	first_ind_entry = (image->type == KEXEC_TYPE_DEFAULT) ?
			(unsigned long)phys_to_virt(image->head & PAGE_MASK) : 0;
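	/*
	 * A crash (kdump) image is loaded in place in its reserved region,
	 * so there is no indirection page list to walk: first_ind_entry
	 * stays 0 for KEXEC_TYPE_CRASH.
	 */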
	/*
	 * The generic kexec code builds a page list with physical
	 * addresses. They are directly accessible through XKPRANGE,
	 * hence the phys_to_virt() call.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
	     phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}
	/* Mark offline before disabling local irq. */
	set_cpu_online(smp_processor_id(), false);

	/* We do not want to be bothered. */
	local_irq_disable();
	machine_kexec_mask_interrupts();

	pr_notice("EFI boot flag: 0x%lx\n", efi_boot);
	pr_notice("Command line addr: 0x%lx\n", cmdline_ptr);
	pr_notice("Command line string: %s\n", (char *)cmdline_ptr);
	pr_notice("System table addr: 0x%lx\n", systable_ptr);
	pr_notice("We will call new kernel at 0x%lx\n", start_addr);
	pr_notice("Bye ...\n");

	/* Make reboot code buffer available to the boot CPU. */
	flush_cache_all();
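	/* Release the parked secondary CPUs; they reboot via kexec_reboot(). */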
#ifdef CONFIG_SMP
	atomic_set(&kexec_ready_to_reboot, 1);
#endif

	kexec_reboot();
}