xref: /linux/arch/mips/kernel/machine_kexec.c (revision 95298d63c67673c654c08952672d016212b26054)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * machine_kexec.c for kexec
 * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
 */
#include <linux/compiler.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>

#include <asm/cacheflush.h>
#include <asm/page.h>

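/*
 * The relocation stub and the variables below live in
 * arch/mips/kernel/relocate_kernel.S.  kexec_start_address and
 * kexec_indirection_page are written before the stub is copied to the
 * control page, so the copy carries the final values.
 */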
extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;

extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;

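/* Kernel virtual address of the control page the relocation stub runs from. */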
static unsigned long reboot_code_buffer;

#ifdef CONFIG_SMP
static void (*relocated_kexec_smp_wait)(void *);

atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
void (*_crash_smp_send_stop)(void) = NULL;
#endif

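/*
 * Optional hooks; platform code may point these at its own shutdown
 * routines to run before (or instead of) the generic handling below.
 */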
void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;

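/* Dump the kimage layout (type, entry point, segments) at pr_debug level. */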
static void kexec_image_info(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("kexec kimage info:\n");
	pr_debug("  type:        %d\n", kimage->type);
	pr_debug("  start:       %lx\n", kimage->start);
	pr_debug("  head:        %lx\n", kimage->head);
	pr_debug("  nr_segments: %lu\n", kimage->nr_segments);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("    segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			(unsigned long)kimage->segment[i].memsz,
			(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
	}
}

#ifdef CONFIG_UHI_BOOT

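/*
 * UHI boot protocol: scan the segments for a flat device tree and, if
 * one is found, hand its virtual address to the new kernel through
 * kexec_args (a0 = -2 marks a DTB hand-off, a1 = DTB address).
 */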
static int uhi_machine_kexec_prepare(struct kimage *kimage)
{
	int i;

	/*
	 * If no DTB is passed to the new kernel, the kexec tool creates a
	 * flat device tree that carries the modified command line for the
	 * new kernel.  Find it among the image segments.
	 */
	for (i = 0; i < kimage->nr_segments; i++) {
		struct fdt_header fdt;

		if (kimage->segment[i].memsz <= sizeof(fdt))
			continue;

		if (copy_from_user(&fdt, kimage->segment[i].buf, sizeof(fdt)))
			continue;

		if (fdt_check_header(&fdt))
			continue;

		kexec_args[0] = -2;
		kexec_args[1] = (unsigned long)
			phys_to_virt((unsigned long)kimage->segment[i].mem);
		break;
	}

	return 0;
}

int (*_machine_kexec_prepare)(struct kimage *) = uhi_machine_kexec_prepare;

#else

int (*_machine_kexec_prepare)(struct kimage *) = NULL;

#endif /* CONFIG_UHI_BOOT */

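/*
 * Called from the generic kexec_load path before the segments are
 * loaded.  On SMP, refuse the image unless the platform provides a way
 * to park or relaunch the secondary CPUs; then defer to the optional
 * platform prepare hook.
 */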
int
machine_kexec_prepare(struct kimage *kimage)
{
#ifdef CONFIG_SMP
	if (!kexec_nonboot_cpu_func())
		return -EINVAL;
#endif

	kexec_image_info(kimage);

	if (_machine_kexec_prepare)
		return _machine_kexec_prepare(kimage);

	return 0;
}

void
machine_kexec_cleanup(struct kimage *kimage)
{
}

#ifdef CONFIG_SMP
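/*
 * IPI callback run on each secondary CPU during machine_shutdown():
 * mark the CPU offline, spin with interrupts disabled until the kexec
 * CPU sets kexec_ready_to_reboot, then enter the relocated code via
 * kexec_reboot().
 */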
static void kexec_shutdown_secondary(void *param)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	/* We won't be sent IPIs any more. */
	set_cpu_online(cpu, false);

	local_irq_disable();
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	kexec_reboot();

	/* NOTREACHED */
}
#endif

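/*
 * Normal (non-crash) kexec shutdown: run the platform hook, then pull
 * the secondary CPUs into kexec_shutdown_secondary() and wait until
 * this CPU is the only one left online.
 */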
void
machine_shutdown(void)
{
	if (_machine_kexec_shutdown)
		_machine_kexec_shutdown();

#ifdef CONFIG_SMP
	smp_call_function(kexec_shutdown_secondary, NULL, 0);

	while (num_online_cpus() > 1) {
		cpu_relax();
		mdelay(1);
	}
#endif
}

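/* Crash (kdump) shutdown: use the platform override if one is registered. */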
void
machine_crash_shutdown(struct pt_regs *regs)
{
	if (_machine_crash_shutdown)
		_machine_crash_shutdown(regs);
	else
		default_machine_crash_shutdown(regs);
}

#ifdef CONFIG_SMP
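/*
 * Helper a platform's SMP ops can use to send a secondary CPU into the
 * relocated kexec_smp_wait stub: flush the icache over the copied code,
 * then jump to it.
 */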
void kexec_nonboot_cpu_jump(void)
{
	local_flush_icache_range((unsigned long)relocated_kexec_smp_wait,
				 reboot_code_buffer + relocate_new_kernel_size);

	relocated_kexec_smp_wait(NULL);
}
#endif

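/*
 * Final hand-off into the relocated code.  Reached by the CPU running
 * machine_kexec() and, once kexec_ready_to_reboot is set, by the parked
 * secondary CPUs.
 */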
void kexec_reboot(void)
{
	void (*do_kexec)(void) __noreturn;

	/*
	 * We know we were online, and there will be no incoming IPIs at
	 * this point. Mark online again before rebooting so that the crash
	 * analysis tool will see us correctly.
	 */
	set_cpu_online(smp_processor_id(), true);

	/* Ensure remote CPUs observe that we're online before rebooting. */
	smp_mb__after_atomic();

#ifdef CONFIG_SMP
	if (smp_processor_id() > 0) {
		/*
		 * Secondary CPUs must enter the relocated kexec_smp_wait
		 * loop here instead of just spinning with cpu_relax() or
		 * wait, so that an SMP kexec'd kernel can bring them back
		 * up.  Kdump usually boots a UP capture kernel, but a
		 * regular kexec'd kernel may well be SMP.
		 */
		kexec_nonboot_cpu();

		/* NOTREACHED */
	}
#endif

	/*
	 * Make sure we get correct instructions written by the
	 * machine_kexec() CPU.
	 */
	local_flush_icache_range(reboot_code_buffer,
				 reboot_code_buffer + relocate_new_kernel_size);

	do_kexec = (void *)reboot_code_buffer;
	do_kexec();
}

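/*
 * Run on the CPU performing the kexec: stage the relocation stub in the
 * control page, convert the kexec page list to virtual addresses the
 * stub can walk, release the waiting secondary CPUs, then jump to the
 * stub.
 */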
void
machine_kexec(struct kimage *image)
{
	unsigned long entry;
	unsigned long *ptr;

	reboot_code_buffer =
	  (unsigned long)page_address(image->control_code_page);

	kexec_start_address =
		(unsigned long) phys_to_virt(image->start);

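	/*
	 * For a normal kexec image, head holds the physical address of
	 * the first indirection page.  A crash image has no indirection
	 * list, so point the stub at head itself, which then only holds
	 * the IND_DONE terminator.
	 */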
	if (image->type == KEXEC_TYPE_DEFAULT) {
		kexec_indirection_page =
			(unsigned long) phys_to_virt(image->head & PAGE_MASK);
	} else {
		kexec_indirection_page = (unsigned long)&image->head;
	}

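	/* Stage the relocation stub in the control page. */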
	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);

	/*
	 * The generic kexec code builds a page list with physical
	 * addresses.  They are directly accessible through KSEG0 (or
	 * CKSEG0 or XKPHYS on a 64-bit system), hence the phys_to_virt()
	 * calls below.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
	       phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}

	/* Mark offline BEFORE disabling local irq. */
	set_cpu_online(smp_processor_id(), false);

	/* We do not want to be bothered by interrupts from here on. */
	local_irq_disable();

	pr_info("Will call new kernel at %08lx\n", image->start);
	pr_info("Bye ...\n");
	/* Make reboot code buffer available to the boot CPU. */
	__flush_cache_all();
#ifdef CONFIG_SMP
	/* All secondary CPUs may now jump to the relocated kexec_smp_wait loop. */
	relocated_kexec_smp_wait = reboot_code_buffer +
		(void *)(kexec_smp_wait - relocate_new_kernel);
	smp_wmb();
	atomic_set(&kexec_ready_to_reboot, 1);
#endif
	kexec_reboot();
}