xref: /freebsd/sys/arm64/arm64/kexec_support.c (revision 1ae25866767d686067fe6678b62681b7a8f0d361)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/kexec.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

#include <machine/armreg.h>
#include <machine/pmap.h>
#include <machine/pte.h>

/*
 * Idea behind this:
 *
 * kexec_load_md():
 * - Update the boot page tables (identity map) to include all pages needed
 *   before disabling the MMU.
 *
 * kexec_reboot_md():
 * - Copy pages into their target segments
 * - Do "other stuff"
 * - Does not return
 */

extern pt_entry_t pagetable_l0_ttbr0_bootstrap[];
extern unsigned long initstack_end[];
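/*
 * switch_stack(stack, fn, arg) switches the stack pointer to 'stack' and
 * calls fn(filler, filler, arg); as used below it is expected never to
 * return.
 */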
void switch_stack(void *, void (*)(void *, void *, struct kexec_image *), void *);

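/*
 * SCTLR_EL1 value for running with the MMU off: the M (MMU), C (dcache), and
 * I (icache) enable bits are clear, and only RES1 and default-behavior
 * control bits remain set.
 */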
#define	SCTLR_EL1_NO_MMU	(SCTLR_RES1 | SCTLR_LSMAOE | SCTLR_nTLSMD | \
		SCTLR_EIS | SCTLR_TSCXT | SCTLR_EOS)
#define	vm_page_offset(m)	((vm_offset_t)(m) - vm_page_base)
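/*
 * Translate a vm_page_t (a kernel virtual pointer into vm_page_array) into
 * its physical-address equivalent so the page array can still be walked
 * once the MMU is off.
 */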
static inline vm_page_t
phys_vm_page(vm_page_t m, vm_offset_t vm_page_v, vm_paddr_t vm_page_p)
{
	return ((vm_page_t)((vm_offset_t)m - vm_page_v + vm_page_p));
}

/* First 2 args are filler for switch_stack() */
static void __aligned(16) __dead2
kexec_reboot_bottom(void *arg1 __unused, void *arg2 __unused,
    struct kexec_image *image)
{
	void (*e)(void) = (void *)image->entry;
	vm_offset_t	vm_page_base = (vm_offset_t)vm_page_array;
	vm_paddr_t	vm_page_phys = pmap_kextract((vm_offset_t)vm_page_array);
	struct kexec_segment_stage *phys_segs =
	    (void *)pmap_kextract((vm_offset_t)&image->segments);
	vm_paddr_t from_pa, to_pa;
	vm_size_t size;
	vm_page_t	first, m, mp;
	struct pctrie_iter pct_i;

	/*
	 * Create a linked list of all pages in the object before we disable
	 * the MMU.  Once the MMU is disabled we can't use the vm_radix
	 * iterators, as they rely on virtual address pointers.
	 */
	first = NULL;
	vm_radix_iter_init(&pct_i, &image->map_obj->rtree);
	VM_RADIX_FORALL(m, &pct_i) {
		if (first == NULL)
			first = m;
		else
			SLIST_INSERT_AFTER(mp, m, plinks.s.ss);
		mp = m;
	}

	/*
	 * We're running out of the identity map now; disable the MMU before
	 * we continue.  The page tables may be overwritten by the copies
	 * below, which would be very bad if we were running with the MMU
	 * enabled.
	 */
	WRITE_SPECIALREG(sctlr_el1, SCTLR_EL1_NO_MMU);
	isb();
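	/* From here on, all pointers we dereference are physical addresses. */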
	for (int i = 0; i < KEXEC_SEGMENT_MAX; i++) {
		if (phys_segs[i].size == 0)
			break;
		to_pa = phys_segs[i].target;
		/* Copy the segment here... */
		for (vm_page_t p = phys_segs[i].first_page;
		    p != NULL && to_pa - phys_segs[i].target < phys_segs[i].size;
		    p = SLIST_NEXT(p, plinks.s.ss)) {
			p = phys_vm_page(p, vm_page_base, vm_page_phys);
			from_pa = p->phys_addr;
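			/* Page already at its target; skip the copy. */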
			if (p->phys_addr == to_pa) {
				to_pa += PAGE_SIZE;
				continue;
			}
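			/*
			 * Copy a page one register-sized word at a time; we
			 * can't call memcpy() here, since it lives at a
			 * kernel virtual address that is no longer reachable.
			 */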
			for (size = PAGE_SIZE / sizeof(register_t);
			    size > 0; --size) {
				*(register_t *)to_pa = *(register_t *)from_pa;
				to_pa += sizeof(register_t);
				from_pa += sizeof(register_t);
			}
		}
	}
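	/* Make the new kernel visible to instruction fetch and jump to it. */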
	invalidate_icache();
	e();
	while (1)
		;
}

void
kexec_reboot_md(struct kexec_image *image)
{
	uintptr_t ptr;
	register_t reg;

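	/*
	 * Invalidate the data cache over each target range so that stale
	 * lines can't shadow, or later overwrite, the uncached copies the
	 * trampoline is about to make.
	 */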
	for (int i = 0; i < KEXEC_SEGMENT_MAX; i++) {
		if (image->segments[i].size > 0)
			cpu_dcache_inv_range(
			    (void *)PHYS_TO_DMAP(image->segments[i].target),
			    image->segments[i].size);
	}
	ptr = pmap_kextract((vm_offset_t)kexec_reboot_bottom);
	serror_disable();

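	/*
	 * Install the bootstrap identity map in TTBR0 so the trampoline can
	 * keep executing from its physical address while the MMU is still on.
	 */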
	reg = pmap_kextract((vm_offset_t)pagetable_l0_ttbr0_bootstrap);
	set_ttbr0(reg);
	cpu_tlb_flushID();

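	/*
	 * Hand off to the trampoline at its physical address, running on the
	 * identity-mapped boot stack; it does not return.
	 */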
	typeof(kexec_reboot_bottom) *p = (void *)ptr;
	switch_stack((void *)pmap_kextract((vm_offset_t)initstack_end),
	    p, image);
	while (1)
		;
}

int
kexec_load_md(struct kexec_image *image)
{
	vm_paddr_t tmp;
	pt_entry_t *pte;

	/* Create L2 blocks for the trampoline.  L0/L1 come from startup. */

	/*
	 * The two pages immediately before pagetable_l0_ttbr0_bootstrap hold
	 * the bootstrap L2 tables, so back up to the start of those.
	 */
	pte = pagetable_l0_ttbr0_bootstrap;
	pte -= (Ln_ENTRIES * 2);	/* move to start of the L2 pages */

	/*
	 * Populate the identity map with the symbols we know we'll need
	 * before we turn off the MMU.  L2 block entries map 2MB regions, so
	 * mask each physical address down to its block base.
	 */
	tmp = pmap_kextract((vm_offset_t)kexec_reboot_bottom);
	pte[pmap_l2_index(tmp)] =
	    (tmp & ~L2_OFFSET) | L2_BLOCK | ATTR_AF | ATTR_S1_UXN;
	tmp = pmap_kextract((vm_offset_t)initstack_end);
	pte[pmap_l2_index(tmp)] =
	    (tmp & ~L2_OFFSET) | L2_BLOCK | ATTR_AF | ATTR_S1_UXN;
	/* We'll need vm_page_array for doing offset calculations. */
	tmp = pmap_kextract((vm_offset_t)&vm_page_array);
	pte[pmap_l2_index(tmp)] =
	    (tmp & ~L2_OFFSET) | L2_BLOCK | ATTR_AF | ATTR_S1_UXN;

	return (0);
}