/*-
 * Copyright (c) 2004 Marcel Moolenaar
 * Copyright (c) 2001 Doug Rabson
 * Copyright (c) 2016 The FreeBSD Foundation
 * Copyright (c) 2017 Andrew Turner
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/efi.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <machine/pte.h>
#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

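/*
 * Bookkeeping for the EFI 1:1 map: the VM object supplying page-table
 * pages, the next free page index within that object, the root (L0)
 * table, and the TTBR0 value that installs the map.
 */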
static vm_object_t obj_1t1_pt;
static vm_pindex_t efi_1t1_idx;
static pd_entry_t *efi_l0;
static uint64_t efi_ttbr0;

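/*
 * Tear down the EFI 1:1 map, freeing every page-table page backing it
 * and resetting the bookkeeping so that the map can be rebuilt.
 */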
void
efi_destroy_1t1_map(void)
{
	vm_page_t m;

	if (obj_1t1_pt != NULL) {
		VM_OBJECT_RLOCK(obj_1t1_pt);
		/*
		 * The pages were grabbed wired; reset each reference count
		 * to the bare object reference so that deallocating the
		 * object frees them, and remove them from the wired count.
		 */
		TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
			m->ref_count = VPRC_OBJREF;
		vm_wire_sub(obj_1t1_pt->resident_page_count);
		VM_OBJECT_RUNLOCK(obj_1t1_pt);
		vm_object_deallocate(obj_1t1_pt);
	}

	obj_1t1_pt = NULL;
	efi_1t1_idx = 0;
	efi_l0 = NULL;
	efi_ttbr0 = 0;
}

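/*
 * Allocate the next wired, zero-filled page from the page-table object;
 * these pages back the individual levels of the EFI map.
 */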
static vm_page_t
efi_1t1_page(void)
{

	return (vm_page_grab(obj_1t1_pt, efi_1t1_idx++, VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO));
}

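/*
 * Walk the EFI page table for "va", allocating any missing intermediate
 * table pages along the way, and return a pointer to the final L3 (4 KiB
 * page) entry, which must not already be valid.  With 4 KiB granules the
 * levels index bits 47:39 (L0), 38:30 (L1), 29:21 (L2) and 20:12 (L3) of
 * the virtual address.
 */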
static pt_entry_t *
efi_1t1_l3(vm_offset_t va)
{
	pd_entry_t *l0, *l1, *l2;
	pt_entry_t *l3;
	vm_pindex_t l0_idx, l1_idx, l2_idx;
	vm_page_t m;
	vm_paddr_t mphys;

	l0_idx = pmap_l0_index(va);
	l0 = &efi_l0[l0_idx];
	if (*l0 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l0 = PHYS_TO_PTE(mphys) | L0_TABLE;
	} else {
		mphys = PTE_TO_PHYS(*l0);
	}

	l1 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l1_idx = pmap_l1_index(va);
	l1 += l1_idx;
	if (*l1 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l1 = PHYS_TO_PTE(mphys) | L1_TABLE;
	} else {
		mphys = PTE_TO_PHYS(*l1);
	}

	l2 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l2_idx = pmap_l2_index(va);
	l2 += l2_idx;
	if (*l2 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l2 = PHYS_TO_PTE(mphys) | L2_TABLE;
	} else {
		mphys = PTE_TO_PHYS(*l2);
	}

	l3 = (pt_entry_t *)PHYS_TO_DMAP(mphys);
	l3 += pmap_l3_index(va);
	KASSERT(*l3 == 0, ("%s: Already mapped: va %#jx *pt %#jx", __func__,
	    va, *l3));

	return (l3);
}

/*
 * Map a physical address from EFI runtime space into KVA space.  Returns 0
 * to indicate a failed mapping so that the caller may handle the error.
 */
vm_offset_t
efi_phys_to_kva(vm_paddr_t paddr)
{
	if (PHYS_IN_DMAP(paddr))
		return (PHYS_TO_DMAP(paddr));

	/* TODO: Map memory not in the DMAP */

	return (0);
}

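/*
 * A hypothetical caller (for illustration only; "table_phys" is not a
 * real variable in this file) would translate before dereferencing:
 *
 *	va = efi_phys_to_kva(table_phys);
 *	if (va == 0)
 *		return (ENOMEM);
 */
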
/*
 * Create the 1:1 virtual to physical map for EFI runtime services.
 */
bool
efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
{
	struct efi_md *p;
	pt_entry_t *l3, l3_attr;
	vm_offset_t va;
	vm_page_t efi_l0_page;
	uint64_t idx;
	int i, mode;

	/*
	 * Size the object to an upper bound on the number of page-table
	 * pages a fully populated map could ever need.
	 */
	obj_1t1_pt = vm_pager_allocate(OBJT_PHYS, NULL, L0_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES + L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES * Ln_ENTRIES,
	    VM_PROT_ALL, 0, NULL);
	VM_OBJECT_WLOCK(obj_1t1_pt);
	efi_l0_page = efi_1t1_page();
	VM_OBJECT_WUNLOCK(obj_1t1_pt);
	efi_l0 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_l0_page));
	efi_ttbr0 = ASID_TO_OPERAND(ASID_RESERVED_FOR_EFI) |
	    VM_PAGE_TO_PHYS(efi_l0_page);

	for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p,
	    descsz)) {
		if ((p->md_attr & EFI_MD_ATTR_RT) == 0)
			continue;
		if (p->md_virt != 0 && p->md_virt != p->md_phys) {
			if (bootverbose)
				printf("EFI Runtime entry %d is mapped\n", i);
			goto fail;
		}
		if ((p->md_phys & EFI_PAGE_MASK) != 0) {
			if (bootverbose)
				printf("EFI Runtime entry %d is not aligned\n",
				    i);
			goto fail;
		}
		if (p->md_phys + p->md_pages * EFI_PAGE_SIZE < p->md_phys ||
		    p->md_phys + p->md_pages * EFI_PAGE_SIZE >=
		    VM_MAXUSER_ADDRESS) {
			printf("EFI Runtime entry %d is not mappable for RT: "
			    "base %#016jx %#jx pages\n",
			    i, (uintmax_t)p->md_phys,
			    (uintmax_t)p->md_pages);
			goto fail;
		}
		if ((p->md_attr & EFI_MD_ATTR_WB) != 0)
			mode = VM_MEMATTR_WRITE_BACK;
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			mode = VM_MEMATTR_WRITE_THROUGH;
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			mode = VM_MEMATTR_WRITE_COMBINING;
		else
			mode = VM_MEMATTR_DEVICE;

		if (bootverbose) {
			printf("MAP %lx mode %x pages %lu\n",
			    p->md_phys, mode, p->md_pages);
		}

		l3_attr = ATTR_AF | pmap_sh_attr | ATTR_S1_IDX(mode) |
		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_nG | L3_PAGE;
		if (mode == VM_MEMATTR_DEVICE ||
		    (p->md_attr & EFI_MD_ATTR_XP) != 0)
			l3_attr |= ATTR_S1_XN;

		/*
		 * EFI pages are always 4 KiB; step the descriptor index by
		 * the number of EFI pages per kernel page.
		 */
		VM_OBJECT_WLOCK(obj_1t1_pt);
		for (va = p->md_phys, idx = 0; idx < p->md_pages;
		    idx += (PAGE_SIZE / EFI_PAGE_SIZE), va += PAGE_SIZE) {
			l3 = efi_1t1_l3(va);
			*l3 = va | l3_attr;
		}
		VM_OBJECT_WUNLOCK(obj_1t1_pt);
	}

	return (true);
fail:
	efi_destroy_1t1_map();
	return (false);
}

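/*
 * Prepare this CPU for a runtime-services call: disable page faults and
 * install the EFI page table.  The caller must already be in a critical
 * section.
 */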
int
efi_arch_enter(void)
{

	CRITICAL_ASSERT(curthread);
	curthread->td_md.md_efirt_dis_pf = vm_fault_disable_pagefaults();

	/*
	 * Temporarily switch to EFI's page table.  However, we leave curpmap
	 * unchanged in order to prevent its ASID from being reclaimed before
	 * we switch back to its page table in efi_arch_leave().
	 */
	set_ttbr0(efi_ttbr0);
	if (PCPU_GET(bcast_tlbi_workaround) != 0)
		invalidate_local_icache();

	return (0);
}

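/*
 * Undo efi_arch_enter(): recover the per-CPU pointer, switch back to the
 * kernel's page table, and re-enable page faults.
 */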
void
efi_arch_leave(void)
{

	/*
	 * Restore the pcpu pointer.  Some UEFI implementations trash it and
	 * we don't save it before calling into them, so we must restore it
	 * here, after returning to the kernel context.  As reading curpmap
	 * accesses x18, restore it before loading the pmap pointer.
	 */
	__asm __volatile(
	    "mrs x18, tpidr_el1	\n"
	);
	set_ttbr0(pmap_to_ttbr0(PCPU_GET(curpmap)));
	if (PCPU_GET(bcast_tlbi_workaround) != 0)
		invalidate_local_icache();
	vm_fault_enable_pagefaults(curthread->td_md.md_efirt_dis_pf);
}
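
/*
 * A minimal sketch (not the in-tree MI code; efi_rt_call() is a
 * hypothetical stand-in for the actual service invocation) of how a
 * runtime-services call is expected to bracket these hooks:
 *
 *	critical_enter();
 *	if (efi_arch_enter() == 0) {
 *		status = efi_rt_call(...);
 *		efi_arch_leave();
 *	}
 *	critical_exit();
 */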