xref: /freebsd/sys/arm/include/pmap_var.h (revision 6ef644f5889afbd0f681b08ed1a2f369524af83e)
1 /*-
2  * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
3  * Copyright 2014 Michal Meloun <meloun@miracle.cz>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #ifndef _MACHINE_PMAP_VAR_H_
29 #define _MACHINE_PMAP_VAR_H_
30 
31 #include <machine/pte.h>
32 
33 /*
34  *  Various PMAP defines, exports, and inline functions
35  *  definitions also usable in other MD code.
36  */
37 
/*  The number of pages in the L1 page table. */
39 #define NPG_IN_PT1	(NB_IN_PT1 / PAGE_SIZE)
40 
/*  The number of L2 page tables in a page. */
42 #define NPT2_IN_PG	(PAGE_SIZE / NB_IN_PT2)
43 
/*  The number of L2 page table entries in a page. */
45 #define NPTE2_IN_PG	(NPT2_IN_PG * NPTE2_IN_PT2)
46 
47 #ifdef _KERNEL
48 
/*
 *  An L2 page table page contains NPT2_IN_PG L2 page tables. Masking
 *  pte1_idx by PT2PG_MASK gives the index of the associated L2 page table
 *  within the page. The PT2PG_SHIFT definition depends strictly on
 *  NPT2_IN_PG; i.e., (1 << PT2PG_SHIFT) == NPT2_IN_PG must be fulfilled.
 */
55 #define PT2PG_SHIFT	2
56 #define PT2PG_MASK	((1 << PT2PG_SHIFT) - 1)
57 
/*
 *  A PT2TAB holds all allocated L2 page table pages in a pmap.
 *  Right shifting a virtual address by PT2TAB_SHIFT gives the index of the
 *  L2 page table page in PT2TAB which holds the address mapping.
 */
63 #define PT2TAB_ENTRIES  (NPTE1_IN_PT1 / NPT2_IN_PG)
64 #define PT2TAB_SHIFT	(PTE1_SHIFT + PT2PG_SHIFT)
65 
/*
 *  All allocated L2 page table pages in a pmap are mapped into PT2MAP space.
 *  Right shifting a virtual address by PT2MAP_SHIFT gives the index of the
 *  PTE2 which maps the address.
 */
71 #define PT2MAP_SIZE	(NPTE1_IN_PT1 * NB_IN_PT2)
72 #define PT2MAP_SHIFT	PTE2_SHIFT
73 
74 extern pt1_entry_t *kern_pt1;
75 extern pt2_entry_t *kern_pt2tab;
76 extern pt2_entry_t *PT2MAP;
77 
78 /*
79  *  Virtual interface for L1 page table management.
80  */
81 
82 static __inline u_int
83 pte1_index(vm_offset_t va)
84 {
85 
86 	return (va >> PTE1_SHIFT);
87 }
88 
89 static __inline pt1_entry_t *
90 pte1_ptr(pt1_entry_t *pt1, vm_offset_t va)
91 {
92 
93 	return (pt1 + pte1_index(va));
94 }
95 
96 static __inline vm_offset_t
97 pte1_trunc(vm_offset_t va)
98 {
99 
100 	return (va & PTE1_FRAME);
101 }
102 
103 static __inline vm_offset_t
104 pte1_roundup(vm_offset_t va)
105 {
106 
107 	return ((va + PTE1_OFFSET) & PTE1_FRAME);
108 }
109 
110 /*
111  *  Virtual interface for L1 page table entries management.
112  *
113  *  XXX: Some of the following functions now with a synchronization barrier
114  *  are called in a loop, so it could be useful to have two versions of them.
115  *  One with the barrier and one without the barrier. In this case, pure
116  *  barrier pte1_sync() should be implemented as well.
117  */
/*
 *  Publish a just-written L1 page table entry to the hardware table walker:
 *  issue a data synchronization barrier and, when the walker does not snoop
 *  the data cache, clean the cache line holding the entry to the point of
 *  unification.
 */
static __inline void
pte1_sync(pt1_entry_t *pte1p)
{

	dsb();
#ifndef PMAP_PTE_NOCACHE
	/* The cache clean is needed only when the table walk is not coherent. */
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte1p, sizeof(*pte1p));
#endif
}
128 
/*
 *  Like pte1_sync(), but for a range of size bytes of L1 entries starting
 *  at pte1p. Used after a batch of entries has been written.
 */
static __inline void
pte1_sync_range(pt1_entry_t *pte1p, vm_size_t size)
{

	dsb();
#ifndef PMAP_PTE_NOCACHE
	/* The cache clean is needed only when the table walk is not coherent. */
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte1p, size);
#endif
}
139 
/*
 *  Store an L1 page table entry. The dmb() orders all earlier memory
 *  stores before the entry itself is written, and pte1_sync() then makes
 *  the new entry visible to the hardware table walker. The ordering of
 *  these three steps is essential.
 */
static __inline void
pte1_store(pt1_entry_t *pte1p, pt1_entry_t pte1)
{

	dmb();
	*pte1p = pte1;
	pte1_sync(pte1p);
}
148 
/*
 *  Invalidate an L1 page table entry by storing zero into it, with full
 *  store ordering and synchronization (see pte1_store()).
 */
static __inline void
pte1_clear(pt1_entry_t *pte1p)
{

	pte1_store(pte1p, 0);
}
155 
/*
 *  Clear the given bit(s) in an L1 page table entry and synchronize the
 *  entry for the table walker. Note that the read-modify-write is a plain
 *  (non-atomic) C operation; callers must provide any needed locking.
 */
static __inline void
pte1_clear_bit(pt1_entry_t *pte1p, uint32_t bit)
{

	*pte1p &= ~bit;
	pte1_sync(pte1p);
}
163 
164 static __inline boolean_t
165 pte1_is_link(pt1_entry_t pte1)
166 {
167 
168 	return ((pte1 & L1_TYPE_MASK) == L1_TYPE_C);
169 }
170 
171 static __inline int
172 pte1_is_section(pt1_entry_t pte1)
173 {
174 
175 	return ((pte1 & L1_TYPE_MASK) == L1_TYPE_S);
176 }
177 
178 static __inline boolean_t
179 pte1_is_dirty(pt1_entry_t pte1)
180 {
181 
182 	return ((pte1 & (PTE1_NM | PTE1_RO)) == 0);
183 }
184 
185 static __inline boolean_t
186 pte1_is_global(pt1_entry_t pte1)
187 {
188 
189 	return ((pte1 & PTE1_NG) == 0);
190 }
191 
192 static __inline boolean_t
193 pte1_is_valid(pt1_entry_t pte1)
194 {
195 	int l1_type;
196 
197 	l1_type = pte1 & L1_TYPE_MASK;
198 	return ((l1_type == L1_TYPE_C) || (l1_type == L1_TYPE_S));
199 }
200 
201 static __inline boolean_t
202 pte1_is_wired(pt1_entry_t pte1)
203 {
204 
205 	return (pte1 & PTE1_W);
206 }
207 
208 static __inline pt1_entry_t
209 pte1_load(pt1_entry_t *pte1p)
210 {
211 	pt1_entry_t pte1;
212 
213 	pte1 = *pte1p;
214 	return (pte1);
215 }
216 
/*
 *  Clear an L1 page table entry and return its previous value. The read
 *  and the clear are separate (non-atomic) operations; the cleared entry
 *  is synchronized for the table walker before returning.
 */
static __inline pt1_entry_t
pte1_load_clear(pt1_entry_t *pte1p)
{
	pt1_entry_t opte1;

	opte1 = *pte1p;
	*pte1p = 0;
	pte1_sync(pte1p);
	return (opte1);
}
227 
/*
 *  Set the given bit(s) in an L1 page table entry and synchronize the
 *  entry for the table walker. The read-modify-write is a plain
 *  (non-atomic) C operation; callers must provide any needed locking.
 */
static __inline void
pte1_set_bit(pt1_entry_t *pte1p, uint32_t bit)
{

	*pte1p |= bit;
	pte1_sync(pte1p);
}
235 
236 static __inline vm_paddr_t
237 pte1_pa(pt1_entry_t pte1)
238 {
239 
240 	return ((vm_paddr_t)(pte1 & PTE1_FRAME));
241 }
242 
243 static __inline vm_paddr_t
244 pte1_link_pa(pt1_entry_t pte1)
245 {
246 
247 	return ((vm_paddr_t)(pte1 & L1_C_ADDR_MASK));
248 }
249 
250 /*
251  *  Virtual interface for L2 page table entries management.
252  *
253  *  XXX: Some of the following functions now with a synchronization barrier
254  *  are called in a loop, so it could be useful to have two versions of them.
255  *  One with the barrier and one without the barrier.
256  */
257 
/*
 *  Publish a just-written L2 page table entry to the hardware table walker:
 *  issue a data synchronization barrier and, when the walker does not snoop
 *  the data cache, clean the cache line holding the entry to the point of
 *  unification.
 */
static __inline void
pte2_sync(pt2_entry_t *pte2p)
{

	dsb();
#ifndef PMAP_PTE_NOCACHE
	/* The cache clean is needed only when the table walk is not coherent. */
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte2p, sizeof(*pte2p));
#endif
}
268 
/*
 *  Like pte2_sync(), but for a range of size bytes of L2 entries starting
 *  at pte2p. Used after a batch of entries has been written.
 */
static __inline void
pte2_sync_range(pt2_entry_t *pte2p, vm_size_t size)
{

	dsb();
#ifndef PMAP_PTE_NOCACHE
	/* The cache clean is needed only when the table walk is not coherent. */
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte2p, size);
#endif
}
279 
/*
 *  Store an L2 page table entry. The dmb() orders all earlier memory
 *  stores before the entry itself is written, and pte2_sync() then makes
 *  the new entry visible to the hardware table walker. The ordering of
 *  these three steps is essential.
 */
static __inline void
pte2_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
{

	dmb();
	*pte2p = pte2;
	pte2_sync(pte2p);
}
288 
/*
 *  Invalidate an L2 page table entry by storing zero into it, with full
 *  store ordering and synchronization (see pte2_store()).
 */
static __inline void
pte2_clear(pt2_entry_t *pte2p)
{

	pte2_store(pte2p, 0);
}
295 
/*
 *  Clear the given bit(s) in an L2 page table entry and synchronize the
 *  entry for the table walker. The read-modify-write is a plain
 *  (non-atomic) C operation; callers must provide any needed locking.
 */
static __inline void
pte2_clear_bit(pt2_entry_t *pte2p, uint32_t bit)
{

	*pte2p &= ~bit;
	pte2_sync(pte2p);
}
303 
304 static __inline boolean_t
305 pte2_is_dirty(pt2_entry_t pte2)
306 {
307 
308 	return ((pte2 & (PTE2_NM | PTE2_RO)) == 0);
309 }
310 
311 static __inline boolean_t
312 pte2_is_global(pt2_entry_t pte2)
313 {
314 
315 	return ((pte2 & PTE2_NG) == 0);
316 }
317 
318 static __inline boolean_t
319 pte2_is_valid(pt2_entry_t pte2)
320 {
321 
322 	return (pte2 & PTE2_V);
323 }
324 
325 static __inline boolean_t
326 pte2_is_wired(pt2_entry_t pte2)
327 {
328 
329 	return (pte2 & PTE2_W);
330 }
331 
332 static __inline pt2_entry_t
333 pte2_load(pt2_entry_t *pte2p)
334 {
335 	pt2_entry_t pte2;
336 
337 	pte2 = *pte2p;
338 	return (pte2);
339 }
340 
/*
 *  Clear an L2 page table entry and return its previous value. The read
 *  and the clear are separate (non-atomic) operations; the cleared entry
 *  is synchronized for the table walker before returning.
 */
static __inline pt2_entry_t
pte2_load_clear(pt2_entry_t *pte2p)
{
	pt2_entry_t opte2;

	opte2 = *pte2p;
	*pte2p = 0;
	pte2_sync(pte2p);
	return (opte2);
}
351 
/*
 *  Set the given bit(s) in an L2 page table entry and synchronize the
 *  entry for the table walker. The read-modify-write is a plain
 *  (non-atomic) C operation; callers must provide any needed locking.
 */
static __inline void
pte2_set_bit(pt2_entry_t *pte2p, uint32_t bit)
{

	*pte2p |= bit;
	pte2_sync(pte2p);
}
359 
360 static __inline void
361 pte2_set_wired(pt2_entry_t *pte2p, boolean_t wired)
362 {
363 
364 	/*
365 	 * Wired bit is transparent for page table walk,
366 	 * so pte2_sync() is not needed.
367 	 */
368 	if (wired)
369 		*pte2p |= PTE2_W;
370 	else
371 		*pte2p &= ~PTE2_W;
372 }
373 
374 static __inline vm_paddr_t
375 pte2_pa(pt2_entry_t pte2)
376 {
377 
378 	return ((vm_paddr_t)(pte2 & PTE2_FRAME));
379 }
380 
381 static __inline u_int
382 pte2_attr(pt2_entry_t pte2)
383 {
384 
385 	return ((u_int)(pte2 & PTE2_ATTR_MASK));
386 }
387 
388 /*
389  *  Virtual interface for L2 page tables mapping management.
390  */
391 
392 static __inline u_int
393 pt2tab_index(vm_offset_t va)
394 {
395 
396 	return (va >> PT2TAB_SHIFT);
397 }
398 
399 static __inline pt2_entry_t *
400 pt2tab_entry(pt2_entry_t *pt2tab, vm_offset_t va)
401 {
402 
403 	return (pt2tab + pt2tab_index(va));
404 }
405 
406 static __inline void
407 pt2tab_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
408 {
409 
410 	pte2_store(pte2p,pte2);
411 }
412 
413 static __inline pt2_entry_t
414 pt2tab_load(pt2_entry_t *pte2p)
415 {
416 
417 	return (pte2_load(pte2p));
418 }
419 
420 static __inline pt2_entry_t
421 pt2tab_load_clear(pt2_entry_t *pte2p)
422 {
423 
424 	return (pte2_load_clear(pte2p));
425 }
426 
427 static __inline u_int
428 pt2map_index(vm_offset_t va)
429 {
430 
431 	return (va >> PT2MAP_SHIFT);
432 }
433 
434 static __inline pt2_entry_t *
435 pt2map_entry(vm_offset_t va)
436 {
437 
438 	return (PT2MAP + pt2map_index(va));
439 }
440 
441 /*
442  *  Virtual interface for pmap structure & kernel shortcuts.
443  */
444 
445 static __inline pt1_entry_t *
446 pmap_pte1(pmap_t pmap, vm_offset_t va)
447 {
448 
449 	return (pte1_ptr(pmap->pm_pt1, va));
450 }
451 
452 static __inline pt1_entry_t *
453 kern_pte1(vm_offset_t va)
454 {
455 
456 	return (pte1_ptr(kern_pt1, va));
457 }
458 
459 static __inline pt2_entry_t *
460 pmap_pt2tab_entry(pmap_t pmap, vm_offset_t va)
461 {
462 
463 	return (pt2tab_entry(pmap->pm_pt2tab, va));
464 }
465 
466 static __inline pt2_entry_t *
467 kern_pt2tab_entry(vm_offset_t va)
468 {
469 
470 	return (pt2tab_entry(kern_pt2tab, va));
471 }
472 
473 static __inline vm_page_t
474 pmap_pt2_page(pmap_t pmap, vm_offset_t va)
475 {
476 	pt2_entry_t pte2;
477 
478 	pte2 = pte2_load(pmap_pt2tab_entry(pmap, va));
479 	return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
480 }
481 
482 static __inline vm_page_t
483 kern_pt2_page(vm_offset_t va)
484 {
485 	pt2_entry_t pte2;
486 
487 	pte2 = pte2_load(kern_pt2tab_entry(va));
488 	return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
489 }
490 
491 #endif	/* _KERNEL */
492 #endif	/* !_MACHINE_PMAP_VAR_H_ */
493