xref: /freebsd/sys/arm/include/pmap_var.h (revision f5e9c916afed4a948fe5c03bfaee038d165e12ab)
1 /*-
2  * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
3  * Copyright 2014 Michal Meloun <meloun@miracle.cz>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD$
28  */
29 
30 #ifndef _MACHINE_PMAP_VAR_H_
31 #define _MACHINE_PMAP_VAR_H_
32 
33 #include <machine/cpu-v6.h>
34 #include <machine/pte-v6.h>
35 /*
36  *  Various PMAP defines, exports, and inline functions
37  *  definitions also usable in other MD code.
38  */
39 
/*  Number of pages making up the L1 page table. */
#define NPG_IN_PT1	(NB_IN_PT1 / PAGE_SIZE)

/*  Number of L2 page tables stored in one page. */
#define NPT2_IN_PG	(PAGE_SIZE / NB_IN_PT2)

/*  Number of L2 page table entries in one page of L2 page tables. */
#define NPTE2_IN_PG	(NPT2_IN_PG * NPTE2_IN_PT2)
48 
49 #ifdef _KERNEL
50 
/*
 *  An L2 page tables page contains NPT2_IN_PG L2 page tables. Masking of
 *  pte1_idx by PT2PG_MASK gives us an index to the associated L2 page table
 *  in a page. The PT2PG_SHIFT definition depends on NPT2_IN_PG strictly.
 *  I.e., (1 << PT2PG_SHIFT) == NPT2_IN_PG must be fulfilled.
 */
#define PT2PG_SHIFT	2
#define PT2PG_MASK	((1 << PT2PG_SHIFT) - 1)
59 
/*
 *  A PT2TAB holds all allocated L2 page table pages in a pmap.
 *  Right shifting of a virtual address by PT2TAB_SHIFT gives us an index
 *  to the L2 page table page in PT2TAB which holds the address mapping.
 */
#define PT2TAB_ENTRIES  (NPTE1_IN_PT1 / NPT2_IN_PG)
#define PT2TAB_SHIFT	(PTE1_SHIFT + PT2PG_SHIFT)
67 
/*
 *  All allocated L2 page table pages in a pmap are mapped into PT2MAP space.
 *  A virtual address right shifted by PT2MAP_SHIFT gives us an index to the
 *  PTE2 which maps the address.
 */
#define PT2MAP_SIZE	(NPTE1_IN_PT1 * NB_IN_PT2)
#define PT2MAP_SHIFT	PTE2_SHIFT

extern pt1_entry_t *kern_pt1;		/* Kernel L1 page table. */
extern pt2_entry_t *kern_pt2tab;	/* Kernel PT2TAB. */
extern pt2_entry_t *PT2MAP;		/* Base of the PT2MAP mapping space. */
79 
80 /*
81  *  Virtual interface for L1 page table management.
82  */
83 
84 static __inline u_int
85 pte1_index(vm_offset_t va)
86 {
87 
88 	return (va >> PTE1_SHIFT);
89 }
90 
91 static __inline pt1_entry_t *
92 pte1_ptr(pt1_entry_t *pt1, vm_offset_t va)
93 {
94 
95 	return (pt1 + pte1_index(va));
96 }
97 
98 static __inline vm_offset_t
99 pte1_trunc(vm_offset_t va)
100 {
101 
102 	return (va & PTE1_FRAME);
103 }
104 
105 static __inline vm_offset_t
106 pte1_roundup(vm_offset_t va)
107 {
108 
109 	return ((va + PTE1_OFFSET) & PTE1_FRAME);
110 }
111 
112 /*
113  *  Virtual interface for L1 page table entries management.
114  *
115  *  XXX: Some of the following functions now with a synchronization barrier
116  *  are called in a loop, so it could be useful to have two versions of them.
117  *  One with the barrier and one without the barrier. In this case, pure
118  *  barrier pte1_sync() should be implemented as well.
119  */
/*
 *  Make a just-written L1 entry visible to the hardware page table walker.
 *  The dsb() orders the preceding store; when the walker does not snoop
 *  the data cache, the entry must also be cleaned to the point of
 *  unification.
 */
static __inline void
pte1_sync(pt1_entry_t *pte1p)
{

	dsb();
#ifndef PMAP_PTE_NOCACHE
	/* Clean the cache line holding the entry for a non-coherent walker. */
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte1p, sizeof(*pte1p));
#endif
}
130 
/*
 *  Same as pte1_sync(), but for a range of 'size' bytes of L1 entries
 *  starting at pte1p.
 */
static __inline void
pte1_sync_range(pt1_entry_t *pte1p, vm_size_t size)
{

	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte1p, size);
#endif
}
141 
/*
 *  Atomically store pte1 into *pte1p with release semantics and then
 *  synchronize the entry for the hardware page table walker.
 */
static __inline void
pte1_store(pt1_entry_t *pte1p, pt1_entry_t pte1)
{

	atomic_store_rel_int(pte1p, pte1);
	pte1_sync(pte1p);
}
149 
/*
 *  Invalidate the L1 entry by storing zero into it (with sync).
 */
static __inline void
pte1_clear(pt1_entry_t *pte1p)
{

	pte1_store(pte1p, 0);
}
156 
/*
 *  Atomically clear 'bit' in the L1 entry and synchronize it for the
 *  hardware page table walker.
 */
static __inline void
pte1_clear_bit(pt1_entry_t *pte1p, uint32_t bit)
{

	atomic_clear_int(pte1p, bit);
	pte1_sync(pte1p);
}
164 
165 static __inline boolean_t
166 pte1_cmpset(pt1_entry_t *pte1p, pt1_entry_t opte1, pt1_entry_t npte1)
167 {
168 	boolean_t ret;
169 
170 	ret = atomic_cmpset_int(pte1p, opte1, npte1);
171 	if (ret) pte1_sync(pte1p);
172 
173 	return (ret);
174 }
175 
176 static __inline boolean_t
177 pte1_is_link(pt1_entry_t pte1)
178 {
179 
180 	return ((pte1 & L1_TYPE_MASK) == L1_TYPE_C);
181 }
182 
183 static __inline int
184 pte1_is_section(pt1_entry_t pte1)
185 {
186 
187 	return ((pte1 & L1_TYPE_MASK) == L1_TYPE_S);
188 }
189 
190 static __inline boolean_t
191 pte1_is_dirty(pt1_entry_t pte1)
192 {
193 
194 	return ((pte1 & (PTE1_NM | PTE1_RO)) == 0);
195 }
196 
197 static __inline boolean_t
198 pte1_is_global(pt1_entry_t pte1)
199 {
200 
201 	return ((pte1 & PTE1_NG) == 0);
202 }
203 
204 static __inline boolean_t
205 pte1_is_valid(pt1_entry_t pte1)
206 {
207 	int l1_type;
208 
209 	l1_type = pte1 & L1_TYPE_MASK;
210 	return ((l1_type == L1_TYPE_C) || (l1_type == L1_TYPE_S));
211 }
212 
213 static __inline boolean_t
214 pte1_is_wired(pt1_entry_t pte1)
215 {
216 
217 	return (pte1 & PTE1_W);
218 }
219 
220 static __inline pt1_entry_t
221 pte1_load(pt1_entry_t *pte1p)
222 {
223 	pt1_entry_t pte1;
224 
225 	pte1 = *pte1p;
226 	return (pte1);
227 }
228 
/*
 *  Atomically read and zero the L1 entry, synchronize the cleared entry
 *  for the hardware page table walker, and return the old value.
 */
static __inline pt1_entry_t
pte1_load_clear(pt1_entry_t *pte1p)
{
	pt1_entry_t opte1;

	opte1 = atomic_readandclear_int(pte1p);
	pte1_sync(pte1p);
	return (opte1);
}
238 
/*
 *  Atomically set 'bit' in the L1 entry and synchronize it for the
 *  hardware page table walker.
 */
static __inline void
pte1_set_bit(pt1_entry_t *pte1p, uint32_t bit)
{

	atomic_set_int(pte1p, bit);
	pte1_sync(pte1p);
}
246 
247 static __inline vm_paddr_t
248 pte1_pa(pt1_entry_t pte1)
249 {
250 
251 	return ((vm_paddr_t)(pte1 & PTE1_FRAME));
252 }
253 
254 static __inline vm_paddr_t
255 pte1_link_pa(pt1_entry_t pte1)
256 {
257 
258 	return ((vm_paddr_t)(pte1 & L1_C_ADDR_MASK));
259 }
260 
261 /*
262  *  Virtual interface for L2 page table entries management.
263  *
264  *  XXX: Some of the following functions now with a synchronization barrier
265  *  are called in a loop, so it could be useful to have two versions of them.
266  *  One with the barrier and one without the barrier.
267  */
268 
/*
 *  Make a just-written L2 entry visible to the hardware page table walker.
 *  The dsb() orders the preceding store; when the walker does not snoop
 *  the data cache, the entry must also be cleaned to the point of
 *  unification.
 */
static __inline void
pte2_sync(pt2_entry_t *pte2p)
{

	dsb();
#ifndef PMAP_PTE_NOCACHE
	/* Clean the cache line holding the entry for a non-coherent walker. */
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte2p, sizeof(*pte2p));
#endif
}
279 
/*
 *  Same as pte2_sync(), but for a range of 'size' bytes of L2 entries
 *  starting at pte2p.
 */
static __inline void
pte2_sync_range(pt2_entry_t *pte2p, vm_size_t size)
{

	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte2p, size);
#endif
}
290 
/*
 *  Atomically store pte2 into *pte2p with release semantics and then
 *  synchronize the entry for the hardware page table walker.
 */
static __inline void
pte2_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
{

	atomic_store_rel_int(pte2p, pte2);
	pte2_sync(pte2p);
}
298 
/*
 *  Invalidate the L2 entry by storing zero into it (with sync).
 */
static __inline void
pte2_clear(pt2_entry_t *pte2p)
{

	pte2_store(pte2p, 0);
}
305 
/*
 *  Atomically clear 'bit' in the L2 entry and synchronize it for the
 *  hardware page table walker.
 */
static __inline void
pte2_clear_bit(pt2_entry_t *pte2p, uint32_t bit)
{

	atomic_clear_int(pte2p, bit);
	pte2_sync(pte2p);
}
313 
314 static __inline boolean_t
315 pte2_cmpset(pt2_entry_t *pte2p, pt2_entry_t opte2, pt2_entry_t npte2)
316 {
317 	boolean_t ret;
318 
319 	ret = atomic_cmpset_int(pte2p, opte2, npte2);
320 	if (ret) pte2_sync(pte2p);
321 
322 	return (ret);
323 }
324 
325 static __inline boolean_t
326 pte2_is_dirty(pt2_entry_t pte2)
327 {
328 
329 	return ((pte2 & (PTE2_NM | PTE2_RO)) == 0);
330 }
331 
332 static __inline boolean_t
333 pte2_is_global(pt2_entry_t pte2)
334 {
335 
336 	return ((pte2 & PTE2_NG) == 0);
337 }
338 
339 static __inline boolean_t
340 pte2_is_valid(pt2_entry_t pte2)
341 {
342 
343 	return (pte2 & PTE2_V);
344 }
345 
346 static __inline boolean_t
347 pte2_is_wired(pt2_entry_t pte2)
348 {
349 
350 	return (pte2 & PTE2_W);
351 }
352 
353 static __inline pt2_entry_t
354 pte2_load(pt2_entry_t *pte2p)
355 {
356 	pt2_entry_t pte2;
357 
358 	pte2 = *pte2p;
359 	return (pte2);
360 }
361 
/*
 *  Atomically read and zero the L2 entry, synchronize the cleared entry
 *  for the hardware page table walker, and return the old value.
 */
static __inline pt2_entry_t
pte2_load_clear(pt2_entry_t *pte2p)
{
	pt2_entry_t opte2;

	opte2 = atomic_readandclear_int(pte2p);
	pte2_sync(pte2p);
	return (opte2);
}
371 
/*
 *  Atomically set 'bit' in the L2 entry and synchronize it for the
 *  hardware page table walker.
 */
static __inline void
pte2_set_bit(pt2_entry_t *pte2p, uint32_t bit)
{

	atomic_set_int(pte2p, bit);
	pte2_sync(pte2p);
}
379 
/*
 *  Set or clear the wired (PTE2_W) bit in the L2 entry according to
 *  'wired'.
 */
static __inline void
pte2_set_wired(pt2_entry_t *pte2p, boolean_t wired)
{

	/*
	 * Wired bit is transparent for page table walk,
	 * so pte2_sync() is not needed.
	 */
	if (wired)
		atomic_set_int(pte2p, PTE2_W);
	else
		atomic_clear_int(pte2p, PTE2_W);
}
393 
394 static __inline vm_paddr_t
395 pte2_pa(pt2_entry_t pte2)
396 {
397 
398 	return ((vm_paddr_t)(pte2 & PTE2_FRAME));
399 }
400 
401 static __inline u_int
402 pte2_attr(pt2_entry_t pte2)
403 {
404 
405 	return ((u_int)(pte2 & PTE2_ATTR_MASK));
406 }
407 
408 /*
409  *  Virtual interface for L2 page tables mapping management.
410  */
411 
412 static __inline u_int
413 pt2tab_index(vm_offset_t va)
414 {
415 
416 	return (va >> PT2TAB_SHIFT);
417 }
418 
419 static __inline pt2_entry_t *
420 pt2tab_entry(pt2_entry_t *pt2tab, vm_offset_t va)
421 {
422 
423 	return (pt2tab + pt2tab_index(va));
424 }
425 
426 static __inline void
427 pt2tab_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
428 {
429 
430 	pte2_store(pte2p,pte2);
431 }
432 
433 static __inline pt2_entry_t
434 pt2tab_load(pt2_entry_t *pte2p)
435 {
436 
437 	return (pte2_load(pte2p));
438 }
439 
440 static __inline pt2_entry_t
441 pt2tab_load_clear(pt2_entry_t *pte2p)
442 {
443 
444 	return (pte2_load_clear(pte2p));
445 }
446 
447 static __inline u_int
448 pt2map_index(vm_offset_t va)
449 {
450 
451 	return (va >> PT2MAP_SHIFT);
452 }
453 
454 static __inline pt2_entry_t *
455 pt2map_entry(vm_offset_t va)
456 {
457 
458 	return (PT2MAP + pt2map_index(va));
459 }
460 
461 /*
462  *  Virtual interface for pmap structure & kernel shortcuts.
463  */
464 
465 static __inline pt1_entry_t *
466 pmap_pte1(pmap_t pmap, vm_offset_t va)
467 {
468 
469 	return (pte1_ptr(pmap->pm_pt1, va));
470 }
471 
472 static __inline pt1_entry_t *
473 kern_pte1(vm_offset_t va)
474 {
475 
476 	return (pte1_ptr(kern_pt1, va));
477 }
478 
479 static __inline pt2_entry_t *
480 pmap_pt2tab_entry(pmap_t pmap, vm_offset_t va)
481 {
482 
483 	return (pt2tab_entry(pmap->pm_pt2tab, va));
484 }
485 
486 static __inline pt2_entry_t *
487 kern_pt2tab_entry(vm_offset_t va)
488 {
489 
490 	return (pt2tab_entry(kern_pt2tab, va));
491 }
492 
493 static __inline vm_page_t
494 pmap_pt2_page(pmap_t pmap, vm_offset_t va)
495 {
496 	pt2_entry_t pte2;
497 
498 	pte2 = pte2_load(pmap_pt2tab_entry(pmap, va));
499 	return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
500 }
501 
502 static __inline vm_page_t
503 kern_pt2_page(vm_offset_t va)
504 {
505 	pt2_entry_t pte2;
506 
507 	pte2 = pte2_load(kern_pt2tab_entry(va));
508 	return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
509 }
510 
511 #endif	/* _KERNEL */
512 #endif	/* !_MACHINE_PMAP_VAR_H_ */
513