/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_VAR_H_
#define _MACHINE_PMAP_VAR_H_

#include <machine/cpu-v6.h>
/*
 *  Various pmap defines, exports, and inline function definitions
 *  also usable in other MD code.
 */

/*  Number of pages in the L1 page table. */
#define NPG_IN_PT1	(NB_IN_PT1 / PAGE_SIZE)

/*  Number of L2 page tables in a page. */
#define NPT2_IN_PG	(PAGE_SIZE / NB_IN_PT2)

/*  Number of L2 page table entries in a page. */
#define NPTE2_IN_PG	(NPT2_IN_PG * NPTE2_IN_PT2)
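
/*
 *  Worked example (a sketch assuming the usual ARMv6/v7 short-descriptor
 *  sizes: NB_IN_PT1 == 16 KB, NB_IN_PT2 == 1 KB, PAGE_SIZE == 4 KB and
 *  NPTE2_IN_PT2 == 256):
 *
 *	NPG_IN_PT1  == 16 KB / 4 KB == 4
 *	NPT2_IN_PG  == 4 KB / 1 KB  == 4
 *	NPTE2_IN_PG == 4 * 256      == 1024
 */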

#ifdef _KERNEL

/*
 *  An L2 page table page contains NPT2_IN_PG L2 page tables. Masking
 *  pte1_idx with PT2PG_MASK gives the index of the associated L2 page
 *  table within its page. The PT2PG_SHIFT definition depends strictly
 *  on NPT2_IN_PG; i.e., (1 << PT2PG_SHIFT) == NPT2_IN_PG must hold.
 */
#define PT2PG_SHIFT	2
#define PT2PG_MASK	((1 << PT2PG_SHIFT) - 1)
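
/*
 *  Example of the index math (assuming NPT2_IN_PG == 4, hence
 *  PT2PG_SHIFT == 2): for pte1_idx == 0x123, pte1_idx & PT2PG_MASK == 3,
 *  i.e., the associated L2 page table is the fourth one within its page.
 */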

/*
 *  A PT2TAB holds all allocated L2 page table pages in a pmap.
 *  Right shifting a virtual address by PT2TAB_SHIFT gives the index of
 *  the L2 page table page in the PT2TAB that holds the mapping for
 *  that address.
 */
#define PT2TAB_ENTRIES  (NPTE1_IN_PT1 / NPT2_IN_PG)
#define PT2TAB_SHIFT	(PTE1_SHIFT + PT2PG_SHIFT)
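
/*
 *  With the sizes assumed above (NPTE1_IN_PT1 == 4096, NPT2_IN_PG == 4 and
 *  PTE1_SHIFT == 20), PT2TAB_ENTRIES == 1024 and PT2TAB_SHIFT == 22, so
 *  each PT2TAB entry covers a 4 MB slice of the virtual address space.
 */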

/*
 *  All allocated L2 page table pages in a pmap are mapped into the
 *  PT2MAP space. Right shifting a virtual address by PT2MAP_SHIFT gives
 *  the index of the PTE2 that maps the address.
 */
#define PT2MAP_SIZE	(NPTE1_IN_PT1 * NB_IN_PT2)
#define PT2MAP_SHIFT	PTE2_SHIFT
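
/*
 *  Again with the assumed sizes, PT2MAP_SIZE == 4096 * 1 KB == 4 MB of
 *  virtual space, and with PTE2_SHIFT == 12 the PT2MAP index is simply
 *  the virtual page number of the given address.
 */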

extern pt1_entry_t *kern_pt1;
extern pt2_entry_t *kern_pt2tab;
extern pt2_entry_t *PT2MAP;

/*
 *  Virtual interface for L1 page table management.
 */

static __inline u_int
pte1_index(vm_offset_t va)
{

	return (va >> PTE1_SHIFT);
}

static __inline pt1_entry_t *
pte1_ptr(pt1_entry_t *pt1, vm_offset_t va)
{

	return (pt1 + pte1_index(va));
}

static __inline vm_offset_t
pte1_trunc(vm_offset_t va)
{

	return (va & PTE1_FRAME);
}

static __inline vm_offset_t
pte1_roundup(vm_offset_t va)
{

	return ((va + PTE1_OFFSET) & PTE1_FRAME);
}
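
/*
 *  Example (assuming PTE1_SHIFT == 20, i.e., 1 MB sections): for
 *  va == 0xc1234567, pte1_index(va) == 0xc12, pte1_trunc(va) == 0xc1200000
 *  and pte1_roundup(va) == 0xc1300000.
 */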

/*
 *  Virtual interface for L1 page table entry management.
 *
 *  XXX: Some of the following functions include a synchronization barrier
 *  and are called in loops, so it could be useful to have two versions of
 *  each: one with the barrier and one without.  In that case, a pure
 *  barrier pte1_sync() should be implemented as well.
 */
static __inline void
pte1_sync(pt1_entry_t *pte1p)
{

	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte1p, sizeof(*pte1p));
#endif
}

static __inline void
pte1_sync_range(pt1_entry_t *pte1p, vm_size_t size)
{

	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte1p, size);
#endif
}

static __inline void
pte1_store(pt1_entry_t *pte1p, pt1_entry_t pte1)
{

	atomic_store_rel_int(pte1p, pte1);
	pte1_sync(pte1p);
}

static __inline void
pte1_clear(pt1_entry_t *pte1p)
{

	pte1_store(pte1p, 0);
}

static __inline void
pte1_clear_bit(pt1_entry_t *pte1p, uint32_t bit)
{

	atomic_clear_int(pte1p, bit);
	pte1_sync(pte1p);
}

static __inline boolean_t
pte1_cmpset(pt1_entry_t *pte1p, pt1_entry_t opte1, pt1_entry_t npte1)
{
	boolean_t ret;

	ret = atomic_cmpset_int(pte1p, opte1, npte1);
	if (ret)
		pte1_sync(pte1p);

	return (ret);
}
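
/*
 *  Illustrative sketch (not part of the interface): pte1_cmpset() in the
 *  usual lock-free read-modify-write loop.  For simply setting a bit,
 *  pte1_set_bit() below is enough; cmpset matters when the new value must
 *  be derived from the previously observed one.
 */
#if 0
static __inline void
example_pte1_make_ro(pt1_entry_t *pte1p)
{
	pt1_entry_t opte1;

	do {
		opte1 = pte1_load(pte1p);
	} while (!pte1_cmpset(pte1p, opte1, opte1 | PTE1_RO));
}
#endif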

static __inline boolean_t
pte1_is_link(pt1_entry_t pte1)
{

	return ((pte1 & L1_TYPE_MASK) == L1_TYPE_C);
}

static __inline boolean_t
pte1_is_section(pt1_entry_t pte1)
{

	return ((pte1 & L1_TYPE_MASK) == L1_TYPE_S);
}

static __inline boolean_t
pte1_is_dirty(pt1_entry_t pte1)
{

	return ((pte1 & (PTE1_NM | PTE1_RO)) == 0);
}

static __inline boolean_t
pte1_is_global(pt1_entry_t pte1)
{

	return ((pte1 & PTE1_NG) == 0);
}

static __inline boolean_t
pte1_is_valid(pt1_entry_t pte1)
{
	int l1_type;

	l1_type = pte1 & L1_TYPE_MASK;
	return ((l1_type == L1_TYPE_C) || (l1_type == L1_TYPE_S));
}

static __inline boolean_t
pte1_is_wired(pt1_entry_t pte1)
{

	return (pte1 & PTE1_W);
}

static __inline pt1_entry_t
pte1_load(pt1_entry_t *pte1p)
{
	pt1_entry_t pte1;

	pte1 = *pte1p;
	return (pte1);
}

static __inline pt1_entry_t
pte1_load_clear(pt1_entry_t *pte1p)
{
	pt1_entry_t opte1;

	opte1 = atomic_readandclear_int(pte1p);
	pte1_sync(pte1p);
	return (opte1);
}

static __inline void
pte1_set_bit(pt1_entry_t *pte1p, uint32_t bit)
{

	atomic_set_int(pte1p, bit);
	pte1_sync(pte1p);
}

static __inline vm_paddr_t
pte1_pa(pt1_entry_t pte1)
{

	return ((vm_paddr_t)(pte1 & PTE1_FRAME));
}

static __inline vm_paddr_t
pte1_link_pa(pt1_entry_t pte1)
{

	return ((vm_paddr_t)(pte1 & L1_C_ADDR_MASK));
}

/*
 *  Virtual interface for L2 page table entry management.
 *
 *  XXX: Some of the following functions include a synchronization barrier
 *  and are called in loops, so it could be useful to have two versions of
 *  each: one with the barrier and one without.
 */

static __inline void
pte2_sync(pt2_entry_t *pte2p)
{

	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte2p, sizeof(*pte2p));
#endif
}

static __inline void
pte2_sync_range(pt2_entry_t *pte2p, vm_size_t size)
{

	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte2p, size);
#endif
}

static __inline void
pte2_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
{

	atomic_store_rel_int(pte2p, pte2);
	pte2_sync(pte2p);
}

static __inline void
pte2_clear(pt2_entry_t *pte2p)
{

	pte2_store(pte2p, 0);
}

static __inline void
pte2_clear_bit(pt2_entry_t *pte2p, uint32_t bit)
{

	atomic_clear_int(pte2p, bit);
	pte2_sync(pte2p);
}

static __inline boolean_t
pte2_cmpset(pt2_entry_t *pte2p, pt2_entry_t opte2, pt2_entry_t npte2)
{
	boolean_t ret;

	ret = atomic_cmpset_int(pte2p, opte2, npte2);
	if (ret)
		pte2_sync(pte2p);

	return (ret);
}

static __inline boolean_t
pte2_is_dirty(pt2_entry_t pte2)
{

	return ((pte2 & (PTE2_NM | PTE2_RO)) == 0);
}

static __inline boolean_t
pte2_is_global(pt2_entry_t pte2)
{

	return ((pte2 & PTE2_NG) == 0);
}

static __inline boolean_t
pte2_is_valid(pt2_entry_t pte2)
{

	return (pte2 & PTE2_V);
}

static __inline boolean_t
pte2_is_wired(pt2_entry_t pte2)
{

	return (pte2 & PTE2_W);
}

static __inline pt2_entry_t
pte2_load(pt2_entry_t *pte2p)
{
	pt2_entry_t pte2;

	pte2 = *pte2p;
	return (pte2);
}

static __inline pt2_entry_t
pte2_load_clear(pt2_entry_t *pte2p)
{
	pt2_entry_t opte2;

	opte2 = atomic_readandclear_int(pte2p);
	pte2_sync(pte2p);
	return (opte2);
}

static __inline void
pte2_set_bit(pt2_entry_t *pte2p, uint32_t bit)
{

	atomic_set_int(pte2p, bit);
	pte2_sync(pte2p);
}

static __inline void
pte2_set_wired(pt2_entry_t *pte2p, boolean_t wired)
{

	/*
	 * The wired bit is transparent to the hardware page table walk,
	 * so pte2_sync() is not needed.
	 */
	if (wired)
		atomic_set_int(pte2p, PTE2_W);
	else
		atomic_clear_int(pte2p, PTE2_W);
}

static __inline vm_paddr_t
pte2_pa(pt2_entry_t pte2)
{

	return ((vm_paddr_t)(pte2 & PTE2_FRAME));
}

static __inline u_int
pte2_attr(pt2_entry_t pte2)
{

	return ((u_int)(pte2 & PTE2_ATTR_MASK));
}

/*
 *  Virtual interface for L2 page table mapping management.
 */

static __inline u_int
pt2tab_index(vm_offset_t va)
{

	return (va >> PT2TAB_SHIFT);
}

static __inline pt2_entry_t *
pt2tab_entry(pt2_entry_t *pt2tab, vm_offset_t va)
{

	return (pt2tab + pt2tab_index(va));
}

static __inline void
pt2tab_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
{

	pte2_store(pte2p, pte2);
}

static __inline pt2_entry_t
pt2tab_load(pt2_entry_t *pte2p)
{

	return (pte2_load(pte2p));
}

static __inline pt2_entry_t
pt2tab_load_clear(pt2_entry_t *pte2p)
{

	return (pte2_load_clear(pte2p));
}

static __inline u_int
pt2map_index(vm_offset_t va)
{

	return (va >> PT2MAP_SHIFT);
}

static __inline pt2_entry_t *
pt2map_entry(vm_offset_t va)
{

	return (PT2MAP + pt2map_index(va));
}

/*
 *  Virtual interface for pmap structure & kernel shortcuts.
 */

static __inline pt1_entry_t *
pmap_pte1(pmap_t pmap, vm_offset_t va)
{

	return (pte1_ptr(pmap->pm_pt1, va));
}

static __inline pt1_entry_t *
kern_pte1(vm_offset_t va)
{

	return (pte1_ptr(kern_pt1, va));
}

static __inline pt2_entry_t *
pmap_pt2tab_entry(pmap_t pmap, vm_offset_t va)
{

	return (pt2tab_entry(pmap->pm_pt2tab, va));
}

static __inline pt2_entry_t *
kern_pt2tab_entry(vm_offset_t va)
{

	return (pt2tab_entry(kern_pt2tab, va));
}

static __inline vm_page_t
pmap_pt2_page(pmap_t pmap, vm_offset_t va)
{
	pt2_entry_t pte2;

	pte2 = pte2_load(pmap_pt2tab_entry(pmap, va));
	return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
}

static __inline vm_page_t
kern_pt2_page(vm_offset_t va)
{
	pt2_entry_t pte2;

	pte2 = pte2_load(kern_pt2tab_entry(va));
	return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
}
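
/*
 *  Illustrative sketch (not part of the interface): translating a kernel
 *  virtual address to a physical one with the helpers above.  Returning 0
 *  for an unmapped address is for brevity only; PAGE_MASK comes from
 *  machine/param.h and PTE1_OFFSET is the section offset mask used in
 *  pte1_roundup() above.
 */
#if 0
static __inline vm_paddr_t
example_kern_va_to_pa(vm_offset_t va)
{
	pt1_entry_t pte1;
	pt2_entry_t pte2;

	pte1 = pte1_load(kern_pte1(va));
	if (pte1_is_section(pte1))
		/* 1 MB section: the L1 entry itself holds the frame. */
		return (pte1_pa(pte1) | (va & PTE1_OFFSET));
	if (!pte1_is_link(pte1))
		return (0);		/* not mapped */
	/* The L1 entry links to an L2 table; PT2MAP maps all of them. */
	pte2 = pte2_load(pt2map_entry(va));
	if (!pte2_is_valid(pte2))
		return (0);
	return (pte2_pa(pte2) | (va & PAGE_MASK));
}
#endif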

#endif	/* _KERNEL */
#endif	/* !_MACHINE_PMAP_VAR_H_ */