/* arch/x86/include/asm/pgtable.h (revision b6ebbac51bedf9e98e837688bc838f400196da5e) */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
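
/*
 * Usage sketch (illustrative, not code from this file): a driver mapping
 * MMIO into userspace would typically apply this before remapping, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 *
 * On anything newer than a 386 this ORs in the UC- cache mode; a 386 has
 * no cache-control bits in its PTEs, so the protection value is returned
 * unchanged.
 */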

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
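
/*
 * Illustrative (assumption: this mirrors how generic mm code uses it):
 * the zero page is recognized by comparing page pointers, not contents,
 * e.g.
 *
 *	if (page == ZERO_PAGE(0))
 *		...page is the shared read-only zero page...
 *
 * Read faults on untouched anonymous mappings can map this single page
 * so an entire region reads as zeroes without allocating real memory.
 */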

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do { } while (0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
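
/*
 * Note the pte_mk*()/pte_clr*()/pte_wrprotect() helpers above are pure:
 * they take a pte_t by value and return a new one, so callers compose
 * them and install the result afterwards.  A hedged sketch (not code
 * from this file) of building a PTE for an anonymous page:
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *	if (vma->vm_flags & VM_WRITE)
 *		pte = pte_mkwrite(pte_mkdirty(pte));
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 */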

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}
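
/*
 * Sketch of the intended use (assumption: mprotect-style code): rewrite
 * the protection bits of a live mapping while _PAGE_CHG_MASK keeps the
 * PFN plus the accessed/dirty/soft-dirty and caching-selection state
 * intact:
 *
 *	pte_t old = *ptep;
 *	pte_t new = pte_modify(old, newprot);
 *	set_pte_at(mm, addr, ptep, new);
 */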

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)		\
	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		\
	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
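
/*
 * Putting the lookup helpers together, a simplified software walk of the
 * kernel page tables (illustrative; no p?d_none()/p?d_bad()/p?d_large()
 * checks, which real walkers need at every level) looks like:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */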

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
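
/*
 * For example (a sketch of pgd_ctor-style use, not code from this file):
 * a freshly allocated pgd inherits the kernel half of swapper_pg_dir:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */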

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
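
/*
 * Worked example for x86-64, where PAGE_SHIFT == 12 and PTE_SHIFT == 9:
 * page_level_shift() yields 12 for PG_LEVEL_4K, 21 for PG_LEVEL_2M and
 * 30 for PG_LEVEL_1G, so page_level_size(PG_LEVEL_2M) == 2 MiB and
 * page_level_mask(PG_LEVEL_2M) == ~0x1fffffUL.
 */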

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}
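
/*
 * Tying the pkey pieces together (illustrative sketch, not code from
 * this file): checking whether the current PKRU value permits a write
 * through a given pte could look like:
 *
 *	u16 pkey = pte_flags_pkey(pte_flags(pte));
 *	bool writable = __pkru_allows_write(read_pkru(), pkey);
 *
 * With OSPKE unset, read_pkru() returns 0, which restricts nothing.
 */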

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */