/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
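
/*
 * Illustrative sketch (not part of this header): a driver's mmap()
 * handler might apply pgprot_noncached() before mapping MMIO:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot);
 *
 * remap_pfn_range() is from <linux/mm.h>; pfn and size are assumed
 * to come from the caller's context.
 */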

#ifndef __ASSEMBLY__
#include <linux/spinlock.h>
#include <asm/x86_init.h>
#include <asm/pkru.h>
#include <asm/fpu/api.h>
#include <asm/coco.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/page_table_check.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
				   bool user);
void ptdump_walk_pgd_level_checkwx(void);
void ptdump_walk_user_pgd_level_checkwx(void);

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(cc_mkenc(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(cc_mkdec(pgprot_val(prot)))
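
/*
 * Illustrative sketch (not part of this header): a confidential-VM
 * guest sharing a buffer with the hypervisor would map it with the
 * encryption attribute cleared, roughly:
 *
 *	pgprot_t prot = pgprot_decrypted(PAGE_KERNEL);
 *
 * followed by an ioremap/vmap of the shared range using that prot;
 * the exact mapping call is assumed to depend on the caller.
 */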

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
#else
#define debug_checkwx()		do { } while (0)
#define debug_checkwx_user()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
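
/*
 * Example (illustrative): code that must treat the shared zero page
 * specially can compare against it, e.g.
 *
 *	if (pte_page(pte) == ZERO_PAGE(addr))
 *		...;	the mapping simply reads as zeroes
 */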

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT_XXL */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do { } while (0)
#endif	/* CONFIG_PARAVIRT_XXL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}
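
/*
 * Example (illustrative): these predicates are typically combined by
 * page-table walkers, e.g. to find present, writable, dirty PTEs:
 *
 *	if (pte_present(pte) && pte_write(pte) && pte_dirty(pte))
 *		...;
 *
 * They only inspect the PTE value; nothing here touches memory.
 */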

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define p4d_leaf	p4d_large
static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* NOTE: when testing for a huge page, consider pmd_devmap() as well, or use pmd_large() */
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	bool wp = pte_flags(pte) & _PAGE_UFFD_WP;

#ifdef CONFIG_DEBUG_VM
	/*
	 * A present, wr-protect-marked PTE must never have the write bit
	 * set, because then the uffd-wp bit would be ignored and writes
	 * would go straight through.
	 *
	 * Use any chance of pgtable walking to verify this (e.g., when
	 * a page is swapped out or being migrated): if this triggers,
	 * something is already wrong.  Tell the admin even before the
	 * process crashes.  This also catches incorrect page table setup.
	 */
	WARN_ON_ONCE(wp && pte_write(pte));
#endif

	return wp;
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(pte_set_flags(pte, _PAGE_UFFD_WP));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
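
/*
 * Example (illustrative): the pte_mk*()/pte_clr*() helpers above are
 * pure value transformations and compose freely, e.g. a write-fault
 * path making a PTE writable, dirty and accessed:
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 */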

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pmd_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_UFFD_WP;
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pmd_wrprotect(pmd_set_flags(pmd, _PAGE_UFFD_WP));
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h cannot be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx "
		  "bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}
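
/*
 * Illustrative note: pfn_pte() and pte_pfn() round-trip even for
 * PROT_NONE entries, because both XOR the value with protnone_mask()
 * (the L1TF PFN inversion), e.g. conceptually:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_NONE);
 *	BUG_ON(pte_pfn(pte) != pfn);	(pfn assumed from the caller)
 */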

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		      __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}
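
/*
 * Example (illustrative): mprotect-style code applies a VMA's new
 * protection while _PAGE_CHG_MASK preserves the PFN plus bits such
 * as PAT, dirty/accessed, soft-dirty and encryption, roughly:
 *
 *	pte = pte_modify(pte, vma->vm_page_prot);
 */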

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/*
 * mprotect needs to preserve PAT and encryption bits when updating
 * vm_page_prot
 */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
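
/*
 * Example (illustrative): a caller that requested write-combine but
 * would be handed back write-back is refused:
 *
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WC,
 *			       _PAGE_CACHE_MODE_WB) == 0
 */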

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user page tables and returns the resulting PGD that must
 * be set in the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
#else   /* CONFIG_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif  /* CONFIG_PAGE_TABLE_ISOLATION */
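
/*
 * Illustrative sketch: pgd updates funnel through this helper so the
 * user copy of the page tables stays in sync under PTI; compare
 * native_set_pgd() in pgtable_64.h, which does roughly:
 *
 *	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 */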

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/*
	 * Only check low word on 32-bit platforms, since it might be
	 * out of sync with upper half.
	 */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) !=
	       (_KERNPG_TABLE & ~_PAGE_ACCESSED);
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
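
/*
 * Example: with 4 KiB pages PAGE_SHIFT is 12, so pages_to_mb() is a
 * shift by 8, e.g. 262144 pages -> 1024 MB.
 */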

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 1;
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (!pgtable_l5_enabled())
		return 0;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 0;
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);
void __init poking_init(void);
unsigned long init_memory_mapping(unsigned long start,
				  unsigned long end, pgprot_t prot);

#ifdef CONFIG_X86_64
extern pgd_t trampoline_pgd_entry;
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	page_table_check_pte_set(mm, addr, ptep, pte);
	set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, addr, pmdp, pmd);
	set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, addr, pudp, pud);
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	page_table_check_pte_clear(mm, addr, pte);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
		page_table_check_pte_clear(mm, addr, pte);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);

	page_table_check_pmd_clear(mm, addr, pmd);

	return pmd;
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long addr, pud_t *pudp)
{
	pud_t pud = native_pudp_get_and_clear(pudp);

	page_table_check_pud_clear(mm, addr, pud);

	return pud;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pmdp, pmd);
	} else {
		pmd_t old = *pmdp;
		WRITE_ONCE(*pmdp, pmd);
		return old;
	}
}
#endif

#define __HAVE_ARCH_PMDP_INVALIDATE_AD
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp);

/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}
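
/*
 * Example (illustrative): on x86-64 with PTI, PGD_KERNEL_START is
 * (PAGE_SIZE / 2) / sizeof(pgd_t) == 256, so an entry at page offset
 * 0x7f8 (index 255) maps userspace while one at 0x800 (index 256)
 * does not.
 */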

#define pgd_leaf	pgd_large
static inline int pgd_large(pgd_t pgd) { return 0; }

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
 * the user one is in the last 4k.  To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}

static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */
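
/*
 * Example: with an order-1, 8 KiB-aligned allocation at ...c000, the
 * kernel PGD occupies ...c000-...cfff and the user copy ...d000-...dfff;
 * kernel_to_user_pgdp() just ORs in bit 12 (0x1000).
 */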

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}
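
/*
 * Example (illustrative): pgd setup copies the kernel half of the
 * reference page tables into a new mm's pgd along these lines
 * (compare pgd_ctor() in arch/x86/mm/pgtable.c):
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */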

#define PTE_SHIFT ilog2(PTRS_PER_PTE)

static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}

static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}

static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
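
/*
 * Example: with 4 KiB pages PTE_SHIFT is 9, so PG_LEVEL_4K maps to
 * shift 12 (4 KiB), PG_LEVEL_2M to shift 21 (2 MiB) and PG_LEVEL_1G
 * to shift 30 (1 GiB).
 */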

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_UFFD_WP;
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP);
}

static inline int pmd_swp_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP;
}

static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}
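
/*
 * Example (illustrative): fast GUP-style code uses these to decide
 * whether a user page walk may proceed, e.g.
 *
 *	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
 *		return 0;
 */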

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#define arch_has_hw_pte_young arch_has_hw_pte_young
static inline bool arch_has_hw_pte_young(void)
{
	return true;
}

#ifdef CONFIG_XEN_PV
#define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young
static inline bool arch_has_hw_nonleaf_pmd_young(void)
{
	return !cpu_feature_enabled(X86_FEATURE_XENPV);
}
#endif

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) && (pmd_val(pmd) & _PAGE_USER);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) && (pud_val(pud) & _PAGE_USER);
}
#endif

#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */