xref: /linux/arch/riscv/mm/pgtable.c (revision 509d3f45847627f4c5cdce004c3ec79262b5239c)
// SPDX-License-Identifier: GPL-2.0

#include <asm/pgalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

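/*
 * Update the access/dirty bits of a PTE in response to a fault.  On
 * Svvptc systems the PTE is only rewritten (and its TLB entry flushed)
 * when it actually changed, and the return value reports whether it
 * did; otherwise true is always returned so that update_mmu_cache()
 * handles both real updates and spurious faults.
 */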
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC)) {
		if (!pte_same(ptep_get(ptep), entry)) {
			__set_pte_at(vma->vm_mm, ptep, entry);
			/* Only the non-Svadu case is affected here */
			flush_tlb_page(vma, address);
			return true;
		}

		return false;
	}

	if (!pte_same(ptep_get(ptep), entry))
		__set_pte_at(vma->vm_mm, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

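/*
 * Atomically test and clear the Accessed bit of a PTE, returning its
 * previous state so that page aging can be tracked.
 */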
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep)
{
	if (!pte_young(ptep_get(ptep)))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);

#ifdef CONFIG_64BIT
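/*
 * Walk from a P4D entry to the PUD table it points to.  When fewer
 * than four page-table levels are in use (pgtable_l4_enabled is false,
 * e.g. Sv39), the PUD level is folded and the P4D entry is reused
 * directly.
 */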
pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if (pgtable_l4_enabled)
		return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);

	return (pud_t *)p4d;
}

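/*
 * Walk from a PGD entry to the P4D table it points to.  Without five
 * levels (pgtable_l5_enabled is false), the P4D level is folded into
 * the PGD.
 */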
p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (pgtable_l5_enabled)
		return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);

	return (p4d_t *)pgd;
}
#endif

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
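/*
 * Huge vmap mappings are not supported at the P4D level: returning 0
 * from p4d_set_huge() makes the generic vmap code fall back to smaller
 * mapping sizes.
 */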
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

void p4d_clear_huge(p4d_t *p4d)
{
}

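/*
 * Install a leaf entry at the PUD level, mapping @phys as one
 * PUD-sized page for the huge-vmap/ioremap code.  Always succeeds.
 */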
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);

	set_pud(pud, new_pud);
	return 1;
}

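/*
 * Clear a leaf (huge) PUD entry.  Returns 0 if the entry was not a
 * leaf, 1 once it has been cleared.
 */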
int pud_clear_huge(pud_t *pud)
{
	if (!pud_leaf(pudp_get(pud)))
		return 0;
	pud_clear(pud);
	return 1;
}

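/*
 * Tear down the PMD table (and any PTE tables hanging off it) that a
 * PUD entry points to, so the slot can be reused for a huge mapping.
 * The TLB flush must happen before the tables are freed so that no
 * walk can still reach them.
 */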
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd = pud_pgtable(pudp_get(pud));
	int i;

	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(NULL, pte);
		}
	}

	pmd_free(NULL, pmd);

	return 1;
}

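/*
 * Install a leaf entry at the PMD level, the PMD-sized counterpart of
 * pud_set_huge().  Always succeeds.
 */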
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);

	set_pmd(pmd, new_pmd);
	return 1;
}

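/*
 * Clear a leaf (huge) PMD entry.  Returns 0 if the entry was not a
 * leaf, 1 once it has been cleared.
 */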
int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_leaf(pmdp_get(pmd)))
		return 0;
	pmd_clear(pmd);
	return 1;
}

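/*
 * Free the PTE table that a PMD entry points to, flushing the TLB
 * before the page is released so the slot can be reused for a huge
 * mapping.
 */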
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));

	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	pte_free_kernel(NULL, pte);
	return 1;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
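/*
 * Clear a PMD entry during THP collapse and return its old value.  The
 * comment in the body explains why RISC-V needs a full flush_tlb_mm()
 * here rather than a ranged flush.
 */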
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
	/*
	 * When leaf PTE entries (regular pages) are collapsed into a leaf
	 * PMD entry (huge page), a valid non-leaf PTE is converted into a
	 * valid leaf PTE at the level 1 page table.  Since the sfence.vma
	 * forms that specify an address only apply to leaf PTEs, we need a
	 * global flush here.  collapse_huge_page() assumes these flushes are
	 * eager, so just do the fence here.
	 */
	flush_tlb_mm(vma->vm_mm);
	return pmd;
}

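/*
 * Make a present PUD entry invalid (pud_mkinvalid() clears the valid
 * bit but keeps the rest of the entry) and flush the covered range;
 * used by the PUD-level THP code.
 */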
pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pud_t *pudp)
{
	VM_WARN_ON_ONCE(!pud_present(*pudp));
	pud_t old = pudp_establish(vma, address, pudp, pud_mkinvalid(*pudp));

	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return old;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */