xref: /linux/arch/riscv/mm/pgtable.c (revision 5643c6b2c8308b206cb01cbfd0e6ac80f9f1bc9a)
// SPDX-License-Identifier: GPL-2.0

#include <asm/pgalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

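/*
 * Fault-path helper: fold updated access/dirty flags into a PTE that is
 * already present.  Returns false only when the entry is known to be up
 * to date and no further maintenance is needed.
 */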
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC)) {
		if (!pte_same(ptep_get(ptep), entry)) {
			__set_pte_at(vma->vm_mm, ptep, entry);
			/* Only hardware without Svadu is impacted here. */
			flush_tlb_page(vma, address);
			return true;
		}

		return false;
	}

	if (!pte_same(ptep_get(ptep), entry))
		__set_pte_at(vma->vm_mm, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

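/*
 * Clear the Accessed bit atomically so that concurrent updates to the
 * PTE (by hardware A/D updating or another CPU) are not lost.
 */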
bool ptep_test_and_clear_young(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep)
{
	if (!pte_young(ptep_get(ptep)))
		return false;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);

#ifdef CONFIG_64BIT
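/*
 * On 64-bit kernels the number of page-table levels (Sv39/Sv48/Sv57) is
 * chosen at boot, so the p4d and pud levels may be folded at runtime:
 * when a level is disabled, the entry one level up is reused as-is.
 */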
pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if (pgtable_l4_enabled)
		return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);

	return (pud_t *)p4d;
}
EXPORT_SYMBOL_GPL(pud_offset);

p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (pgtable_l5_enabled)
		return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);

	return (p4d_t *)pgd;
}
EXPORT_SYMBOL_GPL(p4d_offset);
#endif

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
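/*
 * Huge-vmap helpers: the generic vmalloc code uses these to install and
 * tear down leaf entries above the PTE level.  A set_huge/clear_huge
 * helper returns 1 on success and 0 if the level cannot (or did not)
 * hold a leaf mapping, in which case the caller falls back to smaller
 * mappings; that is why p4d_set_huge() unconditionally returns 0.
 */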
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

void p4d_clear_huge(p4d_t *p4d)
{
}

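/* Install a leaf PUD mapping @phys with @prot for a huge vmap region. */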
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);

	set_pud(pud, new_pud);
	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (!pud_leaf(pudp_get(pud)))
		return 0;
	pud_clear(pud);
	return 1;
}

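/*
 * Tear down the PMD table (and any PTE tables under it) that a PUD entry
 * points to, so the range can be remapped with a huge leaf.  The entry is
 * cleared and the TLB flushed *before* the tables are freed so that no
 * walker can still reach the pages being released.
 */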
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd = pud_pgtable(pudp_get(pud));
	int i;

	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(NULL, pte);
		}
	}

	pmd_free(NULL, pmd);

	return 1;
}

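/* Install a leaf PMD mapping @phys with @prot for a huge vmap region. */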
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);

	set_pmd(pmd, new_pmd);
	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_leaf(pmdp_get(pmd)))
		return 0;
	pmd_clear(pmd);
	return 1;
}

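/*
 * Same as pud_free_pmd_page() one level down: detach and free the PTE
 * table under a PMD entry after clearing the entry and flushing the TLB.
 */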
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));

	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	pte_free_kernel(NULL, pte);
	return 1;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
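/*
 * Used on the khugepaged collapse path: atomically clear the PMD that
 * covered the PTE table being collapsed and flush it; see the comment
 * below for why the flush must be global on RISC-V.
 */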
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
	/*
	 * When leaf PTE entries (regular pages) are collapsed into a leaf
	 * PMD entry (huge page), a valid non-leaf PTE is converted into a
	 * valid leaf PTE at the level 1 page table.  Since the sfence.vma
	 * forms that specify an address only apply to leaf PTEs, we need a
	 * global flush here.  collapse_huge_page() assumes these flushes are
	 * eager, so just do the fence here.
	 */
	flush_tlb_mm(vma->vm_mm);
	return pmd;
}

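/*
 * Transition a present PUD to an invalid (but not none) entry and flush,
 * so that concurrent hardware walks fault instead of observing the entry
 * while it is being modified.
 */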
pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pud_t *pudp)
{
	VM_WARN_ON_ONCE(!pud_present(*pudp));
	pud_t old = pudp_establish(vma, address, pudp, pud_mkinvalid(*pudp));

	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return old;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

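/*
 * Making a PTE/PMD writable is VMA-dependent: shadow stack mappings
 * (VM_SHADOW_STACK) need the dedicated shadow-stack encoding rather
 * than the encoding the _novma() variants produce for normal pages.
 */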
pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHADOW_STACK)
		return pte_mkwrite_shstk(pte);

	return pte_mkwrite_novma(pte);
}

pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHADOW_STACK)
		return pmd_mkwrite_shstk(pmd);

	return pmd_mkwrite_novma(pmd);
}