// SPDX-License-Identifier: GPL-2.0

#include <asm/pgalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

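/*
 * Update the access/dirty flags of a PTE from the page-fault path.
 * With Svvptc the TLB is flushed here and the return value tells the
 * caller whether the entry actually changed; without it, true is
 * always returned and update_mmu_cache() takes care of the flush.
 */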
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC)) {
		if (!pte_same(ptep_get(ptep), entry)) {
			__set_pte_at(vma->vm_mm, ptep, entry);
			/* Only implementations without Svadu need this flush */
			flush_tlb_page(vma, address);
			return true;
		}

		return false;
	}

	if (!pte_same(ptep_get(ptep), entry))
		__set_pte_at(vma->vm_mm, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

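/*
 * Test and clear the Accessed bit for page aging.  The atomic
 * test_and_clear_bit() updates the PTE in place; no TLB flush is done,
 * as reclaim tolerates the resulting races.
 */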
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep)
{
	if (!pte_young(ptep_get(ptep)))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);

#ifdef CONFIG_64BIT
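/*
 * With Sv39 both the P4D and PUD levels are folded, and with Sv48 the
 * P4D level is folded, so whether to walk down a level is only known
 * at runtime from pgtable_l4_enabled/pgtable_l5_enabled.
 */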
pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if (pgtable_l4_enabled)
		return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);

	return (pud_t *)p4d;
}
EXPORT_SYMBOL_GPL(pud_offset);

p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (pgtable_l5_enabled)
		return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);

	return (p4d_t *)pgd;
}
EXPORT_SYMBOL_GPL(p4d_offset);
#endif

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
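/*
 * Huge vmap mappings are not supported at the P4D level; returning 0
 * makes vmap fall back to PUD-sized mappings.
 */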
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

void p4d_clear_huge(p4d_t *p4d)
{
}

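/*
 * Install a leaf PUD covering a PUD_SIZE physical range for huge vmap;
 * pud_clear_huge() below tears such a mapping down again.
 */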
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);

	set_pud(pud, new_pud);
	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (!pud_leaf(pudp_get(pud)))
		return 0;
	pud_clear(pud);
	return 1;
}

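/*
 * Unmap a non-leaf PUD and free the page tables beneath it: clear the
 * entry, flush the kernel TLB range, then release every PTE table
 * still referenced by the PMD table before freeing the PMD table
 * itself.
 */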
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd = pud_pgtable(pudp_get(pud));
	int i;

	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(NULL, pte);
		}
	}

	pmd_free(NULL, pmd);

	return 1;
}

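/* PMD-level counterparts of pud_set_huge()/pud_clear_huge() above. */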
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);

	set_pmd(pmd, new_pmd);
	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_leaf(pmdp_get(pmd)))
		return 0;
	pmd_clear(pmd);
	return 1;
}

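/*
 * As pud_free_pmd_page() above, but one level down: unmap a non-leaf
 * PMD and free the single PTE table it pointed to.
 */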
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));

	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	pte_free_kernel(NULL, pte);
	return 1;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
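/*
 * Used by khugepaged when collapsing a range of small pages into a
 * huge page: atomically clear the PMD and return the old entry so the
 * caller can install the new leaf mapping.
 */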
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
	/*
	 * When leaf PTE entries (regular pages) are collapsed into a leaf
	 * PMD entry (huge page), a valid non-leaf PTE is converted into a
	 * valid leaf PTE at the level 1 page table.  Since the sfence.vma
	 * forms that specify an address only apply to leaf PTEs, we need a
	 * global flush here.  collapse_huge_page() assumes these flushes are
	 * eager, so just do the fence here.
	 */
	flush_tlb_mm(vma->vm_mm);
	return pmd;
}

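/*
 * Make a huge PUD temporarily invalid (while it stays pud_present()
 * for the kernel's purposes) so the hardware cannot walk through it
 * while it is being split or rewritten, then flush the range it
 * mapped.
 */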
pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pud_t *pudp)
{
	VM_WARN_ON_ONCE(!pud_present(*pudp));
	pud_t old = pudp_establish(vma, address, pudp, pud_mkinvalid(*pudp));

	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return old;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

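/*
 * Shadow-stack memory (Zicfiss) uses the otherwise-reserved write-only
 * encoding (R=0, W=1), so "make writable" must pick the encoding based
 * on whether the VMA is a shadow stack.
 */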
pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHADOW_STACK)
		return pte_mkwrite_shstk(pte);

	return pte_mkwrite_novma(pte);
}

pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHADOW_STACK)
		return pmd_mkwrite_shstk(pmd);

	return pmd_mkwrite_novma(pmd);
}