// SPDX-License-Identifier: GPL-2.0

#include <asm/pgalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

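/*
 * Update the access flags (accessed/dirty/writable) of a PTE after a
 * fault.  Without Svvptc we always report the entry as changed and let
 * update_mmu_cache() handle the rest; with Svvptc the alternative below
 * jumps to the svvptc label, which flushes the stale translation
 * explicitly and reports whether the entry actually changed.
 */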
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
		 : : : : svvptc);

	if (!pte_same(ptep_get(ptep), entry))
		__set_pte_at(vma->vm_mm, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;

svvptc:
	if (!pte_same(ptep_get(ptep), entry)) {
		__set_pte_at(vma->vm_mm, ptep, entry);
		/* Only cores without Svadu (hardware A/D bit updates) are affected here. */
		flush_tlb_page(vma, address);
		return true;
	}

	return false;
}

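/*
 * Atomically test and clear the accessed bit.  The pte_young() check
 * avoids the atomic operation when the bit is already clear.
 */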
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep)
{
	if (!pte_young(ptep_get(ptep)))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);

#ifdef CONFIG_64BIT
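/*
 * RISC-V folds the upper page-table levels at runtime (Sv39/Sv48/Sv57),
 * so these walkers check pgtable_l4_enabled/pgtable_l5_enabled instead
 * of relying on compile-time level folding.
 */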
pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if (pgtable_l4_enabled)
		return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);

	return (pud_t *)p4d;
}

p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (pgtable_l5_enabled)
		return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);

	return (p4d_t *)pgd;
}
#endif

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
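/*
 * Huge vmap mappings are only supported at the pmd and pud levels;
 * returning 0 makes the generic vmap code fall back to smaller
 * mappings at the p4d level.
 */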
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

void p4d_clear_huge(p4d_t *p4d)
{
}

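/* Install a leaf pud entry covering a PUD_SIZE-aligned physical range. */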
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);

	set_pud(pud, new_pud);
	return 1;
}

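/* Clear a leaf pud; a non-leaf (table) entry is left untouched. */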
int pud_clear_huge(pud_t *pud)
{
	if (!pud_leaf(pudp_get(pud)))
		return 0;
	pud_clear(pud);
	return 1;
}

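/*
 * Free the pmd page (and any pte pages below it) under @pud so the
 * range can be remapped with a huge leaf entry.  The TLB must be
 * flushed before the tables are freed, while nothing can still be
 * walking them.
 */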
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd = pud_pgtable(pudp_get(pud));
	int i;

	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(NULL, pte);
		}
	}

	pmd_free(NULL, pmd);

	return 1;
}

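/* Install a leaf pmd entry covering a PMD_SIZE-aligned physical range. */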
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);

	set_pmd(pmd, new_pmd);
	return 1;
}

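/* Clear a leaf pmd; a non-leaf (table) entry is left untouched. */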
int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_leaf(pmdp_get(pmd)))
		return 0;
	pmd_clear(pmd);
	return 1;
}

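/* Free the pte page under @pmd, flushing the TLB before it is released. */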
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));

	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	pte_free_kernel(NULL, pte);
	return 1;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
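/*
 * Clear a pmd that khugepaged is about to collapse and return the old
 * value; the comment below explains why the flush must be global.
 */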
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
	/*
	 * When leaf PTE entries (regular pages) are collapsed into a leaf
	 * PMD entry (huge page), a valid non-leaf PTE is converted into a
	 * valid leaf PTE at the level 1 page table.  Since the sfence.vma
	 * forms that specify an address only apply to leaf PTEs, we need a
	 * global flush here.  collapse_huge_page() assumes these flushes are
	 * eager, so just do the fence here.
	 */
	flush_tlb_mm(vma->vm_mm);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */