xref: /linux/arch/riscv/include/asm/pgalloc.h (revision d53b8e36925256097a08d7cb749198d85cbf9b2b)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/sbi.h>
#include <asm/tlb.h>

#ifdef CONFIG_MMU
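/*
 * The number of page-table levels (Sv39/Sv48/Sv57) is decided at boot, so
 * PUD allocation cannot rely on the purely compile-time generic helpers.
 * Defining these before including <asm-generic/pgalloc.h> suppresses the
 * generic pud_alloc_one()/pud_free() and lets the runtime-gated versions
 * below take over.
 */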
#define __HAVE_ARCH_PUD_ALLOC_ONE
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>

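/*
 * Free a page-table page through the mmu_gather. When remote fences go
 * through the SBI there is no IPI broadcast that would synchronize with
 * lockless page-table walkers (e.g. GUP-fast), so the page must take the
 * deferred tlb_remove_ptdesc() path; with IPI-based fences the shootdown
 * itself provides that synchronization and the page can be batched as a
 * normal page.
 */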
static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
{
	if (riscv_use_sbi_for_rfence())
		tlb_remove_ptdesc(tlb, pt);
	else
		tlb_remove_page_ptdesc(tlb, pt);
}

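/*
 * Non-leaf entries hold the PFN of the next-level table at _PAGE_PFN_SHIFT
 * with the valid bit set and R/W/X clear (_PAGE_TABLE), which the hardware
 * interprets as a pointer to the next level. For example (with 4K pages),
 * a PTE table at physical 0x80201000 (PFN 0x80201) is installed as
 * (0x80201 << _PAGE_PFN_SHIFT) | _PAGE_TABLE.
 */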
static inline void pmd_populate_kernel(struct mm_struct *mm,
	pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = virt_to_pfn(pte);

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm,
	pmd_t *pmd, pgtable_t pte)
{
	unsigned long pfn = virt_to_pfn(page_address(pte));

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

#ifndef __PAGETABLE_PMD_FOLDED
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	unsigned long pfn = virt_to_pfn(pmd);

	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

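/*
 * The P4D and PGD levels only exist when running Sv48/Sv57
 * (pgtable_l4_enabled and pgtable_l5_enabled respectively). With fewer
 * levels the level is folded at runtime and these helpers become no-ops.
 */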
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
				     pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d_safe(p4d,
			     __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
				     p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd_safe(pgd,
			     __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

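/*
 * Allocation and freeing of the foldable levels are gated on the same
 * runtime flags: nothing is allocated or freed for a level that does not
 * exist in the current paging mode.
 */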
#define pud_alloc_one pud_alloc_one
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	if (pgtable_l4_enabled)
		return __pud_alloc_one(mm, addr);

	return NULL;
}

#define pud_free pud_free
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (pgtable_l4_enabled)
		__pud_free(mm, pud);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long addr)
{
	if (pgtable_l4_enabled) {
		struct ptdesc *ptdesc = virt_to_ptdesc(pud);

		pagetable_pud_dtor(ptdesc);
		riscv_tlb_remove_ptdesc(tlb, ptdesc);
	}
}

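/*
 * User page tables are allocated with GFP_PGTABLE_USER so that they are
 * charged to the memcg (__GFP_ACCOUNT); tables for init_mm use
 * GFP_PGTABLE_KERNEL instead.
 */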
#define p4d_alloc_one p4d_alloc_one
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	if (pgtable_l5_enabled) {
		gfp_t gfp = GFP_PGTABLE_USER;

		if (mm == &init_mm)
			gfp = GFP_PGTABLE_KERNEL;
		return (p4d_t *)get_zeroed_page(gfp);
	}

	return NULL;
}

static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	free_page((unsigned long)p4d);
}

#define p4d_free p4d_free
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (pgtable_l5_enabled)
		__p4d_free(mm, p4d);
}

static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long addr)
{
	if (pgtable_l5_enabled)
		riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
#endif /* __PAGETABLE_PMD_FOLDED */

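/*
 * A new user pgd shares the kernel half of the address space with init_mm:
 * the user entries are zeroed and the kernel entries are copied from
 * init_mm.pgd, so kernel mappings are visible in every process.
 */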
static inline void sync_kernel_mappings(pgd_t *pgd)
{
	memcpy(pgd + USER_PTRS_PER_PGD,
	       init_mm.pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (likely(pgd != NULL)) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		/* Copy kernel mappings */
		sync_kernel_mappings(pgd);
	}
	return pgd;
}

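/*
 * Rough call flow, as a sketch only (none of this is defined here): core mm
 * takes the pgd from pgd_alloc() when an mm is created, the generic
 * pud_alloc()/pmd_alloc()/pte_alloc() paths fill in missing levels through
 * the *_populate() helpers above as faults come in, and free_pgd_range()
 * tears the tree down again via the __*_free_tlb() hooks below.
 */
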
#ifndef __PAGETABLE_PMD_FOLDED

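/*
 * Releasing a PMD table: run the ptdesc destructor first (it drops the
 * split page-table lock state and accounting set up at construction time),
 * then hand the page to the mmu_gather so it is only freed after the
 * relevant TLB entries have been flushed.
 */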
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long addr)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);

	pagetable_pmd_dtor(ptdesc);
	riscv_tlb_remove_ptdesc(tlb, ptdesc);
}

#endif /* __PAGETABLE_PMD_FOLDED */

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	struct ptdesc *ptdesc = page_ptdesc(pte);

	pagetable_pte_dtor(ptdesc);
	riscv_tlb_remove_ptdesc(tlb, ptdesc);
}
#endif /* CONFIG_MMU */

#endif /* _ASM_RISCV_PGALLOC_H */