// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table v2 allocator.
 *
 * Copyright (C) 2022, 2023 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 * Author: Vasant Hegde <vasant.hegde@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"
#include "../iommu-pages.h"
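/*
 * The v2 page table follows the AMD64 long-mode paging format, so the
 * PTE control bits below mirror the CPU's page-table bits.
 */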
#define IOMMU_PAGE_PRESENT	BIT_ULL(0)	/* Is present */
#define IOMMU_PAGE_RW		BIT_ULL(1)	/* Writeable */
#define IOMMU_PAGE_USER		BIT_ULL(2)	/* Userspace addressable */
#define IOMMU_PAGE_PWT		BIT_ULL(3)	/* Page write through */
#define IOMMU_PAGE_PCD		BIT_ULL(4)	/* Page cache disabled */
#define IOMMU_PAGE_ACCESS	BIT_ULL(5)	/* Was accessed (updated by IOMMU) */
#define IOMMU_PAGE_DIRTY	BIT_ULL(6)	/* Was written to (updated by IOMMU) */
#define IOMMU_PAGE_PSE		BIT_ULL(7)	/* Page Size Extensions */
#define IOMMU_PAGE_NX		BIT_ULL(63)	/* No execute */

#define MAX_PTRS_PER_PAGE	512

#define IOMMU_PAGE_SIZE_2M	BIT_ULL(21)
#define IOMMU_PAGE_SIZE_1G	BIT_ULL(30)

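/* Configured v2 page table level (4 or 5), from amd_iommu_gpt_level. */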
static inline int get_pgtable_level(void)
{
	return amd_iommu_gpt_level;
}

static inline bool is_large_pte(u64 pte)
{
	return (pte & IOMMU_PAGE_PSE);
}

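/* Build a non-leaf (directory) entry pointing at a lower-level table. */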
static inline u64 set_pgtable_attr(u64 *page)
{
	u64 prot;

	prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
	prot |= IOMMU_PAGE_ACCESS;

	return (iommu_virt_to_phys(page) | prot);
}

static inline void *get_pgtable_pte(u64 pte)
{
	return iommu_phys_to_virt(pte & PM_ADDR_MASK);
}

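/*
 * Build a leaf PTE mapping @paddr at @pg_size. ACCESS and DIRTY are
 * pre-set rather than left for the IOMMU to update.
 */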
static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
{
	u64 pte;

	pte = __sme_set(paddr & PM_ADDR_MASK);
	pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
	pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;

	if (prot & IOMMU_PROT_IW)
		pte |= IOMMU_PAGE_RW;

	/* Large page */
	if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
		pte |= IOMMU_PAGE_PSE;

	return pte;
}

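/* Largest v2-supported page size that fits in @size: 1G, 2M, else 4K. */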
static inline u64 get_alloc_page_size(u64 size)
{
	if (size >= IOMMU_PAGE_SIZE_1G)
		return IOMMU_PAGE_SIZE_1G;

	if (size >= IOMMU_PAGE_SIZE_2M)
		return IOMMU_PAGE_SIZE_2M;

	return PAGE_SIZE;
}

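/*
 * Last level v2_alloc_pte() walks through for a given leaf size; the
 * leaf PTE itself lands one level below (e.g. a 1G mapping terminates
 * the walk at level 3 and writes its PTE at the level-2 index).
 */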
static inline int page_size_to_level(u64 pg_size)
{
	if (pg_size == IOMMU_PAGE_SIZE_1G)
		return PAGE_MODE_3_LEVEL;
	if (pg_size == IOMMU_PAGE_SIZE_2M)
		return PAGE_MODE_2_LEVEL;

	return PAGE_MODE_1_LEVEL;
}

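/*
 * Recursively free a page table at @level and every table below it.
 * Large leaf PTEs map caller-owned memory rather than table pages, so
 * they are skipped.
 */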
static void free_pgtable(u64 *pt, int level)
{
	u64 *p;
	int i;

	for (i = 0; i < MAX_PTRS_PER_PAGE; i++) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		if (is_large_pte(pt[i]))
			continue;

		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
		p = get_pgtable_pte(pt[i]);
		if (level > 2)
			free_pgtable(p, level - 1);
		else
			iommu_free_page(p);
	}

	iommu_free_page(pt);
}

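/*
 * PM_LEVEL_INDEX() extracts nine bits per level, so with a 4-level
 * table an IOVA decomposes as:
 *   bits 47:39 -> level 3, 38:30 -> level 2, 29:21 -> level 1,
 *   bits 20:12 -> level 0, 11:0 -> offset within the 4K page,
 * matching the MAX_PTRS_PER_PAGE (512) slots per table.
 */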
/*
 * Allocate the page-table path for @iova down to the level implied by
 * @pg_size. Missing intermediate tables are installed with cmpxchg so
 * that concurrent mappers cannot install duplicates; *@updated is set
 * whenever an existing translation is torn down, telling the caller a
 * TLB flush is required.
 */
static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
			 unsigned long pg_size, gfp_t gfp, bool *updated)
{
	u64 *pte, *page;
	int level, end_level;

	level = get_pgtable_level() - 1;
	end_level = page_size_to_level(pg_size);
	pte = &pgd[PM_LEVEL_INDEX(level, iova)];
	iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);

	while (level >= end_level) {
		u64 __pte, __npte;

		__pte = *pte;

		if (IOMMU_PTE_PRESENT(__pte) && is_large_pte(__pte)) {
			/* Unmap large pte */
			cmpxchg64(pte, *pte, 0ULL);
			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte)) {
			page = iommu_alloc_page_node(nid, gfp);
			if (!page)
				return NULL;

			__npte = set_pgtable_attr(page);
			/* The pte could have been changed under us; retry. */
			if (!try_cmpxchg64(pte, &__pte, __npte))
				iommu_free_page(page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		level -= 1;
		pte = get_pgtable_pte(__pte);
		pte = &pte[PM_LEVEL_INDEX(level, iova)];
	}

	/* Tear down existing pte entries */
	if (IOMMU_PTE_PRESENT(*pte)) {
		u64 *__pte;

		*updated = true;
		__pte = get_pgtable_pte(*pte);
		cmpxchg64(pte, *pte, 0ULL);
		if (pg_size == IOMMU_PAGE_SIZE_1G)
			free_pgtable(__pte, end_level - 1);
		else if (pg_size == IOMMU_PAGE_SIZE_2M)
			iommu_free_page(__pte);
	}

	return pte;
}

/*
 * This function checks if there is a PTE for a given DMA address.
 * If there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long iova, unsigned long *page_size)
{
	u64 *pte;
	int level;

	level = get_pgtable_level() - 1;
	pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
	/* Default page size is 4K */
	*page_size = PAGE_SIZE;

	while (level) {
		/* Not present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Walk to the next level */
		pte = get_pgtable_pte(*pte);
		pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];

		/* Large page */
		if (is_large_pte(*pte)) {
			if (level == PAGE_MODE_3_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_1G;
			else if (level == PAGE_MODE_2_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_2M;
			else
				return NULL;	/* Wrongly set PSE bit in PTE */

			break;
		}

		level -= 1;
	}

	return pte;
}

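/*
 * Map @pgcount pages of size @pgsize at @iova. If a live translation
 * was replaced along the way, the affected range is flushed before
 * returning.
 */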
static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
	u64 *pte;
	unsigned long map_size;
	unsigned long mapped_size = 0;
	unsigned long o_iova = iova;
	size_t size = pgcount << __ffs(pgsize);
	int ret = 0;
	bool updated = false;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)
		return -EINVAL;

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	while (mapped_size < size) {
		map_size = get_alloc_page_size(pgsize);
		pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,
				   iova, map_size, gfp, &updated);
		if (!pte) {
			ret = -EINVAL;
			goto out;
		}

		*pte = set_pte_attr(paddr, map_size, prot);

		iova += map_size;
		paddr += map_size;
		mapped_size += map_size;
	}

out:
	if (updated) {
		struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);

		amd_iommu_domain_flush_pages(pdom, o_iova, size);
	}

	if (mapped)
		*mapped += mapped_size;

	return ret;
}

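/*
 * Unmap up to @pgcount pages of size @pgsize at @iova, returning the
 * number of bytes actually unmapped. The walk stops early at a hole;
 * TLB invalidation is left to the caller.
 */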
static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
	unsigned long unmap_size;
	unsigned long unmapped = 0;
	size_t size = pgcount << __ffs(pgsize);
	u64 *pte;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (!pte)
			return unmapped;

		*pte = 0ULL;

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}

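/* Resolve @iova to a physical address, or 0 if no mapping exists. */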
static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	pte = fetch_pte(pgtable, iova, &pte_pgsize);
	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}

/*
 * ----------------------------------------------------
 */
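/* Tear down the v2 table, freeing every level including the PGD. */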
static void v2_free_pgtable(struct io_pgtable *iop)
{
	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);

	if (!pgtable || !pgtable->pgd)
		return;

	/* Free page table */
	free_pgtable(pgtable->pgd, get_pgtable_level());
	pgtable->pgd = NULL;
}

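/*
 * Allocate the PGD and wire up the v2 ops. With 5-level tables the
 * input address size is raised to the 57 bits of a 5-level
 * (LA57-style) virtual address.
 */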
static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
	int ias = IOMMU_IN_ADDR_BIT_SIZE;

	pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
	if (!pgtable->pgd)
		return NULL;

	if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
		ias = 57;

	pgtable->pgtbl.ops.map_pages    = iommu_v2_map_pages;
	pgtable->pgtbl.ops.unmap_pages  = iommu_v2_unmap_pages;
	pgtable->pgtbl.ops.iova_to_phys = iommu_v2_iova_to_phys;

	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
	cfg->ias           = ias;
	cfg->oas           = IOMMU_OUT_ADDR_BIT_SIZE;

	return &pgtable->pgtbl;
}

struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
	.alloc	= v2_alloc_pgtable,
	.free	= v2_free_pgtable,
};
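/*
 * For reference, a rough sketch of how a caller obtains these ops
 * through the generic io-pgtable layer (the exact cfg fields the AMD
 * driver fills in live in drivers/iommu/amd/iommu.c):
 *
 *	struct io_pgtable_cfg cfg = {
 *		.amd.nid = dev_to_node(dev),
 *	};
 *	struct io_pgtable_ops *ops;
 *	size_t mapped = 0;
 *
 *	ops = alloc_io_pgtable_ops(AMD_IOMMU_V2, &cfg, cookie);
 *	if (ops)
 *		ops->map_pages(ops, iova, paddr, SZ_4K, 1,
 *			       IOMMU_PROT_IR | IOMMU_PROT_IW, GFP_KERNEL,
 *			       &mapped);
 */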