// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table v2 allocator.
 *
 * Copyright (C) 2022, 2023 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 * Author: Vasant Hegde <vasant.hegde@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"
#include "../iommu-pages.h"

#define IOMMU_PAGE_PRESENT	BIT_ULL(0)	/* Is present */
#define IOMMU_PAGE_RW		BIT_ULL(1)	/* Writeable */
#define IOMMU_PAGE_USER		BIT_ULL(2)	/* Userspace addressable */
#define IOMMU_PAGE_PWT		BIT_ULL(3)	/* Page write through */
#define IOMMU_PAGE_PCD		BIT_ULL(4)	/* Page cache disabled */
#define IOMMU_PAGE_ACCESS	BIT_ULL(5)	/* Was accessed (updated by IOMMU) */
#define IOMMU_PAGE_DIRTY	BIT_ULL(6)	/* Was written to (updated by IOMMU) */
#define IOMMU_PAGE_PSE		BIT_ULL(7)	/* Page Size Extensions */
#define IOMMU_PAGE_NX		BIT_ULL(63)	/* No execute */

#define MAX_PTRS_PER_PAGE	512

#define IOMMU_PAGE_SIZE_2M	BIT_ULL(21)
#define IOMMU_PAGE_SIZE_1G	BIT_ULL(30)

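/*
 * Number of page table levels used by the v2 (guest-compatible) page
 * table, as configured at IOMMU initialization.
 */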
static inline int get_pgtable_level(void)
{
	return amd_iommu_gpt_level;
}

static inline bool is_large_pte(u64 pte)
{
	return (pte & IOMMU_PAGE_PSE);
}

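/*
 * Build a non-leaf entry pointing to the next-level table at @page:
 * physical address of the table plus present/write/user/accessed bits.
 */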
static inline u64 set_pgtable_attr(u64 *page)
{
	u64 prot;

	prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
	prot |= IOMMU_PAGE_ACCESS;

	return (iommu_virt_to_phys(page) | prot);
}

static inline void *get_pgtable_pte(u64 pte)
{
	return iommu_phys_to_virt(pte & PM_ADDR_MASK);
}

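/*
 * Build a leaf PTE mapping @paddr. The PSE bit marks 2M and 1G large
 * pages; write permission depends on IOMMU_PROT_IW in @prot.
 */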
static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
{
	u64 pte;

	pte = __sme_set(paddr & PM_ADDR_MASK);
	pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
	pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;

	if (prot & IOMMU_PROT_IW)
		pte |= IOMMU_PAGE_RW;

	/* Large page */
	if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
		pte |= IOMMU_PAGE_PSE;

	return pte;
}

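/* Largest supported page size (1G, 2M or 4K) not exceeding @size. */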
static inline u64 get_alloc_page_size(u64 size)
{
	if (size >= IOMMU_PAGE_SIZE_1G)
		return IOMMU_PAGE_SIZE_1G;

	if (size >= IOMMU_PAGE_SIZE_2M)
		return IOMMU_PAGE_SIZE_2M;

	return PAGE_SIZE;
}

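/*
 * Lowest level down to which v2_alloc_pte() has to walk for a mapping
 * of @pg_size.
 */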
static inline int page_size_to_level(u64 pg_size)
{
	if (pg_size == IOMMU_PAGE_SIZE_1G)
		return PAGE_MODE_3_LEVEL;
	if (pg_size == IOMMU_PAGE_SIZE_2M)
		return PAGE_MODE_2_LEVEL;

	return PAGE_MODE_1_LEVEL;
}

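/*
 * Recursively free the table at @pt and everything below it. @level is
 * the number of table levels remaining, including @pt; the last level
 * of tables contains only leaf PTEs and is freed without being walked.
 */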
static void free_pgtable(u64 *pt, int level)
{
	u64 *p;
	int i;

	for (i = 0; i < MAX_PTRS_PER_PAGE; i++) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		if (is_large_pte(pt[i]))
			continue;

		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
		p = get_pgtable_pte(pt[i]);
		if (level > 2)
			free_pgtable(p, level - 1);
		else
			iommu_free_page(p);
	}

	iommu_free_page(pt);
}

/*
 * Walk the page table for @iova, allocating intermediate tables as
 * needed, and return the PTE slot for a mapping of size @pg_size.
 * Conflicting mappings found along the way are torn down and *@updated
 * is set so the caller knows a flush is required.
 */
static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
			 unsigned long pg_size, gfp_t gfp, bool *updated)
{
	u64 *pte, *page;
	int level, end_level;

	level = get_pgtable_level() - 1;
	end_level = page_size_to_level(pg_size);
	pte = &pgd[PM_LEVEL_INDEX(level, iova)];
	iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);

	while (level >= end_level) {
		u64 __pte, __npte;

		__pte = *pte;

		if (IOMMU_PTE_PRESENT(__pte) && is_large_pte(__pte)) {
			/* Unmap large pte */
			cmpxchg64(pte, *pte, 0ULL);
			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte)) {
			page = iommu_alloc_page_node(nid, gfp);
			if (!page)
				return NULL;

			__npte = set_pgtable_attr(page);
			/* pte could have been changed somewhere. */
			if (!try_cmpxchg64(pte, &__pte, __npte))
				iommu_free_page(page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		level -= 1;
		pte = get_pgtable_pte(__pte);
		pte = &pte[PM_LEVEL_INDEX(level, iova)];
	}

	/* Tear down existing pte entries */
	if (IOMMU_PTE_PRESENT(*pte)) {
		u64 *__pte;

		*updated = true;
		__pte = get_pgtable_pte(*pte);
		cmpxchg64(pte, *pte, 0ULL);
		if (pg_size == IOMMU_PAGE_SIZE_1G)
			free_pgtable(__pte, end_level - 1);
		else if (pg_size == IOMMU_PAGE_SIZE_2M)
			iommu_free_page(__pte);
	}

	return pte;
}

/*
 * This function checks if there is a PTE for a given dma address.
 * If there is one, it returns the pointer to it and reports the page
 * size it maps in @page_size.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long iova, unsigned long *page_size)
{
	u64 *pte;
	int level;

	level = get_pgtable_level() - 1;
	pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
	/* Default page size is 4K */
	*page_size = PAGE_SIZE;

	while (level) {
		/* Not present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Walk to the next level */
		pte = get_pgtable_pte(*pte);
		pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];

		/* Large page */
		if (is_large_pte(*pte)) {
			if (level == PAGE_MODE_3_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_1G;
			else if (level == PAGE_MODE_2_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_2M;
			else
				return NULL;	/* Wrongly set PSE bit in PTE */

			break;
		}

		level -= 1;
	}

	return pte;
}

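/*
 * map_pages() callback: install leaf PTEs for @pgcount pages of @pgsize
 * starting at @iova. If any existing entries had to be replaced, the
 * range is flushed before returning.
 */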
static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
	u64 *pte;
	unsigned long map_size;
	unsigned long mapped_size = 0;
	unsigned long o_iova = iova;
	size_t size = pgcount << __ffs(pgsize);
	int ret = 0;
	bool updated = false;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)
		return -EINVAL;

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	while (mapped_size < size) {
		map_size = get_alloc_page_size(pgsize);
		pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,
				   iova, map_size, gfp, &updated);
		if (!pte) {
			ret = -EINVAL;
			goto out;
		}

		*pte = set_pte_attr(paddr, map_size, prot);

		iova += map_size;
		paddr += map_size;
		mapped_size += map_size;
	}

out:
	if (updated) {
		struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
		unsigned long flags;

		spin_lock_irqsave(&pdom->lock, flags);
		amd_iommu_domain_flush_pages(pdom, o_iova, size);
		spin_unlock_irqrestore(&pdom->lock, flags);
	}

	if (mapped)
		*mapped += mapped_size;

	return ret;
}

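/*
 * unmap_pages() callback: clear the leaf PTEs backing the requested
 * range and return the number of bytes actually unmapped. The walk
 * stops early if no PTE is present for the current IOVA.
 */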
static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
	unsigned long unmap_size;
	unsigned long unmapped = 0;
	size_t size = pgcount << __ffs(pgsize);
	u64 *pte;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (!pte)
			return unmapped;

		*pte = 0ULL;

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}

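/*
 * iova_to_phys() callback: walk to the leaf PTE for @iova and combine
 * the physical frame it holds with the offset into the (possibly large)
 * page. Returns 0 if nothing is mapped at @iova.
 */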
static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	pte = fetch_pte(pgtable, iova, &pte_pgsize);
	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}

/*
 * ----------------------------------------------------
 */
static void v2_free_pgtable(struct io_pgtable *iop)
{
	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);

	if (!pgtable || !pgtable->pgd)
		return;

	/* Free page table */
	free_pgtable(pgtable->pgd, get_pgtable_level());
	pgtable->pgd = NULL;
}

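/*
 * Allocate the root table on the domain's NUMA node and fill in the
 * io_pgtable ops and configuration: supported page sizes and the
 * input/output address widths (57-bit input for 5-level tables).
 */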
static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
	int ias = IOMMU_IN_ADDR_BIT_SIZE;

	pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
	if (!pgtable->pgd)
		return NULL;

	if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
		ias = 57;

	pgtable->pgtbl.ops.map_pages    = iommu_v2_map_pages;
	pgtable->pgtbl.ops.unmap_pages  = iommu_v2_unmap_pages;
	pgtable->pgtbl.ops.iova_to_phys = iommu_v2_iova_to_phys;

	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
	cfg->ias           = ias;
	cfg->oas           = IOMMU_OUT_ADDR_BIT_SIZE;

	return &pgtable->pgtbl;
}

struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
	.alloc	= v2_alloc_pgtable,
	.free	= v2_free_pgtable,
};