xref: /linux/drivers/iommu/generic_pt/iommu_pt.h (revision f2161d5f1aae21a42b0a64d87e10cb31db423f42)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
4  *
5  * "Templated C code" for implementing the iommu operations for page tables.
6  * This is compiled multiple times, over all the page table formats to pick up
7  * the per-format definitions.
8  */
9 #ifndef __GENERIC_PT_IOMMU_PT_H
10 #define __GENERIC_PT_IOMMU_PT_H
11 
12 #include "pt_iter.h"
13 
14 #include <linux/export.h>
15 #include <linux/iommu.h>
16 #include "../iommu-pages.h"
17 #include <linux/cleanup.h>
18 #include <linux/dma-mapping.h>
19 
20 enum {
21 	SW_BIT_CACHE_FLUSH_DONE = 0,
22 };
23 
flush_writes_range(const struct pt_state * pts,unsigned int start_index,unsigned int end_index)24 static void flush_writes_range(const struct pt_state *pts,
25 			       unsigned int start_index, unsigned int end_index)
26 {
27 	if (pts_feature(pts, PT_FEAT_DMA_INCOHERENT))
28 		iommu_pages_flush_incoherent(
29 			iommu_from_common(pts->range->common)->iommu_device,
30 			pts->table, start_index * PT_ITEM_WORD_SIZE,
31 			(end_index - start_index) * PT_ITEM_WORD_SIZE);
32 }
33 
flush_writes_item(const struct pt_state * pts)34 static void flush_writes_item(const struct pt_state *pts)
35 {
36 	if (pts_feature(pts, PT_FEAT_DMA_INCOHERENT))
37 		iommu_pages_flush_incoherent(
38 			iommu_from_common(pts->range->common)->iommu_device,
39 			pts->table, pts->index * PT_ITEM_WORD_SIZE,
40 			PT_ITEM_WORD_SIZE);
41 }
42 
gather_range_pages(struct iommu_iotlb_gather * iotlb_gather,struct pt_iommu * iommu_table,pt_vaddr_t iova,pt_vaddr_t len,struct iommu_pages_list * free_list)43 static void gather_range_pages(struct iommu_iotlb_gather *iotlb_gather,
44 			       struct pt_iommu *iommu_table, pt_vaddr_t iova,
45 			       pt_vaddr_t len,
46 			       struct iommu_pages_list *free_list)
47 {
48 	struct pt_common *common = common_from_iommu(iommu_table);
49 
50 	if (pt_feature(common, PT_FEAT_DMA_INCOHERENT))
51 		iommu_pages_stop_incoherent_list(free_list,
52 						 iommu_table->iommu_device);
53 
54 	if (pt_feature(common, PT_FEAT_FLUSH_RANGE_NO_GAPS) &&
55 	    iommu_iotlb_gather_is_disjoint(iotlb_gather, iova, len)) {
56 		iommu_iotlb_sync(&iommu_table->domain, iotlb_gather);
57 		/*
58 		 * Note that the sync frees the gather's free list, so we must
59 		 * not have any pages on that list that are covered by iova/len
60 		 */
61 	} else if (pt_feature(common, PT_FEAT_FLUSH_RANGE)) {
62 		iommu_iotlb_gather_add_range(iotlb_gather, iova, len);
63 	}
64 
65 	iommu_pages_list_splice(free_list, &iotlb_gather->freelist);
66 }
67 
68 #define DOMAIN_NS(op) CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), op)
69 
make_range_ul(struct pt_common * common,struct pt_range * range,unsigned long iova,unsigned long len)70 static int make_range_ul(struct pt_common *common, struct pt_range *range,
71 			 unsigned long iova, unsigned long len)
72 {
73 	unsigned long last;
74 
75 	if (unlikely(len == 0))
76 		return -EINVAL;
77 
78 	if (check_add_overflow(iova, len - 1, &last))
79 		return -EOVERFLOW;
80 
81 	*range = pt_make_range(common, iova, last);
82 	if (sizeof(iova) > sizeof(range->va)) {
83 		if (unlikely(range->va != iova || range->last_va != last))
84 			return -EOVERFLOW;
85 	}
86 	return 0;
87 }
88 
make_range_u64(struct pt_common * common,struct pt_range * range,u64 iova,u64 len)89 static __maybe_unused int make_range_u64(struct pt_common *common,
90 					 struct pt_range *range, u64 iova,
91 					 u64 len)
92 {
93 	if (unlikely(iova > ULONG_MAX || len > ULONG_MAX))
94 		return -EOVERFLOW;
95 	return make_range_ul(common, range, iova, len);
96 }
97 
98 /*
99  * Some APIs use unsigned long, while othersuse dma_addr_t as the type. Dispatch
100  * to the correct validation based on the type.
101  */
102 #define make_range_no_check(common, range, iova, len)                   \
103 	({                                                              \
104 		int ret;                                                \
105 		if (sizeof(iova) > sizeof(unsigned long) ||             \
106 		    sizeof(len) > sizeof(unsigned long))                \
107 			ret = make_range_u64(common, range, iova, len); \
108 		else                                                    \
109 			ret = make_range_ul(common, range, iova, len);  \
110 		ret;                                                    \
111 	})
112 
113 #define make_range(common, range, iova, len)                             \
114 	({                                                               \
115 		int ret = make_range_no_check(common, range, iova, len); \
116 		if (!ret)                                                \
117 			ret = pt_check_range(range);                     \
118 		ret;                                                     \
119 	})
120 
compute_best_pgsize(struct pt_state * pts,pt_oaddr_t oa)121 static inline unsigned int compute_best_pgsize(struct pt_state *pts,
122 					       pt_oaddr_t oa)
123 {
124 	struct pt_iommu *iommu_table = iommu_from_common(pts->range->common);
125 
126 	if (!pt_can_have_leaf(pts))
127 		return 0;
128 
129 	/*
130 	 * The page size is limited by the domain's bitmap. This allows the core
131 	 * code to reduce the supported page sizes by changing the bitmap.
132 	 */
133 	return pt_compute_best_pgsize(pt_possible_sizes(pts) &
134 					      iommu_table->domain.pgsize_bitmap,
135 				      pts->range->va, pts->range->last_va, oa);
136 }
137 
__do_iova_to_phys(struct pt_range * range,void * arg,unsigned int level,struct pt_table_p * table,pt_level_fn_t descend_fn)138 static __always_inline int __do_iova_to_phys(struct pt_range *range, void *arg,
139 					     unsigned int level,
140 					     struct pt_table_p *table,
141 					     pt_level_fn_t descend_fn)
142 {
143 	struct pt_state pts = pt_init(range, level, table);
144 	pt_oaddr_t *res = arg;
145 
146 	switch (pt_load_single_entry(&pts)) {
147 	case PT_ENTRY_EMPTY:
148 		return -ENOENT;
149 	case PT_ENTRY_TABLE:
150 		return pt_descend(&pts, arg, descend_fn);
151 	case PT_ENTRY_OA:
152 		*res = pt_entry_oa_exact(&pts);
153 		return 0;
154 	}
155 	return -ENOENT;
156 }
157 PT_MAKE_LEVELS(__iova_to_phys, __do_iova_to_phys);
158 
159 /**
160  * iova_to_phys() - Return the output address for the given IOVA
161  * @domain: Table to query
162  * @iova: IO virtual address to query
163  *
164  * Determine the output address from the given IOVA. @iova may have any
165  * alignment, the returned physical will be adjusted with any sub page offset.
166  *
167  * Context: The caller must hold a read range lock that includes @iova.
168  *
169  * Return: 0 if there is no translation for the given iova.
170  */
DOMAIN_NS(iova_to_phys)171 phys_addr_t DOMAIN_NS(iova_to_phys)(struct iommu_domain *domain,
172 				    dma_addr_t iova)
173 {
174 	struct pt_iommu *iommu_table =
175 		container_of(domain, struct pt_iommu, domain);
176 	struct pt_range range;
177 	pt_oaddr_t res;
178 	int ret;
179 
180 	ret = make_range(common_from_iommu(iommu_table), &range, iova, 1);
181 	if (ret)
182 		return ret;
183 
184 	ret = pt_walk_range(&range, __iova_to_phys, &res);
185 	/* PHYS_ADDR_MAX would be a better error code */
186 	if (ret)
187 		return 0;
188 	return res;
189 }
190 EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(iova_to_phys), "GENERIC_PT_IOMMU");
191 
192 struct pt_iommu_dirty_args {
193 	struct iommu_dirty_bitmap *dirty;
194 	unsigned int flags;
195 };
196 
record_dirty(struct pt_state * pts,struct pt_iommu_dirty_args * dirty,unsigned int num_contig_lg2)197 static void record_dirty(struct pt_state *pts,
198 			 struct pt_iommu_dirty_args *dirty,
199 			 unsigned int num_contig_lg2)
200 {
201 	pt_vaddr_t dirty_len;
202 
203 	if (num_contig_lg2 != ilog2(1)) {
204 		unsigned int index = pts->index;
205 		unsigned int end_index = log2_set_mod_max_t(
206 			unsigned int, pts->index, num_contig_lg2);
207 
208 		/* Adjust for being contained inside a contiguous page */
209 		end_index = min(end_index, pts->end_index);
210 		dirty_len = (end_index - index) *
211 				log2_to_int(pt_table_item_lg2sz(pts));
212 	} else {
213 		dirty_len = log2_to_int(pt_table_item_lg2sz(pts));
214 	}
215 
216 	if (dirty->dirty->bitmap)
217 		iova_bitmap_set(dirty->dirty->bitmap, pts->range->va,
218 				dirty_len);
219 
220 	if (!(dirty->flags & IOMMU_DIRTY_NO_CLEAR)) {
221 		/*
222 		 * No write log required because DMA incoherence and atomic
223 		 * dirty tracking bits can't work together
224 		 */
225 		pt_entry_make_write_clean(pts);
226 		iommu_iotlb_gather_add_range(dirty->dirty->gather,
227 					     pts->range->va, dirty_len);
228 	}
229 }
230 
__read_and_clear_dirty(struct pt_range * range,void * arg,unsigned int level,struct pt_table_p * table)231 static inline int __read_and_clear_dirty(struct pt_range *range, void *arg,
232 					 unsigned int level,
233 					 struct pt_table_p *table)
234 {
235 	struct pt_state pts = pt_init(range, level, table);
236 	struct pt_iommu_dirty_args *dirty = arg;
237 	int ret;
238 
239 	for_each_pt_level_entry(&pts) {
240 		if (pts.type == PT_ENTRY_TABLE) {
241 			ret = pt_descend(&pts, arg, __read_and_clear_dirty);
242 			if (ret)
243 				return ret;
244 			continue;
245 		}
246 		if (pts.type == PT_ENTRY_OA && pt_entry_is_write_dirty(&pts))
247 			record_dirty(&pts, dirty,
248 				     pt_entry_num_contig_lg2(&pts));
249 	}
250 	return 0;
251 }
252 
253 /**
254  * read_and_clear_dirty() - Manipulate the HW set write dirty state
255  * @domain: Domain to manipulate
256  * @iova: IO virtual address to start
257  * @size: Length of the IOVA
258  * @flags: A bitmap of IOMMU_DIRTY_NO_CLEAR
259  * @dirty: Place to store the dirty bits
260  *
261  * Iterate over all the entries in the mapped range and record their write dirty
262  * status in iommu_dirty_bitmap. If IOMMU_DIRTY_NO_CLEAR is not specified then
263  * the entries will be left dirty, otherwise they are returned to being not
264  * write dirty.
265  *
266  * Context: The caller must hold a read range lock that includes @iova.
267  *
268  * Returns: -ERRNO on failure, 0 on success.
269  */
DOMAIN_NS(read_and_clear_dirty)270 int DOMAIN_NS(read_and_clear_dirty)(struct iommu_domain *domain,
271 				    unsigned long iova, size_t size,
272 				    unsigned long flags,
273 				    struct iommu_dirty_bitmap *dirty)
274 {
275 	struct pt_iommu *iommu_table =
276 		container_of(domain, struct pt_iommu, domain);
277 	struct pt_iommu_dirty_args dirty_args = {
278 		.dirty = dirty,
279 		.flags = flags,
280 	};
281 	struct pt_range range;
282 	int ret;
283 
284 #if !IS_ENABLED(CONFIG_IOMMUFD_DRIVER) || !defined(pt_entry_is_write_dirty)
285 	return -EOPNOTSUPP;
286 #endif
287 
288 	ret = make_range(common_from_iommu(iommu_table), &range, iova, size);
289 	if (ret)
290 		return ret;
291 
292 	ret = pt_walk_range(&range, __read_and_clear_dirty, &dirty_args);
293 	PT_WARN_ON(ret);
294 	return ret;
295 }
296 EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(read_and_clear_dirty), "GENERIC_PT_IOMMU");
297 
__set_dirty(struct pt_range * range,void * arg,unsigned int level,struct pt_table_p * table)298 static inline int __set_dirty(struct pt_range *range, void *arg,
299 			      unsigned int level, struct pt_table_p *table)
300 {
301 	struct pt_state pts = pt_init(range, level, table);
302 
303 	switch (pt_load_single_entry(&pts)) {
304 	case PT_ENTRY_EMPTY:
305 		return -ENOENT;
306 	case PT_ENTRY_TABLE:
307 		return pt_descend(&pts, arg, __set_dirty);
308 	case PT_ENTRY_OA:
309 		if (!pt_entry_make_write_dirty(&pts))
310 			return -EAGAIN;
311 		return 0;
312 	}
313 	return -ENOENT;
314 }
315 
NS(set_dirty)316 static int __maybe_unused NS(set_dirty)(struct pt_iommu *iommu_table,
317 					dma_addr_t iova)
318 {
319 	struct pt_range range;
320 	int ret;
321 
322 	ret = make_range(common_from_iommu(iommu_table), &range, iova, 1);
323 	if (ret)
324 		return ret;
325 
326 	/*
327 	 * Note: There is no locking here yet, if the test suite races this it
328 	 * can crash. It should use RCU locking eventually.
329 	 */
330 	return pt_walk_range(&range, __set_dirty, NULL);
331 }
332 
333 struct pt_iommu_collect_args {
334 	struct iommu_pages_list free_list;
335 	/* Fail if any OAs are within the range */
336 	u8 check_mapped : 1;
337 };
338 
__collect_tables(struct pt_range * range,void * arg,unsigned int level,struct pt_table_p * table)339 static int __collect_tables(struct pt_range *range, void *arg,
340 			    unsigned int level, struct pt_table_p *table)
341 {
342 	struct pt_state pts = pt_init(range, level, table);
343 	struct pt_iommu_collect_args *collect = arg;
344 	int ret;
345 
346 	if (!collect->check_mapped && !pt_can_have_table(&pts))
347 		return 0;
348 
349 	for_each_pt_level_entry(&pts) {
350 		if (pts.type == PT_ENTRY_TABLE) {
351 			iommu_pages_list_add(&collect->free_list, pts.table_lower);
352 			ret = pt_descend(&pts, arg, __collect_tables);
353 			if (ret)
354 				return ret;
355 			continue;
356 		}
357 		if (pts.type == PT_ENTRY_OA && collect->check_mapped)
358 			return -EADDRINUSE;
359 	}
360 	return 0;
361 }
362 
363 enum alloc_mode {ALLOC_NORMAL, ALLOC_DEFER_COHERENT_FLUSH};
364 
365 /* Allocate a table, the empty table will be ready to be installed. */
_table_alloc(struct pt_common * common,size_t lg2sz,gfp_t gfp,enum alloc_mode mode)366 static inline struct pt_table_p *_table_alloc(struct pt_common *common,
367 					      size_t lg2sz, gfp_t gfp,
368 					      enum alloc_mode mode)
369 {
370 	struct pt_iommu *iommu_table = iommu_from_common(common);
371 	struct pt_table_p *table_mem;
372 
373 	table_mem = iommu_alloc_pages_node_sz(iommu_table->nid, gfp,
374 					      log2_to_int(lg2sz));
375 	if (!table_mem)
376 		return ERR_PTR(-ENOMEM);
377 
378 	if (pt_feature(common, PT_FEAT_DMA_INCOHERENT) &&
379 	    mode == ALLOC_NORMAL) {
380 		int ret = iommu_pages_start_incoherent(
381 			table_mem, iommu_table->iommu_device);
382 		if (ret) {
383 			iommu_free_pages(table_mem);
384 			return ERR_PTR(ret);
385 		}
386 	}
387 	return table_mem;
388 }
389 
table_alloc_top(struct pt_common * common,uintptr_t top_of_table,gfp_t gfp,enum alloc_mode mode)390 static inline struct pt_table_p *table_alloc_top(struct pt_common *common,
391 						 uintptr_t top_of_table,
392 						 gfp_t gfp,
393 						 enum alloc_mode mode)
394 {
395 	/*
396 	 * Top doesn't need the free list or otherwise, so it technically
397 	 * doesn't need to use iommu pages. Use the API anyhow as the top is
398 	 * usually not smaller than PAGE_SIZE to keep things simple.
399 	 */
400 	return _table_alloc(common, pt_top_memsize_lg2(common, top_of_table),
401 			    gfp, mode);
402 }
403 
404 /* Allocate an interior table */
table_alloc(const struct pt_state * parent_pts,gfp_t gfp,enum alloc_mode mode)405 static inline struct pt_table_p *table_alloc(const struct pt_state *parent_pts,
406 					     gfp_t gfp, enum alloc_mode mode)
407 {
408 	struct pt_state child_pts =
409 		pt_init(parent_pts->range, parent_pts->level - 1, NULL);
410 
411 	return _table_alloc(parent_pts->range->common,
412 			    pt_num_items_lg2(&child_pts) +
413 				    ilog2(PT_ITEM_WORD_SIZE),
414 			    gfp, mode);
415 }
416 
pt_iommu_new_table(struct pt_state * pts,struct pt_write_attrs * attrs)417 static inline int pt_iommu_new_table(struct pt_state *pts,
418 				     struct pt_write_attrs *attrs)
419 {
420 	struct pt_table_p *table_mem;
421 	phys_addr_t phys;
422 
423 	/* Given PA/VA/length can't be represented */
424 	if (PT_WARN_ON(!pt_can_have_table(pts)))
425 		return -ENXIO;
426 
427 	table_mem = table_alloc(pts, attrs->gfp, ALLOC_NORMAL);
428 	if (IS_ERR(table_mem))
429 		return PTR_ERR(table_mem);
430 
431 	phys = virt_to_phys(table_mem);
432 	if (!pt_install_table(pts, phys, attrs)) {
433 		iommu_pages_free_incoherent(
434 			table_mem,
435 			iommu_from_common(pts->range->common)->iommu_device);
436 		return -EAGAIN;
437 	}
438 
439 	if (pts_feature(pts, PT_FEAT_DMA_INCOHERENT)) {
440 		flush_writes_item(pts);
441 		pt_set_sw_bit_release(pts, SW_BIT_CACHE_FLUSH_DONE);
442 	}
443 
444 	if (IS_ENABLED(CONFIG_DEBUG_GENERIC_PT)) {
445 		/*
446 		 * The underlying table can't store the physical table address.
447 		 * This happens when kunit testing tables outside their normal
448 		 * environment where a CPU might be limited.
449 		 */
450 		pt_load_single_entry(pts);
451 		if (PT_WARN_ON(pt_table_pa(pts) != phys)) {
452 			pt_clear_entries(pts, ilog2(1));
453 			iommu_pages_free_incoherent(
454 				table_mem, iommu_from_common(pts->range->common)
455 						   ->iommu_device);
456 			return -EINVAL;
457 		}
458 	}
459 
460 	pts->table_lower = table_mem;
461 	return 0;
462 }
463 
464 struct pt_iommu_map_args {
465 	struct iommu_iotlb_gather *iotlb_gather;
466 	struct pt_write_attrs attrs;
467 	pt_oaddr_t oa;
468 	unsigned int leaf_pgsize_lg2;
469 	unsigned int leaf_level;
470 };
471 
472 /*
473  * This will recursively check any tables in the block to validate they are
474  * empty and then free them through the gather.
475  */
clear_contig(const struct pt_state * start_pts,struct iommu_iotlb_gather * iotlb_gather,unsigned int step,unsigned int pgsize_lg2)476 static int clear_contig(const struct pt_state *start_pts,
477 			struct iommu_iotlb_gather *iotlb_gather,
478 			unsigned int step, unsigned int pgsize_lg2)
479 {
480 	struct pt_iommu *iommu_table =
481 		iommu_from_common(start_pts->range->common);
482 	struct pt_range range = *start_pts->range;
483 	struct pt_state pts =
484 		pt_init(&range, start_pts->level, start_pts->table);
485 	struct pt_iommu_collect_args collect = { .check_mapped = true };
486 	int ret;
487 
488 	pts.index = start_pts->index;
489 	pts.end_index = start_pts->index + step;
490 	for (; _pt_iter_load(&pts); pt_next_entry(&pts)) {
491 		if (pts.type == PT_ENTRY_TABLE) {
492 			collect.free_list =
493 				IOMMU_PAGES_LIST_INIT(collect.free_list);
494 			ret = pt_walk_descend_all(&pts, __collect_tables,
495 						  &collect);
496 			if (ret)
497 				return ret;
498 
499 			/*
500 			 * The table item must be cleared before we can update
501 			 * the gather
502 			 */
503 			pt_clear_entries(&pts, ilog2(1));
504 			flush_writes_item(&pts);
505 
506 			iommu_pages_list_add(&collect.free_list,
507 					     pt_table_ptr(&pts));
508 			gather_range_pages(
509 				iotlb_gather, iommu_table, range.va,
510 				log2_to_int(pt_table_item_lg2sz(&pts)),
511 				&collect.free_list);
512 		} else if (pts.type != PT_ENTRY_EMPTY) {
513 			return -EADDRINUSE;
514 		}
515 	}
516 	return 0;
517 }
518 
__map_range_leaf(struct pt_range * range,void * arg,unsigned int level,struct pt_table_p * table)519 static int __map_range_leaf(struct pt_range *range, void *arg,
520 			    unsigned int level, struct pt_table_p *table)
521 {
522 	struct pt_state pts = pt_init(range, level, table);
523 	struct pt_iommu_map_args *map = arg;
524 	unsigned int leaf_pgsize_lg2 = map->leaf_pgsize_lg2;
525 	unsigned int start_index;
526 	pt_oaddr_t oa = map->oa;
527 	unsigned int step;
528 	bool need_contig;
529 	int ret = 0;
530 
531 	PT_WARN_ON(map->leaf_level != level);
532 	PT_WARN_ON(!pt_can_have_leaf(&pts));
533 
534 	step = log2_to_int_t(unsigned int,
535 			     leaf_pgsize_lg2 - pt_table_item_lg2sz(&pts));
536 	need_contig = leaf_pgsize_lg2 != pt_table_item_lg2sz(&pts);
537 
538 	_pt_iter_first(&pts);
539 	start_index = pts.index;
540 	do {
541 		pts.type = pt_load_entry_raw(&pts);
542 		if (pts.type != PT_ENTRY_EMPTY || need_contig) {
543 			if (pts.index != start_index)
544 				pt_index_to_va(&pts);
545 			ret = clear_contig(&pts, map->iotlb_gather, step,
546 					   leaf_pgsize_lg2);
547 			if (ret)
548 				break;
549 		}
550 
551 		if (IS_ENABLED(CONFIG_DEBUG_GENERIC_PT)) {
552 			pt_index_to_va(&pts);
553 			PT_WARN_ON(compute_best_pgsize(&pts, oa) !=
554 				   leaf_pgsize_lg2);
555 		}
556 		pt_install_leaf_entry(&pts, oa, leaf_pgsize_lg2, &map->attrs);
557 
558 		oa += log2_to_int(leaf_pgsize_lg2);
559 		pts.index += step;
560 	} while (pts.index < pts.end_index);
561 
562 	flush_writes_range(&pts, start_index, pts.index);
563 
564 	map->oa = oa;
565 	return ret;
566 }
567 
__map_range(struct pt_range * range,void * arg,unsigned int level,struct pt_table_p * table)568 static int __map_range(struct pt_range *range, void *arg, unsigned int level,
569 		       struct pt_table_p *table)
570 {
571 	struct pt_state pts = pt_init(range, level, table);
572 	struct pt_iommu_map_args *map = arg;
573 	int ret;
574 
575 	PT_WARN_ON(map->leaf_level == level);
576 	PT_WARN_ON(!pt_can_have_table(&pts));
577 
578 	_pt_iter_first(&pts);
579 
580 	/* Descend to a child table */
581 	do {
582 		pts.type = pt_load_entry_raw(&pts);
583 
584 		if (pts.type != PT_ENTRY_TABLE) {
585 			if (pts.type != PT_ENTRY_EMPTY)
586 				return -EADDRINUSE;
587 			ret = pt_iommu_new_table(&pts, &map->attrs);
588 			if (ret) {
589 				/*
590 				 * Racing with another thread installing a table
591 				 */
592 				if (ret == -EAGAIN)
593 					continue;
594 				return ret;
595 			}
596 		} else {
597 			pts.table_lower = pt_table_ptr(&pts);
598 			/*
599 			 * Racing with a shared pt_iommu_new_table()? The other
600 			 * thread is still flushing the cache, so we have to
601 			 * also flush it to ensure that when our thread's map
602 			 * completes all the table items leading to our mapping
603 			 * are visible.
604 			 *
605 			 * This requires the pt_set_bit_release() to be a
606 			 * release of the cache flush so that this can acquire
607 			 * visibility at the iommu.
608 			 */
609 			if (pts_feature(&pts, PT_FEAT_DMA_INCOHERENT) &&
610 			    !pt_test_sw_bit_acquire(&pts,
611 						    SW_BIT_CACHE_FLUSH_DONE))
612 				flush_writes_item(&pts);
613 		}
614 
615 		/*
616 		 * The already present table can possibly be shared with another
617 		 * concurrent map.
618 		 */
619 		if (map->leaf_level == level - 1)
620 			ret = pt_descend(&pts, arg, __map_range_leaf);
621 		else
622 			ret = pt_descend(&pts, arg, __map_range);
623 		if (ret)
624 			return ret;
625 
626 		pts.index++;
627 		pt_index_to_va(&pts);
628 		if (pts.index >= pts.end_index)
629 			break;
630 	} while (true);
631 	return 0;
632 }
633 
634 /*
635  * Fast path for the easy case of mapping a 4k page to an already allocated
636  * table. This is a common workload. If it returns EAGAIN run the full algorithm
637  * instead.
638  */
__do_map_single_page(struct pt_range * range,void * arg,unsigned int level,struct pt_table_p * table,pt_level_fn_t descend_fn)639 static __always_inline int __do_map_single_page(struct pt_range *range,
640 						void *arg, unsigned int level,
641 						struct pt_table_p *table,
642 						pt_level_fn_t descend_fn)
643 {
644 	struct pt_state pts = pt_init(range, level, table);
645 	struct pt_iommu_map_args *map = arg;
646 
647 	pts.type = pt_load_single_entry(&pts);
648 	if (level == 0) {
649 		if (pts.type != PT_ENTRY_EMPTY)
650 			return -EADDRINUSE;
651 		pt_install_leaf_entry(&pts, map->oa, PAGE_SHIFT,
652 				      &map->attrs);
653 		/* No flush, not used when incoherent */
654 		map->oa += PAGE_SIZE;
655 		return 0;
656 	}
657 	if (pts.type == PT_ENTRY_TABLE)
658 		return pt_descend(&pts, arg, descend_fn);
659 	/* Something else, use the slow path */
660 	return -EAGAIN;
661 }
662 PT_MAKE_LEVELS(__map_single_page, __do_map_single_page);
663 
664 /*
665  * Add a table to the top, increasing the top level as much as necessary to
666  * encompass range.
667  */
increase_top(struct pt_iommu * iommu_table,struct pt_range * range,struct pt_iommu_map_args * map)668 static int increase_top(struct pt_iommu *iommu_table, struct pt_range *range,
669 			struct pt_iommu_map_args *map)
670 {
671 	struct iommu_pages_list free_list = IOMMU_PAGES_LIST_INIT(free_list);
672 	struct pt_common *common = common_from_iommu(iommu_table);
673 	uintptr_t top_of_table = READ_ONCE(common->top_of_table);
674 	uintptr_t new_top_of_table = top_of_table;
675 	struct pt_table_p *table_mem;
676 	unsigned int new_level;
677 	spinlock_t *domain_lock;
678 	unsigned long flags;
679 	int ret;
680 
681 	while (true) {
682 		struct pt_range top_range =
683 			_pt_top_range(common, new_top_of_table);
684 		struct pt_state pts = pt_init_top(&top_range);
685 
686 		top_range.va = range->va;
687 		top_range.last_va = range->last_va;
688 
689 		if (!pt_check_range(&top_range) &&
690 		    map->leaf_level <= pts.level) {
691 			new_level = pts.level;
692 			break;
693 		}
694 
695 		pts.level++;
696 		if (pts.level > PT_MAX_TOP_LEVEL ||
697 		    pt_table_item_lg2sz(&pts) >= common->max_vasz_lg2) {
698 			ret = -ERANGE;
699 			goto err_free;
700 		}
701 
702 		table_mem =
703 			table_alloc_top(common, _pt_top_set(NULL, pts.level),
704 					map->attrs.gfp, ALLOC_DEFER_COHERENT_FLUSH);
705 		if (IS_ERR(table_mem)) {
706 			ret = PTR_ERR(table_mem);
707 			goto err_free;
708 		}
709 		iommu_pages_list_add(&free_list, table_mem);
710 
711 		/* The new table links to the lower table always at index 0 */
712 		top_range.va = 0;
713 		top_range.top_level = pts.level;
714 		pts.table_lower = pts.table;
715 		pts.table = table_mem;
716 		pt_load_single_entry(&pts);
717 		PT_WARN_ON(pts.index != 0);
718 		pt_install_table(&pts, virt_to_phys(pts.table_lower),
719 				 &map->attrs);
720 		new_top_of_table = _pt_top_set(pts.table, pts.level);
721 	}
722 
723 	/*
724 	 * Avoid double flushing, flush it once after all pt_install_table()
725 	 */
726 	if (pt_feature(common, PT_FEAT_DMA_INCOHERENT)) {
727 		ret = iommu_pages_start_incoherent_list(
728 			&free_list, iommu_table->iommu_device);
729 		if (ret)
730 			goto err_free;
731 	}
732 
733 	/*
734 	 * top_of_table is write locked by the spinlock, but readers can use
735 	 * READ_ONCE() to get the value. Since we encode both the level and the
736 	 * pointer in one quanta the lockless reader will always see something
737 	 * valid. The HW must be updated to the new level under the spinlock
738 	 * before top_of_table is updated so that concurrent readers don't map
739 	 * into the new level until it is fully functional. If another thread
740 	 * already updated it while we were working then throw everything away
741 	 * and try again.
742 	 */
743 	domain_lock = iommu_table->driver_ops->get_top_lock(iommu_table);
744 	spin_lock_irqsave(domain_lock, flags);
745 	if (common->top_of_table != top_of_table ||
746 	    top_of_table == new_top_of_table) {
747 		spin_unlock_irqrestore(domain_lock, flags);
748 		ret = -EAGAIN;
749 		goto err_free;
750 	}
751 
752 	/*
753 	 * We do not issue any flushes for change_top on the expectation that
754 	 * any walk cache will not become a problem by adding another layer to
755 	 * the tree. Misses will rewalk from the updated top pointer, hits
756 	 * continue to be correct. Negative caching is fine too since all the
757 	 * new IOVA added by the new top is non-present.
758 	 */
759 	iommu_table->driver_ops->change_top(
760 		iommu_table, virt_to_phys(table_mem), new_level);
761 	WRITE_ONCE(common->top_of_table, new_top_of_table);
762 	spin_unlock_irqrestore(domain_lock, flags);
763 	return 0;
764 
765 err_free:
766 	if (pt_feature(common, PT_FEAT_DMA_INCOHERENT))
767 		iommu_pages_stop_incoherent_list(&free_list,
768 						 iommu_table->iommu_device);
769 	iommu_put_pages_list(&free_list);
770 	return ret;
771 }
772 
check_map_range(struct pt_iommu * iommu_table,struct pt_range * range,struct pt_iommu_map_args * map)773 static int check_map_range(struct pt_iommu *iommu_table, struct pt_range *range,
774 			   struct pt_iommu_map_args *map)
775 {
776 	struct pt_common *common = common_from_iommu(iommu_table);
777 	int ret;
778 
779 	do {
780 		ret = pt_check_range(range);
781 		if (!pt_feature(common, PT_FEAT_DYNAMIC_TOP))
782 			return ret;
783 
784 		if (!ret && map->leaf_level <= range->top_level)
785 			break;
786 
787 		ret = increase_top(iommu_table, range, map);
788 		if (ret && ret != -EAGAIN)
789 			return ret;
790 
791 		/* Reload the new top */
792 		*range = pt_make_range(common, range->va, range->last_va);
793 	} while (ret);
794 	PT_WARN_ON(pt_check_range(range));
795 	return 0;
796 }
797 
do_map(struct pt_range * range,struct pt_common * common,bool single_page,struct pt_iommu_map_args * map)798 static int do_map(struct pt_range *range, struct pt_common *common,
799 		  bool single_page, struct pt_iommu_map_args *map)
800 {
801 	/*
802 	 * The __map_single_page() fast path does not support DMA_INCOHERENT
803 	 * flushing to keep its .text small.
804 	 */
805 	if (single_page && !pt_feature(common, PT_FEAT_DMA_INCOHERENT)) {
806 		int ret;
807 
808 		ret = pt_walk_range(range, __map_single_page, map);
809 		if (ret != -EAGAIN)
810 			return ret;
811 		/* EAGAIN falls through to the full path */
812 	}
813 
814 	if (map->leaf_level == range->top_level)
815 		return pt_walk_range(range, __map_range_leaf, map);
816 	return pt_walk_range(range, __map_range, map);
817 }
818 
819 /**
820  * map_pages() - Install translation for an IOVA range
821  * @domain: Domain to manipulate
822  * @iova: IO virtual address to start
823  * @paddr: Physical/Output address to start
824  * @pgsize: Length of each page
825  * @pgcount: Length of the range in pgsize units starting from @iova
826  * @prot: A bitmap of IOMMU_READ/WRITE/CACHE/NOEXEC/MMIO
827  * @gfp: GFP flags for any memory allocations
828  * @mapped: Total bytes successfully mapped
829  *
830  * The range starting at IOVA will have paddr installed into it. The caller
831  * must specify a valid pgsize and pgcount to segment the range into compatible
832  * blocks.
833  *
834  * On error the caller will probably want to invoke unmap on the range from iova
835  * up to the amount indicated by @mapped to return the table back to an
836  * unchanged state.
837  *
838  * Context: The caller must hold a write range lock that includes the whole
839  * range.
840  *
841  * Returns: -ERRNO on failure, 0 on success. The number of bytes of VA that were
842  * mapped are added to @mapped, @mapped is not zerod first.
843  */
DOMAIN_NS(map_pages)844 int DOMAIN_NS(map_pages)(struct iommu_domain *domain, unsigned long iova,
845 			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
846 			 int prot, gfp_t gfp, size_t *mapped)
847 {
848 	struct pt_iommu *iommu_table =
849 		container_of(domain, struct pt_iommu, domain);
850 	pt_vaddr_t pgsize_bitmap = iommu_table->domain.pgsize_bitmap;
851 	struct pt_common *common = common_from_iommu(iommu_table);
852 	struct iommu_iotlb_gather iotlb_gather;
853 	pt_vaddr_t len = pgsize * pgcount;
854 	struct pt_iommu_map_args map = {
855 		.iotlb_gather = &iotlb_gather,
856 		.oa = paddr,
857 		.leaf_pgsize_lg2 = vaffs(pgsize),
858 	};
859 	bool single_page = false;
860 	struct pt_range range;
861 	int ret;
862 
863 	iommu_iotlb_gather_init(&iotlb_gather);
864 
865 	if (WARN_ON(!(prot & (IOMMU_READ | IOMMU_WRITE))))
866 		return -EINVAL;
867 
868 	/* Check the paddr doesn't exceed what the table can store */
869 	if ((sizeof(pt_oaddr_t) < sizeof(paddr) &&
870 	     (pt_vaddr_t)paddr > PT_VADDR_MAX) ||
871 	    (common->max_oasz_lg2 != PT_VADDR_MAX_LG2 &&
872 	     oalog2_div(paddr, common->max_oasz_lg2)))
873 		return -ERANGE;
874 
875 	ret = pt_iommu_set_prot(common, &map.attrs, prot);
876 	if (ret)
877 		return ret;
878 	map.attrs.gfp = gfp;
879 
880 	ret = make_range_no_check(common, &range, iova, len);
881 	if (ret)
882 		return ret;
883 
884 	/* Calculate target page size and level for the leaves */
885 	if (pt_has_system_page_size(common) && pgsize == PAGE_SIZE &&
886 	    pgcount == 1) {
887 		PT_WARN_ON(!(pgsize_bitmap & PAGE_SIZE));
888 		if (log2_mod(iova | paddr, PAGE_SHIFT))
889 			return -ENXIO;
890 		map.leaf_pgsize_lg2 = PAGE_SHIFT;
891 		map.leaf_level = 0;
892 		single_page = true;
893 	} else {
894 		map.leaf_pgsize_lg2 = pt_compute_best_pgsize(
895 			pgsize_bitmap, range.va, range.last_va, paddr);
896 		if (!map.leaf_pgsize_lg2)
897 			return -ENXIO;
898 		map.leaf_level =
899 			pt_pgsz_lg2_to_level(common, map.leaf_pgsize_lg2);
900 	}
901 
902 	ret = check_map_range(iommu_table, &range, &map);
903 	if (ret)
904 		return ret;
905 
906 	PT_WARN_ON(map.leaf_level > range.top_level);
907 
908 	ret = do_map(&range, common, single_page, &map);
909 
910 	/*
911 	 * Table levels were freed and replaced with large items, flush any walk
912 	 * cache that may refer to the freed levels.
913 	 */
914 	if (!iommu_pages_list_empty(&iotlb_gather.freelist))
915 		iommu_iotlb_sync(&iommu_table->domain, &iotlb_gather);
916 
917 	/* Bytes successfully mapped */
918 	PT_WARN_ON(!ret && map.oa - paddr != len);
919 	*mapped += map.oa - paddr;
920 	return ret;
921 }
922 EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(map_pages), "GENERIC_PT_IOMMU");
923 
924 struct pt_unmap_args {
925 	struct iommu_pages_list free_list;
926 	pt_vaddr_t unmapped;
927 };
928 
__unmap_range(struct pt_range * range,void * arg,unsigned int level,struct pt_table_p * table)929 static __maybe_unused int __unmap_range(struct pt_range *range, void *arg,
930 					unsigned int level,
931 					struct pt_table_p *table)
932 {
933 	struct pt_state pts = pt_init(range, level, table);
934 	struct pt_unmap_args *unmap = arg;
935 	unsigned int num_oas = 0;
936 	unsigned int start_index;
937 	int ret = 0;
938 
939 	_pt_iter_first(&pts);
940 	start_index = pts.index;
941 	pts.type = pt_load_entry_raw(&pts);
942 	/*
943 	 * A starting index is in the middle of a contiguous entry
944 	 *
945 	 * The IOMMU API does not require drivers to support unmapping parts of
946 	 * large pages. Long ago VFIO would try to split maps but the current
947 	 * version never does.
948 	 *
949 	 * Instead when unmap reaches a partial unmap of the start of a large
950 	 * IOPTE it should remove the entire IOPTE and return that size to the
951 	 * caller.
952 	 */
953 	if (pts.type == PT_ENTRY_OA) {
954 		if (log2_mod(range->va, pt_entry_oa_lg2sz(&pts)))
955 			return -EINVAL;
956 		/* Micro optimization */
957 		goto start_oa;
958 	}
959 
960 	do {
961 		if (pts.type != PT_ENTRY_OA) {
962 			bool fully_covered;
963 
964 			if (pts.type != PT_ENTRY_TABLE) {
965 				ret = -EINVAL;
966 				break;
967 			}
968 
969 			if (pts.index != start_index)
970 				pt_index_to_va(&pts);
971 			pts.table_lower = pt_table_ptr(&pts);
972 
973 			fully_covered = pt_entry_fully_covered(
974 				&pts, pt_table_item_lg2sz(&pts));
975 
976 			ret = pt_descend(&pts, arg, __unmap_range);
977 			if (ret)
978 				break;
979 
980 			/*
981 			 * If the unmapping range fully covers the table then we
982 			 * can free it as well. The clear is delayed until we
983 			 * succeed in clearing the lower table levels.
984 			 */
985 			if (fully_covered) {
986 				iommu_pages_list_add(&unmap->free_list,
987 						     pts.table_lower);
988 				pt_clear_entries(&pts, ilog2(1));
989 			}
990 			pts.index++;
991 		} else {
992 			unsigned int num_contig_lg2;
993 start_oa:
994 			/*
995 			 * If the caller requested an last that falls within a
996 			 * single entry then the entire entry is unmapped and
997 			 * the length returned will be larger than requested.
998 			 */
999 			num_contig_lg2 = pt_entry_num_contig_lg2(&pts);
1000 			pt_clear_entries(&pts, num_contig_lg2);
1001 			num_oas += log2_to_int(num_contig_lg2);
1002 			pts.index += log2_to_int(num_contig_lg2);
1003 		}
1004 		if (pts.index >= pts.end_index)
1005 			break;
1006 		pts.type = pt_load_entry_raw(&pts);
1007 	} while (true);
1008 
1009 	unmap->unmapped += log2_mul(num_oas, pt_table_item_lg2sz(&pts));
1010 	flush_writes_range(&pts, start_index, pts.index);
1011 
1012 	return ret;
1013 }
1014 
1015 /**
1016  * unmap_pages() - Make a range of IOVA empty/not present
1017  * @domain: Domain to manipulate
1018  * @iova: IO virtual address to start
1019  * @pgsize: Length of each page
1020  * @pgcount: Length of the range in pgsize units starting from @iova
1021  * @iotlb_gather: Gather struct that must be flushed on return
1022  *
1023  * unmap_pages() will remove a translation created by map_pages(). It cannot
1024  * subdivide a mapping created by map_pages(), so it should be called with IOVA
1025  * ranges that match those passed to map_pages(). The IOVA range can aggregate
1026  * contiguous map_pages() calls so long as no individual range is split.
1027  *
1028  * Context: The caller must hold a write range lock that includes
1029  * the whole range.
1030  *
1031  * Returns: Number of bytes of VA unmapped. iova + res will be the point
1032  * unmapping stopped.
1033  */
DOMAIN_NS(unmap_pages)1034 size_t DOMAIN_NS(unmap_pages)(struct iommu_domain *domain, unsigned long iova,
1035 			      size_t pgsize, size_t pgcount,
1036 			      struct iommu_iotlb_gather *iotlb_gather)
1037 {
1038 	struct pt_iommu *iommu_table =
1039 		container_of(domain, struct pt_iommu, domain);
1040 	struct pt_unmap_args unmap = { .free_list = IOMMU_PAGES_LIST_INIT(
1041 					       unmap.free_list) };
1042 	pt_vaddr_t len = pgsize * pgcount;
1043 	struct pt_range range;
1044 	int ret;
1045 
1046 	ret = make_range(common_from_iommu(iommu_table), &range, iova, len);
1047 	if (ret)
1048 		return 0;
1049 
1050 	pt_walk_range(&range, __unmap_range, &unmap);
1051 
1052 	gather_range_pages(iotlb_gather, iommu_table, iova, len,
1053 			   &unmap.free_list);
1054 
1055 	return unmap.unmapped;
1056 }
1057 EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(unmap_pages), "GENERIC_PT_IOMMU");
1058 
NS(get_info)1059 static void NS(get_info)(struct pt_iommu *iommu_table,
1060 			 struct pt_iommu_info *info)
1061 {
1062 	struct pt_common *common = common_from_iommu(iommu_table);
1063 	struct pt_range range = pt_top_range(common);
1064 	struct pt_state pts = pt_init_top(&range);
1065 	pt_vaddr_t pgsize_bitmap = 0;
1066 
1067 	if (pt_feature(common, PT_FEAT_DYNAMIC_TOP)) {
1068 		for (pts.level = 0; pts.level <= PT_MAX_TOP_LEVEL;
1069 		     pts.level++) {
1070 			if (pt_table_item_lg2sz(&pts) >= common->max_vasz_lg2)
1071 				break;
1072 			pgsize_bitmap |= pt_possible_sizes(&pts);
1073 		}
1074 	} else {
1075 		for (pts.level = 0; pts.level <= range.top_level; pts.level++)
1076 			pgsize_bitmap |= pt_possible_sizes(&pts);
1077 	}
1078 
1079 	/* Hide page sizes larger than the maximum OA */
1080 	info->pgsize_bitmap = oalog2_mod(pgsize_bitmap, common->max_oasz_lg2);
1081 }
1082 
NS(deinit)1083 static void NS(deinit)(struct pt_iommu *iommu_table)
1084 {
1085 	struct pt_common *common = common_from_iommu(iommu_table);
1086 	struct pt_range range = pt_all_range(common);
1087 	struct pt_iommu_collect_args collect = {
1088 		.free_list = IOMMU_PAGES_LIST_INIT(collect.free_list),
1089 	};
1090 
1091 	iommu_pages_list_add(&collect.free_list, range.top_table);
1092 	pt_walk_range(&range, __collect_tables, &collect);
1093 
1094 	/*
1095 	 * The driver has to already have fenced the HW access to the page table
1096 	 * and invalidated any caching referring to this memory.
1097 	 */
1098 	if (pt_feature(common, PT_FEAT_DMA_INCOHERENT))
1099 		iommu_pages_stop_incoherent_list(&collect.free_list,
1100 						 iommu_table->iommu_device);
1101 	iommu_put_pages_list(&collect.free_list);
1102 }
1103 
1104 static const struct pt_iommu_ops NS(ops) = {
1105 #if IS_ENABLED(CONFIG_IOMMUFD_DRIVER) && defined(pt_entry_is_write_dirty) && \
1106 	IS_ENABLED(CONFIG_IOMMUFD_TEST) && defined(pt_entry_make_write_dirty)
1107 	.set_dirty = NS(set_dirty),
1108 #endif
1109 	.get_info = NS(get_info),
1110 	.deinit = NS(deinit),
1111 };
1112 
pt_init_common(struct pt_common * common)1113 static int pt_init_common(struct pt_common *common)
1114 {
1115 	struct pt_range top_range = pt_top_range(common);
1116 
1117 	if (PT_WARN_ON(top_range.top_level > PT_MAX_TOP_LEVEL))
1118 		return -EINVAL;
1119 
1120 	if (top_range.top_level == PT_MAX_TOP_LEVEL ||
1121 	    common->max_vasz_lg2 == top_range.max_vasz_lg2)
1122 		common->features &= ~BIT(PT_FEAT_DYNAMIC_TOP);
1123 
1124 	if (top_range.max_vasz_lg2 == PT_VADDR_MAX_LG2)
1125 		common->features |= BIT(PT_FEAT_FULL_VA);
1126 
1127 	/* Requested features must match features compiled into this format */
1128 	if ((common->features & ~(unsigned int)PT_SUPPORTED_FEATURES) ||
1129 	    (!IS_ENABLED(CONFIG_DEBUG_GENERIC_PT) &&
1130 	     (common->features & PT_FORCE_ENABLED_FEATURES) !=
1131 		     PT_FORCE_ENABLED_FEATURES))
1132 		return -EOPNOTSUPP;
1133 
1134 	/*
1135 	 * Check if the top level of the page table is too small to hold the
1136 	 * specified maxvasz.
1137 	 */
1138 	if (!pt_feature(common, PT_FEAT_DYNAMIC_TOP) &&
1139 	    top_range.top_level != PT_MAX_TOP_LEVEL) {
1140 		struct pt_state pts = { .range = &top_range,
1141 					.level = top_range.top_level };
1142 
1143 		if (common->max_vasz_lg2 >
1144 		    pt_num_items_lg2(&pts) + pt_table_item_lg2sz(&pts))
1145 			return -EOPNOTSUPP;
1146 	}
1147 
1148 	if (common->max_oasz_lg2 == 0)
1149 		common->max_oasz_lg2 = pt_max_oa_lg2(common);
1150 	else
1151 		common->max_oasz_lg2 = min(common->max_oasz_lg2,
1152 					   pt_max_oa_lg2(common));
1153 	return 0;
1154 }
1155 
pt_iommu_init_domain(struct pt_iommu * iommu_table,struct iommu_domain * domain)1156 static int pt_iommu_init_domain(struct pt_iommu *iommu_table,
1157 				struct iommu_domain *domain)
1158 {
1159 	struct pt_common *common = common_from_iommu(iommu_table);
1160 	struct pt_iommu_info info;
1161 	struct pt_range range;
1162 
1163 	NS(get_info)(iommu_table, &info);
1164 
1165 	domain->type = __IOMMU_DOMAIN_PAGING;
1166 	domain->pgsize_bitmap = info.pgsize_bitmap;
1167 
1168 	if (pt_feature(common, PT_FEAT_DYNAMIC_TOP))
1169 		range = _pt_top_range(common,
1170 				      _pt_top_set(NULL, PT_MAX_TOP_LEVEL));
1171 	else
1172 		range = pt_top_range(common);
1173 
1174 	/* A 64-bit high address space table on a 32-bit system cannot work. */
1175 	domain->geometry.aperture_start = (unsigned long)range.va;
1176 	if ((pt_vaddr_t)domain->geometry.aperture_start != range.va)
1177 		return -EOVERFLOW;
1178 
1179 	/*
1180 	 * The aperture is limited to what the API can do after considering all
1181 	 * the different types dma_addr_t/unsigned long/pt_vaddr_t that are used
1182 	 * to store a VA. Set the aperture to something that is valid for all
1183 	 * cases. Saturate instead of truncate the end if the types are smaller
1184 	 * than the top range. aperture_end should be called aperture_last.
1185 	 */
1186 	domain->geometry.aperture_end = (unsigned long)range.last_va;
1187 	if ((pt_vaddr_t)domain->geometry.aperture_end != range.last_va) {
1188 		domain->geometry.aperture_end = ULONG_MAX;
1189 		domain->pgsize_bitmap &= ULONG_MAX;
1190 	}
1191 	domain->geometry.force_aperture = true;
1192 
1193 	return 0;
1194 }
1195 
pt_iommu_zero(struct pt_iommu_table * fmt_table)1196 static void pt_iommu_zero(struct pt_iommu_table *fmt_table)
1197 {
1198 	struct pt_iommu *iommu_table = &fmt_table->iommu;
1199 	struct pt_iommu cfg = *iommu_table;
1200 
1201 	static_assert(offsetof(struct pt_iommu_table, iommu.domain) == 0);
1202 	memset_after(fmt_table, 0, iommu.domain);
1203 
1204 	/* The caller can initialize some of these values */
1205 	iommu_table->iommu_device = cfg.iommu_device;
1206 	iommu_table->driver_ops = cfg.driver_ops;
1207 	iommu_table->nid = cfg.nid;
1208 }
1209 
1210 #define pt_iommu_table_cfg CONCATENATE(pt_iommu_table, _cfg)
1211 #define pt_iommu_init CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), init)
1212 
pt_iommu_init(struct pt_iommu_table * fmt_table,const struct pt_iommu_table_cfg * cfg,gfp_t gfp)1213 int pt_iommu_init(struct pt_iommu_table *fmt_table,
1214 		  const struct pt_iommu_table_cfg *cfg, gfp_t gfp)
1215 {
1216 	struct pt_iommu *iommu_table = &fmt_table->iommu;
1217 	struct pt_common *common = common_from_iommu(iommu_table);
1218 	struct pt_table_p *table_mem;
1219 	int ret;
1220 
1221 	if (cfg->common.hw_max_vasz_lg2 > PT_MAX_VA_ADDRESS_LG2 ||
1222 	    !cfg->common.hw_max_vasz_lg2 || !cfg->common.hw_max_oasz_lg2)
1223 		return -EINVAL;
1224 
1225 	pt_iommu_zero(fmt_table);
1226 	common->features = cfg->common.features;
1227 	common->max_vasz_lg2 = cfg->common.hw_max_vasz_lg2;
1228 	common->max_oasz_lg2 = cfg->common.hw_max_oasz_lg2;
1229 	ret = pt_iommu_fmt_init(fmt_table, cfg);
1230 	if (ret)
1231 		return ret;
1232 
1233 	if (cfg->common.hw_max_oasz_lg2 > pt_max_oa_lg2(common))
1234 		return -EINVAL;
1235 
1236 	ret = pt_init_common(common);
1237 	if (ret)
1238 		return ret;
1239 
1240 	if (pt_feature(common, PT_FEAT_DYNAMIC_TOP) &&
1241 	    WARN_ON(!iommu_table->driver_ops ||
1242 		    !iommu_table->driver_ops->change_top ||
1243 		    !iommu_table->driver_ops->get_top_lock))
1244 		return -EINVAL;
1245 
1246 	if (pt_feature(common, PT_FEAT_SIGN_EXTEND) &&
1247 	    (pt_feature(common, PT_FEAT_FULL_VA) ||
1248 	     pt_feature(common, PT_FEAT_DYNAMIC_TOP)))
1249 		return -EINVAL;
1250 
1251 	if (pt_feature(common, PT_FEAT_DMA_INCOHERENT) &&
1252 	    WARN_ON(!iommu_table->iommu_device))
1253 		return -EINVAL;
1254 
1255 	ret = pt_iommu_init_domain(iommu_table, &iommu_table->domain);
1256 	if (ret)
1257 		return ret;
1258 
1259 	table_mem = table_alloc_top(common, common->top_of_table, gfp,
1260 				    ALLOC_NORMAL);
1261 	if (IS_ERR(table_mem))
1262 		return PTR_ERR(table_mem);
1263 	pt_top_set(common, table_mem, pt_top_get_level(common));
1264 
1265 	/* Must be last, see pt_iommu_deinit() */
1266 	iommu_table->ops = &NS(ops);
1267 	return 0;
1268 }
1269 EXPORT_SYMBOL_NS_GPL(pt_iommu_init, "GENERIC_PT_IOMMU");
1270 
1271 #ifdef pt_iommu_fmt_hw_info
1272 #define pt_iommu_table_hw_info CONCATENATE(pt_iommu_table, _hw_info)
1273 #define pt_iommu_hw_info CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), hw_info)
pt_iommu_hw_info(struct pt_iommu_table * fmt_table,struct pt_iommu_table_hw_info * info)1274 void pt_iommu_hw_info(struct pt_iommu_table *fmt_table,
1275 		      struct pt_iommu_table_hw_info *info)
1276 {
1277 	struct pt_iommu *iommu_table = &fmt_table->iommu;
1278 	struct pt_common *common = common_from_iommu(iommu_table);
1279 	struct pt_range top_range = pt_top_range(common);
1280 
1281 	pt_iommu_fmt_hw_info(fmt_table, &top_range, info);
1282 }
1283 EXPORT_SYMBOL_NS_GPL(pt_iommu_hw_info, "GENERIC_PT_IOMMU");
1284 #endif
1285 
1286 MODULE_LICENSE("GPL");
1287 MODULE_DESCRIPTION("IOMMU Page table implementation for " __stringify(PTPFX_RAW));
1288 MODULE_IMPORT_NS("GENERIC_PT");
1289 /* For iommu_dirty_bitmap_record() */
1290 MODULE_IMPORT_NS("IOMMUFD");
1291 
1292 #endif  /* __GENERIC_PT_IOMMU_PT_H */
1293