/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
 *
 * "Templated C code" for implementing the iommu operations for page tables.
 * This is compiled multiple times, over all the page table formats to pick up
 * the per-format definitions.
 */
#ifndef __GENERIC_PT_IOMMU_PT_H
#define __GENERIC_PT_IOMMU_PT_H

#include "pt_iter.h"

#include <linux/export.h>
#include <linux/iommu.h>
#include "../iommu-pages.h"
#include <linux/cleanup.h>
#include <linux/dma-mapping.h>

enum {
	SW_BIT_CACHE_FLUSH_DONE = 0,
};
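
/*
 * SW_BIT_CACHE_FLUSH_DONE is set on a table item, with release semantics,
 * once the new child table's memory has been flushed for a DMA incoherent
 * walker. A racing mapper that does not observe the bit must flush again;
 * see pt_iommu_new_table() and __map_range() below.
 */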

static void flush_writes_range(const struct pt_state *pts,
			       unsigned int start_index, unsigned int end_index)
{
	if (pts_feature(pts, PT_FEAT_DMA_INCOHERENT))
		iommu_pages_flush_incoherent(
			iommu_from_common(pts->range->common)->iommu_device,
			pts->table, start_index * PT_ITEM_WORD_SIZE,
			(end_index - start_index) * PT_ITEM_WORD_SIZE);
}

static void flush_writes_item(const struct pt_state *pts)
{
	if (pts_feature(pts, PT_FEAT_DMA_INCOHERENT))
		iommu_pages_flush_incoherent(
			iommu_from_common(pts->range->common)->iommu_device,
			pts->table, pts->index * PT_ITEM_WORD_SIZE,
			PT_ITEM_WORD_SIZE);
}

static void gather_range_pages(struct iommu_iotlb_gather *iotlb_gather,
			       struct pt_iommu *iommu_table, pt_vaddr_t iova,
			       pt_vaddr_t len,
			       struct iommu_pages_list *free_list)
{
	struct pt_common *common = common_from_iommu(iommu_table);

	if (pt_feature(common, PT_FEAT_DMA_INCOHERENT))
		iommu_pages_stop_incoherent_list(free_list,
						 iommu_table->iommu_device);

	if (pt_feature(common, PT_FEAT_FLUSH_RANGE_NO_GAPS) &&
	    iommu_iotlb_gather_is_disjoint(iotlb_gather, iova, len)) {
		iommu_iotlb_sync(&iommu_table->domain, iotlb_gather);
		/*
		 * Note that the sync frees the gather's free list, so we must
		 * not have any pages on that list that are covered by iova/len
		 */
	}

	iommu_iotlb_gather_add_range(iotlb_gather, iova, len);
	iommu_pages_list_splice(free_list, &iotlb_gather->freelist);
}

#define DOMAIN_NS(op) CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), op)
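
/*
 * For example, a format compiled with PTPFX set to amdv1 (an illustrative
 * prefix) turns DOMAIN_NS(map_pages) into pt_iommu_amdv1_map_pages, so each
 * compiled format exports its own set of domain entry points.
 */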

static int make_range_ul(struct pt_common *common, struct pt_range *range,
			 unsigned long iova, unsigned long len)
{
	unsigned long last;

	if (unlikely(len == 0))
		return -EINVAL;

	if (check_add_overflow(iova, len - 1, &last))
		return -EOVERFLOW;

	*range = pt_make_range(common, iova, last);
	if (sizeof(iova) > sizeof(range->va)) {
		if (unlikely(range->va != iova || range->last_va != last))
			return -EOVERFLOW;
	}
	return 0;
}

static __maybe_unused int make_range_u64(struct pt_common *common,
					 struct pt_range *range, u64 iova,
					 u64 len)
{
	if (unlikely(iova > ULONG_MAX || len > ULONG_MAX))
		return -EOVERFLOW;
	return make_range_ul(common, range, iova, len);
}

/*
 * Some APIs use unsigned long, while others use dma_addr_t as the type.
 * Dispatch to the correct validation based on the type.
 */
#define make_range_no_check(common, range, iova, len)                        \
	({                                                                   \
		int ret;                                                     \
		if (sizeof(iova) > sizeof(unsigned long) ||                  \
		    sizeof(len) > sizeof(unsigned long))                     \
			ret = make_range_u64(common, range, iova, len);      \
		else                                                         \
			ret = make_range_ul(common, range, iova, len);       \
		ret;                                                         \
	})

#define make_range(common, range, iova, len)                                 \
	({                                                                   \
		int ret = make_range_no_check(common, range, iova, len);     \
		if (!ret)                                                    \
			ret = pt_check_range(range);                         \
		ret;                                                         \
	})
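
/*
 * The dispatch is resolved at compile time. For example, unmap_pages()
 * below passes an unsigned long iova and a pt_vaddr_t len; when pt_vaddr_t
 * is wider than unsigned long the sizeof() test selects make_range_u64(),
 * otherwise the whole macro folds down to a direct make_range_ul() call.
 */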

static inline unsigned int compute_best_pgsize(struct pt_state *pts,
					       pt_oaddr_t oa)
{
	struct pt_iommu *iommu_table = iommu_from_common(pts->range->common);

	if (!pt_can_have_leaf(pts))
		return 0;

	/*
	 * The page size is limited by the domain's bitmap. This allows the core
	 * code to reduce the supported page sizes by changing the bitmap.
	 */
	return pt_compute_best_pgsize(pt_possible_sizes(pts) &
				      iommu_table->domain.pgsize_bitmap,
				      pts->range->va, pts->range->last_va, oa);
}
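
/*
 * For example, if this level can hold 4K and 2M leaves, the domain's bitmap
 * allows both, and va/oa are 2M aligned with at least 2M left in the range,
 * the result is ilog2(SZ_2M) = 21. A hypothetical oa misaligned to 2M would
 * drop the result to 12.
 */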

static __always_inline int __do_iova_to_phys(struct pt_range *range, void *arg,
					     unsigned int level,
					     struct pt_table_p *table,
					     pt_level_fn_t descend_fn)
{
	struct pt_state pts = pt_init(range, level, table);
	pt_oaddr_t *res = arg;

	switch (pt_load_single_entry(&pts)) {
	case PT_ENTRY_EMPTY:
		return -ENOENT;
	case PT_ENTRY_TABLE:
		return pt_descend(&pts, arg, descend_fn);
	case PT_ENTRY_OA:
		*res = pt_entry_oa_exact(&pts);
		return 0;
	}
	return -ENOENT;
}
PT_MAKE_LEVELS(__iova_to_phys, __do_iova_to_phys);

/**
 * iova_to_phys() - Return the output address for the given IOVA
 * @domain: Table to query
 * @iova: IO virtual address to query
 *
 * Determine the output address from the given IOVA. @iova may have any
 * alignment; the returned physical address is adjusted by the sub-page
 * offset.
 *
 * Context: The caller must hold a read range lock that includes @iova.
 *
 * Return: The physical address, or 0 if there is no translation for the
 * given iova.
 */
phys_addr_t DOMAIN_NS(iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova)
{
	struct pt_iommu *iommu_table =
		container_of(domain, struct pt_iommu, domain);
	struct pt_range range;
	pt_oaddr_t res;
	int ret;

	ret = make_range(common_from_iommu(iommu_table), &range, iova, 1);
	if (ret)
		return ret;

	ret = pt_walk_range(&range, __iova_to_phys, &res);
	/* PHYS_ADDR_MAX would be a better error code */
	if (ret)
		return 0;
	return res;
}
EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(iova_to_phys), "GENERIC_PT_IOMMU");
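
/*
 * A sketch of a caller, assuming this file was compiled with PTPFX amdv1
 * (illustrative only):
 *
 *	phys_addr_t phys = pt_iommu_amdv1_iova_to_phys(domain, iova);
 *
 *	A zero result means the IOVA has no translation.
 */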

struct pt_iommu_dirty_args {
	struct iommu_dirty_bitmap *dirty;
	unsigned int flags;
};

static void record_dirty(struct pt_state *pts,
			 struct pt_iommu_dirty_args *dirty,
			 unsigned int num_contig_lg2)
{
	pt_vaddr_t dirty_len;

	if (num_contig_lg2 != ilog2(1)) {
		unsigned int index = pts->index;
		unsigned int end_index = log2_set_mod_max_t(
			unsigned int, pts->index, num_contig_lg2);

		/* Adjust for being contained inside a contiguous page */
		end_index = min(end_index, pts->end_index);
		dirty_len = (end_index - index) *
			    log2_to_int(pt_table_item_lg2sz(pts));
	} else {
		dirty_len = log2_to_int(pt_table_item_lg2sz(pts));
	}

	if (dirty->dirty->bitmap)
		iova_bitmap_set(dirty->dirty->bitmap, pts->range->va,
				dirty_len);

	if (!(dirty->flags & IOMMU_DIRTY_NO_CLEAR)) {
		/*
		 * No write log required because DMA incoherence and atomic
		 * dirty tracking bits can't work together
		 */
		pt_entry_make_write_clean(pts);
		iommu_iotlb_gather_add_range(dirty->dirty->gather,
					     pts->range->va, dirty_len);
	}
}

static inline int __read_and_clear_dirty(struct pt_range *range, void *arg,
					 unsigned int level,
					 struct pt_table_p *table)
{
	struct pt_state pts = pt_init(range, level, table);
	struct pt_iommu_dirty_args *dirty = arg;
	int ret;

	for_each_pt_level_entry(&pts) {
		if (pts.type == PT_ENTRY_TABLE) {
			ret = pt_descend(&pts, arg, __read_and_clear_dirty);
			if (ret)
				return ret;
			continue;
		}
		if (pts.type == PT_ENTRY_OA && pt_entry_is_write_dirty(&pts))
			record_dirty(&pts, dirty,
				     pt_entry_num_contig_lg2(&pts));
	}
	return 0;
}

/**
 * read_and_clear_dirty() - Manipulate the HW set write dirty state
 * @domain: Domain to manipulate
 * @iova: IO virtual address to start
 * @size: Length of the IOVA
 * @flags: A bitmap of IOMMU_DIRTY_NO_CLEAR
 * @dirty: Place to store the dirty bits
 *
 * Iterate over all the entries in the mapped range and record their write
 * dirty status in iommu_dirty_bitmap. If IOMMU_DIRTY_NO_CLEAR is specified
 * then the entries will be left dirty, otherwise they are returned to being
 * not write dirty.
 *
 * Context: The caller must hold a read range lock that includes @iova.
 *
 * Returns: -ERRNO on failure, 0 on success.
 */
int DOMAIN_NS(read_and_clear_dirty)(struct iommu_domain *domain,
				    unsigned long iova, size_t size,
				    unsigned long flags,
				    struct iommu_dirty_bitmap *dirty)
{
	struct pt_iommu *iommu_table =
		container_of(domain, struct pt_iommu, domain);
	struct pt_iommu_dirty_args dirty_args = {
		.dirty = dirty,
		.flags = flags,
	};
	struct pt_range range;
	int ret;

#if !IS_ENABLED(CONFIG_IOMMUFD_DRIVER) || !defined(pt_entry_is_write_dirty)
	return -EOPNOTSUPP;
#endif

	ret = make_range(common_from_iommu(iommu_table), &range, iova, size);
	if (ret)
		return ret;

	ret = pt_walk_range(&range, __read_and_clear_dirty, &dirty_args);
	PT_WARN_ON(ret);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(read_and_clear_dirty), "GENERIC_PT_IOMMU");
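
/*
 * A sketch of how a caller might drive dirty read-back, pairing the helper
 * with a gather so the cleaned entries are flushed from the IOTLB (setup of
 * the iova_bitmap is assumed; amdv1 is an illustrative prefix):
 *
 *	struct iommu_iotlb_gather gather;
 *	struct iommu_dirty_bitmap dirty;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	iommu_dirty_bitmap_init(&dirty, bitmap, &gather);
 *	ret = pt_iommu_amdv1_read_and_clear_dirty(domain, iova, size, 0,
 *						  &dirty);
 *	iommu_iotlb_sync(domain, &gather);
 */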

static inline int __set_dirty(struct pt_range *range, void *arg,
			      unsigned int level, struct pt_table_p *table)
{
	struct pt_state pts = pt_init(range, level, table);

	switch (pt_load_single_entry(&pts)) {
	case PT_ENTRY_EMPTY:
		return -ENOENT;
	case PT_ENTRY_TABLE:
		return pt_descend(&pts, arg, __set_dirty);
	case PT_ENTRY_OA:
		if (!pt_entry_make_write_dirty(&pts))
			return -EAGAIN;
		return 0;
	}
	return -ENOENT;
}

static int __maybe_unused NS(set_dirty)(struct pt_iommu *iommu_table,
					dma_addr_t iova)
{
	struct pt_range range;
	int ret;

	ret = make_range(common_from_iommu(iommu_table), &range, iova, 1);
	if (ret)
		return ret;

	/*
	 * Note: There is no locking here yet, if the test suite races this it
	 * can crash. It should use RCU locking eventually.
	 */
	return pt_walk_range(&range, __set_dirty, NULL);
}

struct pt_iommu_collect_args {
	struct iommu_pages_list free_list;
	/* Fail if any OAs are within the range */
	u8 check_mapped : 1;
};

static int __collect_tables(struct pt_range *range, void *arg,
			    unsigned int level, struct pt_table_p *table)
{
	struct pt_state pts = pt_init(range, level, table);
	struct pt_iommu_collect_args *collect = arg;
	int ret;

	if (!collect->check_mapped && !pt_can_have_table(&pts))
		return 0;

	for_each_pt_level_entry(&pts) {
		if (pts.type == PT_ENTRY_TABLE) {
			iommu_pages_list_add(&collect->free_list,
					     pts.table_lower);
			ret = pt_descend(&pts, arg, __collect_tables);
			if (ret)
				return ret;
			continue;
		}
		if (pts.type == PT_ENTRY_OA && collect->check_mapped)
			return -EADDRINUSE;
	}
	return 0;
}

enum alloc_mode { ALLOC_NORMAL, ALLOC_DEFER_COHERENT_FLUSH };

/* Allocate a table, the empty table will be ready to be installed. */
static inline struct pt_table_p *_table_alloc(struct pt_common *common,
					      size_t lg2sz, gfp_t gfp,
					      enum alloc_mode mode)
{
	struct pt_iommu *iommu_table = iommu_from_common(common);
	struct pt_table_p *table_mem;

	table_mem = iommu_alloc_pages_node_sz(iommu_table->nid, gfp,
					      log2_to_int(lg2sz));
	if (!table_mem)
		return ERR_PTR(-ENOMEM);

	if (pt_feature(common, PT_FEAT_DMA_INCOHERENT) &&
	    mode == ALLOC_NORMAL) {
		int ret = iommu_pages_start_incoherent(
			table_mem, iommu_table->iommu_device);

		if (ret) {
			iommu_free_pages(table_mem);
			return ERR_PTR(ret);
		}
	}
	return table_mem;
}

static inline struct pt_table_p *table_alloc_top(struct pt_common *common,
						 uintptr_t top_of_table,
						 gfp_t gfp,
						 enum alloc_mode mode)
{
	/*
	 * Top doesn't need the free list or otherwise, so it technically
	 * doesn't need to use iommu pages. Use the API anyhow as the top is
	 * usually not smaller than PAGE_SIZE to keep things simple.
	 */
	return _table_alloc(common, pt_top_memsize_lg2(common, top_of_table),
			    gfp, mode);
}

/* Allocate an interior table */
static inline struct pt_table_p *table_alloc(const struct pt_state *parent_pts,
					     gfp_t gfp, enum alloc_mode mode)
{
	struct pt_state child_pts =
		pt_init(parent_pts->range, parent_pts->level - 1, NULL);

	return _table_alloc(parent_pts->range->common,
			    pt_num_items_lg2(&child_pts) +
				    ilog2(PT_ITEM_WORD_SIZE),
			    gfp, mode);
}

static inline int pt_iommu_new_table(struct pt_state *pts,
				     struct pt_write_attrs *attrs)
{
	struct pt_table_p *table_mem;
	phys_addr_t phys;

	/* Given PA/VA/length can't be represented */
	if (PT_WARN_ON(!pt_can_have_table(pts)))
		return -ENXIO;

	table_mem = table_alloc(pts, attrs->gfp, ALLOC_NORMAL);
	if (IS_ERR(table_mem))
		return PTR_ERR(table_mem);

	phys = virt_to_phys(table_mem);
	if (!pt_install_table(pts, phys, attrs)) {
		iommu_pages_free_incoherent(
			table_mem,
			iommu_from_common(pts->range->common)->iommu_device);
		return -EAGAIN;
	}

	if (pts_feature(pts, PT_FEAT_DMA_INCOHERENT)) {
		flush_writes_item(pts);
		pt_set_sw_bit_release(pts, SW_BIT_CACHE_FLUSH_DONE);
	}

	if (IS_ENABLED(CONFIG_DEBUG_GENERIC_PT)) {
		/*
		 * Check that the underlying table can store the physical
		 * table address. This can fail when kunit testing tables
		 * outside their normal environment where a CPU might be
		 * limited.
		 */
		pt_load_single_entry(pts);
		if (PT_WARN_ON(pt_table_pa(pts) != phys)) {
			pt_clear_entries(pts, ilog2(1));
			iommu_pages_free_incoherent(
				table_mem,
				iommu_from_common(pts->range->common)
					->iommu_device);
			return -EINVAL;
		}
	}

	pts->table_lower = table_mem;
	return 0;
}
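
/*
 * pt_install_table() returns false when another thread won the race to
 * install a table at this item; the -EAGAIN above lets __map_range() reload
 * the entry and continue with the winner's table, while the loser's
 * allocation is freed.
 */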

struct pt_iommu_map_args {
	struct iommu_iotlb_gather *iotlb_gather;
	struct pt_write_attrs attrs;
	pt_oaddr_t oa;
	unsigned int leaf_pgsize_lg2;
	unsigned int leaf_level;
};

/*
 * This will recursively check any tables in the block to validate they are
 * empty and then free them through the gather.
 */
static int clear_contig(const struct pt_state *start_pts,
			struct iommu_iotlb_gather *iotlb_gather,
			unsigned int step, unsigned int pgsize_lg2)
{
	struct pt_iommu *iommu_table =
		iommu_from_common(start_pts->range->common);
	struct pt_range range = *start_pts->range;
	struct pt_state pts =
		pt_init(&range, start_pts->level, start_pts->table);
	struct pt_iommu_collect_args collect = { .check_mapped = true };
	int ret;

	pts.index = start_pts->index;
	pts.end_index = start_pts->index + step;
	for (; _pt_iter_load(&pts); pt_next_entry(&pts)) {
		if (pts.type == PT_ENTRY_TABLE) {
			collect.free_list =
				IOMMU_PAGES_LIST_INIT(collect.free_list);
			ret = pt_walk_descend_all(&pts, __collect_tables,
						  &collect);
			if (ret)
				return ret;

			/*
			 * The table item must be cleared before we can update
			 * the gather
			 */
			pt_clear_entries(&pts, ilog2(1));
			flush_writes_item(&pts);

			iommu_pages_list_add(&collect.free_list,
					     pt_table_ptr(&pts));
			gather_range_pages(
				iotlb_gather, iommu_table, range.va,
				log2_to_int(pt_table_item_lg2sz(&pts)),
				&collect.free_list);
		} else if (pts.type != PT_ENTRY_EMPTY) {
			return -EADDRINUSE;
		}
	}
	return 0;
}

static int __map_range_leaf(struct pt_range *range, void *arg,
			    unsigned int level, struct pt_table_p *table)
{
	struct pt_state pts = pt_init(range, level, table);
	struct pt_iommu_map_args *map = arg;
	unsigned int leaf_pgsize_lg2 = map->leaf_pgsize_lg2;
	unsigned int start_index;
	pt_oaddr_t oa = map->oa;
	unsigned int step;
	bool need_contig;
	int ret = 0;

	PT_WARN_ON(map->leaf_level != level);
	PT_WARN_ON(!pt_can_have_leaf(&pts));

	step = log2_to_int_t(unsigned int,
			     leaf_pgsize_lg2 - pt_table_item_lg2sz(&pts));
	need_contig = leaf_pgsize_lg2 != pt_table_item_lg2sz(&pts);

	_pt_iter_first(&pts);
	start_index = pts.index;
	do {
		pts.type = pt_load_entry_raw(&pts);
		if (pts.type != PT_ENTRY_EMPTY || need_contig) {
			if (pts.index != start_index)
				pt_index_to_va(&pts);
			ret = clear_contig(&pts, map->iotlb_gather, step,
					   leaf_pgsize_lg2);
			if (ret)
				break;
		}

		if (IS_ENABLED(CONFIG_DEBUG_GENERIC_PT)) {
			pt_index_to_va(&pts);
			PT_WARN_ON(compute_best_pgsize(&pts, oa) !=
				   leaf_pgsize_lg2);
		}
		pt_install_leaf_entry(&pts, oa, leaf_pgsize_lg2, &map->attrs);

		oa += log2_to_int(leaf_pgsize_lg2);
		pts.index += step;
	} while (pts.index < pts.end_index);

	flush_writes_range(&pts, start_index, pts.index);

	map->oa = oa;
	return ret;
}

static int __map_range(struct pt_range *range, void *arg, unsigned int level,
		       struct pt_table_p *table)
{
	struct pt_state pts = pt_init(range, level, table);
	struct pt_iommu_map_args *map = arg;
	int ret;

	PT_WARN_ON(map->leaf_level == level);
	PT_WARN_ON(!pt_can_have_table(&pts));

	_pt_iter_first(&pts);

	/* Descend to a child table */
	do {
		pts.type = pt_load_entry_raw(&pts);

		if (pts.type != PT_ENTRY_TABLE) {
			if (pts.type != PT_ENTRY_EMPTY)
				return -EADDRINUSE;
			ret = pt_iommu_new_table(&pts, &map->attrs);
			if (ret) {
				/*
				 * Racing with another thread installing a table
				 */
				if (ret == -EAGAIN)
					continue;
				return ret;
			}
		} else {
			pts.table_lower = pt_table_ptr(&pts);
			/*
			 * Racing with a shared pt_iommu_new_table()? The
			 * other thread may still be flushing the cache, so we
			 * have to also flush it to ensure that when our
			 * thread's map completes all the table items leading
			 * to our mapping are visible.
			 *
			 * This requires pt_set_sw_bit_release() to be a
			 * release of the cache flush so that this acquire can
			 * observe visibility at the iommu.
			 */
			if (pts_feature(&pts, PT_FEAT_DMA_INCOHERENT) &&
			    !pt_test_sw_bit_acquire(&pts,
						    SW_BIT_CACHE_FLUSH_DONE))
				flush_writes_item(&pts);
		}

		/*
		 * The already present table can possibly be shared with another
		 * concurrent map.
		 */
		if (map->leaf_level == level - 1)
			ret = pt_descend(&pts, arg, __map_range_leaf);
		else
			ret = pt_descend(&pts, arg, __map_range);
		if (ret)
			return ret;

		pts.index++;
		pt_index_to_va(&pts);
		if (pts.index >= pts.end_index)
			break;
	} while (true);
	return 0;
}

/*
 * Fast path for the easy case of mapping a 4k page to an already allocated
 * table. This is a common workload. If it returns EAGAIN run the full
 * algorithm instead.
 */
static __always_inline int __do_map_single_page(struct pt_range *range,
						void *arg, unsigned int level,
						struct pt_table_p *table,
						pt_level_fn_t descend_fn)
{
	struct pt_state pts = pt_init(range, level, table);
	struct pt_iommu_map_args *map = arg;

	pts.type = pt_load_single_entry(&pts);
	if (pts.level == 0) {
		if (pts.type != PT_ENTRY_EMPTY)
			return -EADDRINUSE;
		pt_install_leaf_entry(&pts, map->oa, PAGE_SHIFT, &map->attrs);
		/* No flush, not used when incoherent */
		map->oa += PAGE_SIZE;
		return 0;
	}
	if (pts.type == PT_ENTRY_TABLE)
		return pt_descend(&pts, arg, descend_fn);
	/* Something else, use the slow path */
	return -EAGAIN;
}
PT_MAKE_LEVELS(__map_single_page, __do_map_single_page);
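
/*
 * PT_MAKE_LEVELS() comes from pt_iter.h; it instantiates the __always_inline
 * template once per level, with descend_fn wired to the next level down, so
 * per-level constants fold at compile time. The generated __map_single_page()
 * is the entry point used by do_map() below, just as __iova_to_phys was
 * generated above.
 */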

/*
 * Add a table to the top, increasing the top level as much as necessary to
 * encompass range.
 */
static int increase_top(struct pt_iommu *iommu_table, struct pt_range *range,
			struct pt_iommu_map_args *map)
{
	struct iommu_pages_list free_list = IOMMU_PAGES_LIST_INIT(free_list);
	struct pt_common *common = common_from_iommu(iommu_table);
	uintptr_t top_of_table = READ_ONCE(common->top_of_table);
	uintptr_t new_top_of_table = top_of_table;
	struct pt_table_p *table_mem;
	unsigned int new_level;
	spinlock_t *domain_lock;
	unsigned long flags;
	int ret;

	while (true) {
		struct pt_range top_range =
			_pt_top_range(common, new_top_of_table);
		struct pt_state pts = pt_init_top(&top_range);

		top_range.va = range->va;
		top_range.last_va = range->last_va;

		if (!pt_check_range(&top_range) &&
		    map->leaf_level <= pts.level) {
			new_level = pts.level;
			break;
		}

		pts.level++;
		if (pts.level > PT_MAX_TOP_LEVEL ||
		    pt_table_item_lg2sz(&pts) >= common->max_vasz_lg2) {
			ret = -ERANGE;
			goto err_free;
		}

		table_mem =
			table_alloc_top(common, _pt_top_set(NULL, pts.level),
					map->attrs.gfp,
					ALLOC_DEFER_COHERENT_FLUSH);
		if (IS_ERR(table_mem)) {
			ret = PTR_ERR(table_mem);
			goto err_free;
		}
		iommu_pages_list_add(&free_list, table_mem);

		/* The new table links to the lower table always at index 0 */
		top_range.va = 0;
		top_range.top_level = pts.level;
		pts.table_lower = pts.table;
		pts.table = table_mem;
		pt_load_single_entry(&pts);
		PT_WARN_ON(pts.index != 0);
		pt_install_table(&pts, virt_to_phys(pts.table_lower),
				 &map->attrs);
		new_top_of_table = _pt_top_set(pts.table, pts.level);
	}

	/*
	 * Avoid double flushing, flush it once after all pt_install_table()
	 */
	if (pt_feature(common, PT_FEAT_DMA_INCOHERENT)) {
		ret = iommu_pages_start_incoherent_list(
			&free_list, iommu_table->iommu_device);
		if (ret)
			goto err_free;
	}

	/*
	 * top_of_table is write locked by the spinlock, but readers can use
	 * READ_ONCE() to get the value. Since we encode both the level and the
	 * pointer in one quanta the lockless reader will always see something
	 * valid. The HW must be updated to the new level under the spinlock
	 * before top_of_table is updated so that concurrent readers don't map
	 * into the new level until it is fully functional. If another thread
	 * already updated it while we were working then throw everything away
	 * and try again.
	 */
	domain_lock = iommu_table->driver_ops->get_top_lock(iommu_table);
	spin_lock_irqsave(domain_lock, flags);
	if (common->top_of_table != top_of_table ||
	    top_of_table == new_top_of_table) {
		spin_unlock_irqrestore(domain_lock, flags);
		ret = -EAGAIN;
		goto err_free;
	}

	/*
	 * We do not issue any flushes for change_top on the expectation that
	 * any walk cache will not become a problem by adding another layer to
	 * the tree. Misses will rewalk from the updated top pointer, hits
	 * continue to be correct. Negative caching is fine too since all the
	 * new IOVA added by the new top is non-present.
	 */
	iommu_table->driver_ops->change_top(
		iommu_table, virt_to_phys(table_mem), new_level);
	WRITE_ONCE(common->top_of_table, new_top_of_table);
	spin_unlock_irqrestore(domain_lock, flags);
	return 0;

err_free:
	if (pt_feature(common, PT_FEAT_DMA_INCOHERENT))
		iommu_pages_stop_incoherent_list(&free_list,
						 iommu_table->iommu_device);
	iommu_put_pages_list(&free_list);
	return ret;
}

static int check_map_range(struct pt_iommu *iommu_table, struct pt_range *range,
			   struct pt_iommu_map_args *map)
{
	struct pt_common *common = common_from_iommu(iommu_table);
	int ret;

	do {
		ret = pt_check_range(range);
		if (!pt_feature(common, PT_FEAT_DYNAMIC_TOP))
			return ret;

		if (!ret && map->leaf_level <= range->top_level)
			break;

		ret = increase_top(iommu_table, range, map);
		if (ret && ret != -EAGAIN)
			return ret;

		/* Reload the new top */
		*range = pt_make_range(common, range->va, range->last_va);
	} while (ret);
	PT_WARN_ON(pt_check_range(range));
	return 0;
}

static int do_map(struct pt_range *range, struct pt_common *common,
		  bool single_page, struct pt_iommu_map_args *map)
{
	/*
	 * The __map_single_page() fast path does not support DMA_INCOHERENT
	 * flushing to keep its .text small.
	 */
	if (single_page && !pt_feature(common, PT_FEAT_DMA_INCOHERENT)) {
		int ret;

		ret = pt_walk_range(range, __map_single_page, map);
		if (ret != -EAGAIN)
			return ret;
		/* EAGAIN falls through to the full path */
	}

	if (map->leaf_level == range->top_level)
		return pt_walk_range(range, __map_range_leaf, map);
	return pt_walk_range(range, __map_range, map);
}
/**
 * map_pages() - Install translation for an IOVA range
 * @domain: Domain to manipulate
 * @iova: IO virtual address to start
 * @paddr: Physical/Output address to start
 * @pgsize: Length of each page
 * @pgcount: Length of the range in pgsize units starting from @iova
 * @prot: A bitmap of IOMMU_READ/WRITE/CACHE/NOEXEC/MMIO
 * @gfp: GFP flags for any memory allocations
 * @mapped: Total bytes successfully mapped
 *
 * The range starting at IOVA will have paddr installed into it. The caller
 * must specify a valid pgsize and pgcount to segment the range into
 * compatible blocks.
 *
 * On error the caller will probably want to invoke unmap on the range from
 * iova up to the amount indicated by @mapped to return the table back to an
 * unchanged state.
 *
 * Context: The caller must hold a write range lock that includes the whole
 * range.
 *
 * Returns: -ERRNO on failure, 0 on success. The number of bytes of VA that
 * were mapped are added to @mapped; @mapped is not zeroed first.
 */
int DOMAIN_NS(map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped)
{
	struct pt_iommu *iommu_table =
		container_of(domain, struct pt_iommu, domain);
	pt_vaddr_t pgsize_bitmap = iommu_table->domain.pgsize_bitmap;
	struct pt_common *common = common_from_iommu(iommu_table);
	struct iommu_iotlb_gather iotlb_gather;
	pt_vaddr_t len = pgsize * pgcount;
	struct pt_iommu_map_args map = {
		.iotlb_gather = &iotlb_gather,
		.oa = paddr,
		.leaf_pgsize_lg2 = vaffs(pgsize),
	};
	bool single_page = false;
	struct pt_range range;
	int ret;

	iommu_iotlb_gather_init(&iotlb_gather);

	if (WARN_ON(!(prot & (IOMMU_READ | IOMMU_WRITE))))
		return -EINVAL;

	/* Check the paddr doesn't exceed what the table can store */
	if ((sizeof(pt_oaddr_t) < sizeof(paddr) &&
	     (pt_vaddr_t)paddr > PT_VADDR_MAX) ||
	    (common->max_oasz_lg2 != PT_VADDR_MAX_LG2 &&
	     oalog2_div(paddr, common->max_oasz_lg2)))
		return -ERANGE;

	ret = pt_iommu_set_prot(common, &map.attrs, prot);
	if (ret)
		return ret;
	map.attrs.gfp = gfp;

	ret = make_range_no_check(common, &range, iova, len);
	if (ret)
		return ret;

	/* Calculate target page size and level for the leaves */
	if (pt_has_system_page_size(common) && pgsize == PAGE_SIZE &&
	    pgcount == 1) {
		PT_WARN_ON(!(pgsize_bitmap & PAGE_SIZE));
		if (log2_mod(iova | paddr, PAGE_SHIFT))
			return -ENXIO;
		map.leaf_pgsize_lg2 = PAGE_SHIFT;
		map.leaf_level = 0;
		single_page = true;
	} else {
		map.leaf_pgsize_lg2 = pt_compute_best_pgsize(
			pgsize_bitmap, range.va, range.last_va, paddr);
		if (!map.leaf_pgsize_lg2)
			return -ENXIO;
		map.leaf_level =
			pt_pgsz_lg2_to_level(common, map.leaf_pgsize_lg2);
	}

	ret = check_map_range(iommu_table, &range, &map);
	if (ret)
		return ret;

	PT_WARN_ON(map.leaf_level > range.top_level);

	ret = do_map(&range, common, single_page, &map);

	/*
	 * Table levels were freed and replaced with large items, flush any walk
	 * cache that may refer to the freed levels.
	 */
	if (!iommu_pages_list_empty(&iotlb_gather.freelist))
		iommu_iotlb_sync(&iommu_table->domain, &iotlb_gather);

	/* Bytes successfully mapped */
	PT_WARN_ON(!ret && map.oa - paddr != len);
	*mapped += map.oa - paddr;
	return ret;
}
EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(map_pages), "GENERIC_PT_IOMMU");
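
/*
 * A sketch of an error-handling caller (amdv1 is an illustrative prefix;
 * gather setup follows the unmap_pages() example below):
 *
 *	size_t mapped = 0;
 *
 *	ret = pt_iommu_amdv1_map_pages(domain, iova, paddr, SZ_2M, count,
 *				       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL,
 *				       &mapped);
 *	if (ret && mapped)
 *		pt_iommu_amdv1_unmap_pages(domain, iova, SZ_2M,
 *					   mapped / SZ_2M, &gather);
 */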

struct pt_unmap_args {
	struct iommu_pages_list free_list;
	pt_vaddr_t unmapped;
};

static __maybe_unused int __unmap_range(struct pt_range *range, void *arg,
					unsigned int level,
					struct pt_table_p *table)
{
	struct pt_state pts = pt_init(range, level, table);
	unsigned int flush_start_index = UINT_MAX;
	unsigned int flush_end_index = UINT_MAX;
	struct pt_unmap_args *unmap = arg;
	unsigned int num_oas = 0;
	unsigned int start_index;
	int ret = 0;

	_pt_iter_first(&pts);
	start_index = pts.index;
	pts.type = pt_load_entry_raw(&pts);
	/*
	 * A starting index is in the middle of a contiguous entry
	 *
	 * The IOMMU API does not require drivers to support unmapping parts of
	 * large pages. Long ago VFIO would try to split maps but the current
	 * version never does.
	 *
	 * Instead when unmap reaches a partial unmap of the start of a large
	 * IOPTE it should remove the entire IOPTE and return that size to the
	 * caller.
	 */
	if (pts.type == PT_ENTRY_OA) {
		if (log2_mod(range->va, pt_entry_oa_lg2sz(&pts)))
			return -EINVAL;
		/* Micro optimization */
		goto start_oa;
	}

	do {
		if (pts.type != PT_ENTRY_OA) {
			bool fully_covered;

			if (pts.type != PT_ENTRY_TABLE) {
				ret = -EINVAL;
				break;
			}

			if (pts.index != start_index)
				pt_index_to_va(&pts);
			pts.table_lower = pt_table_ptr(&pts);

			fully_covered = pt_entry_fully_covered(
				&pts, pt_table_item_lg2sz(&pts));

			ret = pt_descend(&pts, arg, __unmap_range);
			if (ret)
				break;

			/*
			 * If the unmapping range fully covers the table then we
			 * can free it as well. The clear is delayed until we
			 * succeed in clearing the lower table levels.
			 */
			if (fully_covered) {
				iommu_pages_list_add(&unmap->free_list,
						     pts.table_lower);
				pt_clear_entries(&pts, ilog2(1));
				if (pts.index < flush_start_index)
					flush_start_index = pts.index;
				flush_end_index = pts.index + 1;
			}
			pts.index++;
		} else {
			unsigned int num_contig_lg2;
start_oa:
			/*
			 * If the caller requested a last address that falls
			 * within a single entry then the entire entry is
			 * unmapped and the length returned will be larger than
			 * requested.
			 */
			num_contig_lg2 = pt_entry_num_contig_lg2(&pts);
			pt_clear_entries(&pts, num_contig_lg2);
			num_oas += log2_to_int(num_contig_lg2);
			if (pts.index < flush_start_index)
				flush_start_index = pts.index;
			pts.index += log2_to_int(num_contig_lg2);
			flush_end_index = pts.index;
		}
		if (pts.index >= pts.end_index)
			break;
		pts.type = pt_load_entry_raw(&pts);
	} while (true);

	unmap->unmapped += log2_mul(num_oas, pt_table_item_lg2sz(&pts));
	if (flush_start_index != flush_end_index)
		flush_writes_range(&pts, flush_start_index, flush_end_index);

	return ret;
}

/**
 * unmap_pages() - Make a range of IOVA empty/not present
 * @domain: Domain to manipulate
 * @iova: IO virtual address to start
 * @pgsize: Length of each page
 * @pgcount: Length of the range in pgsize units starting from @iova
 * @iotlb_gather: Gather struct that must be flushed on return
 *
 * unmap_pages() will remove a translation created by map_pages(). It cannot
 * subdivide a mapping created by map_pages(), so it should be called with IOVA
 * ranges that match those passed to map_pages(). The IOVA range can aggregate
 * contiguous map_pages() calls so long as no individual range is split.
 *
 * Context: The caller must hold a write range lock that includes
 * the whole range.
 *
 * Returns: Number of bytes of VA unmapped. iova + res will be the point
 * unmapping stopped.
 */
size_t DOMAIN_NS(unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather)
{
	struct pt_iommu *iommu_table =
		container_of(domain, struct pt_iommu, domain);
	struct pt_unmap_args unmap = { .free_list = IOMMU_PAGES_LIST_INIT(
					       unmap.free_list) };
	pt_vaddr_t len = pgsize * pgcount;
	struct pt_range range;
	int ret;

	ret = make_range(common_from_iommu(iommu_table), &range, iova, len);
	if (ret)
		return 0;

	pt_walk_range(&range, __unmap_range, &unmap);

	gather_range_pages(iotlb_gather, iommu_table, iova, len,
			   &unmap.free_list);

	return unmap.unmapped;
}
EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(unmap_pages), "GENERIC_PT_IOMMU");
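
/*
 * A sketch of a caller; freed table memory stays on the gather until the
 * sync completes the IOTLB invalidation (amdv1 is an illustrative prefix):
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	unmapped = pt_iommu_amdv1_unmap_pages(domain, iova, SZ_4K, npages,
 *					      &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */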

static void NS(get_info)(struct pt_iommu *iommu_table,
			 struct pt_iommu_info *info)
{
	struct pt_common *common = common_from_iommu(iommu_table);
	struct pt_range range = pt_top_range(common);
	struct pt_state pts = pt_init_top(&range);
	pt_vaddr_t pgsize_bitmap = 0;

	if (pt_feature(common, PT_FEAT_DYNAMIC_TOP)) {
		for (pts.level = 0; pts.level <= PT_MAX_TOP_LEVEL;
		     pts.level++) {
			if (pt_table_item_lg2sz(&pts) >= common->max_vasz_lg2)
				break;
			pgsize_bitmap |= pt_possible_sizes(&pts);
		}
	} else {
		for (pts.level = 0; pts.level <= range.top_level; pts.level++)
			pgsize_bitmap |= pt_possible_sizes(&pts);
	}

	/* Hide page sizes larger than the maximum OA */
	info->pgsize_bitmap = oalog2_mod(pgsize_bitmap, common->max_oasz_lg2);
}

static void NS(deinit)(struct pt_iommu *iommu_table)
{
	struct pt_common *common = common_from_iommu(iommu_table);
	struct pt_range range = pt_all_range(common);
	struct pt_iommu_collect_args collect = {
		.free_list = IOMMU_PAGES_LIST_INIT(collect.free_list),
	};

	iommu_pages_list_add(&collect.free_list, range.top_table);
	pt_walk_range(&range, __collect_tables, &collect);

	/*
	 * The driver has to already have fenced the HW access to the page table
	 * and invalidated any caching referring to this memory.
	 */
	if (pt_feature(common, PT_FEAT_DMA_INCOHERENT))
		iommu_pages_stop_incoherent_list(&collect.free_list,
						 iommu_table->iommu_device);
	iommu_put_pages_list(&collect.free_list);
}

static const struct pt_iommu_ops NS(ops) = {
#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER) && defined(pt_entry_is_write_dirty) && \
	IS_ENABLED(CONFIG_IOMMUFD_TEST) && defined(pt_entry_make_write_dirty)
	.set_dirty = NS(set_dirty),
#endif
	.get_info = NS(get_info),
	.deinit = NS(deinit),
};

static int pt_init_common(struct pt_common *common)
{
	struct pt_range top_range = pt_top_range(common);

	if (PT_WARN_ON(top_range.top_level > PT_MAX_TOP_LEVEL))
		return -EINVAL;

	if (top_range.top_level == PT_MAX_TOP_LEVEL ||
	    common->max_vasz_lg2 == top_range.max_vasz_lg2)
		common->features &= ~BIT(PT_FEAT_DYNAMIC_TOP);

	if (top_range.max_vasz_lg2 == PT_VADDR_MAX_LG2)
		common->features |= BIT(PT_FEAT_FULL_VA);

	/* Requested features must match features compiled into this format */
	if ((common->features & ~(unsigned int)PT_SUPPORTED_FEATURES) ||
	    (!IS_ENABLED(CONFIG_DEBUG_GENERIC_PT) &&
	     (common->features & PT_FORCE_ENABLED_FEATURES) !=
		     PT_FORCE_ENABLED_FEATURES))
		return -EOPNOTSUPP;

	/*
	 * Check if the top level of the page table is too small to hold the
	 * specified max_vasz_lg2.
	 */
	if (!pt_feature(common, PT_FEAT_DYNAMIC_TOP) &&
	    top_range.top_level != PT_MAX_TOP_LEVEL) {
		struct pt_state pts = { .range = &top_range,
					.level = top_range.top_level };

		if (common->max_vasz_lg2 >
		    pt_num_items_lg2(&pts) + pt_table_item_lg2sz(&pts))
			return -EOPNOTSUPP;
	}

	if (common->max_oasz_lg2 == 0)
		common->max_oasz_lg2 = pt_max_oa_lg2(common);
	else
		common->max_oasz_lg2 = min(common->max_oasz_lg2,
					   pt_max_oa_lg2(common));
	return 0;
}

static int pt_iommu_init_domain(struct pt_iommu *iommu_table,
				struct iommu_domain *domain)
{
	struct pt_common *common = common_from_iommu(iommu_table);
	struct pt_iommu_info info;
	struct pt_range range;

	NS(get_info)(iommu_table, &info);

	domain->type = __IOMMU_DOMAIN_PAGING;
	domain->pgsize_bitmap = info.pgsize_bitmap;

	if (pt_feature(common, PT_FEAT_DYNAMIC_TOP))
		range = _pt_top_range(common,
				      _pt_top_set(NULL, PT_MAX_TOP_LEVEL));
	else
		range = pt_top_range(common);

	/* A 64-bit high address space table on a 32-bit system cannot work. */
	domain->geometry.aperture_start = (unsigned long)range.va;
	if ((pt_vaddr_t)domain->geometry.aperture_start != range.va)
		return -EOVERFLOW;

	/*
	 * The aperture is limited to what the API can do after considering all
	 * the different types dma_addr_t/unsigned long/pt_vaddr_t that are used
	 * to store a VA. Set the aperture to something that is valid for all
	 * cases. Saturate instead of truncate the end if the types are smaller
	 * than the top range. aperture_end should be called aperture_last.
	 */
	domain->geometry.aperture_end = (unsigned long)range.last_va;
	if ((pt_vaddr_t)domain->geometry.aperture_end != range.last_va) {
		domain->geometry.aperture_end = ULONG_MAX;
		domain->pgsize_bitmap &= ULONG_MAX;
	}
	domain->geometry.force_aperture = true;

	return 0;
}
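
/*
 * For example, a 48-bit table based at IOVA 0 yields the aperture
 * [0, 2^48 - 1]; on a 32-bit kernel aperture_end saturates to ULONG_MAX. A
 * sign-extending format whose usable upper range starts at
 * 0xffff800000000000 cannot fit aperture_start into a 32-bit unsigned long
 * at all, which is the -EOVERFLOW case above.
 */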

static void pt_iommu_zero(struct pt_iommu_table *fmt_table)
{
	struct pt_iommu *iommu_table = &fmt_table->iommu;
	struct pt_iommu cfg = *iommu_table;

	static_assert(offsetof(struct pt_iommu_table, iommu.domain) == 0);
	memset_after(fmt_table, 0, iommu.domain);

	/* The caller can initialize some of these values */
	iommu_table->iommu_device = cfg.iommu_device;
	iommu_table->driver_ops = cfg.driver_ops;
	iommu_table->nid = cfg.nid;
}

#define pt_iommu_table_cfg CONCATENATE(pt_iommu_table, _cfg)
#define pt_iommu_init CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), init)

int pt_iommu_init(struct pt_iommu_table *fmt_table,
		  const struct pt_iommu_table_cfg *cfg, gfp_t gfp)
{
	struct pt_iommu *iommu_table = &fmt_table->iommu;
	struct pt_common *common = common_from_iommu(iommu_table);
	struct pt_table_p *table_mem;
	int ret;

	if (cfg->common.hw_max_vasz_lg2 > PT_MAX_VA_ADDRESS_LG2 ||
	    !cfg->common.hw_max_vasz_lg2 || !cfg->common.hw_max_oasz_lg2)
		return -EINVAL;

	pt_iommu_zero(fmt_table);
	common->features = cfg->common.features;
	common->max_vasz_lg2 = cfg->common.hw_max_vasz_lg2;
	common->max_oasz_lg2 = cfg->common.hw_max_oasz_lg2;
	ret = pt_iommu_fmt_init(fmt_table, cfg);
	if (ret)
		return ret;

	if (cfg->common.hw_max_oasz_lg2 > pt_max_oa_lg2(common))
		return -EINVAL;

	ret = pt_init_common(common);
	if (ret)
		return ret;

	if (pt_feature(common, PT_FEAT_DYNAMIC_TOP) &&
	    WARN_ON(!iommu_table->driver_ops ||
		    !iommu_table->driver_ops->change_top ||
		    !iommu_table->driver_ops->get_top_lock))
		return -EINVAL;

	if (pt_feature(common, PT_FEAT_SIGN_EXTEND) &&
	    (pt_feature(common, PT_FEAT_FULL_VA) ||
	     pt_feature(common, PT_FEAT_DYNAMIC_TOP)))
		return -EINVAL;

	if (pt_feature(common, PT_FEAT_DMA_INCOHERENT) &&
	    WARN_ON(!iommu_table->iommu_device))
		return -EINVAL;

	ret = pt_iommu_init_domain(iommu_table, &iommu_table->domain);
	if (ret)
		return ret;

	table_mem = table_alloc_top(common, common->top_of_table, gfp,
				    ALLOC_NORMAL);
	if (IS_ERR(table_mem))
		return PTR_ERR(table_mem);
	pt_top_set(common, table_mem, pt_top_get_level(common));

	/* Must be last, see pt_iommu_deinit() */
	iommu_table->ops = &NS(ops);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(pt_iommu_init, "GENERIC_PT_IOMMU");
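
/*
 * A sketch of driver init, assuming an amdv1 format and a driver-provided
 * dev/nid (the struct names follow the CONCATENATE() pattern above; field
 * values are illustrative):
 *
 *	struct pt_iommu_amdv1_cfg cfg = {
 *		.common.features = BIT(PT_FEAT_DYNAMIC_TOP),
 *		.common.hw_max_vasz_lg2 = 48,
 *		.common.hw_max_oasz_lg2 = 52,
 *	};
 *
 *	fmt_table->iommu.nid = dev_to_node(dev);
 *	fmt_table->iommu.iommu_device = dev;
 *	fmt_table->iommu.driver_ops = &my_driver_ops;
 *	ret = pt_iommu_amdv1_init(fmt_table, &cfg, GFP_KERNEL);
 */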

#ifdef pt_iommu_fmt_hw_info
#define pt_iommu_table_hw_info CONCATENATE(pt_iommu_table, _hw_info)
#define pt_iommu_hw_info CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), hw_info)
void pt_iommu_hw_info(struct pt_iommu_table *fmt_table,
		      struct pt_iommu_table_hw_info *info)
{
	struct pt_iommu *iommu_table = &fmt_table->iommu;
	struct pt_common *common = common_from_iommu(iommu_table);
	struct pt_range top_range = pt_top_range(common);

	pt_iommu_fmt_hw_info(fmt_table, &top_range, info);
}
EXPORT_SYMBOL_NS_GPL(pt_iommu_hw_info, "GENERIC_PT_IOMMU");
#endif

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IOMMU Page table implementation for " __stringify(PTPFX_RAW));
MODULE_IMPORT_NS("GENERIC_PT");
/* For iommu_dirty_bitmap_record() */
MODULE_IMPORT_NS("IOMMUFD");

#endif /* __GENERIC_PT_IOMMU_PT_H */