/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
 */
#ifndef __GENERIC_PT_KUNIT_IOMMU_H
#define __GENERIC_PT_KUNIT_IOMMU_H

#define GENERIC_PT_KUNIT 1
#include <kunit/device.h>
#include <kunit/test.h>
#include "../iommu-pages.h"
#include "pt_iter.h"

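/*
 * Resolve the format-specific names, e.g. for a format built with
 * PTPFX_RAW == amdv1 (illustrative) the pt_iommu_init() declared below
 * resolves to pt_iommu_amdv1_init().
 */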
#define pt_iommu_table_cfg CONCATENATE(pt_iommu_table, _cfg)
#define pt_iommu_init CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), init)
int pt_iommu_init(struct pt_iommu_table *fmt_table,
		  const struct pt_iommu_table_cfg *cfg, gfp_t gfp);

/* The format can provide a list of configurations it would like to test */
#ifdef kunit_fmt_cfgs
static const void *kunit_pt_gen_params_cfg(struct kunit *test, const void *prev,
					   char *desc)
{
	uintptr_t cfg_id = (uintptr_t)prev;

	cfg_id++;
	if (cfg_id >= ARRAY_SIZE(kunit_fmt_cfgs) + 1)
		return NULL;
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s_cfg_%u",
		 __stringify(PTPFX_RAW), (unsigned int)(cfg_id - 1));
	return (void *)cfg_id;
}
#define KUNIT_CASE_FMT(test_name) \
	KUNIT_CASE_PARAM(test_name, kunit_pt_gen_params_cfg)
#else
#define KUNIT_CASE_FMT(test_name) KUNIT_CASE(test_name)
#endif
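
/*
 * Usage sketch (illustrative): a format's test suite lists its cases with
 * KUNIT_CASE_FMT() so that, when kunit_fmt_cfgs is provided, each case is
 * parameterized over every configuration. The names below are hypothetical:
 *
 *	static struct kunit_case generic_pt_test_cases[] = {
 *		KUNIT_CASE_FMT(test_map_simple),
 *		{},
 *	};
 */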

#define KUNIT_ASSERT_NO_ERRNO(test, ret)                                       \
	KUNIT_ASSERT_EQ_MSG(test, ret, 0, KUNIT_SUBSUBTEST_INDENT "errno %pe", \
			    ERR_PTR(ret))

#define KUNIT_ASSERT_NO_ERRNO_FN(test, fn, ret)                          \
	KUNIT_ASSERT_EQ_MSG(test, ret, 0,                                \
			    KUNIT_SUBSUBTEST_INDENT "errno %pe from %s", \
			    ERR_PTR(ret), fn)
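
/*
 * Usage sketch (illustrative): assert that a kernel errno-style return is 0
 * and, on failure, print it symbolically via %pe:
 *
 *	ret = pt_iommu_init(&priv->fmt_table, &priv->cfg, GFP_KERNEL);
 *	KUNIT_ASSERT_NO_ERRNO(test, ret);
 */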

/*
 * When the test is run on a 32 bit system unsigned long can be 32 bits, which
 * restricts the iommu op signatures to 32 bits. The test then has to be
 * mindful not to create any VAs over the 32 bit limit. Reduce the scope of
 * the testing, as the main purpose of checking on full 32 bit is to look for
 * 32bitisms in the core code. Run the test on i386 with X86_PAE=y to get the
 * full coverage when dma_addr_t & phys_addr_t are 8 bytes.
 */
#define IS_32BIT (sizeof(unsigned long) == 4)

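/* Per-test state, filled in by pt_kunit_priv_init() */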
struct kunit_iommu_priv {
	union {
		struct iommu_domain domain;
		struct pt_iommu_table fmt_table;
	};
	spinlock_t top_lock;
	struct device *dummy_dev;
	struct pt_iommu *iommu;
	struct pt_common *common;
	struct pt_iommu_table_cfg cfg;
	struct pt_iommu_info info;
	unsigned int smallest_pgsz_lg2;
	pt_vaddr_t smallest_pgsz;
	unsigned int largest_pgsz_lg2;
	pt_oaddr_t test_oa;
	pt_vaddr_t safe_pgsize_bitmap;
	unsigned long orig_nr_secondary_pagetable;
};
PT_IOMMU_CHECK_DOMAIN(struct kunit_iommu_priv, fmt_table.iommu, domain);

static void pt_kunit_iotlb_sync(struct iommu_domain *domain,
				struct iommu_iotlb_gather *gather)
{
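	/* No TLB to invalidate in the kunit, just free the gathered tables */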
	iommu_put_pages_list(&gather->freelist);
}

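/* Extra indirection so PTPFX_RAW is expanded before it is token pasted */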
#define IOMMU_PT_DOMAIN_OPS1(x) IOMMU_PT_DOMAIN_OPS(x)
static const struct iommu_domain_ops kunit_pt_ops = {
	IOMMU_PT_DOMAIN_OPS1(PTPFX_RAW),
	.iotlb_sync = &pt_kunit_iotlb_sync,
};

static void pt_kunit_change_top(struct pt_iommu *iommu_table,
				phys_addr_t top_paddr, unsigned int top_level)
{
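	/* Nothing to program, the kunit table is not wired to real HW */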
}

static spinlock_t *pt_kunit_get_top_lock(struct pt_iommu *iommu_table)
{
	struct kunit_iommu_priv *priv = container_of(
		iommu_table, struct kunit_iommu_priv, fmt_table.iommu);

	return &priv->top_lock;
}

static const struct pt_iommu_driver_ops pt_kunit_driver_ops = {
	.change_top = &pt_kunit_change_top,
	.get_top_lock = &pt_kunit_get_top_lock,
};

static int pt_kunit_priv_init(struct kunit *test, struct kunit_iommu_priv *priv)
{
	unsigned int va_lg2sz;
	int ret;

	/* Just enough of a device for the memory allocator to work */
	priv->dummy_dev = kunit_device_register(test, "pt_kunit_dev");
	if (IS_ERR(priv->dummy_dev))
		return PTR_ERR(priv->dummy_dev);
	set_dev_node(priv->dummy_dev, NUMA_NO_NODE);

	spin_lock_init(&priv->top_lock);

#ifdef kunit_fmt_cfgs
	priv->cfg = kunit_fmt_cfgs[((uintptr_t)test->param_value) - 1];
	/*
	 * The format can list the features that the kunit_fmt_cfgs entries
	 * control; every other supported feature defaults to on.
	 */
	priv->cfg.common.features |= PT_SUPPORTED_FEATURES &
				     (~KUNIT_FMT_FEATURES);
#else
	priv->cfg.common.features = PT_SUPPORTED_FEATURES;
#endif

	/* Defaults for the kunit */
	if (!priv->cfg.common.hw_max_vasz_lg2)
		priv->cfg.common.hw_max_vasz_lg2 = PT_MAX_VA_ADDRESS_LG2;
	if (!priv->cfg.common.hw_max_oasz_lg2)
		priv->cfg.common.hw_max_oasz_lg2 = pt_max_oa_lg2(NULL);

	priv->fmt_table.iommu.nid = NUMA_NO_NODE;
	priv->fmt_table.iommu.driver_ops = &pt_kunit_driver_ops;
	priv->fmt_table.iommu.iommu_device = priv->dummy_dev;
	priv->domain.ops = &kunit_pt_ops;
	ret = pt_iommu_init(&priv->fmt_table, &priv->cfg, GFP_KERNEL);
	if (ret) {
		if (ret == -EOVERFLOW)
			kunit_skip(test,
				   "This configuration cannot be tested on 32 bit");
		return ret;
	}

	priv->iommu = &priv->fmt_table.iommu;
	priv->common = common_from_iommu(&priv->fmt_table.iommu);
	priv->iommu->ops->get_info(priv->iommu, &priv->info);

	/*
	 * size_t is used to pass the mapping length and it can be 32 bits, so
	 * truncate the pagesize bitmap to avoid using sizes that are too
	 * large to represent.
	 */
	priv->info.pgsize_bitmap = (size_t)priv->info.pgsize_bitmap;

	priv->smallest_pgsz_lg2 = vaffs(priv->info.pgsize_bitmap);
	priv->smallest_pgsz = log2_to_int(priv->smallest_pgsz_lg2);
	priv->largest_pgsz_lg2 =
		vafls((dma_addr_t)priv->info.pgsize_bitmap) - 1;

	priv->test_oa =
		oalog2_mod(0x74a71445deadbeef, priv->common->max_oasz_lg2);

	/*
	 * We run out of VA space if the mappings get too big; make something
	 * smaller that can safely pass through the dma_addr_t API.
	 */
	va_lg2sz = priv->common->max_vasz_lg2;
	if (IS_32BIT && va_lg2sz > 32)
		va_lg2sz = 32;
	priv->safe_pgsize_bitmap =
		log2_mod(priv->info.pgsize_bitmap, va_lg2sz - 1);

	return 0;
}
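
/*
 * Usage sketch (illustrative): a format test suite would call
 * pt_kunit_priv_init() from its per-test init hook and stash the state in
 * test->priv. The function name below is hypothetical:
 *
 *	static int pt_kunit_generic_init(struct kunit *test)
 *	{
 *		struct kunit_iommu_priv *priv;
 *		int ret;
 *
 *		priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
 *		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
 *		ret = pt_kunit_priv_init(test, priv);
 *		KUNIT_ASSERT_NO_ERRNO(test, ret);
 *		test->priv = priv;
 *		return 0;
 *	}
 */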

#endif