xref: /linux/drivers/iommu/generic_pt/kunit_iommu.h (revision 1dd4187f53c35165262691795109879e37ddca62)
1*1dd4187fSJason Gunthorpe /* SPDX-License-Identifier: GPL-2.0-only */
2*1dd4187fSJason Gunthorpe /*
3*1dd4187fSJason Gunthorpe  * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
4*1dd4187fSJason Gunthorpe  */
5*1dd4187fSJason Gunthorpe #ifndef __GENERIC_PT_KUNIT_IOMMU_H
6*1dd4187fSJason Gunthorpe #define __GENERIC_PT_KUNIT_IOMMU_H
7*1dd4187fSJason Gunthorpe 
8*1dd4187fSJason Gunthorpe #define GENERIC_PT_KUNIT 1
9*1dd4187fSJason Gunthorpe #include <kunit/device.h>
10*1dd4187fSJason Gunthorpe #include <kunit/test.h>
11*1dd4187fSJason Gunthorpe #include <../../iommu-pages.h>
12*1dd4187fSJason Gunthorpe #include "pt_iter.h"
13*1dd4187fSJason Gunthorpe 
14*1dd4187fSJason Gunthorpe #define pt_iommu_table_cfg CONCATENATE(pt_iommu_table, _cfg)
15*1dd4187fSJason Gunthorpe #define pt_iommu_init CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), init)
16*1dd4187fSJason Gunthorpe int pt_iommu_init(struct pt_iommu_table *fmt_table,
17*1dd4187fSJason Gunthorpe 		  const struct pt_iommu_table_cfg *cfg, gfp_t gfp);
18*1dd4187fSJason Gunthorpe 
/* The format can provide a list of configurations it would like to test */
#ifdef kunit_fmt_cfgs
/*
 * KUnit parameter generator: walks the format's kunit_fmt_cfgs[] array.
 * The opaque parameter value is the 1-based array index encoded as a
 * pointer, so that NULL (the first call, index 0) means "start" and is
 * distinguishable from a real entry.
 */
static const void *kunit_pt_gen_params_cfg(struct kunit *test, const void *prev,
					   char *desc)
{
	uintptr_t cfg_id = (uintptr_t)prev;

	cfg_id++;
	/* One parameter per kunit_fmt_cfgs entry, then terminate */
	if (cfg_id >= ARRAY_SIZE(kunit_fmt_cfgs) + 1)
		return NULL;
	/* Label each case "<fmt>_cfg_<0-based index>" in the kunit output */
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s_cfg_%u",
		 __stringify(PTPFX_RAW), (unsigned int)(cfg_id - 1));
	return (void *)cfg_id;
}
/* Run the test once per format-provided configuration */
#define KUNIT_CASE_FMT(test_name) \
	KUNIT_CASE_PARAM(test_name, kunit_pt_gen_params_cfg)
#else
/* No config list from the format: plain unparameterized test case */
#define KUNIT_CASE_FMT(test_name) KUNIT_CASE(test_name)
#endif
38*1dd4187fSJason Gunthorpe 
/* Assert ret == 0, printing the errno symbolically (%pe) on failure */
#define KUNIT_ASSERT_NO_ERRNO(test, ret)                                       \
	KUNIT_ASSERT_EQ_MSG(test, ret, 0, KUNIT_SUBSUBTEST_INDENT "errno %pe", \
			    ERR_PTR(ret))

/* As KUNIT_ASSERT_NO_ERRNO(), but also names the failing call in fn */
#define KUNIT_ASSERT_NO_ERRNO_FN(test, fn, ret)                          \
	KUNIT_ASSERT_EQ_MSG(test, ret, 0,                                \
			    KUNIT_SUBSUBTEST_INDENT "errno %pe from %s", \
			    ERR_PTR(ret), fn)
47*1dd4187fSJason Gunthorpe 
/*
 * When the test is run on a 32 bit system unsigned long can be 32 bits. This
 * causes the iommu op signatures to be restricted to 32 bits, meaning the test
 * has to be mindful not to create any VA's over the 32 bit limit. Reduce the
 * scope of the testing, as the main purpose of checking on full 32 bit is to
 * look for 32bitisms in the core code. Run the test on i386 with X86_PAE=y to
 * get the full coverage when dma_addr_t & phys_addr_t are 8 bytes.
 */
#define IS_32BIT (sizeof(unsigned long) == 4)
57*1dd4187fSJason Gunthorpe 
/*
 * Per-test state, filled in by pt_kunit_priv_init(). Holds the table under
 * test plus cached geometry derived from it.
 */
struct kunit_iommu_priv {
	union {
		/* Both views alias the same object; see PT_IOMMU_CHECK_DOMAIN below */
		struct iommu_domain domain;
		struct pt_iommu_table fmt_table;
	};
	/* Handed out by the get_top_lock driver op */
	spinlock_t top_lock;
	/* Placeholder device so the allocator paths have something to use */
	struct device *dummy_dev;
	/* Convenience pointers into fmt_table */
	struct pt_iommu *iommu;
	struct pt_common *common;
	/* Configuration the table was initialized with */
	struct pt_iommu_table_cfg cfg;
	struct pt_iommu_info info;
	/* Smallest/largest supported page sizes, from info.pgsize_bitmap */
	unsigned int smallest_pgsz_lg2;
	pt_vaddr_t smallest_pgsz;
	unsigned int largest_pgsz_lg2;
	/* An arbitrary output address within the table's OA range */
	pt_oaddr_t test_oa;
	/* pgsize_bitmap clamped so mappings fit the (possibly 32 bit) VA space */
	pt_vaddr_t safe_pgsize_bitmap;
};
/* Verify the domain/fmt_table union layout matches what the core expects */
PT_IOMMU_CHECK_DOMAIN(struct kunit_iommu_priv, fmt_table.iommu, domain);
76*1dd4187fSJason Gunthorpe 
77*1dd4187fSJason Gunthorpe static void pt_kunit_iotlb_sync(struct iommu_domain *domain,
78*1dd4187fSJason Gunthorpe 				struct iommu_iotlb_gather *gather)
79*1dd4187fSJason Gunthorpe {
80*1dd4187fSJason Gunthorpe 	iommu_put_pages_list(&gather->freelist);
81*1dd4187fSJason Gunthorpe }
82*1dd4187fSJason Gunthorpe 
/* Extra expansion level so PTPFX_RAW is substituted before token pasting */
#define IOMMU_PT_DOMAIN_OPS1(x) IOMMU_PT_DOMAIN_OPS(x)
static const struct iommu_domain_ops kunit_pt_ops = {
	IOMMU_PT_DOMAIN_OPS1(PTPFX_RAW),
	/* Supply iotlb_sync locally so gathered pages get freed without HW */
	.iotlb_sync = &pt_kunit_iotlb_sync,
};
88*1dd4187fSJason Gunthorpe 
/*
 * change_top driver op: the kunit table has no HW to reprogram with the new
 * top pointer/level, so this is deliberately a no-op.
 */
static void pt_kunit_change_top(struct pt_iommu *iommu_table,
				phys_addr_t top_paddr, unsigned int top_level)
{
}
93*1dd4187fSJason Gunthorpe 
94*1dd4187fSJason Gunthorpe static spinlock_t *pt_kunit_get_top_lock(struct pt_iommu *iommu_table)
95*1dd4187fSJason Gunthorpe {
96*1dd4187fSJason Gunthorpe 	struct kunit_iommu_priv *priv = container_of(
97*1dd4187fSJason Gunthorpe 		iommu_table, struct kunit_iommu_priv, fmt_table.iommu);
98*1dd4187fSJason Gunthorpe 
99*1dd4187fSJason Gunthorpe 	return &priv->top_lock;
100*1dd4187fSJason Gunthorpe }
101*1dd4187fSJason Gunthorpe 
/* Minimal driver ops: no HW, so only the top lock accessor does real work */
static const struct pt_iommu_driver_ops pt_kunit_driver_ops = {
	.change_top = &pt_kunit_change_top,
	.get_top_lock = &pt_kunit_get_top_lock,
};
106*1dd4187fSJason Gunthorpe 
/*
 * Build a page table for the test to use and derive the geometry values the
 * tests rely on (smallest/largest page size, a test OA, a VA-safe pgsize
 * bitmap). Returns 0 on success or a negative errno; skips the test when the
 * chosen configuration cannot exist on a 32 bit build.
 */
static int pt_kunit_priv_init(struct kunit *test, struct kunit_iommu_priv *priv)
{
	unsigned int va_lg2sz;
	int ret;

	/* Enough so the memory allocator works */
	priv->dummy_dev = kunit_device_register(test, "pt_kunit_dev");
	if (IS_ERR(priv->dummy_dev))
		return PTR_ERR(priv->dummy_dev);
	set_dev_node(priv->dummy_dev, NUMA_NO_NODE);

	spin_lock_init(&priv->top_lock);

#ifdef kunit_fmt_cfgs
	/* param_value is the 1-based index produced by kunit_pt_gen_params_cfg() */
	priv->cfg = kunit_fmt_cfgs[((uintptr_t)test->param_value) - 1];
	/*
	 * The format can set a list of features that the kunit_fmt_cfgs
	 * controls, other features default to on.
	 */
	priv->cfg.common.features |= PT_SUPPORTED_FEATURES &
				     (~KUNIT_FMT_FEATURES);
#else
	priv->cfg.common.features = PT_SUPPORTED_FEATURES;
#endif

	/* Defaults, for the kunit */
	if (!priv->cfg.common.hw_max_vasz_lg2)
		priv->cfg.common.hw_max_vasz_lg2 = PT_MAX_VA_ADDRESS_LG2;
	if (!priv->cfg.common.hw_max_oasz_lg2)
		priv->cfg.common.hw_max_oasz_lg2 = pt_max_oa_lg2(NULL);

	priv->fmt_table.iommu.nid = NUMA_NO_NODE;
	priv->fmt_table.iommu.driver_ops = &pt_kunit_driver_ops;
	priv->domain.ops = &kunit_pt_ops;
	ret = pt_iommu_init(&priv->fmt_table, &priv->cfg, GFP_KERNEL);
	if (ret) {
		/*
		 * -EOVERFLOW means the format's addressing needs exceed this
		 * build's types, not that anything is broken; skip instead of
		 * failing.
		 */
		if (ret == -EOVERFLOW)
			kunit_skip(
				test,
				"This configuration cannot be tested on 32 bit");
		return ret;
	}

	priv->iommu = &priv->fmt_table.iommu;
	priv->common = common_from_iommu(&priv->fmt_table.iommu);
	priv->iommu->ops->get_info(priv->iommu, &priv->info);

	/*
	 * size_t is used to pass the mapping length, it can be 32 bit, truncate
	 * the pagesizes so we don't use large sizes.
	 */
	priv->info.pgsize_bitmap = (size_t)priv->info.pgsize_bitmap;

	/* Lowest/highest set bit give the smallest/largest supported page size */
	priv->smallest_pgsz_lg2 = vaffs(priv->info.pgsize_bitmap);
	priv->smallest_pgsz = log2_to_int(priv->smallest_pgsz_lg2);
	priv->largest_pgsz_lg2 =
		vafls((dma_addr_t)priv->info.pgsize_bitmap) - 1;

	/* An arbitrary pattern, reduced to fit inside the table's OA width */
	priv->test_oa =
		oalog2_mod(0x74a71445deadbeef, priv->common->max_oasz_lg2);

	/*
	 * We run out of VA space if the mappings get too big, make something
	 * smaller that can safely pass through dma_addr_t API.
	 */
	va_lg2sz = priv->common->max_vasz_lg2;
	if (IS_32BIT && va_lg2sz > 32)
		va_lg2sz = 32;
	priv->safe_pgsize_bitmap =
		log2_mod(priv->info.pgsize_bitmap, va_lg2sz - 1);

	return 0;
}
180*1dd4187fSJason Gunthorpe 
181*1dd4187fSJason Gunthorpe #endif
182