/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
 */
#ifndef __GENERIC_PT_KUNIT_IOMMU_H
#define __GENERIC_PT_KUNIT_IOMMU_H

#define GENERIC_PT_KUNIT 1
#include <kunit/device.h>
#include <kunit/test.h>
#include "../iommu-pages.h"
#include "pt_iter.h"

#define pt_iommu_table_cfg CONCATENATE(pt_iommu_table, _cfg)
#define pt_iommu_init CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), init)
int pt_iommu_init(struct pt_iommu_table *fmt_table,
		  const struct pt_iommu_table_cfg *cfg, gfp_t gfp);
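/*
 * The two names above are assembled from the per-format prefix by
 * CONCATENATE(); for a hypothetical format compiled with PTPFX == myfmt_ the
 * init call resolves to pt_iommu_myfmt_init(), taking that format's table and
 * cfg structures.
 */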

/* The format can provide a list of configurations it would like to test */
#ifdef kunit_fmt_cfgs
static const void *kunit_pt_gen_params_cfg(struct kunit *test,
					    const void *prev, char *desc)
{
	uintptr_t cfg_id = (uintptr_t)prev;

	cfg_id++;
	if (cfg_id >= ARRAY_SIZE(kunit_fmt_cfgs) + 1)
		return NULL;
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s_cfg_%u",
		 __stringify(PTPFX_RAW), (unsigned int)(cfg_id - 1));
	return (void *)cfg_id;
}
#define KUNIT_CASE_FMT(test_name) \
	KUNIT_CASE_PARAM(test_name, kunit_pt_gen_params_cfg)
#else
#define KUNIT_CASE_FMT(test_name) KUNIT_CASE(test_name)
#endif
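
/*
 * A minimal sketch (hypothetical values) of what a format's kunit header
 * could provide before including this file; each kunit_fmt_cfgs entry becomes
 * one test parameter named <prefix>_cfg_<index>:
 *
 *	static const struct pt_iommu_table_cfg kunit_fmt_cfgs[] = {
 *		{ .common = { .hw_max_vasz_lg2 = 39 } },
 *		{ .common = { .hw_max_vasz_lg2 = 48 } },
 *	};
 *	#define kunit_fmt_cfgs kunit_fmt_cfgs
 *	#define KUNIT_FMT_FEATURES 0
 */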

#define KUNIT_ASSERT_NO_ERRNO(test, ret) \
	KUNIT_ASSERT_EQ_MSG(test, ret, 0, KUNIT_SUBSUBTEST_INDENT "errno %pe", \
			    ERR_PTR(ret))

#define KUNIT_ASSERT_NO_ERRNO_FN(test, fn, ret) \
	KUNIT_ASSERT_EQ_MSG(test, ret, 0, \
			    KUNIT_SUBSUBTEST_INDENT "errno %pe from %s", \
			    ERR_PTR(ret), fn)
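
/*
 * Typical use in a test body (hypothetical iommu_map() call shown only for
 * illustration):
 *
 *	ret = iommu_map(&priv->domain, va, pa, len, IOMMU_READ | IOMMU_WRITE,
 *			GFP_KERNEL);
 *	KUNIT_ASSERT_NO_ERRNO_FN(test, "iommu_map", ret);
 */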

/*
 * When the test is run on a 32 bit system unsigned long can be 32 bits, which
 * restricts the iommu op signatures to 32 bits. This means the test has to be
 * mindful not to create any VAs over the 32 bit limit. Reduce the scope of
 * the testing, since the main purpose of checking a full 32 bit build is to
 * look for 32-bit-isms in the core code. Run the test on i386 with X86_PAE=y
 * to get the full coverage where dma_addr_t & phys_addr_t are 8 bytes.
 */
#define IS_32BIT (sizeof(unsigned long) == 4)

struct kunit_iommu_priv {
	union {
		struct iommu_domain domain;
		struct pt_iommu_table fmt_table;
	};
	spinlock_t top_lock;
	struct device *dummy_dev;
	struct pt_iommu *iommu;
	struct pt_common *common;
	struct pt_iommu_table_cfg cfg;
	struct pt_iommu_info info;
	unsigned int smallest_pgsz_lg2;
	pt_vaddr_t smallest_pgsz;
	unsigned int largest_pgsz_lg2;
	pt_oaddr_t test_oa;
	pt_vaddr_t safe_pgsize_bitmap;
	unsigned long orig_nr_secondary_pagetable;
};
PT_IOMMU_CHECK_DOMAIN(struct kunit_iommu_priv, fmt_table.iommu, domain);

static void pt_kunit_iotlb_sync(struct iommu_domain *domain,
				struct iommu_iotlb_gather *gather)
{
	iommu_put_pages_list(&gather->freelist);
}

#define IOMMU_PT_DOMAIN_OPS1(x) IOMMU_PT_DOMAIN_OPS(x)
static const struct iommu_domain_ops kunit_pt_ops = {
	IOMMU_PT_DOMAIN_OPS1(PTPFX_RAW),
	.iotlb_sync = &pt_kunit_iotlb_sync,
};

static void pt_kunit_change_top(struct pt_iommu *iommu_table,
				phys_addr_t top_paddr, unsigned int top_level)
{
	/* Nothing to program, the kunit has no HW behind the table */
}

static spinlock_t *pt_kunit_get_top_lock(struct pt_iommu *iommu_table)
{
	struct kunit_iommu_priv *priv = container_of(
		iommu_table, struct kunit_iommu_priv, fmt_table.iommu);

	return &priv->top_lock;
}

static const struct pt_iommu_driver_ops pt_kunit_driver_ops = {
	.change_top = &pt_kunit_change_top,
	.get_top_lock = &pt_kunit_get_top_lock,
};

static int pt_kunit_priv_init(struct kunit *test, struct kunit_iommu_priv *priv)
{
	unsigned int va_lg2sz;
	int ret;

	/* Enough of a device that the memory allocator works */
	priv->dummy_dev = kunit_device_register(test, "pt_kunit_dev");
	if (IS_ERR(priv->dummy_dev))
		return PTR_ERR(priv->dummy_dev);
	set_dev_node(priv->dummy_dev, NUMA_NO_NODE);

	spin_lock_init(&priv->top_lock);

#ifdef kunit_fmt_cfgs
	priv->cfg = kunit_fmt_cfgs[((uintptr_t)test->param_value) - 1];
	/*
	 * The format can list the features that the kunit_fmt_cfgs entries
	 * control (KUNIT_FMT_FEATURES); all other supported features default
	 * to on.
	 */
	priv->cfg.common.features |= PT_SUPPORTED_FEATURES &
				     (~KUNIT_FMT_FEATURES);
#else
	priv->cfg.common.features = PT_SUPPORTED_FEATURES;
#endif

	/* Defaults, for the kunit */
	if (!priv->cfg.common.hw_max_vasz_lg2)
		priv->cfg.common.hw_max_vasz_lg2 = PT_MAX_VA_ADDRESS_LG2;
	if (!priv->cfg.common.hw_max_oasz_lg2)
		priv->cfg.common.hw_max_oasz_lg2 = pt_max_oa_lg2(NULL);

	priv->fmt_table.iommu.nid = NUMA_NO_NODE;
	priv->fmt_table.iommu.driver_ops = &pt_kunit_driver_ops;
	priv->fmt_table.iommu.iommu_device = priv->dummy_dev;
	priv->domain.ops = &kunit_pt_ops;
	ret = pt_iommu_init(&priv->fmt_table, &priv->cfg, GFP_KERNEL);
	if (ret) {
		if (ret == -EOVERFLOW)
			kunit_skip(test,
				   "This configuration cannot be tested on 32 bit");
		return ret;
	}

	priv->iommu = &priv->fmt_table.iommu;
	priv->common = common_from_iommu(&priv->fmt_table.iommu);
	priv->iommu->ops->get_info(priv->iommu, &priv->info);

	/*
	 * size_t is used to pass the mapping length and it can be 32 bit;
	 * truncate the page sizes so we don't use sizes it cannot represent.
	 */
	priv->info.pgsize_bitmap = (size_t)priv->info.pgsize_bitmap;

	priv->smallest_pgsz_lg2 = vaffs(priv->info.pgsize_bitmap);
	priv->smallest_pgsz = log2_to_int(priv->smallest_pgsz_lg2);
	priv->largest_pgsz_lg2 =
		vafls((dma_addr_t)priv->info.pgsize_bitmap) - 1;

	priv->test_oa =
		oalog2_mod(0x74a71445deadbeef, priv->common->max_oasz_lg2);

	/*
	 * We run out of VA space if the mappings get too big; make something
	 * smaller that can safely pass through the dma_addr_t API.
	 */
	va_lg2sz = priv->common->max_vasz_lg2;
	if (IS_32BIT && va_lg2sz > 32)
		va_lg2sz = 32;
	priv->safe_pgsize_bitmap =
		log2_mod(priv->info.pgsize_bitmap, va_lg2sz - 1);

	return 0;
}
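
/*
 * A sketch (hypothetical suite/test names) of how a test suite's init hook is
 * expected to use pt_kunit_priv_init():
 *
 *	static int pt_kunit_generic_test_init(struct kunit *test)
 *	{
 *		struct kunit_iommu_priv *priv;
 *		int ret;
 *
 *		priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
 *		KUNIT_ASSERT_NOT_NULL(test, priv);
 *		ret = pt_kunit_priv_init(test, priv);
 *		KUNIT_ASSERT_NO_ERRNO(test, ret);
 *		test->priv = priv;
 *		return 0;
 *	}
 */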

#endif