xref: /linux/drivers/iommu/intel/nested.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0
/*
 * nested.c - nested mode translation support
 *
 * Copyright (C) 2023 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 *         Jacob Pan <jacob.jun.pan@linux.intel.com>
 *         Yi Liu <yi.l.liu@intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>

#include "iommu.h"
#include "pasid.h"

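/*
 * Attach @dev to a nested (stage-1) domain: verify that the parent
 * stage-2 domain is usable on @dev's IOMMU, assign a cache tag for
 * invalidation tracking, and install a nested PASID entry for the
 * RID (IOMMU_NO_PASID).
 */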
static int intel_nested_attach_dev(struct iommu_domain *domain,
				   struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu = info->iommu;
	unsigned long flags;
	int ret = 0;

	if (info->domain)
		device_block_translation(dev);

	if (iommu->agaw < dmar_domain->s2_domain->agaw) {
		dev_err_ratelimited(dev, "Adjusted guest address width not compatible\n");
		return -ENODEV;
	}

	/*
	 * A stage-1 domain cannot work alone; it is nested on an s2_domain.
	 * The s2_domain will be used in nested translation, hence it must
	 * be compatible with this IOMMU as well.
	 */
	ret = prepare_domain_attach_device(&dmar_domain->s2_domain->domain, dev);
	if (ret) {
		dev_err_ratelimited(dev, "s2 domain is not compatible\n");
		return ret;
	}

	ret = domain_attach_iommu(dmar_domain, iommu);
	if (ret) {
		dev_err_ratelimited(dev, "Failed to attach domain to iommu\n");
		return ret;
	}

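	/*
	 * Assign a cache tag for the RID so that IOTLB and device TLB
	 * invalidation on this nested domain reaches @dev.
	 */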
	ret = cache_tag_assign_domain(dmar_domain, dev, IOMMU_NO_PASID);
	if (ret)
		goto detach_iommu;

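	/*
	 * Install the nested PASID table entry, which references both the
	 * stage-1 page table and the parent stage-2 translation.
	 */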
	ret = intel_pasid_setup_nested(iommu, dev,
				       IOMMU_NO_PASID, dmar_domain);
	if (ret)
		goto unassign_tag;

	info->domain = dmar_domain;
	spin_lock_irqsave(&dmar_domain->lock, flags);
	list_add(&info->link, &dmar_domain->devices);
	spin_unlock_irqrestore(&dmar_domain->lock, flags);

	return 0;
unassign_tag:
	cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID);
detach_iommu:
	domain_detach_iommu(dmar_domain, iommu);

	return ret;
}

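/*
 * Free a nested domain: unlink it from the parent stage-2 domain's list
 * of stage-1 domains before releasing its memory.
 */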
static void intel_nested_domain_free(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dmar_domain *s2_domain = dmar_domain->s2_domain;

	spin_lock(&s2_domain->s1_lock);
	list_del(&dmar_domain->s2_link);
	spin_unlock(&s2_domain->s1_lock);
	kfree(dmar_domain->qi_batch);
	kfree(dmar_domain);
}

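/*
 * Process a user-provided array of IOMMU_HWPT_INVALIDATE_DATA_VTD_S1
 * requests. Each entry is validated and translated into a flush of the
 * affected range on this nested domain. array->entry_num is updated to
 * the number of entries successfully processed before returning.
 */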
static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
					      struct iommu_user_data_array *array)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct iommu_hwpt_vtd_s1_invalidate inv_entry;
	u32 index, processed = 0;
	int ret = 0;

	if (array->type != IOMMU_HWPT_INVALIDATE_DATA_VTD_S1) {
		ret = -EINVAL;
		goto out;
	}

	for (index = 0; index < array->entry_num; index++) {
		ret = iommu_copy_struct_from_user_array(&inv_entry, array,
							IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
							index, __reserved);
		if (ret)
			break;

		if ((inv_entry.flags & ~IOMMU_VTD_INV_FLAGS_LEAF) ||
		    inv_entry.__reserved) {
			ret = -EOPNOTSUPP;
			break;
		}

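		/*
		 * The invalidation address must be page aligned. npages ==
		 * U64_MAX requests invalidation of the whole address space,
		 * in which case addr must be zero.
		 */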
		if (!IS_ALIGNED(inv_entry.addr, VTD_PAGE_SIZE) ||
		    ((inv_entry.npages == U64_MAX) && inv_entry.addr)) {
			ret = -EINVAL;
			break;
		}

		cache_tag_flush_range(dmar_domain, inv_entry.addr,
				      inv_entry.addr + nrpages_to_size(inv_entry.npages) - 1,
				      inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
		processed++;
	}

out:
	array->entry_num = processed;
	return ret;
}

static const struct iommu_domain_ops intel_nested_domain_ops = {
	.attach_dev		= intel_nested_attach_dev,
	.free			= intel_nested_domain_free,
	.cache_invalidate_user	= intel_nested_cache_invalidate_user,
};

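/*
 * Allocate a nested (stage-1) domain on top of @parent, which must be a
 * stage-2 domain created as a nested parent. The stage-1 configuration
 * (guest page table pointer and attributes) is copied from @user_data.
 */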
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
					       const struct iommu_user_data *user_data)
{
	struct dmar_domain *s2_domain = to_dmar_domain(parent);
	struct iommu_hwpt_vtd_s1 vtd;
	struct dmar_domain *domain;
	int ret;

	/* Must be nested domain */
	if (user_data->type != IOMMU_HWPT_DATA_VTD_S1)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->ops != intel_iommu_ops.default_domain_ops ||
	    !s2_domain->nested_parent)
		return ERR_PTR(-EINVAL);

	ret = iommu_copy_struct_from_user(&vtd, user_data,
					  IOMMU_HWPT_DATA_VTD_S1, __reserved);
	if (ret)
		return ERR_PTR(ret);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL_ACCOUNT);
	if (!domain)
		return ERR_PTR(-ENOMEM);

	domain->use_first_level = true;
	domain->s2_domain = s2_domain;
	domain->s1_pgtbl = vtd.pgtbl_addr;
	domain->s1_cfg = vtd;
	domain->domain.ops = &intel_nested_domain_ops;
	domain->domain.type = IOMMU_DOMAIN_NESTED;
	INIT_LIST_HEAD(&domain->devices);
	INIT_LIST_HEAD(&domain->dev_pasids);
	INIT_LIST_HEAD(&domain->cache_tags);
	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->cache_lock);
	xa_init(&domain->iommu_array);

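	/*
	 * Link the new domain into the parent's list of stage-1 domains so
	 * that all domains nested on this stage-2 domain can be found, e.g.
	 * when the parent's mappings change and nested caches need flushing.
	 */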
	spin_lock(&s2_domain->s1_lock);
	list_add(&domain->s2_link, &s2_domain->s1_domains);
	spin_unlock(&s2_domain->s1_lock);

	return &domain->domain;
}