// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual I/O topology
 *
 * The Virtual I/O Translation Table (VIOT) describes the topology of
 * para-virtual IOMMUs and the endpoints they manage. The OS uses it to
 * initialize devices in the right order, preventing endpoints from issuing DMA
 * before their IOMMU is ready.
 *
 * When binding a driver to a device, before calling the device driver's probe()
 * method, the driver infrastructure calls dma_configure(). At that point the
 * VIOT driver looks for an IOMMU associated with the device in the VIOT table.
 * If an IOMMU exists and has been initialized, the VIOT driver initializes the
 * device's IOMMU fwspec, allowing the DMA infrastructure to invoke the IOMMU
 * ops when the device driver configures DMA mappings. If an IOMMU exists but
 * hasn't yet been initialized, VIOT returns -EPROBE_DEFER to postpone probing
 * the device until the IOMMU is available.
 */
#define pr_fmt(fmt) "ACPI: VIOT: " fmt

#include <linux/acpi_viot.h>
#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

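/*
 * An IOMMU described by the table. The node offset uniquely identifies the
 * IOMMU within the table, and the fwnode lets endpoints refer to the IOMMU
 * device once it has been discovered.
 */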
struct viot_iommu {
	/* Node offset within the table */
	unsigned int offset;
	struct fwnode_handle *fwnode;
	struct list_head list;
};

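/*
 * An endpoint node: either a range of PCI devices (identified by segment and
 * BDF ranges) or a single MMIO endpoint (identified by its base address),
 * along with the IOMMU that translates its DMA.
 */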
struct viot_endpoint {
	union {
		/* PCI range */
		struct {
			u16 segment_start;
			u16 segment_end;
			u16 bdf_start;
			u16 bdf_end;
		};
		/* MMIO */
		u64 address;
	};
	u32 endpoint_id;
	struct viot_iommu *viommu;
	struct list_head list;
};

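/*
 * The table mapping is only needed while parsing; the lists built from it
 * outlive it and are matched against devices at DMA configuration time.
 */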
static struct acpi_table_viot *viot;
static LIST_HEAD(viot_iommus);
static LIST_HEAD(viot_pci_ranges);
static LIST_HEAD(viot_mmio_endpoints);

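/*
 * Check that a node header lies entirely within the table and is at least
 * large enough to contain its own header.
 */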
static int __init viot_check_bounds(const struct acpi_viot_header *hdr)
{
	struct acpi_viot_header *start, *end, *hdr_end;

	start = ACPI_ADD_PTR(struct acpi_viot_header, viot,
			     max_t(size_t, sizeof(*viot), viot->node_offset));
	end = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->header.length);
	hdr_end = ACPI_ADD_PTR(struct acpi_viot_header, hdr, sizeof(*hdr));

	if (hdr < start || hdr_end > end) {
		pr_err(FW_BUG "Node pointer overflows\n");
		return -EOVERFLOW;
	}
	if (hdr->length < sizeof(*hdr)) {
		pr_err(FW_BUG "Empty node\n");
		return -EINVAL;
	}
	return 0;
}

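/*
 * Find the fwnode of the PCI device implementing the virtio-iommu. If firmware
 * didn't provide one, allocate a static fwnode so the IOMMU subsystem can
 * still identify the device.
 */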
static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
					    u16 segment, u16 bdf)
{
	struct pci_dev *pdev;
	struct fwnode_handle *fwnode;

	pdev = pci_get_domain_bus_and_slot(segment, PCI_BUS_NUM(bdf),
					   bdf & 0xff);
	if (!pdev) {
		pr_err("Could not find PCI IOMMU\n");
		return -ENODEV;
	}

	fwnode = dev_fwnode(&pdev->dev);
	if (!fwnode) {
		/*
		 * PCI devices aren't necessarily described by ACPI. Create a
		 * fwnode so the IOMMU subsystem can identify this device.
		 */
		fwnode = acpi_alloc_fwnode_static();
		if (!fwnode) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
		set_primary_fwnode(&pdev->dev, fwnode);
	}
	viommu->fwnode = dev_fwnode(&pdev->dev);
	pci_dev_put(pdev);
	return 0;
}

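/*
 * Find the ACPI device that consumes the IOMMU's MMIO region, and use its
 * fwnode to identify the IOMMU.
 */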
static int __init viot_get_mmio_iommu_fwnode(struct viot_iommu *viommu,
					     u64 address)
{
	struct acpi_device *adev;
	struct resource res = {
		.start	= address,
		.end	= address,
		.flags	= IORESOURCE_MEM,
	};

	adev = acpi_resource_consumer(&res);
	if (!adev) {
		pr_err("Could not find MMIO IOMMU\n");
		return -EINVAL;
	}
	viommu->fwnode = &adev->fwnode;
	return 0;
}

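/*
 * Return the virtio-iommu node at @offset in the table, instantiating and
 * caching it on first use. Returns NULL if the node is out of bounds, has an
 * unknown type, or its fwnode cannot be resolved.
 */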
static struct viot_iommu * __init viot_get_iommu(unsigned int offset)
{
	int ret;
	struct viot_iommu *viommu;
	struct acpi_viot_header *hdr = ACPI_ADD_PTR(struct acpi_viot_header,
						    viot, offset);
	union {
		struct acpi_viot_virtio_iommu_pci pci;
		struct acpi_viot_virtio_iommu_mmio mmio;
	} *node = (void *)hdr;

	list_for_each_entry(viommu, &viot_iommus, list)
		if (viommu->offset == offset)
			return viommu;

	if (viot_check_bounds(hdr))
		return NULL;

	viommu = kzalloc(sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return NULL;

	viommu->offset = offset;
	switch (hdr->type) {
	case ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI:
		if (hdr->length < sizeof(node->pci))
			goto err_free;

		ret = viot_get_pci_iommu_fwnode(viommu, node->pci.segment,
						node->pci.bdf);
		break;
	case ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO:
		if (hdr->length < sizeof(node->mmio))
			goto err_free;

		ret = viot_get_mmio_iommu_fwnode(viommu,
						 node->mmio.base_address);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		goto err_free;

	list_add(&viommu->list, &viot_iommus);
	return viommu;

err_free:
	kfree(viommu);
	return NULL;
}

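/*
 * Parse a single VIOT node and, if it describes an endpoint, add it to the
 * PCI range or MMIO endpoint list used at DMA configuration time.
 */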
static int __init viot_parse_node(const struct acpi_viot_header *hdr)
{
	int ret = -EINVAL;
	struct list_head *list;
	struct viot_endpoint *ep;
	union {
		struct acpi_viot_mmio mmio;
		struct acpi_viot_pci_range pci;
	} *node = (void *)hdr;

	if (viot_check_bounds(hdr))
		return -EINVAL;

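	/*
	 * virtio-iommu nodes are instantiated on demand by viot_get_iommu()
	 * when an endpoint references them through output_node, so there is
	 * nothing to do for them here.
	 */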
	if (hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI ||
	    hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO)
		return 0;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	switch (hdr->type) {
	case ACPI_VIOT_NODE_PCI_RANGE:
		if (hdr->length < sizeof(node->pci)) {
			pr_err(FW_BUG "Invalid PCI node size\n");
			goto err_free;
		}

		ep->segment_start = node->pci.segment_start;
		ep->segment_end = node->pci.segment_end;
		ep->bdf_start = node->pci.bdf_start;
		ep->bdf_end = node->pci.bdf_end;
		ep->endpoint_id = node->pci.endpoint_start;
		ep->viommu = viot_get_iommu(node->pci.output_node);
		list = &viot_pci_ranges;
		break;
	case ACPI_VIOT_NODE_MMIO:
		if (hdr->length < sizeof(node->mmio)) {
			pr_err(FW_BUG "Invalid MMIO node size\n");
			goto err_free;
		}

		ep->address = node->mmio.base_address;
		ep->endpoint_id = node->mmio.endpoint;
		ep->viommu = viot_get_iommu(node->mmio.output_node);
		list = &viot_mmio_endpoints;
		break;
	default:
		pr_warn("Unsupported node %x\n", hdr->type);
		ret = 0;
		goto err_free;
	}

	if (!ep->viommu) {
		pr_warn("No IOMMU node found\n");
		/*
		 * A future version of the table may use the node for other
		 * purposes. Keep parsing.
		 */
		ret = 0;
		goto err_free;
	}

	list_add(&ep->list, list);
	return 0;

err_free:
	kfree(ep);
	return ret;
}

/**
 * acpi_viot_early_init - Test the presence of VIOT and enable ACS
 *
 * If the VIOT table is present, ACS must be enabled before the PCI bus is
 * scanned, which is too late for acpi_viot_init(), so request it here.
 */
void __init acpi_viot_early_init(void)
{
#ifdef CONFIG_PCI
	acpi_status status;
	struct acpi_table_header *hdr;

	status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
	if (ACPI_FAILURE(status))
		return;
	pci_request_acs();
	acpi_put_table(hdr);
#endif
}

/**
 * acpi_viot_init - Parse the VIOT table
 *
 * Parse the VIOT table, prepare the list of endpoints to be used during DMA
 * setup of devices.
 */
void __init acpi_viot_init(void)
{
	int i;
	acpi_status status;
	struct acpi_table_header *hdr;
	struct acpi_viot_header *node;

	status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}
		return;
	}

	viot = (void *)hdr;

	node = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->node_offset);
	for (i = 0; i < viot->node_count; i++) {
		/* Stop at the first malformed node, but still put the table */
		if (viot_parse_node(node))
			break;

		node = ACPI_ADD_PTR(struct acpi_viot_header, node,
				    node->length);
	}

	acpi_put_table(hdr);
}

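/*
 * Set up @dev's IOMMU fwspec so the DMA layer routes its mappings through the
 * virtio-iommu. Refuse to translate the IOMMU itself.
 */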
static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
			       u32 epid)
{
	if (!viommu || !IS_ENABLED(CONFIG_VIRTIO_IOMMU))
		return -ENODEV;

	/* We're not translating ourselves */
	if (device_match_fwnode(dev, viommu->fwnode))
		return -EINVAL;

	return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode);
}

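/*
 * Callback for pci_for_each_dma_alias(): match a DMA alias against the PCI
 * range endpoints and compute its endpoint ID.
 */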
static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
{
	u32 epid;
	struct viot_endpoint *ep;
	struct device *aliased_dev = data;
	u32 domain_nr = pci_domain_nr(pdev->bus);

	list_for_each_entry(ep, &viot_pci_ranges, list) {
		if (domain_nr >= ep->segment_start &&
		    domain_nr <= ep->segment_end &&
		    dev_id >= ep->bdf_start &&
		    dev_id <= ep->bdf_end) {
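			/*
			 * Endpoint IDs grow linearly with the BDF within a
			 * segment; segments are stacked 16 bits apart. As a
			 * hypothetical example, with segment_start == 0,
			 * bdf_start == 0x08 and endpoint_id == 8, device
			 * 0000:00:02.0 (dev_id 0x10) gets
			 * epid = 0x10 - 0x08 + 8 = 16.
			 */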
			epid = ((domain_nr - ep->segment_start) << 16) +
				dev_id - ep->bdf_start + ep->endpoint_id;

			return viot_dev_iommu_init(aliased_dev, ep->viommu,
						   epid);
		}
	}
	return -ENODEV;
}

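/*
 * Match a platform device against the MMIO endpoints by the start address of
 * its first memory resource.
 */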
static int viot_mmio_dev_iommu_init(struct platform_device *pdev)
{
	struct resource *mem;
	struct viot_endpoint *ep;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;

	list_for_each_entry(ep, &viot_mmio_endpoints, list) {
		if (ep->address == mem->start)
			return viot_dev_iommu_init(&pdev->dev, ep->viommu,
						   ep->endpoint_id);
	}
	return -ENODEV;
}

/**
 * viot_iommu_configure - Set up IOMMU ops for an endpoint described by VIOT
 * @dev: the endpoint
 *
 * Return: 0 on success, <0 on failure
 */
int viot_iommu_configure(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_for_each_dma_alias(to_pci_dev(dev),
					      viot_pci_dev_iommu_init, dev);
	else if (dev_is_platform(dev))
		return viot_mmio_dev_iommu_init(to_platform_device(dev));
	return -ENODEV;
}