// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual I/O topology
 *
 * The Virtual I/O Translation Table (VIOT) describes the topology of
 * para-virtual IOMMUs and the endpoints they manage. The OS uses it to
 * initialize devices in the right order, preventing endpoints from issuing DMA
 * before their IOMMU is ready.
 *
 * When binding a driver to a device, before calling the device driver's probe()
 * method, the driver infrastructure calls dma_configure(). At that point the
 * VIOT driver looks for an IOMMU associated with the device in the VIOT table.
 * If an IOMMU exists and has been initialized, the VIOT driver initializes the
 * device's IOMMU fwspec, allowing the DMA infrastructure to invoke the IOMMU
 * ops when the device driver configures DMA mappings. If an IOMMU exists and
 * hasn't yet been initialized, VIOT returns -EPROBE_DEFER to postpone probing
 * the device until the IOMMU is available.
 */
#define pr_fmt(fmt) "ACPI: VIOT: " fmt

#include <linux/acpi_viot.h>
#include <linux/dma-iommu.h>
#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

struct viot_iommu {
	/* Node offset within the table */
	unsigned int			offset;
	struct fwnode_handle		*fwnode;
	struct list_head		list;
};

struct viot_endpoint {
	union {
		/* PCI range */
		struct {
			u16		segment_start;
			u16		segment_end;
			u16		bdf_start;
			u16		bdf_end;
		};
		/* MMIO */
		u64			address;
	};
	u32				endpoint_id;
	struct viot_iommu		*viommu;
	struct list_head		list;
};

static struct acpi_table_viot *viot;
static LIST_HEAD(viot_iommus);
static LIST_HEAD(viot_pci_ranges);
static LIST_HEAD(viot_mmio_endpoints);

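/* Check that a node lies entirely within the table and isn't truncated */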
static int __init viot_check_bounds(const struct acpi_viot_header *hdr)
{
	struct acpi_viot_header *start, *end, *hdr_end;

	start = ACPI_ADD_PTR(struct acpi_viot_header, viot,
			     max_t(size_t, sizeof(*viot), viot->node_offset));
	end = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->header.length);
	hdr_end = ACPI_ADD_PTR(struct acpi_viot_header, hdr, sizeof(*hdr));

	if (hdr < start || hdr_end > end) {
		pr_err(FW_BUG "Node pointer overflows\n");
		return -EOVERFLOW;
	}
	if (hdr->length < sizeof(*hdr)) {
		pr_err(FW_BUG "Empty node\n");
		return -EINVAL;
	}
	return 0;
}

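/*
 * Find the PCI device implementing the virtio-iommu at @segment/@bdf and
 * record its fwnode, allocating a static ACPI fwnode when the device isn't
 * described by ACPI.
 */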
static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
					    u16 segment, u16 bdf)
{
	struct pci_dev *pdev;
	struct fwnode_handle *fwnode;

	pdev = pci_get_domain_bus_and_slot(segment, PCI_BUS_NUM(bdf),
					   bdf & 0xff);
	if (!pdev) {
		pr_err("Could not find PCI IOMMU\n");
		return -ENODEV;
	}

	fwnode = pdev->dev.fwnode;
	if (!fwnode) {
		/*
		 * PCI devices aren't necessarily described by ACPI. Create a
		 * fwnode so the IOMMU subsystem can identify this device.
		 */
		fwnode = acpi_alloc_fwnode_static();
		if (!fwnode) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
		set_primary_fwnode(&pdev->dev, fwnode);
	}
	viommu->fwnode = pdev->dev.fwnode;
	pci_dev_put(pdev);
	return 0;
}

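/*
 * Find the ACPI device that claims the MMIO region at @address in its
 * current resource settings and record its fwnode.
 */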
static int __init viot_get_mmio_iommu_fwnode(struct viot_iommu *viommu,
					     u64 address)
{
	struct acpi_device *adev;
	struct resource res = {
		.start	= address,
		.end	= address,
		.flags	= IORESOURCE_MEM,
	};

	adev = acpi_resource_consumer(&res);
	if (!adev) {
		pr_err("Could not find MMIO IOMMU\n");
		return -EINVAL;
	}
	viommu->fwnode = &adev->fwnode;
	return 0;
}

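/*
 * Return the viot_iommu for the node at @offset, instantiating it and
 * resolving its fwnode on first use. Return NULL on failure; the caller
 * treats a missing IOMMU as non-fatal and keeps parsing.
 */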
static struct viot_iommu * __init viot_get_iommu(unsigned int offset)
{
	int ret;
	struct viot_iommu *viommu;
	struct acpi_viot_header *hdr = ACPI_ADD_PTR(struct acpi_viot_header,
						    viot, offset);
	union {
		struct acpi_viot_virtio_iommu_pci pci;
		struct acpi_viot_virtio_iommu_mmio mmio;
	} *node = (void *)hdr;

	list_for_each_entry(viommu, &viot_iommus, list)
		if (viommu->offset == offset)
			return viommu;

	if (viot_check_bounds(hdr))
		return NULL;

	viommu = kzalloc(sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return NULL;

	viommu->offset = offset;
	switch (hdr->type) {
	case ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI:
		if (hdr->length < sizeof(node->pci))
			goto err_free;

		ret = viot_get_pci_iommu_fwnode(viommu, node->pci.segment,
						node->pci.bdf);
		break;
	case ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO:
		if (hdr->length < sizeof(node->mmio))
			goto err_free;

		ret = viot_get_mmio_iommu_fwnode(viommu,
						 node->mmio.base_address);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		goto err_free;

	list_add(&viommu->list, &viot_iommus);
	return viommu;

err_free:
	kfree(viommu);
	return NULL;
}

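/*
 * Parse a PCI range or MMIO endpoint node and add it to the corresponding
 * list, linked to its output IOMMU. IOMMU nodes themselves are handled
 * lazily by viot_get_iommu() when referenced.
 */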
static int __init viot_parse_node(const struct acpi_viot_header *hdr)
{
	int ret = -EINVAL;
	struct list_head *list;
	struct viot_endpoint *ep;
	union {
		struct acpi_viot_mmio mmio;
		struct acpi_viot_pci_range pci;
	} *node = (void *)hdr;

	if (viot_check_bounds(hdr))
		return -EINVAL;

	if (hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI ||
	    hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO)
		return 0;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	switch (hdr->type) {
	case ACPI_VIOT_NODE_PCI_RANGE:
		if (hdr->length < sizeof(node->pci)) {
			pr_err(FW_BUG "Invalid PCI node size\n");
			goto err_free;
		}

		ep->segment_start = node->pci.segment_start;
		ep->segment_end = node->pci.segment_end;
		ep->bdf_start = node->pci.bdf_start;
		ep->bdf_end = node->pci.bdf_end;
		ep->endpoint_id = node->pci.endpoint_start;
		ep->viommu = viot_get_iommu(node->pci.output_node);
		list = &viot_pci_ranges;
		break;
	case ACPI_VIOT_NODE_MMIO:
		if (hdr->length < sizeof(node->mmio)) {
			pr_err(FW_BUG "Invalid MMIO node size\n");
			goto err_free;
		}

		ep->address = node->mmio.base_address;
		ep->endpoint_id = node->mmio.endpoint;
		ep->viommu = viot_get_iommu(node->mmio.output_node);
		list = &viot_mmio_endpoints;
		break;
	default:
		pr_warn("Unsupported node %x\n", hdr->type);
		ret = 0;
		goto err_free;
	}

	if (!ep->viommu) {
		pr_warn("No IOMMU node found\n");
		/*
		 * A future version of the table may use the node for other
		 * purposes. Keep parsing.
		 */
		ret = 0;
		goto err_free;
	}

	list_add(&ep->list, list);
	return 0;

err_free:
	kfree(ep);
	return ret;
}

/**
 * acpi_viot_init - Parse the VIOT table
 *
 * Parse the VIOT table and prepare the list of endpoints to be used during
 * DMA setup of devices.
 */
void __init acpi_viot_init(void)
{
	int i;
	acpi_status status;
	struct acpi_table_header *hdr;
	struct acpi_viot_header *node;

	status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}
		return;
	}

	viot = (void *)hdr;

	node = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->node_offset);
	for (i = 0; i < viot->node_count; i++) {
		if (viot_parse_node(node))
			break;

		node = ACPI_ADD_PTR(struct acpi_viot_header, node,
				    node->length);
	}

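	/*
	 * The table isn't needed after boot: endpoints have been copied into
	 * the lists above, and the IOMMU fwnodes reference devices rather
	 * than the table itself.
	 */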
	acpi_put_table(hdr);
}

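/*
 * Initialize the IOMMU fwspec of @dev for endpoint @epid behind @viommu.
 * Return -EPROBE_DEFER if the virtio-iommu driver hasn't registered its ops
 * for this fwnode yet.
 */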
static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
			       u32 epid)
{
	const struct iommu_ops *ops;

	if (!viommu)
		return -ENODEV;

	/* We're not translating ourselves */
	if (viommu->fwnode == dev->fwnode)
		return -EINVAL;

	ops = iommu_ops_from_fwnode(viommu->fwnode);
	if (!ops)
		return IS_ENABLED(CONFIG_VIRTIO_IOMMU) ?
			-EPROBE_DEFER : -ENODEV;

	return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode, ops);
}

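/*
 * Called for each DMA alias of @pdev: find the PCI range containing @dev_id
 * and set up the corresponding endpoint ID.
 */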
static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
{
	u32 epid;
	struct viot_endpoint *ep;
	u32 domain_nr = pci_domain_nr(pdev->bus);

	list_for_each_entry(ep, &viot_pci_ranges, list) {
		if (domain_nr >= ep->segment_start &&
		    domain_nr <= ep->segment_end &&
		    dev_id >= ep->bdf_start &&
		    dev_id <= ep->bdf_end) {
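			/*
			 * Endpoint IDs grow linearly with the BDF within a
			 * segment and jump by 1 << 16 per segment. For
			 * example, with endpoint_start 8 and bdf_start 0x08,
			 * BDF 0x0a in the first segment maps to endpoint ID
			 * 10.
			 */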
			epid = ((domain_nr - ep->segment_start) << 16) +
				dev_id - ep->bdf_start + ep->endpoint_id;

			/*
			 * If we found a PCI range managed by the viommu, we're
			 * the one that has to request ACS.
			 */
			pci_request_acs();

			return viot_dev_iommu_init(&pdev->dev, ep->viommu,
						   epid);
		}
	}
	return -ENODEV;
}

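/*
 * Match a platform device to an MMIO endpoint by the base address of its
 * first memory resource.
 */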
static int viot_mmio_dev_iommu_init(struct platform_device *pdev)
{
	struct resource *mem;
	struct viot_endpoint *ep;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;

	list_for_each_entry(ep, &viot_mmio_endpoints, list) {
		if (ep->address == mem->start)
			return viot_dev_iommu_init(&pdev->dev, ep->viommu,
						   ep->endpoint_id);
	}
	return -ENODEV;
}

/**
 * viot_iommu_configure - Set up IOMMU ops for an endpoint described by VIOT
 * @dev: the endpoint
 *
 * Return: 0 on success, <0 on failure
 */
int viot_iommu_configure(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_for_each_dma_alias(to_pci_dev(dev),
					      viot_pci_dev_iommu_init, NULL);
	else if (dev_is_platform(dev))
		return viot_mmio_dev_iommu_init(to_platform_device(dev));
	return -ENODEV;
}