1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2024-2025, Ventana Micro Systems Inc
4 * Author: Sunil V L <sunilvl@ventanamicro.com>
5 *
6 */
7
8 #define pr_fmt(fmt) "ACPI: RIMT: " fmt
9
10 #include <linux/acpi.h>
11 #include <linux/acpi_rimt.h>
12 #include <linux/iommu.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/platform_device.h>
16 #include "init.h"
17
/*
 * Association between a RIMT IOMMU node and the fwnode of the IOMMU
 * device it describes; entries live on rimt_fwnode_list and are
 * protected by rimt_fwnode_lock.
 */
struct rimt_fwnode {
	struct list_head list;		/* link on rimt_fwnode_list */
	struct acpi_rimt_node *rimt_node; /* IOMMU node inside the RIMT table */
	struct fwnode_handle *fwnode;	/* fwnode of the matching IOMMU device */
};
23
/* Registered IOMMU-node/fwnode associations, protected by rimt_fwnode_lock */
static LIST_HEAD(rimt_fwnode_list);
static DEFINE_SPINLOCK(rimt_fwnode_lock);

/*
 * Node-type selection masks used when walking the RIMT ID mapping tree.
 * RIMT_IOMMU_TYPE is BIT(0), i.e. it assumes the IOMMU node type value
 * is 0 — matches RIMT_TYPE_MASK(ACPI_RIMT_NODE_TYPE_IOMMU) only if so.
 */
#define RIMT_TYPE_MASK(type) (1 << (type))
#define RIMT_IOMMU_TYPE BIT(0)

/* Root pointer to the mapped RIMT table */
static struct acpi_table_header *rimt_table;
32
33 /**
34 * rimt_set_fwnode() - Create rimt_fwnode and use it to register
35 * iommu data in the rimt_fwnode_list
36 *
37 * @rimt_node: RIMT table node associated with the IOMMU
38 * @fwnode: fwnode associated with the RIMT node
39 *
40 * Returns: 0 on success
41 * <0 on failure
42 */
rimt_set_fwnode(struct acpi_rimt_node * rimt_node,struct fwnode_handle * fwnode)43 static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
44 struct fwnode_handle *fwnode)
45 {
46 struct rimt_fwnode *np;
47
48 np = kzalloc_obj(*np, GFP_ATOMIC);
49
50 if (WARN_ON(!np))
51 return -ENOMEM;
52
53 INIT_LIST_HEAD(&np->list);
54 np->rimt_node = rimt_node;
55 np->fwnode = fwnode;
56
57 spin_lock(&rimt_fwnode_lock);
58 list_add_tail(&np->list, &rimt_fwnode_list);
59 spin_unlock(&rimt_fwnode_lock);
60
61 return 0;
62 }
63
/*
 * Match callback used by rimt_scan_node(): decide whether @node, a node
 * inside the RIMT table, describes the device passed in @context.
 *
 * Matching rules by node type:
 *  - IOMMU node: a PCI IOMMU matches on PCI segment number + BDF; a
 *    platform IOMMU matches on the base address of its first MEM resource.
 *  - PCIe root complex node: matches on the PCI segment (domain) number;
 *    @context is expected to be the root bus device in this case.
 *  - Platform device node: matches the full ACPI namespace pathname of the
 *    device's (or nearest ancestor's) ACPI companion against the node's
 *    device_name string.
 *
 * Returns AE_OK on a match, AE_NOT_FOUND (or the acpi_get_name() error)
 * otherwise.
 */
static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
					    void *context)
{
	acpi_status status = AE_NOT_FOUND;
	struct device *dev = context;

	if (node->type == ACPI_RIMT_NODE_TYPE_IOMMU) {
		struct acpi_rimt_iommu *iommu_node = (struct acpi_rimt_iommu *)&node->node_data;

		if (dev_is_pci(dev)) {
			struct pci_dev *pdev;
			u16 bdf;

			pdev = to_pci_dev(dev);
			bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
			if ((pci_domain_nr(pdev->bus) == iommu_node->pcie_segment_number) &&
			    bdf == iommu_node->pcie_bdf) {
				status = AE_OK;
			} else {
				status = AE_NOT_FOUND;
			}
		} else {
			struct platform_device *pdev = to_platform_device(dev);
			struct resource *res;

			res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
			if (res && res->start == iommu_node->base_address)
				status = AE_OK;
			else
				status = AE_NOT_FOUND;
		}
	} else if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
		struct acpi_rimt_pcie_rc *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers maps one-to-one
		 * with root complexes. Each segment number can represent only
		 * one root complex.
		 */
		status = pci_rc->pcie_segment_number == pci_domain_nr(bus) ?
			 AE_OK : AE_NOT_FOUND;
	} else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_rimt_platform_device *ncomp;
		struct device *plat_dev = dev;
		struct acpi_device *adev;

		/*
		 * Walk the device tree to find a device with an
		 * ACPI companion; there is no point in scanning
		 * RIMT for a device matching a platform device if
		 * the device does not have an ACPI companion to
		 * start with.
		 */
		do {
			adev = ACPI_COMPANION(plat_dev);
			if (adev)
				break;

			plat_dev = plat_dev->parent;
		} while (plat_dev);

		if (!adev)
			return status;

		/* buf is allocated by ACPICA and must be freed below */
		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(plat_dev, "Can't get device full path name\n");
			return status;
		}

		ncomp = (struct acpi_rimt_platform_device *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
			 AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	}

	return status;
}
147
rimt_scan_node(enum acpi_rimt_node_type type,void * context)148 static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
149 void *context)
150 {
151 struct acpi_rimt_node *rimt_node, *rimt_end;
152 struct acpi_table_rimt *rimt;
153 int i;
154
155 if (!rimt_table)
156 return NULL;
157
158 /* Get the first RIMT node */
159 rimt = (struct acpi_table_rimt *)rimt_table;
160 rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt,
161 rimt->node_offset);
162 rimt_end = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
163 rimt_table->length);
164
165 for (i = 0; i < rimt->num_nodes; i++) {
166 if (WARN_TAINT(rimt_node >= rimt_end, TAINT_FIRMWARE_WORKAROUND,
167 "RIMT node pointer overflows, bad table!\n"))
168 return NULL;
169
170 if (rimt_node->type == type &&
171 ACPI_SUCCESS(rimt_match_node_callback(rimt_node, context)))
172 return rimt_node;
173
174 rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_node,
175 rimt_node->length);
176 }
177
178 return NULL;
179 }
180
181 /*
182 * RISC-V supports IOMMU as a PCI device or a platform device.
183 * When it is a platform device, there should be a namespace device as
184 * well along with RIMT. To create the link between RIMT information and
185 * the platform device, the IOMMU driver should register itself with the
186 * RIMT module. This is true for PCI based IOMMU as well.
187 */
rimt_iommu_register(struct device * dev)188 int rimt_iommu_register(struct device *dev)
189 {
190 struct fwnode_handle *rimt_fwnode;
191 struct acpi_rimt_node *node;
192
193 node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
194 if (!node) {
195 pr_err("Could not find IOMMU node in RIMT\n");
196 return -ENODEV;
197 }
198
199 if (dev_is_pci(dev)) {
200 rimt_fwnode = acpi_alloc_fwnode_static();
201 if (!rimt_fwnode)
202 return -ENOMEM;
203
204 rimt_fwnode->dev = dev;
205 if (!dev->fwnode)
206 dev->fwnode = rimt_fwnode;
207
208 rimt_set_fwnode(node, rimt_fwnode);
209 } else {
210 rimt_set_fwnode(node, dev->fwnode);
211 }
212
213 return 0;
214 }
215
216 #ifdef CONFIG_IOMMU_API
217
218 /**
219 * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
220 *
221 * @node: RIMT table node to be looked-up
222 *
223 * Returns: fwnode_handle pointer on success, NULL on failure
224 */
rimt_get_fwnode(struct acpi_rimt_node * node)225 static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
226 {
227 struct fwnode_handle *fwnode = NULL;
228 struct rimt_fwnode *curr;
229
230 spin_lock(&rimt_fwnode_lock);
231 list_for_each_entry(curr, &rimt_fwnode_list, list) {
232 if (curr->rimt_node == node) {
233 fwnode = curr->fwnode;
234 break;
235 }
236 }
237 spin_unlock(&rimt_fwnode_lock);
238
239 return fwnode;
240 }
241
rimt_pcie_rc_supports_ats(struct acpi_rimt_node * node)242 static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
243 {
244 struct acpi_rimt_pcie_rc *pci_rc;
245
246 pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
247 return pci_rc->flags & ACPI_RIMT_PCIE_ATS_SUPPORTED;
248 }
249
/*
 * Bind @dev to the IOMMU described by @node with stream/device ID
 * @deviceid. Returns -ENODEV when @node is NULL, -EPROBE_DEFER when the
 * IOMMU driver has not registered its fwnode yet, otherwise the result
 * of acpi_iommu_fwspec_init().
 */
static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32 deviceid)
{
	struct fwnode_handle *iommu_fwnode;

	if (!node)
		return -ENODEV;

	/*
	 * No fwnode registered yet means the IOMMU driver has not probed;
	 * defer the IOMMU configuration.
	 */
	iommu_fwnode = rimt_get_fwnode(node);
	if (!iommu_fwnode)
		return -EPROBE_DEFER;

	/*
	 * EPROBE_DEFER ensures IOMMU is probed before the devices that
	 * depend on them. During shutdown, however, the IOMMU may be removed
	 * first, leading to issues. To avoid this, a device link is added
	 * which enforces the correct removal order.
	 */
	device_link_add(dev, iommu_fwnode->dev, DL_FLAG_AUTOREMOVE_CONSUMER);

	return acpi_iommu_fwspec_init(dev, deviceid, iommu_fwnode);
}
275
/*
 * Context passed through pci_for_each_dma_alias() to
 * rimt_pci_iommu_init(): the device being configured, its root complex
 * RIMT node, and (currently unused here) the IOMMU ops.
 */
struct rimt_pci_alias_info {
	struct device *dev;		/* PCI device being configured */
	struct acpi_rimt_node *node;	/* its PCIe root complex RIMT node */
	const struct iommu_ops *ops;
};
281
/*
 * Translate @rid_in through one ID mapping entry. Writes the translated
 * ID to @rid_out and returns 0 when @rid_in falls in the entry's source
 * range, -ENXIO otherwise. @type is currently unused; kept for symmetry
 * with the caller's per-node-type dispatch.
 */
static int rimt_id_map(struct acpi_rimt_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out)
{
	if (rid_in < map->source_id_base)
		return -ENXIO;
	if (rid_in > map->source_id_base + map->num_ids)
		return -ENXIO;

	*rid_out = map->dest_id_base + (rid_in - map->source_id_base);
	return 0;
}
291
/*
 * rimt_node_get_id() - Fetch the @index'th ID mapping of @node
 * @node:   root complex or platform-device RIMT node
 * @id_out: set to the mapping entry's dest_id_base on success
 * @index:  zero-based index into the node's ID mapping array
 *
 * Returns the parent node referenced by the mapping's dest_offset, or
 * NULL when no such mapping exists (unsupported node type, empty mapping
 * array, @index out of range, or a NULL parent reference — a firmware bug).
 *
 * Note: the ID returned is the entry's dest_id_base itself; no input ID
 * is translated here (contrast with rimt_node_map_id()).
 */
static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
					       u32 *id_out, int index)
{
	struct acpi_rimt_platform_device *plat_node;
	u32 id_mapping_offset, num_id_mapping;
	struct acpi_rimt_pcie_rc *pci_node;
	struct acpi_rimt_id_mapping *map;
	struct acpi_rimt_node *parent;

	/* Only RC and platform-device nodes carry ID mapping arrays. */
	if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
		pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
		id_mapping_offset = pci_node->id_mapping_offset;
		num_id_mapping = pci_node->num_id_mappings;
	} else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
		plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
		id_mapping_offset = plat_node->id_mapping_offset;
		num_id_mapping = plat_node->num_id_mappings;
	} else {
		return NULL;
	}

	if (!id_mapping_offset || !num_id_mapping || index >= num_id_mapping)
		return NULL;

	/* id_mapping_offset is relative to the start of @node. */
	map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
			   id_mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->dest_offset) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	/* dest_offset is relative to the start of the RIMT table. */
	parent = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, map->dest_offset);

	if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE ||
	    node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
		*id_out = map->dest_id_base;
		return parent;
	}

	return NULL;
}
336
/*
 * rimt_node_map_id() - Walk the RIMT ID mapping tree starting at @node
 * @node:      node to start the walk from
 * @id_in:     input ID to translate
 * @id_out:    optional; receives the translated ID (or @id_in on failure)
 * @type_mask: RIMT_TYPE_MASK() bitmask of acceptable destination types
 *
 * Repeatedly translates the ID through each node's mapping entries and
 * follows dest_offset to the parent node, until a node whose type is in
 * @type_mask is reached. Returns that node, or NULL if the walk fails at
 * any step; on failure *@id_out (if provided) is set to @id_in unchanged.
 */
static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	struct acpi_rimt_platform_device *plat_node;
	u32 id_mapping_offset, num_id_mapping;
	struct acpi_rimt_pcie_rc *pci_node;
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_rimt_id_mapping *map;
		int i, rc = 0;
		u32 map_id = id;

		/* Reached a node of the requested type: done. */
		if (RIMT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		/* Locate this node type's ID mapping array. */
		if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
			pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
			id_mapping_offset = pci_node->id_mapping_offset;
			num_id_mapping = pci_node->num_id_mappings;
		} else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
			plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
			id_mapping_offset = plat_node->id_mapping_offset;
			num_id_mapping = plat_node->num_id_mappings;
		} else {
			goto fail_map;
		}

		if (!id_mapping_offset || !num_id_mapping)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
				   id_mapping_offset);

		/* Firmware bug! */
		if (!map->dest_offset) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/* Do the ID translation */
		for (i = 0; i < num_id_mapping; i++, map++) {
			rc = rimt_id_map(map, node->type, map_id, &id);
			if (!rc)
				break;
		}

		/* No entry covered map_id: translation failed. */
		if (i == num_id_mapping)
			goto fail_map;

		/* rc == 0 here (loop broke on success), so follow dest_offset. */
		node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
				    rc ? 0 : map->dest_offset);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}
404
/*
 * Resolve the @index'th ID mapping of platform/RC node @node and, if the
 * immediate parent is not already of a type in @type_mask, continue the
 * walk with rimt_node_map_id(). Returns the matching node or NULL.
 */
static struct acpi_rimt_node *rimt_node_map_platform_id(struct acpi_rimt_node *node, u32 *id_out,
							u8 type_mask, int index)
{
	struct acpi_rimt_node *parent;
	u32 mapped_id;

	parent = rimt_node_get_id(node, &mapped_id, index);
	if (!parent)
		return NULL;

	/* Immediate parent already matches: report its base ID directly. */
	if (RIMT_TYPE_MASK(parent->type) & type_mask) {
		if (id_out)
			*id_out = mapped_id;
		return parent;
	}

	return rimt_node_map_id(parent, mapped_id, id_out, type_mask);
}
423
/*
 * pci_for_each_dma_alias() callback: translate one requester-ID alias of
 * the device through the root complex node and bind it to its IOMMU.
 */
static int rimt_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct rimt_pci_alias_info *info = data;
	struct acpi_rimt_node *iommu_node;
	u32 devid;

	iommu_node = rimt_node_map_id(info->node, alias, &devid, RIMT_IOMMU_TYPE);
	return rimt_iommu_xlate(info->dev, iommu_node, devid);
}
433
rimt_plat_iommu_map(struct device * dev,struct acpi_rimt_node * node)434 static int rimt_plat_iommu_map(struct device *dev, struct acpi_rimt_node *node)
435 {
436 struct acpi_rimt_node *parent;
437 int err = -ENODEV, i = 0;
438 u32 deviceid = 0;
439
440 do {
441 parent = rimt_node_map_platform_id(node, &deviceid,
442 RIMT_IOMMU_TYPE,
443 i++);
444
445 if (parent)
446 err = rimt_iommu_xlate(dev, parent, deviceid);
447 } while (parent && !err);
448
449 return err;
450 }
451
rimt_plat_iommu_map_id(struct device * dev,struct acpi_rimt_node * node,const u32 * in_id)452 static int rimt_plat_iommu_map_id(struct device *dev,
453 struct acpi_rimt_node *node,
454 const u32 *in_id)
455 {
456 struct acpi_rimt_node *parent;
457 u32 deviceid;
458
459 parent = rimt_node_map_id(node, *in_id, &deviceid, RIMT_IOMMU_TYPE);
460 if (parent)
461 return rimt_iommu_xlate(dev, parent, deviceid);
462
463 return -ENODEV;
464 }
465
/**
 * rimt_iommu_configure_id - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 * @id_in: optional input id const value pointer
 *
 * For a PCI device, every DMA alias is translated through the root
 * complex node of the device's PCI segment; for a platform device, the
 * RIMT platform-device node matching its ACPI companion is used.
 *
 * Returns: 0 on success, <0 on failure
 */
int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
{
	struct acpi_rimt_node *node;
	int err = -ENODEV;

	if (dev_is_pci(dev)) {
		struct iommu_fwspec *fwspec;
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct rimt_pci_alias_info info = { .dev = dev };

		/* Match the root complex node via the root bus device. */
		node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX, &bus->dev);
		if (!node)
			return -ENODEV;

		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     rimt_pci_iommu_init, &info);

		/* Propagate root complex ATS capability into the fwspec. */
		fwspec = dev_iommu_fwspec_get(dev);
		if (fwspec && rimt_pcie_rc_supports_ats(node))
			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
	} else {
		node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PLAT_DEVICE, dev);
		if (!node)
			return -ENODEV;

		err = id_in ? rimt_plat_iommu_map_id(dev, node, id_in) :
			      rimt_plat_iommu_map(dev, node);
	}

	return err;
}
506
507 #endif
508
riscv_acpi_rimt_init(void)509 void __init riscv_acpi_rimt_init(void)
510 {
511 acpi_status status;
512
513 /* rimt_table will be used at runtime after the rimt init,
514 * so we don't need to call acpi_put_table() to release
515 * the RIMT table mapping.
516 */
517 status = acpi_get_table(ACPI_SIG_RIMT, 0, &rimt_table);
518 if (ACPI_FAILURE(status)) {
519 if (status != AE_NOT_FOUND) {
520 const char *msg = acpi_format_exception(status);
521
522 pr_err("Failed to get table, %s\n", msg);
523 }
524
525 return;
526 }
527 }
528