// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024-2025, Ventana Micro Systems Inc
 * Author: Sunil V L <sunilvl@ventanamicro.com>
 *
 */

#define pr_fmt(fmt) "ACPI: RIMT: " fmt

#include <linux/acpi.h>
#include <linux/acpi_rimt.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include "init.h"

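/*
 * One entry per registered IOMMU, linking a RIMT IOMMU node to the
 * fwnode handle of the corresponding IOMMU device.
 */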
struct rimt_fwnode {
	struct list_head list;
	struct acpi_rimt_node *rimt_node;
	struct fwnode_handle *fwnode;
};

static LIST_HEAD(rimt_fwnode_list);
static DEFINE_SPINLOCK(rimt_fwnode_lock);

#define RIMT_TYPE_MASK(type)	(1 << (type))
#define RIMT_IOMMU_TYPE		BIT(0)

/* Root pointer to the mapped RIMT table */
static struct acpi_table_header *rimt_table;

/**
 * rimt_set_fwnode() - Create rimt_fwnode and use it to register
 *		       iommu data in the rimt_fwnode_list
 *
 * @rimt_node: RIMT table node associated with the IOMMU
 * @fwnode: fwnode associated with the RIMT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
			   struct fwnode_handle *fwnode)
{
	struct rimt_fwnode *np;

	np = kzalloc(sizeof(*np), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->rimt_node = rimt_node;
	np->fwnode = fwnode;

	spin_lock(&rimt_fwnode_lock);
	list_add_tail(&np->list, &rimt_fwnode_list);
	spin_unlock(&rimt_fwnode_lock);

	return 0;
}

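/*
 * Check whether a RIMT node describes the device passed as context:
 * IOMMU nodes are matched by PCIe segment/BDF or MMIO base address,
 * PCIe root complex nodes by PCI segment number, and platform device
 * nodes by the full pathname of the device's ACPI companion.
 */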
static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
					    void *context)
{
	acpi_status status = AE_NOT_FOUND;
	struct device *dev = context;

	if (node->type == ACPI_RIMT_NODE_TYPE_IOMMU) {
		struct acpi_rimt_iommu *iommu_node = (struct acpi_rimt_iommu *)&node->node_data;

		if (dev_is_pci(dev)) {
			struct pci_dev *pdev;
			u16 bdf;

			pdev = to_pci_dev(dev);
			bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
			if ((pci_domain_nr(pdev->bus) == iommu_node->pcie_segment_number) &&
			    bdf == iommu_node->pcie_bdf) {
				status = AE_OK;
			} else {
				status = AE_NOT_FOUND;
			}
		} else {
			struct platform_device *pdev = to_platform_device(dev);
			struct resource *res;

			res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
			if (res && res->start == iommu_node->base_address)
				status = AE_OK;
			else
				status = AE_NOT_FOUND;
		}
	} else if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
		struct acpi_rimt_pcie_rc *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * with root complexes. Each segment number can represent
		 * only one root complex.
		 */
		status = pci_rc->pcie_segment_number == pci_domain_nr(bus) ?
			 AE_OK : AE_NOT_FOUND;
	} else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_rimt_platform_device *ncomp;
		struct device *plat_dev = dev;
		struct acpi_device *adev;

		/*
		 * Walk the device tree to find a device with an
		 * ACPI companion; there is no point in scanning
		 * RIMT for a device matching a platform device if
		 * the device does not have an ACPI companion to
		 * start with.
		 */
		do {
			adev = ACPI_COMPANION(plat_dev);
			if (adev)
				break;

			plat_dev = plat_dev->parent;
		} while (plat_dev);

		if (!adev)
			return status;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(plat_dev, "Can't get device full path name\n");
			return status;
		}

		ncomp = (struct acpi_rimt_platform_device *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
			 AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	}

	return status;
}

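/*
 * Walk all nodes in the RIMT and return the first node of the given
 * type that matches the device passed as context, or NULL if none.
 */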
static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
					     void *context)
{
	struct acpi_rimt_node *rimt_node, *rimt_end;
	struct acpi_table_rimt *rimt;
	int i;

	if (!rimt_table)
		return NULL;

	/* Get the first RIMT node */
	rimt = (struct acpi_table_rimt *)rimt_table;
	rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt,
				 rimt->node_offset);
	rimt_end = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
				rimt_table->length);

	for (i = 0; i < rimt->num_nodes; i++) {
		if (WARN_TAINT(rimt_node >= rimt_end, TAINT_FIRMWARE_WORKAROUND,
			       "RIMT node pointer overflows, bad table!\n"))
			return NULL;

		if (rimt_node->type == type &&
		    ACPI_SUCCESS(rimt_match_node_callback(rimt_node, context)))
			return rimt_node;

		rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_node,
					 rimt_node->length);
	}

	return NULL;
}

/**
 * rimt_iommu_register() - Register an IOMMU with the RIMT module
 *
 * @dev: IOMMU device (PCI or platform device)
 *
 * RISC-V supports an IOMMU as either a PCI device or a platform device.
 * When it is a platform device, there should be a namespace device as
 * well along with RIMT. To create the link between the RIMT information
 * and the platform device, the IOMMU driver should register itself with
 * the RIMT module. This is true for a PCI-based IOMMU as well.
 *
 * Returns: 0 on success, <0 on failure
 */
int rimt_iommu_register(struct device *dev)
{
	struct fwnode_handle *rimt_fwnode;
	struct acpi_rimt_node *node;

	node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
	if (!node) {
		pr_err("Could not find IOMMU node in RIMT\n");
		return -ENODEV;
	}

	if (dev_is_pci(dev)) {
		rimt_fwnode = acpi_alloc_fwnode_static();
		if (!rimt_fwnode)
			return -ENOMEM;

		rimt_fwnode->dev = dev;
		if (!dev->fwnode)
			dev->fwnode = rimt_fwnode;

		rimt_set_fwnode(node, rimt_fwnode);
	} else {
		rimt_set_fwnode(node, dev->fwnode);
	}

	return 0;
}

#ifdef CONFIG_IOMMU_API

/**
 * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
 *
 * @node: RIMT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
{
	struct fwnode_handle *fwnode = NULL;
	struct rimt_fwnode *curr;

	spin_lock(&rimt_fwnode_lock);
	list_for_each_entry(curr, &rimt_fwnode_list, list) {
		if (curr->rimt_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&rimt_fwnode_lock);

	return fwnode;
}

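/* Check whether a PCIe root complex node advertises ATS support. */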
static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
{
	struct acpi_rimt_pcie_rc *pci_rc;

	pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
	return pci_rc->flags & ACPI_RIMT_PCIE_ATS_SUPPORTED;
}

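/*
 * Initialize the IOMMU fwspec of @dev with the fwnode registered for
 * the given RIMT IOMMU node. Returns -EPROBE_DEFER if the IOMMU has
 * not registered its fwnode yet.
 */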
static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32 deviceid)
{
	struct fwnode_handle *rimt_fwnode;

	if (!node)
		return -ENODEV;

	rimt_fwnode = rimt_get_fwnode(node);

	/*
	 * The IOMMU drivers may not be probed yet.
	 * Defer the IOMMU configuration
	 */
	if (!rimt_fwnode)
		return -EPROBE_DEFER;

	return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode);
}

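/* Context passed to the pci_for_each_dma_alias() callback below. */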
struct rimt_pci_alias_info {
	struct device *dev;
	struct acpi_rimt_node *node;
	const struct iommu_ops *ops;
};

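/*
 * Translate an input ID to an output ID through a single RIMT ID
 * mapping entry. Returns -ENXIO if the input ID is outside the range
 * covered by the mapping.
 */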
static int rimt_id_map(struct acpi_rimt_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out)
{
	if (rid_in < map->source_id_base ||
	    (rid_in > map->source_id_base + map->num_ids))
		return -ENXIO;

	*rid_out = map->dest_id_base + (rid_in - map->source_id_base);
	return 0;
}

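/*
 * For a PCIe root complex or platform device node, return the parent
 * node referenced by the ID mapping at @index and report the base
 * destination ID of that mapping through @id_out.
 */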
static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
					       u32 *id_out, int index)
{
	struct acpi_rimt_platform_device *plat_node;
	u32 id_mapping_offset, num_id_mapping;
	struct acpi_rimt_pcie_rc *pci_node;
	struct acpi_rimt_id_mapping *map;
	struct acpi_rimt_node *parent;

	if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
		pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
		id_mapping_offset = pci_node->id_mapping_offset;
		num_id_mapping = pci_node->num_id_mappings;
	} else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
		plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
		id_mapping_offset = plat_node->id_mapping_offset;
		num_id_mapping = plat_node->num_id_mappings;
	} else {
		return NULL;
	}

	if (!id_mapping_offset || !num_id_mapping || index >= num_id_mapping)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
			   id_mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->dest_offset) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, map->dest_offset);

	if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE ||
	    node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
		*id_out = map->dest_id_base;
		return parent;
	}

	return NULL;
}

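/*
 * Walk the ID mapping tree starting at @node, translating @id_in at
 * each level, until a node whose type is set in @type_mask is found.
 * On failure, the input ID is reported unchanged through @id_out and
 * NULL is returned.
 */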
static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	struct acpi_rimt_platform_device *plat_node;
	u32 id_mapping_offset, num_id_mapping;
	struct acpi_rimt_pcie_rc *pci_node;
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_rimt_id_mapping *map;
		int i, rc = 0;
		u32 map_id = id;

		if (RIMT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
			pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
			id_mapping_offset = pci_node->id_mapping_offset;
			num_id_mapping = pci_node->num_id_mappings;
		} else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
			plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
			id_mapping_offset = plat_node->id_mapping_offset;
			num_id_mapping = plat_node->num_id_mappings;
		} else {
			goto fail_map;
		}

		if (!id_mapping_offset || !num_id_mapping)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
				   id_mapping_offset);

		/* Firmware bug! */
		if (!map->dest_offset) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/* Do the ID translation */
		for (i = 0; i < num_id_mapping; i++, map++) {
			rc = rimt_id_map(map, node->type, map_id, &id);
			if (!rc)
				break;
		}

		if (i == num_id_mapping)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
				    rc ? 0 : map->dest_offset);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}

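/*
 * Resolve the parent of the requested type for a node using the ID
 * mapping at @index, translating the device ID along the way.
 */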
static struct acpi_rimt_node *rimt_node_map_platform_id(struct acpi_rimt_node *node, u32 *id_out,
							 u8 type_mask, int index)
{
	struct acpi_rimt_node *parent;
	u32 id;

	parent = rimt_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	if (!(RIMT_TYPE_MASK(parent->type) & type_mask))
		parent = rimt_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}

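/* pci_for_each_dma_alias() callback: map one PCI alias to its IOMMU. */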
static int rimt_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct rimt_pci_alias_info *info = data;
	struct acpi_rimt_node *parent;
	u32 deviceid;

	parent = rimt_node_map_id(info->node, alias, &deviceid, RIMT_IOMMU_TYPE);
	return rimt_iommu_xlate(info->dev, parent, deviceid);
}

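/*
 * Map a platform device to its IOMMU(s) by walking every ID mapping of
 * the corresponding RIMT platform device node.
 */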
static int rimt_plat_iommu_map(struct device *dev, struct acpi_rimt_node *node)
{
	struct acpi_rimt_node *parent;
	int err = -ENODEV, i = 0;
	u32 deviceid = 0;

	do {
		parent = rimt_node_map_platform_id(node, &deviceid,
						   RIMT_IOMMU_TYPE,
						   i++);

		if (parent)
			err = rimt_iommu_xlate(dev, parent, deviceid);
	} while (parent && !err);

	return err;
}

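/* Map a platform device with a known input ID to its IOMMU. */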
static int rimt_plat_iommu_map_id(struct device *dev,
				  struct acpi_rimt_node *node,
				  const u32 *in_id)
{
	struct acpi_rimt_node *parent;
	u32 deviceid;

	parent = rimt_node_map_id(node, *in_id, &deviceid, RIMT_IOMMU_TYPE);
	if (parent)
		return rimt_iommu_xlate(dev, parent, deviceid);

	return -ENODEV;
}

/**
 * rimt_iommu_configure_id() - Set up IOMMU configuration for a device.
 *
 * @dev: device to configure
 * @id_in: optional input id const value pointer
 *
 * Returns: 0 on success, <0 on failure
 */
int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
{
	struct acpi_rimt_node *node;
	int err = -ENODEV;

	if (dev_is_pci(dev)) {
		struct iommu_fwspec *fwspec;
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct rimt_pci_alias_info info = { .dev = dev };

		node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX, &bus->dev);
		if (!node)
			return -ENODEV;

		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     rimt_pci_iommu_init, &info);

		fwspec = dev_iommu_fwspec_get(dev);
		if (fwspec && rimt_pcie_rc_supports_ats(node))
			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
	} else {
		node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PLAT_DEVICE, dev);
		if (!node)
			return -ENODEV;

		err = id_in ? rimt_plat_iommu_map_id(dev, node, id_in) :
			      rimt_plat_iommu_map(dev, node);
	}

	return err;
}

#endif

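/* Locate the RIMT and cache its mapping for later lookups. */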
void __init riscv_acpi_rimt_init(void)
{
	acpi_status status;

	/*
	 * rimt_table will be used at runtime after the rimt init,
	 * so we don't need to call acpi_put_table() to release
	 * the RIMT table mapping.
	 */
	status = acpi_get_table(ACPI_SIG_RIMT, 0, &rimt_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}
}