/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * Author: Jayachandran C Nair <jchandra@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <machine/intr.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/actables.h>

#include <dev/acpica/acpivar.h>

/*
 * Track the next XREF available for ITS groups.
 */
static u_int acpi_its_xref = ACPI_MSI_XREF;

/*
 * Some types of IORT nodes have a set of mappings.  Each of them maps
 * a range of device IDs [base..end] from the current node to another
 * node.  The corresponding device IDs on the destination node start at
 * outbase.
 */
struct iort_map_entry {
	u_int			base;
	u_int			end;
	u_int			outbase;
	u_int			flags;
	u_int			out_node_offset;
	struct iort_node	*out_node;
};

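/*
 * For example (illustrative values only), an entry with base 0x0000,
 * end 0x00ff and outbase 0x10000 translates input ID 0x0042 to
 * 0x10000 + (0x0042 - 0x0000) = 0x10042 on the destination node.  If
 * ACPI_IORT_ID_SINGLE_MAPPING is set in flags, every input ID in
 * [base..end] translates to outbase itself; see iort_entry_lookup().
 */
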
/*
 * The ITS group node does not have any outgoing mappings.  It has a
 * list of GIC ITS blocks which can handle the device ID.  We will
 * store the PIC XREF used by the block and the block's proximity
 * data here, so that they can be retrieved together.
 */
struct iort_its_entry {
	u_int			its_id;
	u_int			xref;
	int			pxm;
};

struct iort_named_component {
	UINT32			NodeFlags;
	UINT64			MemoryProperties;
	UINT8			MemoryAddressLimit;
	char			DeviceName[32];	/* Path of namespace object */
};

/*
 * IORT node.  Each node has some device-specific data depending on the
 * type of the node.  The node can also have a set of mappings or, in
 * the case of ITS group nodes, a set of ITS entries.
 * The nodes are kept in a TAILQ by type.
 */
struct iort_node {
	TAILQ_ENTRY(iort_node)	next;		/* next entry with same type */
	enum AcpiIortNodeType	type;		/* ACPI type */
	u_int			node_offset;	/* offset in IORT - node ID */
	u_int			nentries;	/* items in array below */
	u_int			usecount;	/* for bookkeeping */
	u_int			revision;	/* node revision */
	union {
		ACPI_IORT_ROOT_COMPLEX		pci_rc;	/* PCI root complex */
		ACPI_IORT_SMMU			smmu;
		ACPI_IORT_SMMU_V3		smmu_v3;
		struct iort_named_component	named_comp;
	} data;
	union {
		struct iort_map_entry	*mappings;	/* node mappings */
		struct iort_its_entry	*its;		/* ITS IDs array */
	} entries;
};

/* Lists for each of the types. */
static TAILQ_HEAD(, iort_node) pci_nodes = TAILQ_HEAD_INITIALIZER(pci_nodes);
static TAILQ_HEAD(, iort_node) smmu_nodes = TAILQ_HEAD_INITIALIZER(smmu_nodes);
static TAILQ_HEAD(, iort_node) its_groups = TAILQ_HEAD_INITIALIZER(its_groups);
static TAILQ_HEAD(, iort_node) named_nodes = TAILQ_HEAD_INITIALIZER(named_nodes);

static int
iort_entry_get_id_mapping_index(struct iort_node *node)
{

	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		/* The ID mapping field was added in revision 1. */
		if (node->revision < 1)
			return (-1);

		/*
		 * If all the control interrupts are GSIV based, the ID
		 * mapping field is ignored.
		 */
		if (node->data.smmu_v3.EventGsiv != 0 &&
		    node->data.smmu_v3.PriGsiv != 0 &&
		    node->data.smmu_v3.GerrGsiv != 0 &&
		    node->data.smmu_v3.SyncGsiv != 0)
			return (-1);

		if (node->data.smmu_v3.IdMappingIndex >= node->nentries)
			return (-1);

		return (node->data.smmu_v3.IdMappingIndex);
	case ACPI_IORT_NODE_PMCG:
		return (0);
	default:
		break;
	}

	return (-1);
}

/*
 * Look up an ID in the mappings array.  If successful, map the input ID
 * to the output ID and return the output node found.
 */
static struct iort_node *
iort_entry_lookup(struct iort_node *node, u_int id, u_int *outid)
{
	struct iort_map_entry *entry;
	int i, id_map;

	id_map = iort_entry_get_id_mapping_index(node);
	entry = node->entries.mappings;
	for (i = 0; i < node->nentries; i++, entry++) {
		if (i == id_map)
			continue;
		if (entry->base <= id && id <= entry->end)
			break;
	}
	if (i == node->nentries)
		return (NULL);
	if ((entry->flags & ACPI_IORT_ID_SINGLE_MAPPING) == 0)
		*outid = entry->outbase + (id - entry->base);
	else
		*outid = entry->outbase;
	return (entry->out_node);
}

/*
 * Perform an additional lookup in case the resolved node is an SMMU
 * and the caller asked for an ITS group.
 */
static struct iort_node *
iort_smmu_trymap(struct iort_node *node, u_int outtype, u_int *outid)
{
	/* The original node may not have been found. */
	if (!node)
		return (NULL);

	/* The node can be an SMMU or an ITS.  If SMMU, do another lookup. */
	if (outtype == ACPI_IORT_NODE_ITS_GROUP &&
	    (node->type == ACPI_IORT_NODE_SMMU_V3 ||
	     node->type == ACPI_IORT_NODE_SMMU)) {
		node = iort_entry_lookup(node, *outid, outid);
		if (node == NULL)
			return (NULL);
	}

	KASSERT(node->type == outtype, ("mapping fail"));
	return (node);
}

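/*
 * Taken together, iort_entry_lookup() and iort_smmu_trymap() implement
 * the IORT translation chain: a root complex or named component node
 * maps a device ID either directly to an ITS group or to an SMMU node.
 * In the latter case, when the caller wants an ITS group, the SMMU's
 * own mappings are walked once more to turn the intermediate ID into
 * the final device ID seen by the ITS.
 */
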
/*
 * Map a PCI RID to an SMMU node or an ITS node, based on outtype.
 */
static struct iort_node *
iort_pci_rc_map(u_int seg, u_int rid, u_int outtype, u_int *outid)
{
	struct iort_node *node, *out_node;
	u_int nxtid;

	out_node = NULL;
	TAILQ_FOREACH(node, &pci_nodes, next) {
		if (node->data.pci_rc.PciSegmentNumber != seg)
			continue;
		out_node = iort_entry_lookup(node, rid, &nxtid);
		if (out_node != NULL)
			break;
	}

	out_node = iort_smmu_trymap(out_node, outtype, &nxtid);
	if (out_node)
		*outid = nxtid;

	return (out_node);
}

/*
 * Map a named component node to an SMMU node or an ITS node, based on
 * outtype.
 */
static struct iort_node *
iort_named_comp_map(const char *devname, u_int rid, u_int outtype, u_int *outid)
{
	struct iort_node *node, *out_node;
	u_int nxtid;

	out_node = NULL;
	TAILQ_FOREACH(node, &named_nodes, next) {
		if (strstr(node->data.named_comp.DeviceName, devname) == NULL)
			continue;
		out_node = iort_entry_lookup(node, rid, &nxtid);
		if (out_node != NULL)
			break;
	}

	out_node = iort_smmu_trymap(out_node, outtype, &nxtid);
	if (out_node)
		*outid = nxtid;

	return (out_node);
}

#ifdef notyet
/*
 * Not implemented, map a PCIe device to the SMMU it is associated with.
 */
int
acpi_iort_map_smmu(u_int seg, u_int devid, void **smmu, u_int *sid)
{
	/* XXX: convert oref to SMMU device */
	return (ENXIO);
}
#endif

/*
 * Copy the mappings array of an IORT node into the iort_node.  The
 * output references are stored as offsets and are resolved to node
 * pointers later, in iort_post_process_mappings().
 */
static void
iort_copy_data(struct iort_node *node, ACPI_IORT_NODE *node_entry)
{
	ACPI_IORT_ID_MAPPING *map_entry;
	struct iort_map_entry *mapping;
	int i;

	map_entry = ACPI_ADD_PTR(ACPI_IORT_ID_MAPPING, node_entry,
	    node_entry->MappingOffset);
	node->nentries = node_entry->MappingCount;
	node->usecount = 0;
	mapping = malloc(sizeof(*mapping) * node->nentries, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	node->entries.mappings = mapping;
	for (i = 0; i < node->nentries; i++, mapping++, map_entry++) {
		mapping->base = map_entry->InputBase;
		/*
		 * IdCount is "the number of IDs in the range minus one"
		 * (ARM DEN 0049D), so InputBase + IdCount is already the
		 * last valid ID.  The lookup compares with <=, so do not
		 * subtract one here.
		 */
		mapping->end = map_entry->InputBase + map_entry->IdCount;
		mapping->outbase = map_entry->OutputBase;
		mapping->out_node_offset = map_entry->OutputReference;
		mapping->flags = map_entry->Flags;
		mapping->out_node = NULL;
	}
}

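/*
 * The ITS entries copied below start out with xref 0 and pxm -1; the
 * real values are filled in later by iort_post_process_its(), which
 * walks the MADT (for the PIC xref) and the SRAT (for proximity).
 */
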
/*
 * Allocate and copy an ITS group.
 */
static void
iort_copy_its(struct iort_node *node, ACPI_IORT_NODE *node_entry)
{
	struct iort_its_entry *its;
	ACPI_IORT_ITS_GROUP *itsg_entry;
	UINT32 *id;
	int i;

	itsg_entry = (ACPI_IORT_ITS_GROUP *)node_entry->NodeData;
	node->nentries = itsg_entry->ItsCount;
	node->usecount = 0;
	its = malloc(sizeof(*its) * node->nentries, M_DEVBUF, M_WAITOK | M_ZERO);
	node->entries.its = its;
	id = &itsg_entry->Identifiers[0];
	for (i = 0; i < node->nentries; i++, its++, id++) {
		its->its_id = *id;
		its->pxm = -1;
		its->xref = 0;
	}
}

/*
 * Walk the IORT table and add nodes to the corresponding list.
 */
static void
iort_add_nodes(ACPI_IORT_NODE *node_entry, u_int node_offset)
{
	ACPI_IORT_ROOT_COMPLEX *pci_rc;
	ACPI_IORT_SMMU *smmu;
	ACPI_IORT_SMMU_V3 *smmu_v3;
	ACPI_IORT_NAMED_COMPONENT *named_comp;
	struct iort_node *node;

	node = malloc(sizeof(*node), M_DEVBUF, M_WAITOK | M_ZERO);
	node->type = node_entry->Type;
	node->node_offset = node_offset;
	node->revision = node_entry->Revision;

	/* Copy node data depending on type. */
	switch (node_entry->Type) {
	case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
		pci_rc = (ACPI_IORT_ROOT_COMPLEX *)node_entry->NodeData;
		memcpy(&node->data.pci_rc, pci_rc, sizeof(*pci_rc));
		iort_copy_data(node, node_entry);
		TAILQ_INSERT_TAIL(&pci_nodes, node, next);
		break;
	case ACPI_IORT_NODE_SMMU:
		smmu = (ACPI_IORT_SMMU *)node_entry->NodeData;
		memcpy(&node->data.smmu, smmu, sizeof(*smmu));
		iort_copy_data(node, node_entry);
		TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
		break;
	case ACPI_IORT_NODE_SMMU_V3:
		smmu_v3 = (ACPI_IORT_SMMU_V3 *)node_entry->NodeData;
		memcpy(&node->data.smmu_v3, smmu_v3, sizeof(*smmu_v3));
		iort_copy_data(node, node_entry);
		TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
		break;
	case ACPI_IORT_NODE_ITS_GROUP:
		iort_copy_its(node, node_entry);
		TAILQ_INSERT_TAIL(&its_groups, node, next);
		break;
	case ACPI_IORT_NODE_NAMED_COMPONENT:
		named_comp = (ACPI_IORT_NAMED_COMPONENT *)node_entry->NodeData;
		memcpy(&node->data.named_comp, named_comp, sizeof(*named_comp));

		/* Copy the name of the node separately. */
		strncpy(node->data.named_comp.DeviceName,
		    named_comp->DeviceName,
		    sizeof(node->data.named_comp.DeviceName));
		node->data.named_comp.DeviceName[31] = 0;

		iort_copy_data(node, node_entry);
		TAILQ_INSERT_TAIL(&named_nodes, node, next);
		break;
	default:
		printf("ACPI: IORT: Dropping unhandled type %u\n",
		    node_entry->Type);
		free(node, M_DEVBUF);
		break;
	}
}

/*
 * For the mapping entry given, walk through all the possible
 * destination nodes and resolve the output reference.
 */
static void
iort_resolve_node(struct iort_map_entry *entry, int check_smmu)
{
	struct iort_node *node, *np;

	node = NULL;
	if (check_smmu) {
		TAILQ_FOREACH(np, &smmu_nodes, next) {
			if (entry->out_node_offset == np->node_offset) {
				node = np;
				break;
			}
		}
	}
	if (node == NULL) {
		TAILQ_FOREACH(np, &its_groups, next) {
			if (entry->out_node_offset == np->node_offset) {
				node = np;
				break;
			}
		}
	}
	if (node != NULL) {
		node->usecount++;
		entry->out_node = node;
	} else {
		printf("ACPI: IORT: Firmware Bug: no mapping for node %u\n",
		    entry->out_node_offset);
	}
}

/*
 * Resolve all output node references to node pointers.
 */
static void
iort_post_process_mappings(void)
{
	struct iort_node *node;
	int i;

	TAILQ_FOREACH(node, &pci_nodes, next)
		for (i = 0; i < node->nentries; i++)
			iort_resolve_node(&node->entries.mappings[i], TRUE);
	TAILQ_FOREACH(node, &smmu_nodes, next)
		for (i = 0; i < node->nentries; i++)
			iort_resolve_node(&node->entries.mappings[i], FALSE);
	TAILQ_FOREACH(node, &named_nodes, next)
		for (i = 0; i < node->nentries; i++)
			iort_resolve_node(&node->entries.mappings[i], TRUE);
}

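/*
 * An output reference that cannot be resolved (a firmware bug,
 * reported above) leaves out_node NULL, so iort_entry_lookup() returns
 * NULL for IDs covered by such a mapping and the affected lookups fail
 * with ENOENT instead of following a bad reference.
 */
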
/*
 * Walk the MADT, assign PIC xrefs to all ITS entries.
 */
static void
madt_resolve_its_xref(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_TRANSLATOR *gict;
	struct iort_node *its_node;
	struct iort_its_entry *its_entry;
	u_int xref;
	int i, matches;

	if (entry->Type != ACPI_MADT_TYPE_GENERIC_TRANSLATOR)
		return;

	gict = (ACPI_MADT_GENERIC_TRANSLATOR *)entry;
	matches = 0;
	xref = acpi_its_xref++;
	TAILQ_FOREACH(its_node, &its_groups, next) {
		its_entry = its_node->entries.its;
		for (i = 0; i < its_node->nentries; i++, its_entry++) {
			if (its_entry->its_id == gict->TranslationId) {
				its_entry->xref = xref;
				matches++;
			}
		}
	}
	if (matches == 0)
		printf("ACPI: IORT: Unused ITS block, ID %u\n",
		    gict->TranslationId);
}

/*
 * Walk the SRAT, assign proximity to all ITS entries.
 */
static void
srat_resolve_its_pxm(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_SRAT_GIC_ITS_AFFINITY *gicits;
	struct iort_node *its_node;
	struct iort_its_entry *its_entry;
	int *map_counts;
	int i, matches, dom;

	if (entry->Type != ACPI_SRAT_TYPE_GIC_ITS_AFFINITY)
		return;

	matches = 0;
	map_counts = arg;
	gicits = (ACPI_SRAT_GIC_ITS_AFFINITY *)entry;
	dom = acpi_map_pxm_to_vm_domainid(gicits->ProximityDomain);

	/*
	 * Catch firmware and configuration errors.  map_counts keeps a
	 * count of ProximityDomain values mapping to each domain ID.
	 */
#if MAXMEMDOM > 1
	if (dom == -1)
		printf("Firmware Error: Proximity Domain %d could not be"
		    " mapped for GIC ITS ID %d!\n",
		    gicits->ProximityDomain, gicits->ItsId);
#endif
	/* Use dom + 1 as the index to handle the case where dom == -1. */
	i = ++map_counts[dom + 1];
	if (i > 1) {
#ifdef NUMA
		if (dom != -1)
			printf("ERROR: Multiple Proximity Domains map to the"
			    " same NUMA domain %d!\n", dom);
#else
		printf("WARNING: multiple Proximity Domains in SRAT but NUMA"
		    " NOT enabled!\n");
#endif
	}
	TAILQ_FOREACH(its_node, &its_groups, next) {
		its_entry = its_node->entries.its;
		for (i = 0; i < its_node->nentries; i++, its_entry++) {
			if (its_entry->its_id == gicits->ItsId) {
				its_entry->pxm = dom;
				matches++;
			}
		}
	}
	if (matches == 0)
		printf("ACPI: IORT: ITS block %u in SRAT not found in IORT!\n",
		    gicits->ItsId);
}

/*
 * Cross check the ITS ID with the MADT and (if available) the SRAT.
 */
static int
iort_post_process_its(void)
{
	ACPI_TABLE_MADT *madt;
	ACPI_TABLE_SRAT *srat;
	vm_paddr_t madt_pa, srat_pa;
	int map_counts[MAXMEMDOM + 1] = { 0 };

	/* Check ITS blocks in the MADT. */
	madt_pa = acpi_find_table(ACPI_SIG_MADT);
	KASSERT(madt_pa != 0, ("no MADT!"));
	madt = acpi_map_table(madt_pa, ACPI_SIG_MADT);
	KASSERT(madt != NULL, ("can't map MADT!"));
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_resolve_its_xref, NULL);
	acpi_unmap_table(madt);

	/* Get proximity information if available. */
	srat_pa = acpi_find_table(ACPI_SIG_SRAT);
	if (srat_pa != 0) {
		srat = acpi_map_table(srat_pa, ACPI_SIG_SRAT);
		KASSERT(srat != NULL, ("can't map SRAT!"));
		acpi_walk_subtables(srat + 1, (char *)srat + srat->Header.Length,
		    srat_resolve_its_pxm, map_counts);
		acpi_unmap_table(srat);
	}
	return (0);
}

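/*
 * Once iort_post_process_its() has run, every ITS entry carries the
 * PIC xref of its ITS block (allocated sequentially from ACPI_MSI_XREF
 * in MADT order) and its VM/NUMA domain (or -1 if no usable SRAT
 * information was found).  These are the values handed out by
 * acpi_iort_its_lookup() below.
 */
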
/*
 * Find, parse, and save the IO Remapping Table ("IORT").
 */
static int
acpi_parse_iort(void *dummy __unused)
{
	ACPI_TABLE_IORT *iort;
	ACPI_IORT_NODE *node_entry;
	vm_paddr_t iort_pa;
	u_int node_offset;

	iort_pa = acpi_find_table(ACPI_SIG_IORT);
	if (iort_pa == 0)
		return (ENXIO);

	iort = acpi_map_table(iort_pa, ACPI_SIG_IORT);
	if (iort == NULL) {
		printf("ACPI: Unable to map the IORT table!\n");
		return (ENXIO);
	}
	for (node_offset = iort->NodeOffset;
	    node_offset < iort->Header.Length;
	    node_offset += node_entry->Length) {
		node_entry = ACPI_ADD_PTR(ACPI_IORT_NODE, iort, node_offset);
		iort_add_nodes(node_entry, node_offset);
	}
	acpi_unmap_table(iort);
	iort_post_process_mappings();
	iort_post_process_its();
	return (0);
}
SYSINIT(acpi_parse_iort, SI_SUB_DRIVERS, SI_ORDER_FIRST, acpi_parse_iort, NULL);

/*
 * Provide the ITS ID to PIC xref and proximity mapping.
 */
int
acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm)
{
	struct iort_node *its_node;
	struct iort_its_entry *its_entry;
	int i;

	TAILQ_FOREACH(its_node, &its_groups, next) {
		its_entry = its_node->entries.its;
		for (i = 0; i < its_node->nentries; i++, its_entry++) {
			if (its_entry->its_id == its_id) {
				*xref = its_entry->xref;
				*pxm = its_entry->pxm;
				return (0);
			}
		}
	}
	return (ENOENT);
}

/*
 * Find the mapping for a PCIe device given its segment and requester ID.
 * Returns the XREF to use for MSI interrupt setup and the device ID to
 * use for that setup.
 */
int
acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid)
{
	struct iort_node *node;

	node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_ITS_GROUP, devid);
	if (node == NULL)
		return (ENOENT);

	/* This should be an ITS node. */
	KASSERT(node->type == ACPI_IORT_NODE_ITS_GROUP, ("bad group"));

	/* Return the first node, we don't handle more than that now. */
	*xref = node->entries.its[0].xref;
	return (0);
}

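/*
 * Usage sketch (illustrative only, the caller is hypothetical): a PCI
 * host bridge driver that needs to route MSI/MSI-X interrupts for a
 * function with requester ID 'rid' on segment 'seg' could do:
 *
 *	u_int xref, devid;
 *
 *	if (acpi_iort_map_pci_msi(seg, rid, &xref, &devid) == 0)
 *		... use 'xref' to select the ITS (e.g. through the
 *		    INTRNG MSI interface) and 'devid' as the device ID
 *		    that ITS will see for this function ...
 */
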
int
acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, u_int *xref, u_int *sid)
{
	ACPI_IORT_SMMU_V3 *smmu;
	struct iort_node *node;

	node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_SMMU_V3, sid);
	if (node == NULL)
		return (ENOENT);

	/* This should be an SMMU node. */
	KASSERT(node->type == ACPI_IORT_NODE_SMMU_V3, ("bad node"));

	smmu = (ACPI_IORT_SMMU_V3 *)&node->data.smmu_v3;
	*xref = smmu->BaseAddress;

	return (0);
}

/*
 * Find the mapping for a named component given its name and requester ID.
 * Returns the XREF to use for MSI interrupt setup and the device ID to
 * use for that setup.
 */
int
acpi_iort_map_named_msi(const char *devname, u_int rid, u_int *xref,
    u_int *devid)
{
	struct iort_node *node;

	node = iort_named_comp_map(devname, rid, ACPI_IORT_NODE_ITS_GROUP,
	    devid);
	if (node == NULL)
		return (ENOENT);

	/* This should be an ITS node. */
	KASSERT(node->type == ACPI_IORT_NODE_ITS_GROUP, ("bad group"));

	/* Return the first node, we don't handle more than that now. */
	*xref = node->entries.its[0].xref;
	return (0);
}

int
acpi_iort_map_named_smmuv3(const char *devname, u_int rid, u_int *xref,
    u_int *devid)
{
	ACPI_IORT_SMMU_V3 *smmu;
	struct iort_node *node;

	node = iort_named_comp_map(devname, rid, ACPI_IORT_NODE_SMMU_V3, devid);
	if (node == NULL)
		return (ENOENT);

	/* This should be an SMMU node. */
	KASSERT(node->type == ACPI_IORT_NODE_SMMU_V3, ("bad node"));

	smmu = (ACPI_IORT_SMMU_V3 *)&node->data.smmu_v3;
	*xref = smmu->BaseAddress;

	return (0);
}

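/*
 * Note that, unlike the MSI variants above, the two SMMUv3 helpers
 * return the SMMU's BaseAddress as the xref together with the stream
 * ID the device presents to that SMMU, so a caller would presumably
 * match the xref against the base address of an SMMUv3 instance rather
 * than against a PIC xref.
 */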