/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * Author: Jayachandran C Nair <jchandra@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <machine/intr.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/actables.h>

#include <dev/acpica/acpivar.h>

/*
 * Track the next XREF available for ITS groups.
 */
static u_int acpi_its_xref = ACPI_MSI_XREF;

/*
 * Some types of IORT nodes have a set of mappings. Each of them maps
 * a range of device IDs [base..end] from the current node to another
 * node. The corresponding device IDs on the destination node start at
 * outbase.
 */
struct iort_map_entry {
        u_int base;
        u_int end;
        u_int outbase;
        u_int flags;
        u_int out_node_offset;
        struct iort_node *out_node;
};
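
/*
 * Worked example of the mapping above (illustrative values only, not
 * taken from any particular firmware): an entry with base = 0x1000,
 * end = 0x10ff and outbase = 0x20000 translates input ID 0x1020 to
 * output ID 0x20020 on the destination node, i.e. outbase + (id - base).
 * If ACPI_IORT_ID_SINGLE_MAPPING is set in flags, every input ID in
 * [base..end] maps to outbase itself. iort_entry_lookup() below
 * implements this rule.
 */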

/*
 * The ITS group node does not have any outgoing mappings. It has a
 * list of GIC ITS blocks that can handle the device ID. We will
 * store the PIC XREF used by the block and the block's proximity
 * data here, so that both can be retrieved together.
 */
struct iort_its_entry {
        u_int its_id;
        u_int xref;
        int pxm;
};

/*
 * IORT node. Each node has some device-specific data depending on the
 * type of the node. The node can also have a set of mappings, or, in
 * the case of ITS group nodes, a set of ITS entries.
 * The nodes are kept in a TAILQ by type.
 */
struct iort_node {
        TAILQ_ENTRY(iort_node) next;    /* next entry with same type */
        enum AcpiIortNodeType type;     /* ACPI type */
        u_int node_offset;              /* offset in IORT - node ID */
        u_int nentries;                 /* items in array below */
        u_int usecount;                 /* for bookkeeping */
        union {
                ACPI_IORT_ROOT_COMPLEX pci_rc;  /* PCI root complex */
                ACPI_IORT_SMMU smmu;
                ACPI_IORT_SMMU_V3 smmu_v3;
        } data;
        union {
                struct iort_map_entry *mappings;        /* node mappings */
                struct iort_its_entry *its;             /* ITS IDs array */
        } entries;
};

/* Lists for each of the types. */
static TAILQ_HEAD(, iort_node) pci_nodes = TAILQ_HEAD_INITIALIZER(pci_nodes);
static TAILQ_HEAD(, iort_node) smmu_nodes = TAILQ_HEAD_INITIALIZER(smmu_nodes);
static TAILQ_HEAD(, iort_node) its_groups = TAILQ_HEAD_INITIALIZER(its_groups);

/*
 * Look up an ID in the mappings array. If successful, map the input ID
 * to the output ID and return the output node found.
 */
static struct iort_node *
iort_entry_lookup(struct iort_node *node, u_int id, u_int *outid)
{
        struct iort_map_entry *entry;
        int i;

        entry = node->entries.mappings;
        for (i = 0; i < node->nentries; i++, entry++) {
                if (entry->base <= id && id <= entry->end)
                        break;
        }
        if (i == node->nentries)
                return (NULL);
        if ((entry->flags & ACPI_IORT_ID_SINGLE_MAPPING) == 0)
                *outid = entry->outbase + (id - entry->base);
        else
                *outid = entry->outbase;
        return (entry->out_node);
}

/*
 * Map a PCI RID to an SMMU node or an ITS node, based on outtype.
 */
static struct iort_node *
iort_pci_rc_map(u_int seg, u_int rid, u_int outtype, u_int *outid)
{
        struct iort_node *node, *out_node;
        u_int nxtid;

        out_node = NULL;
        TAILQ_FOREACH(node, &pci_nodes, next) {
                if (node->data.pci_rc.PciSegmentNumber != seg)
                        continue;
                out_node = iort_entry_lookup(node, rid, &nxtid);
                if (out_node != NULL)
                        break;
        }

        /* Could not find a PCI RC node with the segment and device ID. */
        if (out_node == NULL)
                return (NULL);

        /* Node can be SMMU or ITS. If SMMU, we need another lookup. */
        if (outtype == ACPI_IORT_NODE_ITS_GROUP &&
            (out_node->type == ACPI_IORT_NODE_SMMU_V3 ||
            out_node->type == ACPI_IORT_NODE_SMMU)) {
                out_node = iort_entry_lookup(out_node, nxtid, &nxtid);
                if (out_node == NULL)
                        return (NULL);
        }

        KASSERT(out_node->type == outtype, ("mapping fail"));
        *outid = nxtid;
        return (out_node);
}
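
/*
 * Example of the two-step translation above, with purely hypothetical
 * IDs: a root complex node may map RID 0x0800 to StreamID 0x30 on an
 * SMMUv3 node, which in turn maps StreamID 0x30 to DeviceID 0x30 on an
 * ITS group. A caller asking for ACPI_IORT_NODE_ITS_GROUP gets the ITS
 * group node and the final DeviceID, while a caller asking for an SMMU
 * type stops after the first lookup and gets the StreamID instead.
 */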

#ifdef notyet
/*
 * Not implemented, map a PCIe device to the SMMU it is associated with.
 */
int
acpi_iort_map_smmu(u_int seg, u_int devid, void **smmu, u_int *sid)
{
        /* XXX: convert oref to SMMU device */
        return (ENXIO);
}
#endif

/*
 * Allocate memory for the node's mapping array and copy the ID mappings
 * from the IORT entry into it. The output references are kept as table
 * offsets here and are resolved to node pointers later.
 */
static void
iort_copy_data(struct iort_node *node, ACPI_IORT_NODE *node_entry)
{
        ACPI_IORT_ID_MAPPING *map_entry;
        struct iort_map_entry *mapping;
        int i;

        map_entry = ACPI_ADD_PTR(ACPI_IORT_ID_MAPPING, node_entry,
            node_entry->MappingOffset);
        node->nentries = node_entry->MappingCount;
        node->usecount = 0;
        mapping = malloc(sizeof(*mapping) * node->nentries, M_DEVBUF,
            M_WAITOK | M_ZERO);
        node->entries.mappings = mapping;
        for (i = 0; i < node->nentries; i++, mapping++, map_entry++) {
                mapping->base = map_entry->InputBase;
                mapping->end = map_entry->InputBase + map_entry->IdCount - 1;
                mapping->outbase = map_entry->OutputBase;
                mapping->out_node_offset = map_entry->OutputReference;
                mapping->flags = map_entry->Flags;
                mapping->out_node = NULL;
        }
}

/*
 * Allocate and copy an ITS group.
 */
static void
iort_copy_its(struct iort_node *node, ACPI_IORT_NODE *node_entry)
{
        struct iort_its_entry *its;
        ACPI_IORT_ITS_GROUP *itsg_entry;
        UINT32 *id;
        int i;

        itsg_entry = (ACPI_IORT_ITS_GROUP *)node_entry->NodeData;
        node->nentries = itsg_entry->ItsCount;
        node->usecount = 0;
        its = malloc(sizeof(*its) * node->nentries, M_DEVBUF,
            M_WAITOK | M_ZERO);
        node->entries.its = its;
        id = &itsg_entry->Identifiers[0];
        for (i = 0; i < node->nentries; i++, its++, id++) {
                its->its_id = *id;
                its->pxm = -1;
                its->xref = 0;
        }
}

/*
 * Walk the IORT table and add the nodes to the corresponding lists.
 */
static void
iort_add_nodes(ACPI_IORT_NODE *node_entry, u_int node_offset)
{
        ACPI_IORT_ROOT_COMPLEX *pci_rc;
        ACPI_IORT_SMMU *smmu;
        ACPI_IORT_SMMU_V3 *smmu_v3;
        struct iort_node *node;

        node = malloc(sizeof(*node), M_DEVBUF, M_WAITOK | M_ZERO);
        node->type = node_entry->Type;
        node->node_offset = node_offset;

        /* Copy node data depending on type. */
        switch (node_entry->Type) {
        case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
                pci_rc = (ACPI_IORT_ROOT_COMPLEX *)node_entry->NodeData;
                memcpy(&node->data.pci_rc, pci_rc, sizeof(*pci_rc));
                iort_copy_data(node, node_entry);
                TAILQ_INSERT_TAIL(&pci_nodes, node, next);
                break;
        case ACPI_IORT_NODE_SMMU:
                smmu = (ACPI_IORT_SMMU *)node_entry->NodeData;
                memcpy(&node->data.smmu, smmu, sizeof(*smmu));
                iort_copy_data(node, node_entry);
                TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
                break;
        case ACPI_IORT_NODE_SMMU_V3:
                smmu_v3 = (ACPI_IORT_SMMU_V3 *)node_entry->NodeData;
                memcpy(&node->data.smmu_v3, smmu_v3, sizeof(*smmu_v3));
                iort_copy_data(node, node_entry);
                TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
                break;
        case ACPI_IORT_NODE_ITS_GROUP:
                iort_copy_its(node, node_entry);
                TAILQ_INSERT_TAIL(&its_groups, node, next);
                break;
        default:
                printf("ACPI: IORT: Dropping unhandled type %u\n",
                    node_entry->Type);
                free(node, M_DEVBUF);
                break;
        }
}
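
/*
 * Note that the OutputReference of an ID mapping is the byte offset of
 * the destination node within the IORT, which is why struct iort_node
 * records node_offset as its identifier. iort_resolve_node() below
 * converts these references into node pointers by matching
 * out_node_offset against node_offset.
 */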

/*
 * For the given mapping entry, walk through all possible destination
 * nodes and resolve the output reference.
 */
static void
iort_resolve_node(struct iort_map_entry *entry, int check_smmu)
{
        struct iort_node *node, *np;

        node = NULL;
        if (check_smmu) {
                TAILQ_FOREACH(np, &smmu_nodes, next) {
                        if (entry->out_node_offset == np->node_offset) {
                                node = np;
                                break;
                        }
                }
        }
        if (node == NULL) {
                TAILQ_FOREACH(np, &its_groups, next) {
                        if (entry->out_node_offset == np->node_offset) {
                                node = np;
                                break;
                        }
                }
        }
        if (node != NULL) {
                node->usecount++;
                entry->out_node = node;
        } else {
                printf("ACPI: IORT: Firmware Bug: no mapping for node %u\n",
                    entry->out_node_offset);
        }
}

/*
 * Resolve all output node references to node pointers.
 */
static void
iort_post_process_mappings(void)
{
        struct iort_node *node;
        int i;

        TAILQ_FOREACH(node, &pci_nodes, next)
                for (i = 0; i < node->nentries; i++)
                        iort_resolve_node(&node->entries.mappings[i], TRUE);
        TAILQ_FOREACH(node, &smmu_nodes, next)
                for (i = 0; i < node->nentries; i++)
                        iort_resolve_node(&node->entries.mappings[i], FALSE);
        /* TODO: named nodes */
}

/*
 * Walk the MADT and assign PIC xrefs to all ITS entries.
 */
static void
madt_resolve_its_xref(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
        ACPI_MADT_GENERIC_TRANSLATOR *gict;
        struct iort_node *its_node;
        struct iort_its_entry *its_entry;
        u_int xref;
        int i, matches;

        if (entry->Type != ACPI_MADT_TYPE_GENERIC_TRANSLATOR)
                return;

        gict = (ACPI_MADT_GENERIC_TRANSLATOR *)entry;
        matches = 0;
        xref = acpi_its_xref++;
        TAILQ_FOREACH(its_node, &its_groups, next) {
                its_entry = its_node->entries.its;
                for (i = 0; i < its_node->nentries; i++, its_entry++) {
                        if (its_entry->its_id == gict->TranslationId) {
                                its_entry->xref = xref;
                                matches++;
                        }
                }
        }
        if (matches == 0)
                printf("ACPI: IORT: Unused ITS block, ID %u\n",
                    gict->TranslationId);
}

/*
 * Walk the SRAT and assign proximity domains to all ITS entries.
 */
static void
srat_resolve_its_pxm(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
        ACPI_SRAT_GIC_ITS_AFFINITY *gicits;
        struct iort_node *its_node;
        struct iort_its_entry *its_entry;
        int i, matches;

        if (entry->Type != ACPI_SRAT_TYPE_GIC_ITS_AFFINITY)
                return;

        matches = 0;
        gicits = (ACPI_SRAT_GIC_ITS_AFFINITY *)entry;
        TAILQ_FOREACH(its_node, &its_groups, next) {
                its_entry = its_node->entries.its;
                for (i = 0; i < its_node->nentries; i++, its_entry++) {
                        if (its_entry->its_id == gicits->ItsId) {
                                its_entry->pxm = acpi_map_pxm_to_vm_domainid(
                                    gicits->ProximityDomain);
                                matches++;
                        }
                }
        }
        if (matches == 0)
                printf("ACPI: IORT: ITS block %u in SRAT not found in IORT!\n",
                    gicits->ItsId);
}

/*
 * Cross-check the ITS IDs with the MADT and (if available) the SRAT.
 */
static int
iort_post_process_its(void)
{
        ACPI_TABLE_MADT *madt;
        ACPI_TABLE_SRAT *srat;
        vm_paddr_t madt_pa, srat_pa;

        /* Check the ITS blocks in the MADT. */
        madt_pa = acpi_find_table(ACPI_SIG_MADT);
        KASSERT(madt_pa != 0, ("no MADT!"));
        madt = acpi_map_table(madt_pa, ACPI_SIG_MADT);
        KASSERT(madt != NULL, ("can't map MADT!"));
        acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
            madt_resolve_its_xref, NULL);
        acpi_unmap_table(madt);

        /* Get proximity data if available. */
        srat_pa = acpi_find_table(ACPI_SIG_SRAT);
        if (srat_pa != 0) {
                srat = acpi_map_table(srat_pa, ACPI_SIG_SRAT);
                KASSERT(srat != NULL, ("can't map SRAT!"));
                acpi_walk_subtables(srat + 1,
                    (char *)srat + srat->Header.Length,
                    srat_resolve_its_pxm, NULL);
                acpi_unmap_table(srat);
        }
        return (0);
}

/*
 * Find, parse, and save the IO Remapping Table ("IORT").
 */
static int
acpi_parse_iort(void *dummy __unused)
{
        ACPI_TABLE_IORT *iort;
        ACPI_IORT_NODE *node_entry;
        vm_paddr_t iort_pa;
        u_int node_offset;

        iort_pa = acpi_find_table(ACPI_SIG_IORT);
        if (iort_pa == 0)
                return (ENXIO);

        iort = acpi_map_table(iort_pa, ACPI_SIG_IORT);
        if (iort == NULL) {
                printf("ACPI: Unable to map the IORT table!\n");
                return (ENXIO);
        }
        for (node_offset = iort->NodeOffset;
            node_offset < iort->Header.Length;
            node_offset += node_entry->Length) {
                node_entry = ACPI_ADD_PTR(ACPI_IORT_NODE, iort, node_offset);
                iort_add_nodes(node_entry, node_offset);
        }
        acpi_unmap_table(iort);
        iort_post_process_mappings();
        iort_post_process_its();
        return (0);
}
SYSINIT(acpi_parse_iort, SI_SUB_DRIVERS, SI_ORDER_FIRST, acpi_parse_iort, NULL);

/*
 * Provide the ITS ID to PIC xref mapping.
 */
int
acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm)
{
        struct iort_node *its_node;
        struct iort_its_entry *its_entry;
        int i;

        TAILQ_FOREACH(its_node, &its_groups, next) {
                its_entry = its_node->entries.its;
                for (i = 0; i < its_node->nentries; i++, its_entry++) {
                        if (its_entry->its_id == its_id) {
                                *xref = its_entry->xref;
                                *pxm = its_entry->pxm;
                                return (0);
                        }
                }
        }
        return (ENOENT);
}
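
/*
 * Usage sketch for the lookup above (hypothetical caller; 'its_id' is
 * the GIC ITS translation ID the caller already knows, e.g. from its
 * own MADT entry):
 *
 *	u_int xref;
 *	int domain;
 *
 *	if (acpi_iort_its_lookup(its_id, &xref, &domain) == 0) {
 *		// 'xref' is the interrupt controller XREF assigned to
 *		// this ITS block; 'domain' is its VM domain, or -1 if
 *		// the SRAT did not provide proximity information.
 *	}
 */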

/*
 * Find the mapping for a PCIe device, given its segment and device ID
 * (RID). Returns the XREF for MSI interrupt setup and the device ID to
 * use for the interrupt setup.
 */
int
acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid)
{
        struct iort_node *node;

        node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_ITS_GROUP, devid);
        if (node == NULL)
                return (ENOENT);

        /* This should be an ITS node. */
        KASSERT(node->type == ACPI_IORT_NODE_ITS_GROUP, ("bad group"));

        /* Return the first entry, we don't handle more than that now. */
        *xref = node->entries.its[0].xref;
        return (0);
}
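
/*
 * Usage sketch for the mapping above (hypothetical caller, e.g. an MSI
 * allocation path; 'seg' is the PCI segment/domain and 'rid' the
 * requester ID of the device):
 *
 *	u_int xref, devid;
 *
 *	if (acpi_iort_map_pci_msi(seg, rid, &xref, &devid) == 0) {
 *		// 'xref' identifies the ITS to hand the MSI request to
 *		// and 'devid' is the DeviceID the ITS expects for this
 *		// requester.
 *	}
 */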