// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016, Semihalf
 *	Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	phys_addr_t		base_addr;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
			struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}

/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked-up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
			struct fwnode_handle *fwnode)
{
	struct iort_fwnode *curr;
	struct acpi_iort_node *iort_node = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->fwnode == fwnode) {
			iort_node = curr->iort_node;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return iort_node;
}

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register domain token along with related
 * ITS ID and base address to the list so they can be retrieved later on.
 * @trans_id: ITS ID.
 * @base: ITS base address.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, phys_addr_t base,
			       struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;
	its_msi_chip->base_addr = base;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
		struct acpi_iort_named_component *ncomp;

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * with root complexes. Each segment number can represent only
		 * one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}

struct iort_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

static bool apply_id_count_workaround;

static struct iort_workaround_oem_info wa_info[] __initdata = {
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	}, {
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP08   ",
		.oem_revision	= 0,
	}
};

static void __init
iort_check_id_count_workaround(struct acpi_table_header *tbl)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    wa_info[i].oem_revision == tbl->oem_revision) {
			apply_id_count_workaround = true;
			pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n");
			break;
		}
	}
}

static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map)
{
	u32 map_max = map->input_base + map->id_count;

	/*
	 * The IORT specification revision D (Section 3, table 4, page 9) says
	 * Number of IDs = The number of IDs in the range minus one, but the
	 * IORT code ignored the "minus one", and some firmware did that too,
	 * so apply a workaround here to stay compatible with both the spec
	 * compliant and non-spec compliant firmwares.
	 */
	if (apply_id_count_workaround)
		map_max--;

	return map_max;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base || rid_in > iort_get_map_max(map))
		return -ENXIO;

	*rid_out = map->output_base + (rid_in - map->input_base);
	return 0;
}

static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					       u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
	     index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
		    node->type == ACPI_IORT_NODE_SMMU_V3 ||
		    node->type == ACPI_IORT_NODE_PMCG) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}

static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		/*
		 * SMMUv3 dev ID mapping index was introduced in revision 1
		 * table, not available in revision 0
		 */
		if (node->revision < 1)
			return -EINVAL;

		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
		/*
		 * ID mapping index is only ignored if all interrupts are
		 * GSIV based
		 */
		if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
		    && smmu->sync_gsiv)
			return -EINVAL;

		if (smmu->id_mapping_index >= node->mapping_count) {
			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
			       node, node->type);
			return -EINVAL;
		}

		return smmu->id_mapping_index;
	case ACPI_IORT_NODE_PMCG:
		return 0;
	default:
		return -EINVAL;
	}
}

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i, index;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/*
		 * Get the special ID mapping index (if any) and skip its
		 * associated ID map to prevent erroneous multi-stage
		 * IORT ID translations.
		 */
		index = iort_get_id_mapping_index(node);

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			/* if it is special mapping index, skip it */
			if (i == index)
				continue;

			if (!iort_id_map(map, node->type, id, &id))
				break;
		}

		if (i == node->mapping_count)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}

static struct acpi_iort_node *iort_node_map_platform_id(
		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
		int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev id */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * optional step 2: map the initial dev id if its parent is not
	 * the target type we want; map it again for use cases such
	 * as NC (named component) -> SMMU -> ITS. If the type is matched,
	 * return the initial dev id and its parent pointer directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev)) {
		struct acpi_iort_node *node;
		/*
		 * scan iort_fwnode_list to see if it's an iort platform
		 * device (such as SMMU, PMCG), whose iort node was already
		 * cached and associated with its fwnode when the iort
		 * platform devices were initialized.
		 */
		node = iort_get_iort_node(dev->fwnode);
		if (node)
			return node;

		/*
		 * if not, then it should be a platform device defined in
		 * DSDT/SSDT (with Named Component node in IORT)
		 */
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
	}

	/* Find a PCI root bus */
	pbus = to_pci_dev(dev)->bus;
	while (!pci_is_root_bus(pbus))
		pbus = pbus->parent;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_rid() - Map a MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return req_id;

	iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}

/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 if a device id was found, -ENODEV otherwise
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i, index;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	index = iort_get_id_mapping_index(node);
	/* if there is a valid index, go get the dev_id directly */
	if (index >= 0) {
		if (iort_node_get_id(node, dev_id, index))
			return 0;
	} else {
		for (i = 0; i < node->mapping_count; i++) {
			if (iort_node_map_platform_id(node, dev_id,
						      IORT_MSI_TYPE, i))
				return 0;
		}
	}

	return -ENODEV;
}

static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
	struct iort_its_msi_chip *its_msi_chip;
	int ret = -ENODEV;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == its_id) {
			*base = its_msi_chip->base_addr;
			ret = 0;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return ret;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: Device's requester ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}

static void iort_set_device_domain(struct device *dev,
				   struct acpi_iort_node *node)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *msi_parent;
	struct acpi_iort_id_mapping *map;
	struct fwnode_handle *iort_fwnode;
	struct irq_domain *domain;
	int index;

	index = iort_get_id_mapping_index(node);
	if (index < 0)
		return;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference ||
	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
		       node, node->type);
		return;
	}

	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				  map->output_reference);

	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
		return;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return;

	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
	if (domain)
		dev_set_msi_domain(dev, domain);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent = NULL;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}

static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
					void *data)
{
	u32 *rid = data;

	*rid = alias;
	return 0;
}

#ifdef CONFIG_IOMMU_API
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
	struct acpi_iort_node *iommu;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	iommu = iort_get_iort_node(fwspec->iommu_fwnode);

	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
		struct acpi_iort_smmu_v3 *smmu;

		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
			return iommu;
	}

	return NULL;
}

static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

static inline int iort_add_device_replay(const struct iommu_ops *ops,
					 struct device *dev)
{
	int err = 0;

	if (dev->bus && !device_iommu_mapped(dev))
		err = iommu_probe_device(dev);

	return err;
}

/**
 * iort_iommu_msi_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @head: Reserved region list from iommu_get_resv_regions()
 *
 * Returns: Number of msi reserved regions on success (0 if platform
 *          doesn't require the reservation or no associated msi regions),
 *          appropriate error value otherwise. The ITS interrupt translation
 *          spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
 *          are the msi reserved regions.
 */
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *iommu_node, *its_node = NULL;
	int i, resv = 0;

	iommu_node = iort_get_msi_resv_iommu(dev);
	if (!iommu_node)
		return 0;

	/*
	 * Current logic to reserve ITS regions relies on HW topologies
	 * where a given PCI or named component maps its IDs to only one
	 * ITS group; if a PCI or named component can map its IDs to
	 * different ITS groups through IORT mappings this function has
	 * to be reworked to ensure we reserve regions for all ITS groups
	 * a given PCI or named component may map IDs to.
	 */

	for (i = 0; i < fwspec->num_ids; i++) {
		its_node = iort_node_map_id(iommu_node,
					fwspec->ids[i],
					NULL, IORT_MSI_TYPE);
		if (its_node)
			break;
	}

	if (!its_node)
		return 0;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)its_node->node_data;

	for (i = 0; i < its->its_count; i++) {
		phys_addr_t base;

		if (!iort_find_its_base(its->identifiers[i], &base)) {
			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
			struct iommu_resv_region *region;

			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
							 prot, IOMMU_RESV_MSI);
			if (region) {
				list_add_tail(&region->list, head);
				resv++;
			}
		}
	}

	return (resv == its->its_count) ? resv : -ENODEV;
}

static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_BUILTIN(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}

static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
			       struct fwnode_handle *fwnode,
			       const struct iommu_ops *ops)
{
	int ret = iommu_fwspec_init(dev, fwnode, ops);

	if (!ret)
		ret = iommu_fwspec_add_ids(dev, &streamid, 1);

	return ret;
}

static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
	struct acpi_iort_root_complex *pci_rc;

	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}

static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
			    u32 streamid)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *iort_fwnode;

	if (!node)
		return -ENODEV;

	iort_fwnode = iort_get_fwnode(node);
	if (!iort_fwnode)
		return -ENODEV;

	/*
	 * If the ops look-up fails, this means that either
	 * the SMMU drivers have not been probed yet or that
	 * the SMMU drivers are not built into the kernel.
	 * Depending on whether the SMMU drivers are built-in
	 * or not, defer the IOMMU configuration or just abort it.
	 */
	ops = iommu_ops_from_fwnode(iort_fwnode);
	if (!ops)
		return iort_iommu_driver_enabled(node->type) ?
		       -EPROBE_DEFER : -ENODEV;

	return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
}

struct iort_pci_alias_info {
	struct device *dev;
	struct acpi_iort_node *node;
};

static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct iort_pci_alias_info *info = data;
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(info->node, alias, &streamid,
				  IORT_IOMMU_TYPE);
	return iort_iommu_xlate(info->dev, parent, streamid);
}

/**
 * iort_iommu_configure - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
	struct acpi_iort_node *node, *parent;
	const struct iommu_ops *ops;
	u32 streamid = 0;
	int err = -ENODEV;

	/*
	 * If we already translated the fwspec there
	 * is nothing left to do, return the iommu_ops.
	 */
	ops = iort_fwspec_iommu_ops(dev);
	if (ops)
		return ops;

	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct iort_pci_alias_info info = { .dev = dev };

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return NULL;

		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     iort_pci_iommu_init, &info);

		if (!err && iort_pci_rc_supports_ats(node))
			dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
	} else {
		int i = 0;

		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return NULL;

		do {
			parent = iort_node_map_platform_id(node, &streamid,
							   IORT_IOMMU_TYPE,
							   i++);

			if (parent)
				err = iort_iommu_xlate(dev, parent, streamid);
		} while (parent && !err);
	}

	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	if (!err) {
		ops = iort_fwspec_iommu_ops(dev);
		err = iort_add_device_replay(ops, dev);
	}

	/* Ignore all other errors apart from EPROBE_DEFER */
	if (err == -EPROBE_DEFER) {
		ops = ERR_PTR(err);
	} else if (err) {
		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
		ops = NULL;
	}

	return ops;
}
#else
static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
{ return NULL; }
static inline int iort_add_device_replay(const struct iommu_ops *ops,
					 struct device *dev)
{ return 0; }
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{ return NULL; }
#endif

static int nc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_named_component *ncomp;

	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return -ENODEV;

	ncomp = (struct acpi_iort_named_component *)node->node_data;

	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
			1ULL << ncomp->memory_address_limit;

	return 0;
}

static int rc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_root_complex *rc;
	struct pci_bus *pbus = to_pci_dev(dev)->bus;

	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
	if (!node || node->revision < 1)
		return -ENODEV;

	rc = (struct acpi_iort_root_complex *)node->node_data;

	*size = rc->memory_address_limit >= 64 ? U64_MAX :
			1ULL << rc->memory_address_limit;

	return 0;
}

/**
 * iort_dma_setup() - Set-up device DMA parameters.
 *
 * @dev: device to configure
 * @dma_addr: device DMA address result pointer
 * @dma_size: DMA range size result pointer
 */
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
	u64 end, mask, dmaaddr = 0, size = 0, offset = 0;
	int ret;

	/*
	 * If @dev is expected to be DMA-capable then the bus code that created
	 * it should have initialised its dma_mask pointer by this point. For
	 * now, we'll continue the legacy behaviour of coercing it to the
	 * coherent mask if not, but we'll no longer do so quietly.
	 */
	if (!dev->dma_mask) {
		dev_warn(dev, "DMA mask not set\n");
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	if (dev->coherent_dma_mask)
		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
	else
		size = 1ULL << 32;

	if (dev_is_pci(dev)) {
		ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
		if (ret == -ENODEV)
			ret = rc_dma_get_range(dev, &size);
	} else {
		ret = nc_dma_get_range(dev, &size);
	}

	if (!ret) {
		/*
		 * Limit coherent and dma mask based on size retrieved from
		 * firmware.
		 */
		end = dmaaddr + size - 1;
		mask = DMA_BIT_MASK(ilog2(end) + 1);
		dev->bus_dma_limit = end;
		dev->coherent_dma_mask = mask;
		*dev->dma_mask = mask;
	}

	*dma_addr = dmaaddr;
	*dma_size = size;

	dev->dma_pfn_offset = PFN_DOWN(offset);
	dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
		       name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Cavium ThunderX2 implementation doesn't support unique irq lines.
	 * Use a single irq line for all the SMMUv3 interrupts.
	 */
	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return false;

	/*
	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
	 * SPI numbers here.
	 */
	return smmu->event_gsiv == smmu->pri_gsiv &&
	       smmu->event_gsiv == smmu->gerr_gsiv &&
	       smmu->event_gsiv == smmu->sync_gsiv;
}

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Override the size, for Cavium ThunderX2 implementation
	 * which doesn't support the page 1 SMMU register space.
	 */
	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return SZ_64K;

	return SZ_128K;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address +
				arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;
	if (arm_smmu_v3_is_combined_irq(smmu)) {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {

		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}

static void __init arm_smmu_v3_dma_configure(struct device *dev,
					     struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}

#if defined(CONFIG_ACPI_NUMA)
/*
 * set numa proximity domain for smmuv3 device
 */
static int __init arm_smmu_v3_set_proximity(struct device *dev,
					    struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
		int dev_node = acpi_map_pxm_to_node(smmu->pxm);

		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
			return -EINVAL;

		set_dev_node(dev, dev_node);
		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
			smmu->base_address,
			smmu->pxm);
	}
	return 0;
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static void __init arm_smmu_dma_configure(struct device *dev,
					  struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for SMMU set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}

static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_pmcg *pmcg;

	/* Retrieve PMCG specific data */
	pmcg = (struct acpi_iort_pmcg *)node->node_data;

	/*
	 * There are always 2 memory resources.
	 * If the overflow_gsiv is present then add that for a total of 3.
	 */
	return pmcg->overflow_gsiv ? 3 : 2;
}

static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
						   struct acpi_iort_node *node)
{
	struct acpi_iort_pmcg *pmcg;

	/* Retrieve PMCG specific data */
	pmcg = (struct acpi_iort_pmcg *)node->node_data;

	res[0].start = pmcg->page0_base_address;
	res[0].end = pmcg->page0_base_address + SZ_4K - 1;
	res[0].flags = IORESOURCE_MEM;
	res[1].start = pmcg->page1_base_address;
	res[1].end = pmcg->page1_base_address + SZ_4K - 1;
	res[1].flags = IORESOURCE_MEM;

	if (pmcg->overflow_gsiv)
		acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
				       ACPI_EDGE_SENSITIVE, &res[2]);
}

static struct acpi_platform_list pmcg_plat_info[] __initdata = {
	/* HiSilicon Hip08 Platform */
	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
	 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
	{ }
};

static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
{
	u32 model;
	int idx;

	idx = acpi_match_platform_list(pmcg_plat_info);
	if (idx >= 0)
		model = pmcg_plat_info[idx].data;
	else
		model = IORT_SMMU_V3_PMCG_GENERIC;

	return platform_device_add_data(pdev, &model, sizeof(model));
}

struct iort_dev_config {
	const char *name;
	int (*dev_init)(struct acpi_iort_node *node);
	void (*dev_dma_configure)(struct device *dev,
				  struct acpi_iort_node *node);
	int (*dev_count_resources)(struct acpi_iort_node *node);
	void (*dev_init_resources)(struct resource *res,
				   struct acpi_iort_node *node);
	int (*dev_set_proximity)(struct device *dev,
				 struct acpi_iort_node *node);
	int (*dev_add_platdata)(struct platform_device *pdev);
};

static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.dev_dma_configure = arm_smmu_v3_dma_configure,
	.dev_count_resources = arm_smmu_v3_count_resources,
	.dev_init_resources = arm_smmu_v3_init_resources,
	.dev_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.dev_dma_configure = arm_smmu_dma_configure,
	.dev_count_resources = arm_smmu_count_resources,
	.dev_init_resources = arm_smmu_init_resources,
};

static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
	.name = "arm-smmu-v3-pmcg",
	.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
	.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
	.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
};

static __init const struct iort_dev_config *iort_get_dev_cfg(
			struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	case ACPI_IORT_NODE_PMCG:
		return &iort_arm_smmu_v3_pmcg_cfg;
	default:
		return NULL;
	}
}

/**
 * iort_add_platform_device() - Allocate a platform device for IORT node
 * @node: Pointer to device ACPI IORT node
 * @ops: Pointer to IORT device config struct
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_platform_device(struct acpi_iort_node *node,
					   const struct iort_dev_config *ops)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	int ret, count;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	if (ops->dev_set_proximity) {
		ret = ops->dev_set_proximity(&pdev->dev, node);
		if (ret)
			goto dev_put;
	}

	count = ops->dev_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->dev_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Platform devices based on PMCG nodes use platform_data to
	 * pass the hardware model info to the driver. For others, add
	 * a copy of the IORT node pointer to platform_data so it can be
	 * used to retrieve IORT data information.
	 */
	if (ops->dev_add_platdata)
		ret = ops->dev_add_platdata(pdev);
	else
		ret = platform_device_add_data(pdev, &node, sizeof(node));

	if (ret)
		goto dev_put;

	fwnode = iort_get_fwnode(node);

	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	if (ops->dev_dma_configure)
		ops->dev_dma_configure(&pdev->dev, node);

	iort_set_device_domain(&pdev->dev, node);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	arch_teardown_dma_ops(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

#ifdef CONFIG_PCI
static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
	static bool acs_enabled __initdata;

	if (acs_enabled)
		return;

	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_node *parent;
		struct acpi_iort_id_mapping *map;
		int i;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
				   iort_node->mapping_offset);

		for (i = 0; i < iort_node->mapping_count; i++, map++) {
			if (!map->output_reference)
				continue;

			parent = ACPI_ADD_PTR(struct acpi_iort_node,
					iort_table, map->output_reference);
			/*
			 * If we detect a RC->SMMU mapping, make sure
			 * we enable ACS on the system.
			 */
			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
				pci_request_acs();
				acs_enabled = true;
				return;
			}
		}
	}
}
#else
static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
#endif

static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;
	const struct iort_dev_config *ops;

	/*
	 * iort_table and iort both point to the start of IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		iort_enable_acs(iort_node);

		ops = iort_get_dev_cfg(iort_node);
		if (ops) {
			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_platform_device(iort_node, ops);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_check_id_count_workaround(iort_table);
	iort_init_platform_devices();
}