1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2016, Semihalf 4 * Author: Tomasz Nowicki <tn@semihalf.com> 5 * 6 * This file implements early detection/parsing of I/O mapping 7 * reported to OS through firmware via I/O Remapping Table (IORT) 8 * IORT document number: ARM DEN 0049A 9 */ 10 11 #define pr_fmt(fmt) "ACPI: IORT: " fmt 12 13 #include <linux/acpi_iort.h> 14 #include <linux/bitfield.h> 15 #include <linux/iommu.h> 16 #include <linux/kernel.h> 17 #include <linux/list.h> 18 #include <linux/pci.h> 19 #include <linux/platform_device.h> 20 #include <linux/slab.h> 21 22 #define IORT_TYPE_MASK(type) (1 << (type)) 23 #define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP) 24 #define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \ 25 (1 << ACPI_IORT_NODE_SMMU_V3)) 26 27 struct iort_its_msi_chip { 28 struct list_head list; 29 struct fwnode_handle *fw_node; 30 phys_addr_t base_addr; 31 u32 translation_id; 32 }; 33 34 struct iort_fwnode { 35 struct list_head list; 36 struct acpi_iort_node *iort_node; 37 struct fwnode_handle *fwnode; 38 }; 39 static LIST_HEAD(iort_fwnode_list); 40 static DEFINE_SPINLOCK(iort_fwnode_lock); 41 42 /** 43 * iort_set_fwnode() - Create iort_fwnode and use it to register 44 * iommu data in the iort_fwnode_list 45 * 46 * @node: IORT table node associated with the IOMMU 47 * @fwnode: fwnode associated with the IORT node 48 * 49 * Returns: 0 on success 50 * <0 on failure 51 */ 52 static inline int iort_set_fwnode(struct acpi_iort_node *iort_node, 53 struct fwnode_handle *fwnode) 54 { 55 struct iort_fwnode *np; 56 57 np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC); 58 59 if (WARN_ON(!np)) 60 return -ENOMEM; 61 62 INIT_LIST_HEAD(&np->list); 63 np->iort_node = iort_node; 64 np->fwnode = fwnode; 65 66 spin_lock(&iort_fwnode_lock); 67 list_add_tail(&np->list, &iort_fwnode_list); 68 spin_unlock(&iort_fwnode_lock); 69 70 return 0; 71 } 72 73 /** 74 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node 75 * 76 * @node: IORT table node to be looked-up 77 * 78 * Returns: fwnode_handle pointer on success, NULL on failure 79 */ 80 static inline struct fwnode_handle *iort_get_fwnode( 81 struct acpi_iort_node *node) 82 { 83 struct iort_fwnode *curr; 84 struct fwnode_handle *fwnode = NULL; 85 86 spin_lock(&iort_fwnode_lock); 87 list_for_each_entry(curr, &iort_fwnode_list, list) { 88 if (curr->iort_node == node) { 89 fwnode = curr->fwnode; 90 break; 91 } 92 } 93 spin_unlock(&iort_fwnode_lock); 94 95 return fwnode; 96 } 97 98 /** 99 * iort_delete_fwnode() - Delete fwnode associated with an IORT node 100 * 101 * @node: IORT table node associated with fwnode to delete 102 */ 103 static inline void iort_delete_fwnode(struct acpi_iort_node *node) 104 { 105 struct iort_fwnode *curr, *tmp; 106 107 spin_lock(&iort_fwnode_lock); 108 list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) { 109 if (curr->iort_node == node) { 110 list_del(&curr->list); 111 kfree(curr); 112 break; 113 } 114 } 115 spin_unlock(&iort_fwnode_lock); 116 } 117 118 /** 119 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode 120 * 121 * @fwnode: fwnode associated with device to be looked-up 122 * 123 * Returns: iort_node pointer on success, NULL on failure 124 */ 125 static inline struct acpi_iort_node *iort_get_iort_node( 126 struct fwnode_handle *fwnode) 127 { 128 struct iort_fwnode *curr; 129 struct acpi_iort_node *iort_node = NULL; 130 131 spin_lock(&iort_fwnode_lock); 132 list_for_each_entry(curr, &iort_fwnode_list, list) { 133 if (curr->fwnode == fwnode) { 134 iort_node = curr->iort_node; 135 break; 136 } 137 } 138 spin_unlock(&iort_fwnode_lock); 139 140 return iort_node; 141 } 142 143 typedef acpi_status (*iort_find_node_callback) 144 (struct acpi_iort_node *node, void *context); 145 146 /* Root pointer to the mapped IORT table */ 147 static struct acpi_table_header *iort_table; 148 149 static LIST_HEAD(iort_msi_chip_list); 150 static DEFINE_SPINLOCK(iort_msi_chip_lock); 151 152 /** 153 * iort_register_domain_token() - register domain token along with related 154 * ITS ID and base address to the list from where we can get it back later on. 155 * @trans_id: ITS ID. 156 * @base: ITS base address. 157 * @fw_node: Domain token. 158 * 159 * Returns: 0 on success, -ENOMEM if no memory when allocating list element 160 */ 161 int iort_register_domain_token(int trans_id, phys_addr_t base, 162 struct fwnode_handle *fw_node) 163 { 164 struct iort_its_msi_chip *its_msi_chip; 165 166 its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL); 167 if (!its_msi_chip) 168 return -ENOMEM; 169 170 its_msi_chip->fw_node = fw_node; 171 its_msi_chip->translation_id = trans_id; 172 its_msi_chip->base_addr = base; 173 174 spin_lock(&iort_msi_chip_lock); 175 list_add(&its_msi_chip->list, &iort_msi_chip_list); 176 spin_unlock(&iort_msi_chip_lock); 177 178 return 0; 179 } 180 181 /** 182 * iort_deregister_domain_token() - Deregister domain token based on ITS ID 183 * @trans_id: ITS ID. 184 * 185 * Returns: none. 186 */ 187 void iort_deregister_domain_token(int trans_id) 188 { 189 struct iort_its_msi_chip *its_msi_chip, *t; 190 191 spin_lock(&iort_msi_chip_lock); 192 list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) { 193 if (its_msi_chip->translation_id == trans_id) { 194 list_del(&its_msi_chip->list); 195 kfree(its_msi_chip); 196 break; 197 } 198 } 199 spin_unlock(&iort_msi_chip_lock); 200 } 201 202 /** 203 * iort_find_domain_token() - Find domain token based on given ITS ID 204 * @trans_id: ITS ID. 205 * 206 * Returns: domain token when find on the list, NULL otherwise 207 */ 208 struct fwnode_handle *iort_find_domain_token(int trans_id) 209 { 210 struct fwnode_handle *fw_node = NULL; 211 struct iort_its_msi_chip *its_msi_chip; 212 213 spin_lock(&iort_msi_chip_lock); 214 list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) { 215 if (its_msi_chip->translation_id == trans_id) { 216 fw_node = its_msi_chip->fw_node; 217 break; 218 } 219 } 220 spin_unlock(&iort_msi_chip_lock); 221 222 return fw_node; 223 } 224 225 static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type, 226 iort_find_node_callback callback, 227 void *context) 228 { 229 struct acpi_iort_node *iort_node, *iort_end; 230 struct acpi_table_iort *iort; 231 int i; 232 233 if (!iort_table) 234 return NULL; 235 236 /* Get the first IORT node */ 237 iort = (struct acpi_table_iort *)iort_table; 238 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, 239 iort->node_offset); 240 iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 241 iort_table->length); 242 243 for (i = 0; i < iort->node_count; i++) { 244 if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND, 245 "IORT node pointer overflows, bad table!\n")) 246 return NULL; 247 248 if (iort_node->type == type && 249 ACPI_SUCCESS(callback(iort_node, context))) 250 return iort_node; 251 252 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, 253 iort_node->length); 254 } 255 256 return NULL; 257 } 258 259 static acpi_status iort_match_node_callback(struct acpi_iort_node *node, 260 void *context) 261 { 262 struct device *dev = context; 263 acpi_status status = AE_NOT_FOUND; 264 265 if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) { 266 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 267 struct acpi_device *adev = to_acpi_device_node(dev->fwnode); 268 struct acpi_iort_named_component *ncomp; 269 270 if (!adev) 271 goto out; 272 273 status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf); 274 if (ACPI_FAILURE(status)) { 275 dev_warn(dev, "Can't get device full path name\n"); 276 goto out; 277 } 278 279 ncomp = (struct acpi_iort_named_component *)node->node_data; 280 status = !strcmp(ncomp->device_name, buf.pointer) ? 281 AE_OK : AE_NOT_FOUND; 282 acpi_os_free(buf.pointer); 283 } else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { 284 struct acpi_iort_root_complex *pci_rc; 285 struct pci_bus *bus; 286 287 bus = to_pci_bus(dev); 288 pci_rc = (struct acpi_iort_root_complex *)node->node_data; 289 290 /* 291 * It is assumed that PCI segment numbers maps one-to-one 292 * with root complexes. Each segment number can represent only 293 * one root complex. 294 */ 295 status = pci_rc->pci_segment_number == pci_domain_nr(bus) ? 296 AE_OK : AE_NOT_FOUND; 297 } 298 out: 299 return status; 300 } 301 302 struct iort_workaround_oem_info { 303 char oem_id[ACPI_OEM_ID_SIZE + 1]; 304 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; 305 u32 oem_revision; 306 }; 307 308 static bool apply_id_count_workaround; 309 310 static struct iort_workaround_oem_info wa_info[] __initdata = { 311 { 312 .oem_id = "HISI ", 313 .oem_table_id = "HIP07 ", 314 .oem_revision = 0, 315 }, { 316 .oem_id = "HISI ", 317 .oem_table_id = "HIP08 ", 318 .oem_revision = 0, 319 } 320 }; 321 322 static void __init 323 iort_check_id_count_workaround(struct acpi_table_header *tbl) 324 { 325 int i; 326 327 for (i = 0; i < ARRAY_SIZE(wa_info); i++) { 328 if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && 329 !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && 330 wa_info[i].oem_revision == tbl->oem_revision) { 331 apply_id_count_workaround = true; 332 pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n"); 333 break; 334 } 335 } 336 } 337 338 static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map) 339 { 340 u32 map_max = map->input_base + map->id_count; 341 342 /* 343 * The IORT specification revision D (Section 3, table 4, page 9) says 344 * Number of IDs = The number of IDs in the range minus one, but the 345 * IORT code ignored the "minus one", and some firmware did that too, 346 * so apply a workaround here to keep compatible with both the spec 347 * compliant and non-spec compliant firmwares. 348 */ 349 if (apply_id_count_workaround) 350 map_max--; 351 352 return map_max; 353 } 354 355 static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, 356 u32 *rid_out) 357 { 358 /* Single mapping does not care for input id */ 359 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { 360 if (type == ACPI_IORT_NODE_NAMED_COMPONENT || 361 type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { 362 *rid_out = map->output_base; 363 return 0; 364 } 365 366 pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n", 367 map, type); 368 return -ENXIO; 369 } 370 371 if (rid_in < map->input_base || rid_in > iort_get_map_max(map)) 372 return -ENXIO; 373 374 *rid_out = map->output_base + (rid_in - map->input_base); 375 return 0; 376 } 377 378 static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, 379 u32 *id_out, int index) 380 { 381 struct acpi_iort_node *parent; 382 struct acpi_iort_id_mapping *map; 383 384 if (!node->mapping_offset || !node->mapping_count || 385 index >= node->mapping_count) 386 return NULL; 387 388 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, 389 node->mapping_offset + index * sizeof(*map)); 390 391 /* Firmware bug! */ 392 if (!map->output_reference) { 393 pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", 394 node, node->type); 395 return NULL; 396 } 397 398 parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 399 map->output_reference); 400 401 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { 402 if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || 403 node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || 404 node->type == ACPI_IORT_NODE_SMMU_V3 || 405 node->type == ACPI_IORT_NODE_PMCG) { 406 *id_out = map->output_base; 407 return parent; 408 } 409 } 410 411 return NULL; 412 } 413 414 static int iort_get_id_mapping_index(struct acpi_iort_node *node) 415 { 416 struct acpi_iort_smmu_v3 *smmu; 417 418 switch (node->type) { 419 case ACPI_IORT_NODE_SMMU_V3: 420 /* 421 * SMMUv3 dev ID mapping index was introduced in revision 1 422 * table, not available in revision 0 423 */ 424 if (node->revision < 1) 425 return -EINVAL; 426 427 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 428 /* 429 * ID mapping index is only ignored if all interrupts are 430 * GSIV based 431 */ 432 if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv 433 && smmu->sync_gsiv) 434 return -EINVAL; 435 436 if (smmu->id_mapping_index >= node->mapping_count) { 437 pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n", 438 node, node->type); 439 return -EINVAL; 440 } 441 442 return smmu->id_mapping_index; 443 case ACPI_IORT_NODE_PMCG: 444 return 0; 445 default: 446 return -EINVAL; 447 } 448 } 449 450 static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node, 451 u32 id_in, u32 *id_out, 452 u8 type_mask) 453 { 454 u32 id = id_in; 455 456 /* Parse the ID mapping tree to find specified node type */ 457 while (node) { 458 struct acpi_iort_id_mapping *map; 459 int i, index; 460 461 if (IORT_TYPE_MASK(node->type) & type_mask) { 462 if (id_out) 463 *id_out = id; 464 return node; 465 } 466 467 if (!node->mapping_offset || !node->mapping_count) 468 goto fail_map; 469 470 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, 471 node->mapping_offset); 472 473 /* Firmware bug! */ 474 if (!map->output_reference) { 475 pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", 476 node, node->type); 477 goto fail_map; 478 } 479 480 /* 481 * Get the special ID mapping index (if any) and skip its 482 * associated ID map to prevent erroneous multi-stage 483 * IORT ID translations. 484 */ 485 index = iort_get_id_mapping_index(node); 486 487 /* Do the ID translation */ 488 for (i = 0; i < node->mapping_count; i++, map++) { 489 /* if it is special mapping index, skip it */ 490 if (i == index) 491 continue; 492 493 if (!iort_id_map(map, node->type, id, &id)) 494 break; 495 } 496 497 if (i == node->mapping_count) 498 goto fail_map; 499 500 node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 501 map->output_reference); 502 } 503 504 fail_map: 505 /* Map input ID to output ID unchanged on mapping failure */ 506 if (id_out) 507 *id_out = id_in; 508 509 return NULL; 510 } 511 512 static struct acpi_iort_node *iort_node_map_platform_id( 513 struct acpi_iort_node *node, u32 *id_out, u8 type_mask, 514 int index) 515 { 516 struct acpi_iort_node *parent; 517 u32 id; 518 519 /* step 1: retrieve the initial dev id */ 520 parent = iort_node_get_id(node, &id, index); 521 if (!parent) 522 return NULL; 523 524 /* 525 * optional step 2: map the initial dev id if its parent is not 526 * the target type we want, map it again for the use cases such 527 * as NC (named component) -> SMMU -> ITS. If the type is matched, 528 * return the initial dev id and its parent pointer directly. 529 */ 530 if (!(IORT_TYPE_MASK(parent->type) & type_mask)) 531 parent = iort_node_map_id(parent, id, id_out, type_mask); 532 else 533 if (id_out) 534 *id_out = id; 535 536 return parent; 537 } 538 539 static struct acpi_iort_node *iort_find_dev_node(struct device *dev) 540 { 541 struct pci_bus *pbus; 542 543 if (!dev_is_pci(dev)) { 544 struct acpi_iort_node *node; 545 /* 546 * scan iort_fwnode_list to see if it's an iort platform 547 * device (such as SMMU, PMCG),its iort node already cached 548 * and associated with fwnode when iort platform devices 549 * were initialized. 550 */ 551 node = iort_get_iort_node(dev->fwnode); 552 if (node) 553 return node; 554 555 /* 556 * if not, then it should be a platform device defined in 557 * DSDT/SSDT (with Named Component node in IORT) 558 */ 559 return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 560 iort_match_node_callback, dev); 561 } 562 563 /* Find a PCI root bus */ 564 pbus = to_pci_dev(dev)->bus; 565 while (!pci_is_root_bus(pbus)) 566 pbus = pbus->parent; 567 568 return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, 569 iort_match_node_callback, &pbus->dev); 570 } 571 572 /** 573 * iort_msi_map_rid() - Map a MSI requester ID for a device 574 * @dev: The device for which the mapping is to be done. 575 * @req_id: The device requester ID. 576 * 577 * Returns: mapped MSI RID on success, input requester ID otherwise 578 */ 579 u32 iort_msi_map_rid(struct device *dev, u32 req_id) 580 { 581 struct acpi_iort_node *node; 582 u32 dev_id; 583 584 node = iort_find_dev_node(dev); 585 if (!node) 586 return req_id; 587 588 iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE); 589 return dev_id; 590 } 591 592 /** 593 * iort_pmsi_get_dev_id() - Get the device id for a device 594 * @dev: The device for which the mapping is to be done. 595 * @dev_id: The device ID found. 596 * 597 * Returns: 0 for successful find a dev id, -ENODEV on error 598 */ 599 int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id) 600 { 601 int i, index; 602 struct acpi_iort_node *node; 603 604 node = iort_find_dev_node(dev); 605 if (!node) 606 return -ENODEV; 607 608 index = iort_get_id_mapping_index(node); 609 /* if there is a valid index, go get the dev_id directly */ 610 if (index >= 0) { 611 if (iort_node_get_id(node, dev_id, index)) 612 return 0; 613 } else { 614 for (i = 0; i < node->mapping_count; i++) { 615 if (iort_node_map_platform_id(node, dev_id, 616 IORT_MSI_TYPE, i)) 617 return 0; 618 } 619 } 620 621 return -ENODEV; 622 } 623 624 static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base) 625 { 626 struct iort_its_msi_chip *its_msi_chip; 627 int ret = -ENODEV; 628 629 spin_lock(&iort_msi_chip_lock); 630 list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) { 631 if (its_msi_chip->translation_id == its_id) { 632 *base = its_msi_chip->base_addr; 633 ret = 0; 634 break; 635 } 636 } 637 spin_unlock(&iort_msi_chip_lock); 638 639 return ret; 640 } 641 642 /** 643 * iort_dev_find_its_id() - Find the ITS identifier for a device 644 * @dev: The device. 645 * @req_id: Device's requester ID 646 * @idx: Index of the ITS identifier list. 647 * @its_id: ITS identifier. 648 * 649 * Returns: 0 on success, appropriate error value otherwise 650 */ 651 static int iort_dev_find_its_id(struct device *dev, u32 req_id, 652 unsigned int idx, int *its_id) 653 { 654 struct acpi_iort_its_group *its; 655 struct acpi_iort_node *node; 656 657 node = iort_find_dev_node(dev); 658 if (!node) 659 return -ENXIO; 660 661 node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE); 662 if (!node) 663 return -ENXIO; 664 665 /* Move to ITS specific data */ 666 its = (struct acpi_iort_its_group *)node->node_data; 667 if (idx >= its->its_count) { 668 dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n", 669 idx, its->its_count); 670 return -ENXIO; 671 } 672 673 *its_id = its->identifiers[idx]; 674 return 0; 675 } 676 677 /** 678 * iort_get_device_domain() - Find MSI domain related to a device 679 * @dev: The device. 680 * @req_id: Requester ID for the device. 681 * 682 * Returns: the MSI domain for this device, NULL otherwise 683 */ 684 struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id) 685 { 686 struct fwnode_handle *handle; 687 int its_id; 688 689 if (iort_dev_find_its_id(dev, req_id, 0, &its_id)) 690 return NULL; 691 692 handle = iort_find_domain_token(its_id); 693 if (!handle) 694 return NULL; 695 696 return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI); 697 } 698 699 static void iort_set_device_domain(struct device *dev, 700 struct acpi_iort_node *node) 701 { 702 struct acpi_iort_its_group *its; 703 struct acpi_iort_node *msi_parent; 704 struct acpi_iort_id_mapping *map; 705 struct fwnode_handle *iort_fwnode; 706 struct irq_domain *domain; 707 int index; 708 709 index = iort_get_id_mapping_index(node); 710 if (index < 0) 711 return; 712 713 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, 714 node->mapping_offset + index * sizeof(*map)); 715 716 /* Firmware bug! */ 717 if (!map->output_reference || 718 !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) { 719 pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n", 720 node, node->type); 721 return; 722 } 723 724 msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 725 map->output_reference); 726 727 if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP) 728 return; 729 730 /* Move to ITS specific data */ 731 its = (struct acpi_iort_its_group *)msi_parent->node_data; 732 733 iort_fwnode = iort_find_domain_token(its->identifiers[0]); 734 if (!iort_fwnode) 735 return; 736 737 domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI); 738 if (domain) 739 dev_set_msi_domain(dev, domain); 740 } 741 742 /** 743 * iort_get_platform_device_domain() - Find MSI domain related to a 744 * platform device 745 * @dev: the dev pointer associated with the platform device 746 * 747 * Returns: the MSI domain for this device, NULL otherwise 748 */ 749 static struct irq_domain *iort_get_platform_device_domain(struct device *dev) 750 { 751 struct acpi_iort_node *node, *msi_parent = NULL; 752 struct fwnode_handle *iort_fwnode; 753 struct acpi_iort_its_group *its; 754 int i; 755 756 /* find its associated iort node */ 757 node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 758 iort_match_node_callback, dev); 759 if (!node) 760 return NULL; 761 762 /* then find its msi parent node */ 763 for (i = 0; i < node->mapping_count; i++) { 764 msi_parent = iort_node_map_platform_id(node, NULL, 765 IORT_MSI_TYPE, i); 766 if (msi_parent) 767 break; 768 } 769 770 if (!msi_parent) 771 return NULL; 772 773 /* Move to ITS specific data */ 774 its = (struct acpi_iort_its_group *)msi_parent->node_data; 775 776 iort_fwnode = iort_find_domain_token(its->identifiers[0]); 777 if (!iort_fwnode) 778 return NULL; 779 780 return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI); 781 } 782 783 void acpi_configure_pmsi_domain(struct device *dev) 784 { 785 struct irq_domain *msi_domain; 786 787 msi_domain = iort_get_platform_device_domain(dev); 788 if (msi_domain) 789 dev_set_msi_domain(dev, msi_domain); 790 } 791 792 static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias, 793 void *data) 794 { 795 u32 *rid = data; 796 797 *rid = alias; 798 return 0; 799 } 800 801 #ifdef CONFIG_IOMMU_API 802 static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev) 803 { 804 struct acpi_iort_node *iommu; 805 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 806 807 iommu = iort_get_iort_node(fwspec->iommu_fwnode); 808 809 if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) { 810 struct acpi_iort_smmu_v3 *smmu; 811 812 smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data; 813 if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X) 814 return iommu; 815 } 816 817 return NULL; 818 } 819 820 static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev) 821 { 822 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 823 824 return (fwspec && fwspec->ops) ? fwspec->ops : NULL; 825 } 826 827 static inline int iort_add_device_replay(const struct iommu_ops *ops, 828 struct device *dev) 829 { 830 int err = 0; 831 832 if (dev->bus && !device_iommu_mapped(dev)) 833 err = iommu_probe_device(dev); 834 835 return err; 836 } 837 838 /** 839 * iort_iommu_msi_get_resv_regions - Reserved region driver helper 840 * @dev: Device from iommu_get_resv_regions() 841 * @head: Reserved region list from iommu_get_resv_regions() 842 * 843 * Returns: Number of msi reserved regions on success (0 if platform 844 * doesn't require the reservation or no associated msi regions), 845 * appropriate error value otherwise. The ITS interrupt translation 846 * spaces (ITS_base + SZ_64K, SZ_64K) associated with the device 847 * are the msi reserved regions. 848 */ 849 int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) 850 { 851 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 852 struct acpi_iort_its_group *its; 853 struct acpi_iort_node *iommu_node, *its_node = NULL; 854 int i, resv = 0; 855 856 iommu_node = iort_get_msi_resv_iommu(dev); 857 if (!iommu_node) 858 return 0; 859 860 /* 861 * Current logic to reserve ITS regions relies on HW topologies 862 * where a given PCI or named component maps its IDs to only one 863 * ITS group; if a PCI or named component can map its IDs to 864 * different ITS groups through IORT mappings this function has 865 * to be reworked to ensure we reserve regions for all ITS groups 866 * a given PCI or named component may map IDs to. 867 */ 868 869 for (i = 0; i < fwspec->num_ids; i++) { 870 its_node = iort_node_map_id(iommu_node, 871 fwspec->ids[i], 872 NULL, IORT_MSI_TYPE); 873 if (its_node) 874 break; 875 } 876 877 if (!its_node) 878 return 0; 879 880 /* Move to ITS specific data */ 881 its = (struct acpi_iort_its_group *)its_node->node_data; 882 883 for (i = 0; i < its->its_count; i++) { 884 phys_addr_t base; 885 886 if (!iort_find_its_base(its->identifiers[i], &base)) { 887 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; 888 struct iommu_resv_region *region; 889 890 region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K, 891 prot, IOMMU_RESV_MSI); 892 if (region) { 893 list_add_tail(®ion->list, head); 894 resv++; 895 } 896 } 897 } 898 899 return (resv == its->its_count) ? resv : -ENODEV; 900 } 901 902 static inline bool iort_iommu_driver_enabled(u8 type) 903 { 904 switch (type) { 905 case ACPI_IORT_NODE_SMMU_V3: 906 return IS_ENABLED(CONFIG_ARM_SMMU_V3); 907 case ACPI_IORT_NODE_SMMU: 908 return IS_ENABLED(CONFIG_ARM_SMMU); 909 default: 910 pr_warn("IORT node type %u does not describe an SMMU\n", type); 911 return false; 912 } 913 } 914 915 static int arm_smmu_iort_xlate(struct device *dev, u32 streamid, 916 struct fwnode_handle *fwnode, 917 const struct iommu_ops *ops) 918 { 919 int ret = iommu_fwspec_init(dev, fwnode, ops); 920 921 if (!ret) 922 ret = iommu_fwspec_add_ids(dev, &streamid, 1); 923 924 return ret; 925 } 926 927 static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node) 928 { 929 struct acpi_iort_root_complex *pci_rc; 930 931 pci_rc = (struct acpi_iort_root_complex *)node->node_data; 932 return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED; 933 } 934 935 static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node, 936 u32 streamid) 937 { 938 const struct iommu_ops *ops; 939 struct fwnode_handle *iort_fwnode; 940 941 if (!node) 942 return -ENODEV; 943 944 iort_fwnode = iort_get_fwnode(node); 945 if (!iort_fwnode) 946 return -ENODEV; 947 948 /* 949 * If the ops look-up fails, this means that either 950 * the SMMU drivers have not been probed yet or that 951 * the SMMU drivers are not built in the kernel; 952 * Depending on whether the SMMU drivers are built-in 953 * in the kernel or not, defer the IOMMU configuration 954 * or just abort it. 955 */ 956 ops = iommu_ops_from_fwnode(iort_fwnode); 957 if (!ops) 958 return iort_iommu_driver_enabled(node->type) ? 959 -EPROBE_DEFER : -ENODEV; 960 961 return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops); 962 } 963 964 struct iort_pci_alias_info { 965 struct device *dev; 966 struct acpi_iort_node *node; 967 }; 968 969 static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) 970 { 971 struct iort_pci_alias_info *info = data; 972 struct acpi_iort_node *parent; 973 u32 streamid; 974 975 parent = iort_node_map_id(info->node, alias, &streamid, 976 IORT_IOMMU_TYPE); 977 return iort_iommu_xlate(info->dev, parent, streamid); 978 } 979 980 static void iort_named_component_init(struct device *dev, 981 struct acpi_iort_node *node) 982 { 983 struct acpi_iort_named_component *nc; 984 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 985 986 if (!fwspec) 987 return; 988 989 nc = (struct acpi_iort_named_component *)node->node_data; 990 fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS, 991 nc->node_flags); 992 } 993 994 /** 995 * iort_iommu_configure - Set-up IOMMU configuration for a device. 996 * 997 * @dev: device to configure 998 * 999 * Returns: iommu_ops pointer on configuration success 1000 * NULL on configuration failure 1001 */ 1002 const struct iommu_ops *iort_iommu_configure(struct device *dev) 1003 { 1004 struct acpi_iort_node *node, *parent; 1005 const struct iommu_ops *ops; 1006 u32 streamid = 0; 1007 int err = -ENODEV; 1008 1009 /* 1010 * If we already translated the fwspec there 1011 * is nothing left to do, return the iommu_ops. 1012 */ 1013 ops = iort_fwspec_iommu_ops(dev); 1014 if (ops) 1015 return ops; 1016 1017 if (dev_is_pci(dev)) { 1018 struct iommu_fwspec *fwspec; 1019 struct pci_bus *bus = to_pci_dev(dev)->bus; 1020 struct iort_pci_alias_info info = { .dev = dev }; 1021 1022 node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, 1023 iort_match_node_callback, &bus->dev); 1024 if (!node) 1025 return NULL; 1026 1027 info.node = node; 1028 err = pci_for_each_dma_alias(to_pci_dev(dev), 1029 iort_pci_iommu_init, &info); 1030 1031 fwspec = dev_iommu_fwspec_get(dev); 1032 if (fwspec && iort_pci_rc_supports_ats(node)) 1033 fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; 1034 } else { 1035 int i = 0; 1036 1037 node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 1038 iort_match_node_callback, dev); 1039 if (!node) 1040 return NULL; 1041 1042 do { 1043 parent = iort_node_map_platform_id(node, &streamid, 1044 IORT_IOMMU_TYPE, 1045 i++); 1046 1047 if (parent) 1048 err = iort_iommu_xlate(dev, parent, streamid); 1049 } while (parent && !err); 1050 1051 if (!err) 1052 iort_named_component_init(dev, node); 1053 } 1054 1055 /* 1056 * If we have reason to believe the IOMMU driver missed the initial 1057 * add_device callback for dev, replay it to get things in order. 1058 */ 1059 if (!err) { 1060 ops = iort_fwspec_iommu_ops(dev); 1061 err = iort_add_device_replay(ops, dev); 1062 } 1063 1064 /* Ignore all other errors apart from EPROBE_DEFER */ 1065 if (err == -EPROBE_DEFER) { 1066 ops = ERR_PTR(err); 1067 } else if (err) { 1068 dev_dbg(dev, "Adding to IOMMU failed: %d\n", err); 1069 ops = NULL; 1070 } 1071 1072 return ops; 1073 } 1074 #else 1075 static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev) 1076 { return NULL; } 1077 static inline int iort_add_device_replay(const struct iommu_ops *ops, 1078 struct device *dev) 1079 { return 0; } 1080 int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) 1081 { return 0; } 1082 const struct iommu_ops *iort_iommu_configure(struct device *dev) 1083 { return NULL; } 1084 #endif 1085 1086 static int nc_dma_get_range(struct device *dev, u64 *size) 1087 { 1088 struct acpi_iort_node *node; 1089 struct acpi_iort_named_component *ncomp; 1090 1091 node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 1092 iort_match_node_callback, dev); 1093 if (!node) 1094 return -ENODEV; 1095 1096 ncomp = (struct acpi_iort_named_component *)node->node_data; 1097 1098 *size = ncomp->memory_address_limit >= 64 ? U64_MAX : 1099 1ULL<<ncomp->memory_address_limit; 1100 1101 return 0; 1102 } 1103 1104 static int rc_dma_get_range(struct device *dev, u64 *size) 1105 { 1106 struct acpi_iort_node *node; 1107 struct acpi_iort_root_complex *rc; 1108 struct pci_bus *pbus = to_pci_dev(dev)->bus; 1109 1110 node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, 1111 iort_match_node_callback, &pbus->dev); 1112 if (!node || node->revision < 1) 1113 return -ENODEV; 1114 1115 rc = (struct acpi_iort_root_complex *)node->node_data; 1116 1117 *size = rc->memory_address_limit >= 64 ? U64_MAX : 1118 1ULL<<rc->memory_address_limit; 1119 1120 return 0; 1121 } 1122 1123 /** 1124 * iort_dma_setup() - Set-up device DMA parameters. 1125 * 1126 * @dev: device to configure 1127 * @dma_addr: device DMA address result pointer 1128 * @size: DMA range size result pointer 1129 */ 1130 void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) 1131 { 1132 u64 end, mask, dmaaddr = 0, size = 0, offset = 0; 1133 int ret; 1134 1135 /* 1136 * If @dev is expected to be DMA-capable then the bus code that created 1137 * it should have initialised its dma_mask pointer by this point. For 1138 * now, we'll continue the legacy behaviour of coercing it to the 1139 * coherent mask if not, but we'll no longer do so quietly. 1140 */ 1141 if (!dev->dma_mask) { 1142 dev_warn(dev, "DMA mask not set\n"); 1143 dev->dma_mask = &dev->coherent_dma_mask; 1144 } 1145 1146 if (dev->coherent_dma_mask) 1147 size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); 1148 else 1149 size = 1ULL << 32; 1150 1151 if (dev_is_pci(dev)) { 1152 ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size); 1153 if (ret == -ENODEV) 1154 ret = rc_dma_get_range(dev, &size); 1155 } else { 1156 ret = nc_dma_get_range(dev, &size); 1157 } 1158 1159 if (!ret) { 1160 /* 1161 * Limit coherent and dma mask based on size retrieved from 1162 * firmware. 1163 */ 1164 end = dmaaddr + size - 1; 1165 mask = DMA_BIT_MASK(ilog2(end) + 1); 1166 dev->bus_dma_limit = end; 1167 dev->coherent_dma_mask = mask; 1168 *dev->dma_mask = mask; 1169 } 1170 1171 *dma_addr = dmaaddr; 1172 *dma_size = size; 1173 1174 dev->dma_pfn_offset = PFN_DOWN(offset); 1175 dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset); 1176 } 1177 1178 static void __init acpi_iort_register_irq(int hwirq, const char *name, 1179 int trigger, 1180 struct resource *res) 1181 { 1182 int irq = acpi_register_gsi(NULL, hwirq, trigger, 1183 ACPI_ACTIVE_HIGH); 1184 1185 if (irq <= 0) { 1186 pr_err("could not register gsi hwirq %d name [%s]\n", hwirq, 1187 name); 1188 return; 1189 } 1190 1191 res->start = irq; 1192 res->end = irq; 1193 res->flags = IORESOURCE_IRQ; 1194 res->name = name; 1195 } 1196 1197 static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node) 1198 { 1199 struct acpi_iort_smmu_v3 *smmu; 1200 /* Always present mem resource */ 1201 int num_res = 1; 1202 1203 /* Retrieve SMMUv3 specific data */ 1204 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 1205 1206 if (smmu->event_gsiv) 1207 num_res++; 1208 1209 if (smmu->pri_gsiv) 1210 num_res++; 1211 1212 if (smmu->gerr_gsiv) 1213 num_res++; 1214 1215 if (smmu->sync_gsiv) 1216 num_res++; 1217 1218 return num_res; 1219 } 1220 1221 static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu) 1222 { 1223 /* 1224 * Cavium ThunderX2 implementation doesn't not support unique 1225 * irq line. Use single irq line for all the SMMUv3 interrupts. 1226 */ 1227 if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) 1228 return false; 1229 1230 /* 1231 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking 1232 * SPI numbers here. 1233 */ 1234 return smmu->event_gsiv == smmu->pri_gsiv && 1235 smmu->event_gsiv == smmu->gerr_gsiv && 1236 smmu->event_gsiv == smmu->sync_gsiv; 1237 } 1238 1239 static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu) 1240 { 1241 /* 1242 * Override the size, for Cavium ThunderX2 implementation 1243 * which doesn't support the page 1 SMMU register space. 1244 */ 1245 if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) 1246 return SZ_64K; 1247 1248 return SZ_128K; 1249 } 1250 1251 static void __init arm_smmu_v3_init_resources(struct resource *res, 1252 struct acpi_iort_node *node) 1253 { 1254 struct acpi_iort_smmu_v3 *smmu; 1255 int num_res = 0; 1256 1257 /* Retrieve SMMUv3 specific data */ 1258 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 1259 1260 res[num_res].start = smmu->base_address; 1261 res[num_res].end = smmu->base_address + 1262 arm_smmu_v3_resource_size(smmu) - 1; 1263 res[num_res].flags = IORESOURCE_MEM; 1264 1265 num_res++; 1266 if (arm_smmu_v3_is_combined_irq(smmu)) { 1267 if (smmu->event_gsiv) 1268 acpi_iort_register_irq(smmu->event_gsiv, "combined", 1269 ACPI_EDGE_SENSITIVE, 1270 &res[num_res++]); 1271 } else { 1272 1273 if (smmu->event_gsiv) 1274 acpi_iort_register_irq(smmu->event_gsiv, "eventq", 1275 ACPI_EDGE_SENSITIVE, 1276 &res[num_res++]); 1277 1278 if (smmu->pri_gsiv) 1279 acpi_iort_register_irq(smmu->pri_gsiv, "priq", 1280 ACPI_EDGE_SENSITIVE, 1281 &res[num_res++]); 1282 1283 if (smmu->gerr_gsiv) 1284 acpi_iort_register_irq(smmu->gerr_gsiv, "gerror", 1285 ACPI_EDGE_SENSITIVE, 1286 &res[num_res++]); 1287 1288 if (smmu->sync_gsiv) 1289 acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync", 1290 ACPI_EDGE_SENSITIVE, 1291 &res[num_res++]); 1292 } 1293 } 1294 1295 static void __init arm_smmu_v3_dma_configure(struct device *dev, 1296 struct acpi_iort_node *node) 1297 { 1298 struct acpi_iort_smmu_v3 *smmu; 1299 enum dev_dma_attr attr; 1300 1301 /* Retrieve SMMUv3 specific data */ 1302 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 1303 1304 attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ? 1305 DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; 1306 1307 /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */ 1308 dev->dma_mask = &dev->coherent_dma_mask; 1309 1310 /* Configure DMA for the page table walker */ 1311 acpi_dma_configure(dev, attr); 1312 } 1313 1314 #if defined(CONFIG_ACPI_NUMA) 1315 /* 1316 * set numa proximity domain for smmuv3 device 1317 */ 1318 static int __init arm_smmu_v3_set_proximity(struct device *dev, 1319 struct acpi_iort_node *node) 1320 { 1321 struct acpi_iort_smmu_v3 *smmu; 1322 1323 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 1324 if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { 1325 int dev_node = acpi_map_pxm_to_node(smmu->pxm); 1326 1327 if (dev_node != NUMA_NO_NODE && !node_online(dev_node)) 1328 return -EINVAL; 1329 1330 set_dev_node(dev, dev_node); 1331 pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", 1332 smmu->base_address, 1333 smmu->pxm); 1334 } 1335 return 0; 1336 } 1337 #else 1338 #define arm_smmu_v3_set_proximity NULL 1339 #endif 1340 1341 static int __init arm_smmu_count_resources(struct acpi_iort_node *node) 1342 { 1343 struct acpi_iort_smmu *smmu; 1344 1345 /* Retrieve SMMU specific data */ 1346 smmu = (struct acpi_iort_smmu *)node->node_data; 1347 1348 /* 1349 * Only consider the global fault interrupt and ignore the 1350 * configuration access interrupt. 1351 * 1352 * MMIO address and global fault interrupt resources are always 1353 * present so add them to the context interrupt count as a static 1354 * value. 1355 */ 1356 return smmu->context_interrupt_count + 2; 1357 } 1358 1359 static void __init arm_smmu_init_resources(struct resource *res, 1360 struct acpi_iort_node *node) 1361 { 1362 struct acpi_iort_smmu *smmu; 1363 int i, hw_irq, trigger, num_res = 0; 1364 u64 *ctx_irq, *glb_irq; 1365 1366 /* Retrieve SMMU specific data */ 1367 smmu = (struct acpi_iort_smmu *)node->node_data; 1368 1369 res[num_res].start = smmu->base_address; 1370 res[num_res].end = smmu->base_address + smmu->span - 1; 1371 res[num_res].flags = IORESOURCE_MEM; 1372 num_res++; 1373 1374 glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset); 1375 /* Global IRQs */ 1376 hw_irq = IORT_IRQ_MASK(glb_irq[0]); 1377 trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]); 1378 1379 acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger, 1380 &res[num_res++]); 1381 1382 /* Context IRQs */ 1383 ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset); 1384 for (i = 0; i < smmu->context_interrupt_count; i++) { 1385 hw_irq = IORT_IRQ_MASK(ctx_irq[i]); 1386 trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]); 1387 1388 acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger, 1389 &res[num_res++]); 1390 } 1391 } 1392 1393 static void __init arm_smmu_dma_configure(struct device *dev, 1394 struct acpi_iort_node *node) 1395 { 1396 struct acpi_iort_smmu *smmu; 1397 enum dev_dma_attr attr; 1398 1399 /* Retrieve SMMU specific data */ 1400 smmu = (struct acpi_iort_smmu *)node->node_data; 1401 1402 attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ? 1403 DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; 1404 1405 /* We expect the dma masks to be equivalent for SMMU set-ups */ 1406 dev->dma_mask = &dev->coherent_dma_mask; 1407 1408 /* Configure DMA for the page table walker */ 1409 acpi_dma_configure(dev, attr); 1410 } 1411 1412 static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node) 1413 { 1414 struct acpi_iort_pmcg *pmcg; 1415 1416 /* Retrieve PMCG specific data */ 1417 pmcg = (struct acpi_iort_pmcg *)node->node_data; 1418 1419 /* 1420 * There are always 2 memory resources. 1421 * If the overflow_gsiv is present then add that for a total of 3. 1422 */ 1423 return pmcg->overflow_gsiv ? 3 : 2; 1424 } 1425 1426 static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res, 1427 struct acpi_iort_node *node) 1428 { 1429 struct acpi_iort_pmcg *pmcg; 1430 1431 /* Retrieve PMCG specific data */ 1432 pmcg = (struct acpi_iort_pmcg *)node->node_data; 1433 1434 res[0].start = pmcg->page0_base_address; 1435 res[0].end = pmcg->page0_base_address + SZ_4K - 1; 1436 res[0].flags = IORESOURCE_MEM; 1437 res[1].start = pmcg->page1_base_address; 1438 res[1].end = pmcg->page1_base_address + SZ_4K - 1; 1439 res[1].flags = IORESOURCE_MEM; 1440 1441 if (pmcg->overflow_gsiv) 1442 acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow", 1443 ACPI_EDGE_SENSITIVE, &res[2]); 1444 } 1445 1446 static struct acpi_platform_list pmcg_plat_info[] __initdata = { 1447 /* HiSilicon Hip08 Platform */ 1448 {"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal, 1449 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08}, 1450 { } 1451 }; 1452 1453 static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev) 1454 { 1455 u32 model; 1456 int idx; 1457 1458 idx = acpi_match_platform_list(pmcg_plat_info); 1459 if (idx >= 0) 1460 model = pmcg_plat_info[idx].data; 1461 else 1462 model = IORT_SMMU_V3_PMCG_GENERIC; 1463 1464 return platform_device_add_data(pdev, &model, sizeof(model)); 1465 } 1466 1467 struct iort_dev_config { 1468 const char *name; 1469 int (*dev_init)(struct acpi_iort_node *node); 1470 void (*dev_dma_configure)(struct device *dev, 1471 struct acpi_iort_node *node); 1472 int (*dev_count_resources)(struct acpi_iort_node *node); 1473 void (*dev_init_resources)(struct resource *res, 1474 struct acpi_iort_node *node); 1475 int (*dev_set_proximity)(struct device *dev, 1476 struct acpi_iort_node *node); 1477 int (*dev_add_platdata)(struct platform_device *pdev); 1478 }; 1479 1480 static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = { 1481 .name = "arm-smmu-v3", 1482 .dev_dma_configure = arm_smmu_v3_dma_configure, 1483 .dev_count_resources = arm_smmu_v3_count_resources, 1484 .dev_init_resources = arm_smmu_v3_init_resources, 1485 .dev_set_proximity = arm_smmu_v3_set_proximity, 1486 }; 1487 1488 static const struct iort_dev_config iort_arm_smmu_cfg __initconst = { 1489 .name = "arm-smmu", 1490 .dev_dma_configure = arm_smmu_dma_configure, 1491 .dev_count_resources = arm_smmu_count_resources, 1492 .dev_init_resources = arm_smmu_init_resources, 1493 }; 1494 1495 static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = { 1496 .name = "arm-smmu-v3-pmcg", 1497 .dev_count_resources = arm_smmu_v3_pmcg_count_resources, 1498 .dev_init_resources = arm_smmu_v3_pmcg_init_resources, 1499 .dev_add_platdata = arm_smmu_v3_pmcg_add_platdata, 1500 }; 1501 1502 static __init const struct iort_dev_config *iort_get_dev_cfg( 1503 struct acpi_iort_node *node) 1504 { 1505 switch (node->type) { 1506 case ACPI_IORT_NODE_SMMU_V3: 1507 return &iort_arm_smmu_v3_cfg; 1508 case ACPI_IORT_NODE_SMMU: 1509 return &iort_arm_smmu_cfg; 1510 case ACPI_IORT_NODE_PMCG: 1511 return &iort_arm_smmu_v3_pmcg_cfg; 1512 default: 1513 return NULL; 1514 } 1515 } 1516 1517 /** 1518 * iort_add_platform_device() - Allocate a platform device for IORT node 1519 * @node: Pointer to device ACPI IORT node 1520 * 1521 * Returns: 0 on success, <0 failure 1522 */ 1523 static int __init iort_add_platform_device(struct acpi_iort_node *node, 1524 const struct iort_dev_config *ops) 1525 { 1526 struct fwnode_handle *fwnode; 1527 struct platform_device *pdev; 1528 struct resource *r; 1529 int ret, count; 1530 1531 pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO); 1532 if (!pdev) 1533 return -ENOMEM; 1534 1535 if (ops->dev_set_proximity) { 1536 ret = ops->dev_set_proximity(&pdev->dev, node); 1537 if (ret) 1538 goto dev_put; 1539 } 1540 1541 count = ops->dev_count_resources(node); 1542 1543 r = kcalloc(count, sizeof(*r), GFP_KERNEL); 1544 if (!r) { 1545 ret = -ENOMEM; 1546 goto dev_put; 1547 } 1548 1549 ops->dev_init_resources(r, node); 1550 1551 ret = platform_device_add_resources(pdev, r, count); 1552 /* 1553 * Resources are duplicated in platform_device_add_resources, 1554 * free their allocated memory 1555 */ 1556 kfree(r); 1557 1558 if (ret) 1559 goto dev_put; 1560 1561 /* 1562 * Platform devices based on PMCG nodes uses platform_data to 1563 * pass the hardware model info to the driver. For others, add 1564 * a copy of IORT node pointer to platform_data to be used to 1565 * retrieve IORT data information. 1566 */ 1567 if (ops->dev_add_platdata) 1568 ret = ops->dev_add_platdata(pdev); 1569 else 1570 ret = platform_device_add_data(pdev, &node, sizeof(node)); 1571 1572 if (ret) 1573 goto dev_put; 1574 1575 fwnode = iort_get_fwnode(node); 1576 1577 if (!fwnode) { 1578 ret = -ENODEV; 1579 goto dev_put; 1580 } 1581 1582 pdev->dev.fwnode = fwnode; 1583 1584 if (ops->dev_dma_configure) 1585 ops->dev_dma_configure(&pdev->dev, node); 1586 1587 iort_set_device_domain(&pdev->dev, node); 1588 1589 ret = platform_device_add(pdev); 1590 if (ret) 1591 goto dma_deconfigure; 1592 1593 return 0; 1594 1595 dma_deconfigure: 1596 arch_teardown_dma_ops(&pdev->dev); 1597 dev_put: 1598 platform_device_put(pdev); 1599 1600 return ret; 1601 } 1602 1603 #ifdef CONFIG_PCI 1604 static void __init iort_enable_acs(struct acpi_iort_node *iort_node) 1605 { 1606 static bool acs_enabled __initdata; 1607 1608 if (acs_enabled) 1609 return; 1610 1611 if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { 1612 struct acpi_iort_node *parent; 1613 struct acpi_iort_id_mapping *map; 1614 int i; 1615 1616 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node, 1617 iort_node->mapping_offset); 1618 1619 for (i = 0; i < iort_node->mapping_count; i++, map++) { 1620 if (!map->output_reference) 1621 continue; 1622 1623 parent = ACPI_ADD_PTR(struct acpi_iort_node, 1624 iort_table, map->output_reference); 1625 /* 1626 * If we detect a RC->SMMU mapping, make sure 1627 * we enable ACS on the system. 1628 */ 1629 if ((parent->type == ACPI_IORT_NODE_SMMU) || 1630 (parent->type == ACPI_IORT_NODE_SMMU_V3)) { 1631 pci_request_acs(); 1632 acs_enabled = true; 1633 return; 1634 } 1635 } 1636 } 1637 } 1638 #else 1639 static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { } 1640 #endif 1641 1642 static void __init iort_init_platform_devices(void) 1643 { 1644 struct acpi_iort_node *iort_node, *iort_end; 1645 struct acpi_table_iort *iort; 1646 struct fwnode_handle *fwnode; 1647 int i, ret; 1648 const struct iort_dev_config *ops; 1649 1650 /* 1651 * iort_table and iort both point to the start of IORT table, but 1652 * have different struct types 1653 */ 1654 iort = (struct acpi_table_iort *)iort_table; 1655 1656 /* Get the first IORT node */ 1657 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, 1658 iort->node_offset); 1659 iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort, 1660 iort_table->length); 1661 1662 for (i = 0; i < iort->node_count; i++) { 1663 if (iort_node >= iort_end) { 1664 pr_err("iort node pointer overflows, bad table\n"); 1665 return; 1666 } 1667 1668 iort_enable_acs(iort_node); 1669 1670 ops = iort_get_dev_cfg(iort_node); 1671 if (ops) { 1672 fwnode = acpi_alloc_fwnode_static(); 1673 if (!fwnode) 1674 return; 1675 1676 iort_set_fwnode(iort_node, fwnode); 1677 1678 ret = iort_add_platform_device(iort_node, ops); 1679 if (ret) { 1680 iort_delete_fwnode(iort_node); 1681 acpi_free_fwnode_static(fwnode); 1682 return; 1683 } 1684 } 1685 1686 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, 1687 iort_node->length); 1688 } 1689 } 1690 1691 void __init acpi_iort_init(void) 1692 { 1693 acpi_status status; 1694 1695 status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table); 1696 if (ACPI_FAILURE(status)) { 1697 if (status != AE_NOT_FOUND) { 1698 const char *msg = acpi_format_exception(status); 1699 1700 pr_err("Failed to get table, %s\n", msg); 1701 } 1702 1703 return; 1704 } 1705 1706 iort_check_id_count_workaround(iort_table); 1707 iort_init_platform_devices(); 1708 } 1709