/*
 * Copyright (C) 2016, Semihalf
 *	Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))

/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX		0x2
#endif

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
			struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}

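/*
 * Illustrative sketch (not part of the driver logic): the registry above is
 * written once at boot and read back later during IOMMU configuration. In
 * iort_init_platform_devices() each SMMU node gets a statically allocated
 * fwnode:
 *
 *	fwnode = acpi_alloc_fwnode_static();
 *	iort_set_fwnode(iort_node, fwnode);
 *
 * and iort_iommu_xlate() later recovers it with iort_get_fwnode(iort_node),
 * which returns the same handle, or NULL if the node was never registered.
 */
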
/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked-up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
			struct fwnode_handle *fwnode)
{
	struct iort_fwnode *curr;
	struct acpi_iort_node *iort_node = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->fwnode == fwnode) {
			iort_node = curr->iort_node;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return iort_node;
}

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register domain token and related ITS ID
 * to the list so that it can be retrieved later on.
 * @trans_id: ITS ID.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

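/*
 * Illustrative sketch (not part of this file): an ITS driver is expected to
 * pair the two calls above around the lifetime of its MSI domain, keyed by
 * the translation ID from its own firmware description, e.g.:
 *
 *	// its_id and fwnode are hypothetical names for whatever the ITS
 *	// driver derived from its firmware tables.
 *	if (iort_register_domain_token(its_id, fwnode))
 *		return -ENOMEM;
 *	...
 *	iort_deregister_domain_token(its_id);	// on tear-down
 *
 * iort_find_domain_token() below is the read side of this registry.
 */
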
/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}

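/*
 * Illustrative sketch (not part of the driver): iort_scan_node() walks every
 * node of the requested type and stops at the first one whose callback
 * returns AE_OK. A minimal callback accepting any node of that type could
 * look like this (hypothetical helper, shown only to clarify the contract):
 *
 *	static acpi_status iort_match_any(struct acpi_iort_node *node,
 *					  void *context)
 *	{
 *		return AE_OK;
 *	}
 *
 *	node = iort_scan_node(ACPI_IORT_NODE_SMMU_V3, iort_match_any, NULL);
 *
 * The callback used below instead matches a node against a specific device.
 */
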
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
		struct acpi_iort_named_component *ncomp;

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * to root complexes. Each segment number can represent only
		 * one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in >= map->input_base + map->id_count))
		return -ENXIO;

	*rid_out = map->output_base + (rid_in - map->input_base);
	return 0;
}

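/*
 * Worked example (hypothetical values, for illustration only): an ID map
 * with input_base = 0x0000, id_count = 0x100 and output_base = 0x10000
 * translates rid_in = 0x0042 to rid_out = 0x10042, while rid_in = 0x0100
 * falls outside the window and makes iort_id_map() return -ENXIO. With
 * ACPI_IORT_ID_SINGLE_MAPPING set, rid_in is ignored and rid_out is always
 * output_base (only allowed for named component and root complex nodes).
 */
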
static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					       u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
	     index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
		    node->type == ACPI_IORT_NODE_SMMU_V3) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}

#if (ACPI_CA_VERSION > 0x20170929)
static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		/*
		 * SMMUv3 dev ID mapping index was introduced in revision 1
		 * table, not available in revision 0
		 */
		if (node->revision < 1)
			return -EINVAL;

		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
		/*
		 * ID mapping index is only ignored if all interrupts are
		 * GSIV based
		 */
		if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
		    && smmu->sync_gsiv)
			return -EINVAL;

		if (smmu->id_mapping_index >= node->mapping_count) {
			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
			       node, node->type);
			return -EINVAL;
		}

		return smmu->id_mapping_index;
	default:
		return -EINVAL;
	}
}
#else
static inline int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
	return -EINVAL;
}
#endif

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i, index;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/*
		 * Get the special ID mapping index (if any) and skip its
		 * associated ID map to prevent erroneous multi-stage
		 * IORT ID translations.
		 */
		index = iort_get_id_mapping_index(node);

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			/* if it is special mapping index, skip it */
			if (i == index)
				continue;

			if (!iort_id_map(map, node->type, id, &id))
				break;
		}

		if (i == node->mapping_count)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}

static struct acpi_iort_node *iort_node_map_platform_id(
		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
		int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev id */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * optional step 2: if the parent is not the target type we want,
	 * map the initial dev id again, to cover cases such as
	 * NC (named component) -> SMMU -> ITS. If the type already matches,
	 * return the initial dev id and its parent pointer directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}

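/*
 * Worked example (hypothetical topology, for illustration only): a named
 * component whose index-0 mapping points at an SMMUv3 with output_base 0x30,
 * where the SMMUv3 in turn has a range mapping towards an ITS group.
 * iort_node_map_platform_id(nc_node, &id, IORT_MSI_TYPE, 0) then performs:
 *
 *	step 1: iort_node_get_id()  -> id = 0x30, parent = SMMUv3 node
 *	step 2: iort_node_map_id()  -> id = SMMU mapping applied to 0x30,
 *				       parent = ITS group node
 *
 * With IORT_IOMMU_TYPE instead, step 2 is skipped because the SMMUv3 already
 * matches the requested type mask.
 */
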
static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev)) {
		struct acpi_iort_node *node;
		/*
		 * scan iort_fwnode_list to see if it's an iort platform
		 * device (such as SMMU, PMCG); its iort node was already
		 * cached and associated with the fwnode when iort platform
		 * devices were initialized.
		 */
		node = iort_get_iort_node(dev->fwnode);
		if (node)
			return node;

		/*
		 * if not, then it should be a platform device defined in
		 * DSDT/SSDT (with Named Component node in IORT)
		 */
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
	}

	/* Find a PCI root bus */
	pbus = to_pci_dev(dev)->bus;
	while (!pci_is_root_bus(pbus))
		pbus = pbus->parent;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_rid() - Map a MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return req_id;

	iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}

/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 if a device id was found, -ENODEV on error
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i, index;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	index = iort_get_id_mapping_index(node);
	/* if there is a valid index, go get the dev_id directly */
	if (index >= 0) {
		if (iort_node_get_id(node, dev_id, index))
			return 0;
	} else {
		for (i = 0; i < node->mapping_count; i++) {
			if (iort_node_map_platform_id(node, dev_id,
						      IORT_MSI_TYPE, i))
				return 0;
		}
	}

	return -ENODEV;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: Device's requester ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}

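/*
 * Illustrative sketch (not part of this file): both public helpers above take
 * the untranslated PCI requester ID and do the IORT walk internally, e.g.
 * (hypothetical snippet, error handling omitted):
 *
 *	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
 *	struct irq_domain *d = iort_get_device_domain(&pdev->dev, rid);
 *	u32 dev_id = iort_msi_map_rid(&pdev->dev, rid);
 *
 * iort_get_device_domain() resolves the ITS group reachable from the device's
 * root complex node and returns the irq_domain registered for its first ITS
 * identifier; iort_msi_map_rid() returns the device ID the ITS will see for
 * that requester.
 */
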
static void iort_set_device_domain(struct device *dev,
				   struct acpi_iort_node *node)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *msi_parent;
	struct acpi_iort_id_mapping *map;
	struct fwnode_handle *iort_fwnode;
	struct irq_domain *domain;
	int index;

	index = iort_get_id_mapping_index(node);
	if (index < 0)
		return;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference ||
	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
		       node, node->type);
		return;
	}

	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				  map->output_reference);

	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
		return;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return;

	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
	if (domain)
		dev_set_msi_domain(dev, domain);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent = NULL;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}

static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
					void *data)
{
	u32 *rid = data;

	*rid = alias;
	return 0;
}

static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
			       struct fwnode_handle *fwnode,
			       const struct iommu_ops *ops)
{
	int ret = iommu_fwspec_init(dev, fwnode, ops);

	if (!ret)
		ret = iommu_fwspec_add_ids(dev, &streamid, 1);

	return ret;
}

static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_BUILTIN(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}

#ifdef CONFIG_IOMMU_API
static inline const struct iommu_ops *iort_fwspec_iommu_ops(
				struct iommu_fwspec *fwspec)
{
	return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

static inline int iort_add_device_replay(const struct iommu_ops *ops,
					 struct device *dev)
{
	int err = 0;

	if (ops->add_device && dev->bus && !dev->iommu_group)
		err = ops->add_device(dev);

	return err;
}
#else
static inline const struct iommu_ops *iort_fwspec_iommu_ops(
				struct iommu_fwspec *fwspec)
{ return NULL; }
static inline int iort_add_device_replay(const struct iommu_ops *ops,
					 struct device *dev)
{ return 0; }
#endif

static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
			    u32 streamid)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *iort_fwnode;

	if (!node)
		return -ENODEV;

	iort_fwnode = iort_get_fwnode(node);
	if (!iort_fwnode)
		return -ENODEV;

	/*
	 * If the ops look-up fails, this means that either
	 * the SMMU drivers have not been probed yet or that
	 * the SMMU drivers are not built in the kernel;
	 * defer the IOMMU configuration if the drivers are
	 * built-in (they may still probe), abort it otherwise.
	 */
	ops = iommu_ops_from_fwnode(iort_fwnode);
	if (!ops)
		return iort_iommu_driver_enabled(node->type) ?
			-EPROBE_DEFER : -ENODEV;

	return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
}

struct iort_pci_alias_info {
	struct device *dev;
	struct acpi_iort_node *node;
};

static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct iort_pci_alias_info *info = data;
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(info->node, alias, &streamid,
				  IORT_IOMMU_TYPE);
	return iort_iommu_xlate(info->dev, parent, streamid);
}

static int nc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_named_component *ncomp;

	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return -ENODEV;

	ncomp = (struct acpi_iort_named_component *)node->node_data;

	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
			1ULL << ncomp->memory_address_limit;

	return 0;
}

/**
 * iort_dma_setup() - Set-up device DMA parameters.
 *
 * @dev: device to configure
 * @dma_addr: device DMA address result pointer
 * @dma_size: DMA range size result pointer
 */
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
	u64 mask, dmaaddr = 0, size = 0, offset = 0;
	int ret, msb;

	/*
	 * Set default coherent_dma_mask to 32 bit.  Drivers are expected to
	 * setup the correct supported mask.
	 */
	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = DMA_BIT_MASK(32);

	/*
	 * Set it to coherent_dma_mask by default if the architecture
	 * code has not set it.
	 */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);

	if (dev_is_pci(dev))
		ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
	else
		ret = nc_dma_get_range(dev, &size);

	if (!ret) {
		msb = fls64(dmaaddr + size - 1);
		/*
		 * Round-up to the power-of-two mask or set
		 * the mask to the whole 64-bit address space
		 * in case the DMA region covers the full
		 * memory window.
		 */
		mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
		/*
		 * Limit coherent and dma mask based on size
		 * retrieved from firmware.
		 */
		dev->coherent_dma_mask = mask;
		*dev->dma_mask = mask;
	}

	*dma_addr = dmaaddr;
	*dma_size = size;

	dev->dma_pfn_offset = PFN_DOWN(offset);
	dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}

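/*
 * Worked example (hypothetical firmware values, for illustration only): if
 * acpi_dma_get_range() reports dmaaddr = 0 and size = 0x80000000 (2 GiB),
 * then fls64(0x7fffffff) = 31, so both DMA masks become
 * (1ULL << 31) - 1 = 0x7fffffff. A range whose highest address needs all
 * 64 bits (msb == 64) gets U64_MAX instead, i.e. the whole address space.
 */
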
/**
 * iort_iommu_configure - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
	struct acpi_iort_node *node, *parent;
	const struct iommu_ops *ops;
	u32 streamid = 0;
	int err = -ENODEV;

	/*
	 * If we already translated the fwspec there
	 * is nothing left to do, return the iommu_ops.
	 */
	ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
	if (ops)
		return ops;

	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct iort_pci_alias_info info = { .dev = dev };

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return NULL;

		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     iort_pci_iommu_init, &info);
	} else {
		int i = 0;

		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return NULL;

		do {
			parent = iort_node_map_platform_id(node, &streamid,
							   IORT_IOMMU_TYPE,
							   i++);

			if (parent)
				err = iort_iommu_xlate(dev, parent, streamid);
		} while (parent && !err);
	}

	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	if (!err) {
		ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
		err = iort_add_device_replay(ops, dev);
	}

	/* Ignore all other errors apart from EPROBE_DEFER */
	if (err == -EPROBE_DEFER) {
		ops = ERR_PTR(err);
	} else if (err) {
		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
		ops = NULL;
	}

	return ops;
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
		       name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * The Cavium ThunderX2 implementation doesn't support unique irq
	 * lines. Use a single irq line for all the SMMUv3 interrupts.
	 */
	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return false;

	/*
	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
	 * SPI numbers here.
	 */
	return smmu->event_gsiv == smmu->pri_gsiv &&
	       smmu->event_gsiv == smmu->gerr_gsiv &&
	       smmu->event_gsiv == smmu->sync_gsiv;
}

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Override the size, for Cavium ThunderX2 implementation
	 * which doesn't support the page 1 SMMU register space.
	 */
	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return SZ_64K;

	return SZ_128K;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address +
				arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;
	if (arm_smmu_v3_is_combined_irq(smmu)) {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {

		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}

static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}

#if defined(CONFIG_ACPI_NUMA)
/*
 * set numa proximity domain for smmuv3 device
 */
static void __init arm_smmu_v3_set_proximity(struct device *dev,
					     struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
		set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
			smmu->base_address,
			smmu->pxm);
	}
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}

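/*
 * Worked example (hypothetical values, for illustration only): an SMMUv1/v2
 * node with context_interrupt_count = 4 yields 4 + 2 = 6 resources, laid out
 * by arm_smmu_init_resources() below as one IORESOURCE_MEM entry, one
 * "arm-smmu-global" IRQ and four "arm-smmu-context" IRQs.
 */
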
static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}

struct iort_dev_config {
	const char *name;
	int (*dev_init)(struct acpi_iort_node *node);
	bool (*dev_is_coherent)(struct acpi_iort_node *node);
	int (*dev_count_resources)(struct acpi_iort_node *node);
	void (*dev_init_resources)(struct resource *res,
				   struct acpi_iort_node *node);
	void (*dev_set_proximity)(struct device *dev,
				  struct acpi_iort_node *node);
};

static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.dev_is_coherent = arm_smmu_v3_is_coherent,
	.dev_count_resources = arm_smmu_v3_count_resources,
	.dev_init_resources = arm_smmu_v3_init_resources,
	.dev_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.dev_is_coherent = arm_smmu_is_coherent,
	.dev_count_resources = arm_smmu_count_resources,
	.dev_init_resources = arm_smmu_init_resources
};

static __init const struct iort_dev_config *iort_get_dev_cfg(
		struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	default:
		return NULL;
	}
}

/**
 * iort_add_platform_device() - Allocate a platform device for IORT node
 * @node: Pointer to device ACPI IORT node
 * @ops: Pointer to IORT device config struct
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_platform_device(struct acpi_iort_node *node,
					    const struct iort_dev_config *ops)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	enum dev_dma_attr attr;
	int ret, count;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	if (ops->dev_set_proximity)
		ops->dev_set_proximity(&pdev->dev, node);

	count = ops->dev_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->dev_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Add a copy of IORT node pointer to platform_data to
	 * be used to retrieve IORT data information.
	 */
	ret = platform_device_add_data(pdev, &node, sizeof(node));
	if (ret)
		goto dev_put;

	/*
	 * We expect the dma masks to be equivalent for
	 * all SMMU set-ups
	 */
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	fwnode = iort_get_fwnode(node);

	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(&pdev->dev, attr);

	iort_set_device_domain(&pdev->dev, node);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	acpi_dma_deconfigure(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_node *parent;
		struct acpi_iort_id_mapping *map;
		int i;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
				   iort_node->mapping_offset);

		for (i = 0; i < iort_node->mapping_count; i++, map++) {
			if (!map->output_reference)
				continue;

			parent = ACPI_ADD_PTR(struct acpi_iort_node,
					iort_table, map->output_reference);
			/*
			 * If we detect a RC->SMMU mapping, make sure
			 * we enable ACS on the system.
			 */
			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
				pci_request_acs();
				return true;
			}
		}
	}

	return false;
}

static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;
	bool acs_enabled = false;
	const struct iort_dev_config *ops;

	/*
	 * iort_table and iort both point to the start of IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		if (!acs_enabled)
			acs_enabled = iort_enable_acs(iort_node);

		ops = iort_get_dev_cfg(iort_node);
		if (ops) {
			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_platform_device(iort_node, ops);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();
}