1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 4 * Author: Joerg Roedel <jroedel@suse.de> 5 */ 6 7 #define pr_fmt(fmt) "iommu: " fmt 8 9 #include <linux/amba/bus.h> 10 #include <linux/device.h> 11 #include <linux/kernel.h> 12 #include <linux/bits.h> 13 #include <linux/bug.h> 14 #include <linux/types.h> 15 #include <linux/init.h> 16 #include <linux/export.h> 17 #include <linux/slab.h> 18 #include <linux/errno.h> 19 #include <linux/host1x_context_bus.h> 20 #include <linux/iommu.h> 21 #include <linux/idr.h> 22 #include <linux/err.h> 23 #include <linux/pci.h> 24 #include <linux/pci-ats.h> 25 #include <linux/bitops.h> 26 #include <linux/platform_device.h> 27 #include <linux/property.h> 28 #include <linux/fsl/mc.h> 29 #include <linux/module.h> 30 #include <linux/cc_platform.h> 31 #include <linux/cdx/cdx_bus.h> 32 #include <trace/events/iommu.h> 33 #include <linux/sched/mm.h> 34 #include <linux/msi.h> 35 36 #include "dma-iommu.h" 37 #include "iommu-priv.h" 38 39 #include "iommu-sva.h" 40 41 static struct kset *iommu_group_kset; 42 static DEFINE_IDA(iommu_group_ida); 43 static DEFINE_IDA(iommu_global_pasid_ida); 44 45 static unsigned int iommu_def_domain_type __read_mostly; 46 static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT); 47 static u32 iommu_cmd_line __read_mostly; 48 49 struct iommu_group { 50 struct kobject kobj; 51 struct kobject *devices_kobj; 52 struct list_head devices; 53 struct xarray pasid_array; 54 struct mutex mutex; 55 void *iommu_data; 56 void (*iommu_data_release)(void *iommu_data); 57 char *name; 58 int id; 59 struct iommu_domain *default_domain; 60 struct iommu_domain *blocking_domain; 61 struct iommu_domain *domain; 62 struct list_head entry; 63 unsigned int owner_cnt; 64 void *owner; 65 }; 66 67 struct group_device { 68 struct list_head list; 69 struct device *dev; 70 char *name; 71 }; 72 73 /* Iterate over each struct group_device in a struct iommu_group */ 74 #define for_each_group_device(group, pos) \ 75 list_for_each_entry(pos, &(group)->devices, list) 76 77 struct iommu_group_attribute { 78 struct attribute attr; 79 ssize_t (*show)(struct iommu_group *group, char *buf); 80 ssize_t (*store)(struct iommu_group *group, 81 const char *buf, size_t count); 82 }; 83 84 static const char * const iommu_group_resv_type_string[] = { 85 [IOMMU_RESV_DIRECT] = "direct", 86 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable", 87 [IOMMU_RESV_RESERVED] = "reserved", 88 [IOMMU_RESV_MSI] = "msi", 89 [IOMMU_RESV_SW_MSI] = "msi", 90 }; 91 92 #define IOMMU_CMD_LINE_DMA_API BIT(0) 93 #define IOMMU_CMD_LINE_STRICT BIT(1) 94 95 static int iommu_bus_notifier(struct notifier_block *nb, 96 unsigned long action, void *data); 97 static void iommu_release_device(struct device *dev); 98 static struct iommu_domain * 99 __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type); 100 static int __iommu_attach_device(struct iommu_domain *domain, 101 struct device *dev); 102 static int __iommu_attach_group(struct iommu_domain *domain, 103 struct iommu_group *group); 104 105 enum { 106 IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0, 107 }; 108 109 static int __iommu_device_set_domain(struct iommu_group *group, 110 struct device *dev, 111 struct iommu_domain *new_domain, 112 unsigned int flags); 113 static int __iommu_group_set_domain_internal(struct iommu_group *group, 114 struct iommu_domain *new_domain, 115 unsigned int flags); 116 static int __iommu_group_set_domain(struct iommu_group *group, 117 struct iommu_domain *new_domain) 118 { 119 return __iommu_group_set_domain_internal(group, new_domain, 0); 120 } 121 static void __iommu_group_set_domain_nofail(struct iommu_group *group, 122 struct iommu_domain *new_domain) 123 { 124 WARN_ON(__iommu_group_set_domain_internal( 125 group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED)); 126 } 127 128 static int iommu_setup_default_domain(struct iommu_group *group, 129 int target_type); 130 static int iommu_create_device_direct_mappings(struct iommu_domain *domain, 131 struct device *dev); 132 static ssize_t iommu_group_store_type(struct iommu_group *group, 133 const char *buf, size_t count); 134 static struct group_device *iommu_group_alloc_device(struct iommu_group *group, 135 struct device *dev); 136 static void __iommu_group_free_device(struct iommu_group *group, 137 struct group_device *grp_dev); 138 139 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 140 struct iommu_group_attribute iommu_group_attr_##_name = \ 141 __ATTR(_name, _mode, _show, _store) 142 143 #define to_iommu_group_attr(_attr) \ 144 container_of(_attr, struct iommu_group_attribute, attr) 145 #define to_iommu_group(_kobj) \ 146 container_of(_kobj, struct iommu_group, kobj) 147 148 static LIST_HEAD(iommu_device_list); 149 static DEFINE_SPINLOCK(iommu_device_lock); 150 151 static struct bus_type * const iommu_buses[] = { 152 &platform_bus_type, 153 #ifdef CONFIG_PCI 154 &pci_bus_type, 155 #endif 156 #ifdef CONFIG_ARM_AMBA 157 &amba_bustype, 158 #endif 159 #ifdef CONFIG_FSL_MC_BUS 160 &fsl_mc_bus_type, 161 #endif 162 #ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS 163 &host1x_context_device_bus_type, 164 #endif 165 #ifdef CONFIG_CDX_BUS 166 &cdx_bus_type, 167 #endif 168 }; 169 170 /* 171 * Use a function instead of an array here because the domain-type is a 172 * bit-field, so an array would waste memory. 173 */ 174 static const char *iommu_domain_type_str(unsigned int t) 175 { 176 switch (t) { 177 case IOMMU_DOMAIN_BLOCKED: 178 return "Blocked"; 179 case IOMMU_DOMAIN_IDENTITY: 180 return "Passthrough"; 181 case IOMMU_DOMAIN_UNMANAGED: 182 return "Unmanaged"; 183 case IOMMU_DOMAIN_DMA: 184 case IOMMU_DOMAIN_DMA_FQ: 185 return "Translated"; 186 case IOMMU_DOMAIN_PLATFORM: 187 return "Platform"; 188 default: 189 return "Unknown"; 190 } 191 } 192 193 static int __init iommu_subsys_init(void) 194 { 195 struct notifier_block *nb; 196 197 if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) { 198 if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH)) 199 iommu_set_default_passthrough(false); 200 else 201 iommu_set_default_translated(false); 202 203 if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { 204 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n"); 205 iommu_set_default_translated(false); 206 } 207 } 208 209 if (!iommu_default_passthrough() && !iommu_dma_strict) 210 iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ; 211 212 pr_info("Default domain type: %s%s\n", 213 iommu_domain_type_str(iommu_def_domain_type), 214 (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ? 215 " (set via kernel command line)" : ""); 216 217 if (!iommu_default_passthrough()) 218 pr_info("DMA domain TLB invalidation policy: %s mode%s\n", 219 iommu_dma_strict ? "strict" : "lazy", 220 (iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ? 221 " (set via kernel command line)" : ""); 222 223 nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL); 224 if (!nb) 225 return -ENOMEM; 226 227 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { 228 nb[i].notifier_call = iommu_bus_notifier; 229 bus_register_notifier(iommu_buses[i], &nb[i]); 230 } 231 232 return 0; 233 } 234 subsys_initcall(iommu_subsys_init); 235 236 static int remove_iommu_group(struct device *dev, void *data) 237 { 238 if (dev->iommu && dev->iommu->iommu_dev == data) 239 iommu_release_device(dev); 240 241 return 0; 242 } 243 244 /** 245 * iommu_device_register() - Register an IOMMU hardware instance 246 * @iommu: IOMMU handle for the instance 247 * @ops: IOMMU ops to associate with the instance 248 * @hwdev: (optional) actual instance device, used for fwnode lookup 249 * 250 * Return: 0 on success, or an error. 251 */ 252 int iommu_device_register(struct iommu_device *iommu, 253 const struct iommu_ops *ops, struct device *hwdev) 254 { 255 int err = 0; 256 257 /* We need to be able to take module references appropriately */ 258 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) 259 return -EINVAL; 260 /* 261 * Temporarily enforce global restriction to a single driver. This was 262 * already the de-facto behaviour, since any possible combination of 263 * existing drivers would compete for at least the PCI or platform bus. 264 */ 265 if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops) 266 return -EBUSY; 267 268 iommu->ops = ops; 269 if (hwdev) 270 iommu->fwnode = dev_fwnode(hwdev); 271 272 spin_lock(&iommu_device_lock); 273 list_add_tail(&iommu->list, &iommu_device_list); 274 spin_unlock(&iommu_device_lock); 275 276 for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) { 277 iommu_buses[i]->iommu_ops = ops; 278 err = bus_iommu_probe(iommu_buses[i]); 279 } 280 if (err) 281 iommu_device_unregister(iommu); 282 return err; 283 } 284 EXPORT_SYMBOL_GPL(iommu_device_register); 285 286 void iommu_device_unregister(struct iommu_device *iommu) 287 { 288 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) 289 bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group); 290 291 spin_lock(&iommu_device_lock); 292 list_del(&iommu->list); 293 spin_unlock(&iommu_device_lock); 294 295 /* Pairs with the alloc in generic_single_device_group() */ 296 iommu_group_put(iommu->singleton_group); 297 iommu->singleton_group = NULL; 298 } 299 EXPORT_SYMBOL_GPL(iommu_device_unregister); 300 301 #if IS_ENABLED(CONFIG_IOMMUFD_TEST) 302 void iommu_device_unregister_bus(struct iommu_device *iommu, 303 struct bus_type *bus, 304 struct notifier_block *nb) 305 { 306 bus_unregister_notifier(bus, nb); 307 iommu_device_unregister(iommu); 308 } 309 EXPORT_SYMBOL_GPL(iommu_device_unregister_bus); 310 311 /* 312 * Register an iommu driver against a single bus. This is only used by iommufd 313 * selftest to create a mock iommu driver. The caller must provide 314 * some memory to hold a notifier_block. 315 */ 316 int iommu_device_register_bus(struct iommu_device *iommu, 317 const struct iommu_ops *ops, struct bus_type *bus, 318 struct notifier_block *nb) 319 { 320 int err; 321 322 iommu->ops = ops; 323 nb->notifier_call = iommu_bus_notifier; 324 err = bus_register_notifier(bus, nb); 325 if (err) 326 return err; 327 328 spin_lock(&iommu_device_lock); 329 list_add_tail(&iommu->list, &iommu_device_list); 330 spin_unlock(&iommu_device_lock); 331 332 bus->iommu_ops = ops; 333 err = bus_iommu_probe(bus); 334 if (err) { 335 iommu_device_unregister_bus(iommu, bus, nb); 336 return err; 337 } 338 return 0; 339 } 340 EXPORT_SYMBOL_GPL(iommu_device_register_bus); 341 #endif 342 343 static struct dev_iommu *dev_iommu_get(struct device *dev) 344 { 345 struct dev_iommu *param = dev->iommu; 346 347 if (param) 348 return param; 349 350 param = kzalloc(sizeof(*param), GFP_KERNEL); 351 if (!param) 352 return NULL; 353 354 mutex_init(¶m->lock); 355 dev->iommu = param; 356 return param; 357 } 358 359 static void dev_iommu_free(struct device *dev) 360 { 361 struct dev_iommu *param = dev->iommu; 362 363 dev->iommu = NULL; 364 if (param->fwspec) { 365 fwnode_handle_put(param->fwspec->iommu_fwnode); 366 kfree(param->fwspec); 367 } 368 kfree(param); 369 } 370 371 static u32 dev_iommu_get_max_pasids(struct device *dev) 372 { 373 u32 max_pasids = 0, bits = 0; 374 int ret; 375 376 if (dev_is_pci(dev)) { 377 ret = pci_max_pasids(to_pci_dev(dev)); 378 if (ret > 0) 379 max_pasids = ret; 380 } else { 381 ret = device_property_read_u32(dev, "pasid-num-bits", &bits); 382 if (!ret) 383 max_pasids = 1UL << bits; 384 } 385 386 return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids); 387 } 388 389 /* 390 * Init the dev->iommu and dev->iommu_group in the struct device and get the 391 * driver probed 392 */ 393 static int iommu_init_device(struct device *dev, const struct iommu_ops *ops) 394 { 395 struct iommu_device *iommu_dev; 396 struct iommu_group *group; 397 int ret; 398 399 if (!dev_iommu_get(dev)) 400 return -ENOMEM; 401 402 if (!try_module_get(ops->owner)) { 403 ret = -EINVAL; 404 goto err_free; 405 } 406 407 iommu_dev = ops->probe_device(dev); 408 if (IS_ERR(iommu_dev)) { 409 ret = PTR_ERR(iommu_dev); 410 goto err_module_put; 411 } 412 dev->iommu->iommu_dev = iommu_dev; 413 414 ret = iommu_device_link(iommu_dev, dev); 415 if (ret) 416 goto err_release; 417 418 group = ops->device_group(dev); 419 if (WARN_ON_ONCE(group == NULL)) 420 group = ERR_PTR(-EINVAL); 421 if (IS_ERR(group)) { 422 ret = PTR_ERR(group); 423 goto err_unlink; 424 } 425 dev->iommu_group = group; 426 427 dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev); 428 if (ops->is_attach_deferred) 429 dev->iommu->attach_deferred = ops->is_attach_deferred(dev); 430 return 0; 431 432 err_unlink: 433 iommu_device_unlink(iommu_dev, dev); 434 err_release: 435 if (ops->release_device) 436 ops->release_device(dev); 437 err_module_put: 438 module_put(ops->owner); 439 err_free: 440 dev->iommu->iommu_dev = NULL; 441 dev_iommu_free(dev); 442 return ret; 443 } 444 445 static void iommu_deinit_device(struct device *dev) 446 { 447 struct iommu_group *group = dev->iommu_group; 448 const struct iommu_ops *ops = dev_iommu_ops(dev); 449 450 lockdep_assert_held(&group->mutex); 451 452 iommu_device_unlink(dev->iommu->iommu_dev, dev); 453 454 /* 455 * release_device() must stop using any attached domain on the device. 456 * If there are still other devices in the group they are not effected 457 * by this callback. 458 * 459 * The IOMMU driver must set the device to either an identity or 460 * blocking translation and stop using any domain pointer, as it is 461 * going to be freed. 462 */ 463 if (ops->release_device) 464 ops->release_device(dev); 465 466 /* 467 * If this is the last driver to use the group then we must free the 468 * domains before we do the module_put(). 469 */ 470 if (list_empty(&group->devices)) { 471 if (group->default_domain) { 472 iommu_domain_free(group->default_domain); 473 group->default_domain = NULL; 474 } 475 if (group->blocking_domain) { 476 iommu_domain_free(group->blocking_domain); 477 group->blocking_domain = NULL; 478 } 479 group->domain = NULL; 480 } 481 482 /* Caller must put iommu_group */ 483 dev->iommu_group = NULL; 484 module_put(ops->owner); 485 dev_iommu_free(dev); 486 } 487 488 DEFINE_MUTEX(iommu_probe_device_lock); 489 490 static int __iommu_probe_device(struct device *dev, struct list_head *group_list) 491 { 492 const struct iommu_ops *ops = dev->bus->iommu_ops; 493 struct iommu_group *group; 494 struct group_device *gdev; 495 int ret; 496 497 if (!ops) 498 return -ENODEV; 499 /* 500 * Serialise to avoid races between IOMMU drivers registering in 501 * parallel and/or the "replay" calls from ACPI/OF code via client 502 * driver probe. Once the latter have been cleaned up we should 503 * probably be able to use device_lock() here to minimise the scope, 504 * but for now enforcing a simple global ordering is fine. 505 */ 506 lockdep_assert_held(&iommu_probe_device_lock); 507 508 /* Device is probed already if in a group */ 509 if (dev->iommu_group) 510 return 0; 511 512 ret = iommu_init_device(dev, ops); 513 if (ret) 514 return ret; 515 516 group = dev->iommu_group; 517 gdev = iommu_group_alloc_device(group, dev); 518 mutex_lock(&group->mutex); 519 if (IS_ERR(gdev)) { 520 ret = PTR_ERR(gdev); 521 goto err_put_group; 522 } 523 524 /* 525 * The gdev must be in the list before calling 526 * iommu_setup_default_domain() 527 */ 528 list_add_tail(&gdev->list, &group->devices); 529 WARN_ON(group->default_domain && !group->domain); 530 if (group->default_domain) 531 iommu_create_device_direct_mappings(group->default_domain, dev); 532 if (group->domain) { 533 ret = __iommu_device_set_domain(group, dev, group->domain, 0); 534 if (ret) 535 goto err_remove_gdev; 536 } else if (!group->default_domain && !group_list) { 537 ret = iommu_setup_default_domain(group, 0); 538 if (ret) 539 goto err_remove_gdev; 540 } else if (!group->default_domain) { 541 /* 542 * With a group_list argument we defer the default_domain setup 543 * to the caller by providing a de-duplicated list of groups 544 * that need further setup. 545 */ 546 if (list_empty(&group->entry)) 547 list_add_tail(&group->entry, group_list); 548 } 549 mutex_unlock(&group->mutex); 550 551 if (dev_is_pci(dev)) 552 iommu_dma_set_pci_32bit_workaround(dev); 553 554 return 0; 555 556 err_remove_gdev: 557 list_del(&gdev->list); 558 __iommu_group_free_device(group, gdev); 559 err_put_group: 560 iommu_deinit_device(dev); 561 mutex_unlock(&group->mutex); 562 iommu_group_put(group); 563 564 return ret; 565 } 566 567 int iommu_probe_device(struct device *dev) 568 { 569 const struct iommu_ops *ops; 570 int ret; 571 572 mutex_lock(&iommu_probe_device_lock); 573 ret = __iommu_probe_device(dev, NULL); 574 mutex_unlock(&iommu_probe_device_lock); 575 if (ret) 576 return ret; 577 578 ops = dev_iommu_ops(dev); 579 if (ops->probe_finalize) 580 ops->probe_finalize(dev); 581 582 return 0; 583 } 584 585 static void __iommu_group_free_device(struct iommu_group *group, 586 struct group_device *grp_dev) 587 { 588 struct device *dev = grp_dev->dev; 589 590 sysfs_remove_link(group->devices_kobj, grp_dev->name); 591 sysfs_remove_link(&dev->kobj, "iommu_group"); 592 593 trace_remove_device_from_group(group->id, dev); 594 595 /* 596 * If the group has become empty then ownership must have been 597 * released, and the current domain must be set back to NULL or 598 * the default domain. 599 */ 600 if (list_empty(&group->devices)) 601 WARN_ON(group->owner_cnt || 602 group->domain != group->default_domain); 603 604 kfree(grp_dev->name); 605 kfree(grp_dev); 606 } 607 608 /* Remove the iommu_group from the struct device. */ 609 static void __iommu_group_remove_device(struct device *dev) 610 { 611 struct iommu_group *group = dev->iommu_group; 612 struct group_device *device; 613 614 mutex_lock(&group->mutex); 615 for_each_group_device(group, device) { 616 if (device->dev != dev) 617 continue; 618 619 list_del(&device->list); 620 __iommu_group_free_device(group, device); 621 if (dev->iommu && dev->iommu->iommu_dev) 622 iommu_deinit_device(dev); 623 else 624 dev->iommu_group = NULL; 625 break; 626 } 627 mutex_unlock(&group->mutex); 628 629 /* 630 * Pairs with the get in iommu_init_device() or 631 * iommu_group_add_device() 632 */ 633 iommu_group_put(group); 634 } 635 636 static void iommu_release_device(struct device *dev) 637 { 638 struct iommu_group *group = dev->iommu_group; 639 640 if (group) 641 __iommu_group_remove_device(dev); 642 643 /* Free any fwspec if no iommu_driver was ever attached */ 644 if (dev->iommu) 645 dev_iommu_free(dev); 646 } 647 648 static int __init iommu_set_def_domain_type(char *str) 649 { 650 bool pt; 651 int ret; 652 653 ret = kstrtobool(str, &pt); 654 if (ret) 655 return ret; 656 657 if (pt) 658 iommu_set_default_passthrough(true); 659 else 660 iommu_set_default_translated(true); 661 662 return 0; 663 } 664 early_param("iommu.passthrough", iommu_set_def_domain_type); 665 666 static int __init iommu_dma_setup(char *str) 667 { 668 int ret = kstrtobool(str, &iommu_dma_strict); 669 670 if (!ret) 671 iommu_cmd_line |= IOMMU_CMD_LINE_STRICT; 672 return ret; 673 } 674 early_param("iommu.strict", iommu_dma_setup); 675 676 void iommu_set_dma_strict(void) 677 { 678 iommu_dma_strict = true; 679 if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ) 680 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 681 } 682 683 static ssize_t iommu_group_attr_show(struct kobject *kobj, 684 struct attribute *__attr, char *buf) 685 { 686 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 687 struct iommu_group *group = to_iommu_group(kobj); 688 ssize_t ret = -EIO; 689 690 if (attr->show) 691 ret = attr->show(group, buf); 692 return ret; 693 } 694 695 static ssize_t iommu_group_attr_store(struct kobject *kobj, 696 struct attribute *__attr, 697 const char *buf, size_t count) 698 { 699 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 700 struct iommu_group *group = to_iommu_group(kobj); 701 ssize_t ret = -EIO; 702 703 if (attr->store) 704 ret = attr->store(group, buf, count); 705 return ret; 706 } 707 708 static const struct sysfs_ops iommu_group_sysfs_ops = { 709 .show = iommu_group_attr_show, 710 .store = iommu_group_attr_store, 711 }; 712 713 static int iommu_group_create_file(struct iommu_group *group, 714 struct iommu_group_attribute *attr) 715 { 716 return sysfs_create_file(&group->kobj, &attr->attr); 717 } 718 719 static void iommu_group_remove_file(struct iommu_group *group, 720 struct iommu_group_attribute *attr) 721 { 722 sysfs_remove_file(&group->kobj, &attr->attr); 723 } 724 725 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) 726 { 727 return sysfs_emit(buf, "%s\n", group->name); 728 } 729 730 /** 731 * iommu_insert_resv_region - Insert a new region in the 732 * list of reserved regions. 733 * @new: new region to insert 734 * @regions: list of regions 735 * 736 * Elements are sorted by start address and overlapping segments 737 * of the same type are merged. 738 */ 739 static int iommu_insert_resv_region(struct iommu_resv_region *new, 740 struct list_head *regions) 741 { 742 struct iommu_resv_region *iter, *tmp, *nr, *top; 743 LIST_HEAD(stack); 744 745 nr = iommu_alloc_resv_region(new->start, new->length, 746 new->prot, new->type, GFP_KERNEL); 747 if (!nr) 748 return -ENOMEM; 749 750 /* First add the new element based on start address sorting */ 751 list_for_each_entry(iter, regions, list) { 752 if (nr->start < iter->start || 753 (nr->start == iter->start && nr->type <= iter->type)) 754 break; 755 } 756 list_add_tail(&nr->list, &iter->list); 757 758 /* Merge overlapping segments of type nr->type in @regions, if any */ 759 list_for_each_entry_safe(iter, tmp, regions, list) { 760 phys_addr_t top_end, iter_end = iter->start + iter->length - 1; 761 762 /* no merge needed on elements of different types than @new */ 763 if (iter->type != new->type) { 764 list_move_tail(&iter->list, &stack); 765 continue; 766 } 767 768 /* look for the last stack element of same type as @iter */ 769 list_for_each_entry_reverse(top, &stack, list) 770 if (top->type == iter->type) 771 goto check_overlap; 772 773 list_move_tail(&iter->list, &stack); 774 continue; 775 776 check_overlap: 777 top_end = top->start + top->length - 1; 778 779 if (iter->start > top_end + 1) { 780 list_move_tail(&iter->list, &stack); 781 } else { 782 top->length = max(top_end, iter_end) - top->start + 1; 783 list_del(&iter->list); 784 kfree(iter); 785 } 786 } 787 list_splice(&stack, regions); 788 return 0; 789 } 790 791 static int 792 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, 793 struct list_head *group_resv_regions) 794 { 795 struct iommu_resv_region *entry; 796 int ret = 0; 797 798 list_for_each_entry(entry, dev_resv_regions, list) { 799 ret = iommu_insert_resv_region(entry, group_resv_regions); 800 if (ret) 801 break; 802 } 803 return ret; 804 } 805 806 int iommu_get_group_resv_regions(struct iommu_group *group, 807 struct list_head *head) 808 { 809 struct group_device *device; 810 int ret = 0; 811 812 mutex_lock(&group->mutex); 813 for_each_group_device(group, device) { 814 struct list_head dev_resv_regions; 815 816 /* 817 * Non-API groups still expose reserved_regions in sysfs, 818 * so filter out calls that get here that way. 819 */ 820 if (!device->dev->iommu) 821 break; 822 823 INIT_LIST_HEAD(&dev_resv_regions); 824 iommu_get_resv_regions(device->dev, &dev_resv_regions); 825 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); 826 iommu_put_resv_regions(device->dev, &dev_resv_regions); 827 if (ret) 828 break; 829 } 830 mutex_unlock(&group->mutex); 831 return ret; 832 } 833 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); 834 835 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, 836 char *buf) 837 { 838 struct iommu_resv_region *region, *next; 839 struct list_head group_resv_regions; 840 int offset = 0; 841 842 INIT_LIST_HEAD(&group_resv_regions); 843 iommu_get_group_resv_regions(group, &group_resv_regions); 844 845 list_for_each_entry_safe(region, next, &group_resv_regions, list) { 846 offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n", 847 (long long)region->start, 848 (long long)(region->start + 849 region->length - 1), 850 iommu_group_resv_type_string[region->type]); 851 kfree(region); 852 } 853 854 return offset; 855 } 856 857 static ssize_t iommu_group_show_type(struct iommu_group *group, 858 char *buf) 859 { 860 char *type = "unknown"; 861 862 mutex_lock(&group->mutex); 863 if (group->default_domain) { 864 switch (group->default_domain->type) { 865 case IOMMU_DOMAIN_BLOCKED: 866 type = "blocked"; 867 break; 868 case IOMMU_DOMAIN_IDENTITY: 869 type = "identity"; 870 break; 871 case IOMMU_DOMAIN_UNMANAGED: 872 type = "unmanaged"; 873 break; 874 case IOMMU_DOMAIN_DMA: 875 type = "DMA"; 876 break; 877 case IOMMU_DOMAIN_DMA_FQ: 878 type = "DMA-FQ"; 879 break; 880 } 881 } 882 mutex_unlock(&group->mutex); 883 884 return sysfs_emit(buf, "%s\n", type); 885 } 886 887 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); 888 889 static IOMMU_GROUP_ATTR(reserved_regions, 0444, 890 iommu_group_show_resv_regions, NULL); 891 892 static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type, 893 iommu_group_store_type); 894 895 static void iommu_group_release(struct kobject *kobj) 896 { 897 struct iommu_group *group = to_iommu_group(kobj); 898 899 pr_debug("Releasing group %d\n", group->id); 900 901 if (group->iommu_data_release) 902 group->iommu_data_release(group->iommu_data); 903 904 ida_free(&iommu_group_ida, group->id); 905 906 /* Domains are free'd by iommu_deinit_device() */ 907 WARN_ON(group->default_domain); 908 WARN_ON(group->blocking_domain); 909 910 kfree(group->name); 911 kfree(group); 912 } 913 914 static const struct kobj_type iommu_group_ktype = { 915 .sysfs_ops = &iommu_group_sysfs_ops, 916 .release = iommu_group_release, 917 }; 918 919 /** 920 * iommu_group_alloc - Allocate a new group 921 * 922 * This function is called by an iommu driver to allocate a new iommu 923 * group. The iommu group represents the minimum granularity of the iommu. 924 * Upon successful return, the caller holds a reference to the supplied 925 * group in order to hold the group until devices are added. Use 926 * iommu_group_put() to release this extra reference count, allowing the 927 * group to be automatically reclaimed once it has no devices or external 928 * references. 929 */ 930 struct iommu_group *iommu_group_alloc(void) 931 { 932 struct iommu_group *group; 933 int ret; 934 935 group = kzalloc(sizeof(*group), GFP_KERNEL); 936 if (!group) 937 return ERR_PTR(-ENOMEM); 938 939 group->kobj.kset = iommu_group_kset; 940 mutex_init(&group->mutex); 941 INIT_LIST_HEAD(&group->devices); 942 INIT_LIST_HEAD(&group->entry); 943 xa_init(&group->pasid_array); 944 945 ret = ida_alloc(&iommu_group_ida, GFP_KERNEL); 946 if (ret < 0) { 947 kfree(group); 948 return ERR_PTR(ret); 949 } 950 group->id = ret; 951 952 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, 953 NULL, "%d", group->id); 954 if (ret) { 955 kobject_put(&group->kobj); 956 return ERR_PTR(ret); 957 } 958 959 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); 960 if (!group->devices_kobj) { 961 kobject_put(&group->kobj); /* triggers .release & free */ 962 return ERR_PTR(-ENOMEM); 963 } 964 965 /* 966 * The devices_kobj holds a reference on the group kobject, so 967 * as long as that exists so will the group. We can therefore 968 * use the devices_kobj for reference counting. 969 */ 970 kobject_put(&group->kobj); 971 972 ret = iommu_group_create_file(group, 973 &iommu_group_attr_reserved_regions); 974 if (ret) { 975 kobject_put(group->devices_kobj); 976 return ERR_PTR(ret); 977 } 978 979 ret = iommu_group_create_file(group, &iommu_group_attr_type); 980 if (ret) { 981 kobject_put(group->devices_kobj); 982 return ERR_PTR(ret); 983 } 984 985 pr_debug("Allocated group %d\n", group->id); 986 987 return group; 988 } 989 EXPORT_SYMBOL_GPL(iommu_group_alloc); 990 991 /** 992 * iommu_group_get_iommudata - retrieve iommu_data registered for a group 993 * @group: the group 994 * 995 * iommu drivers can store data in the group for use when doing iommu 996 * operations. This function provides a way to retrieve it. Caller 997 * should hold a group reference. 998 */ 999 void *iommu_group_get_iommudata(struct iommu_group *group) 1000 { 1001 return group->iommu_data; 1002 } 1003 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata); 1004 1005 /** 1006 * iommu_group_set_iommudata - set iommu_data for a group 1007 * @group: the group 1008 * @iommu_data: new data 1009 * @release: release function for iommu_data 1010 * 1011 * iommu drivers can store data in the group for use when doing iommu 1012 * operations. This function provides a way to set the data after 1013 * the group has been allocated. Caller should hold a group reference. 1014 */ 1015 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, 1016 void (*release)(void *iommu_data)) 1017 { 1018 group->iommu_data = iommu_data; 1019 group->iommu_data_release = release; 1020 } 1021 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata); 1022 1023 /** 1024 * iommu_group_set_name - set name for a group 1025 * @group: the group 1026 * @name: name 1027 * 1028 * Allow iommu driver to set a name for a group. When set it will 1029 * appear in a name attribute file under the group in sysfs. 1030 */ 1031 int iommu_group_set_name(struct iommu_group *group, const char *name) 1032 { 1033 int ret; 1034 1035 if (group->name) { 1036 iommu_group_remove_file(group, &iommu_group_attr_name); 1037 kfree(group->name); 1038 group->name = NULL; 1039 if (!name) 1040 return 0; 1041 } 1042 1043 group->name = kstrdup(name, GFP_KERNEL); 1044 if (!group->name) 1045 return -ENOMEM; 1046 1047 ret = iommu_group_create_file(group, &iommu_group_attr_name); 1048 if (ret) { 1049 kfree(group->name); 1050 group->name = NULL; 1051 return ret; 1052 } 1053 1054 return 0; 1055 } 1056 EXPORT_SYMBOL_GPL(iommu_group_set_name); 1057 1058 static int iommu_create_device_direct_mappings(struct iommu_domain *domain, 1059 struct device *dev) 1060 { 1061 struct iommu_resv_region *entry; 1062 struct list_head mappings; 1063 unsigned long pg_size; 1064 int ret = 0; 1065 1066 pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0; 1067 INIT_LIST_HEAD(&mappings); 1068 1069 if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size)) 1070 return -EINVAL; 1071 1072 iommu_get_resv_regions(dev, &mappings); 1073 1074 /* We need to consider overlapping regions for different devices */ 1075 list_for_each_entry(entry, &mappings, list) { 1076 dma_addr_t start, end, addr; 1077 size_t map_size = 0; 1078 1079 if (entry->type == IOMMU_RESV_DIRECT) 1080 dev->iommu->require_direct = 1; 1081 1082 if ((entry->type != IOMMU_RESV_DIRECT && 1083 entry->type != IOMMU_RESV_DIRECT_RELAXABLE) || 1084 !iommu_is_dma_domain(domain)) 1085 continue; 1086 1087 start = ALIGN(entry->start, pg_size); 1088 end = ALIGN(entry->start + entry->length, pg_size); 1089 1090 for (addr = start; addr <= end; addr += pg_size) { 1091 phys_addr_t phys_addr; 1092 1093 if (addr == end) 1094 goto map_end; 1095 1096 phys_addr = iommu_iova_to_phys(domain, addr); 1097 if (!phys_addr) { 1098 map_size += pg_size; 1099 continue; 1100 } 1101 1102 map_end: 1103 if (map_size) { 1104 ret = iommu_map(domain, addr - map_size, 1105 addr - map_size, map_size, 1106 entry->prot, GFP_KERNEL); 1107 if (ret) 1108 goto out; 1109 map_size = 0; 1110 } 1111 } 1112 1113 } 1114 1115 if (!list_empty(&mappings) && iommu_is_dma_domain(domain)) 1116 iommu_flush_iotlb_all(domain); 1117 1118 out: 1119 iommu_put_resv_regions(dev, &mappings); 1120 1121 return ret; 1122 } 1123 1124 /* This is undone by __iommu_group_free_device() */ 1125 static struct group_device *iommu_group_alloc_device(struct iommu_group *group, 1126 struct device *dev) 1127 { 1128 int ret, i = 0; 1129 struct group_device *device; 1130 1131 device = kzalloc(sizeof(*device), GFP_KERNEL); 1132 if (!device) 1133 return ERR_PTR(-ENOMEM); 1134 1135 device->dev = dev; 1136 1137 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); 1138 if (ret) 1139 goto err_free_device; 1140 1141 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); 1142 rename: 1143 if (!device->name) { 1144 ret = -ENOMEM; 1145 goto err_remove_link; 1146 } 1147 1148 ret = sysfs_create_link_nowarn(group->devices_kobj, 1149 &dev->kobj, device->name); 1150 if (ret) { 1151 if (ret == -EEXIST && i >= 0) { 1152 /* 1153 * Account for the slim chance of collision 1154 * and append an instance to the name. 1155 */ 1156 kfree(device->name); 1157 device->name = kasprintf(GFP_KERNEL, "%s.%d", 1158 kobject_name(&dev->kobj), i++); 1159 goto rename; 1160 } 1161 goto err_free_name; 1162 } 1163 1164 trace_add_device_to_group(group->id, dev); 1165 1166 dev_info(dev, "Adding to iommu group %d\n", group->id); 1167 1168 return device; 1169 1170 err_free_name: 1171 kfree(device->name); 1172 err_remove_link: 1173 sysfs_remove_link(&dev->kobj, "iommu_group"); 1174 err_free_device: 1175 kfree(device); 1176 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); 1177 return ERR_PTR(ret); 1178 } 1179 1180 /** 1181 * iommu_group_add_device - add a device to an iommu group 1182 * @group: the group into which to add the device (reference should be held) 1183 * @dev: the device 1184 * 1185 * This function is called by an iommu driver to add a device into a 1186 * group. Adding a device increments the group reference count. 1187 */ 1188 int iommu_group_add_device(struct iommu_group *group, struct device *dev) 1189 { 1190 struct group_device *gdev; 1191 1192 gdev = iommu_group_alloc_device(group, dev); 1193 if (IS_ERR(gdev)) 1194 return PTR_ERR(gdev); 1195 1196 iommu_group_ref_get(group); 1197 dev->iommu_group = group; 1198 1199 mutex_lock(&group->mutex); 1200 list_add_tail(&gdev->list, &group->devices); 1201 mutex_unlock(&group->mutex); 1202 return 0; 1203 } 1204 EXPORT_SYMBOL_GPL(iommu_group_add_device); 1205 1206 /** 1207 * iommu_group_remove_device - remove a device from it's current group 1208 * @dev: device to be removed 1209 * 1210 * This function is called by an iommu driver to remove the device from 1211 * it's current group. This decrements the iommu group reference count. 1212 */ 1213 void iommu_group_remove_device(struct device *dev) 1214 { 1215 struct iommu_group *group = dev->iommu_group; 1216 1217 if (!group) 1218 return; 1219 1220 dev_info(dev, "Removing from iommu group %d\n", group->id); 1221 1222 __iommu_group_remove_device(dev); 1223 } 1224 EXPORT_SYMBOL_GPL(iommu_group_remove_device); 1225 1226 /** 1227 * iommu_group_for_each_dev - iterate over each device in the group 1228 * @group: the group 1229 * @data: caller opaque data to be passed to callback function 1230 * @fn: caller supplied callback function 1231 * 1232 * This function is called by group users to iterate over group devices. 1233 * Callers should hold a reference count to the group during callback. 1234 * The group->mutex is held across callbacks, which will block calls to 1235 * iommu_group_add/remove_device. 1236 */ 1237 int iommu_group_for_each_dev(struct iommu_group *group, void *data, 1238 int (*fn)(struct device *, void *)) 1239 { 1240 struct group_device *device; 1241 int ret = 0; 1242 1243 mutex_lock(&group->mutex); 1244 for_each_group_device(group, device) { 1245 ret = fn(device->dev, data); 1246 if (ret) 1247 break; 1248 } 1249 mutex_unlock(&group->mutex); 1250 1251 return ret; 1252 } 1253 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev); 1254 1255 /** 1256 * iommu_group_get - Return the group for a device and increment reference 1257 * @dev: get the group that this device belongs to 1258 * 1259 * This function is called by iommu drivers and users to get the group 1260 * for the specified device. If found, the group is returned and the group 1261 * reference in incremented, else NULL. 1262 */ 1263 struct iommu_group *iommu_group_get(struct device *dev) 1264 { 1265 struct iommu_group *group = dev->iommu_group; 1266 1267 if (group) 1268 kobject_get(group->devices_kobj); 1269 1270 return group; 1271 } 1272 EXPORT_SYMBOL_GPL(iommu_group_get); 1273 1274 /** 1275 * iommu_group_ref_get - Increment reference on a group 1276 * @group: the group to use, must not be NULL 1277 * 1278 * This function is called by iommu drivers to take additional references on an 1279 * existing group. Returns the given group for convenience. 1280 */ 1281 struct iommu_group *iommu_group_ref_get(struct iommu_group *group) 1282 { 1283 kobject_get(group->devices_kobj); 1284 return group; 1285 } 1286 EXPORT_SYMBOL_GPL(iommu_group_ref_get); 1287 1288 /** 1289 * iommu_group_put - Decrement group reference 1290 * @group: the group to use 1291 * 1292 * This function is called by iommu drivers and users to release the 1293 * iommu group. Once the reference count is zero, the group is released. 1294 */ 1295 void iommu_group_put(struct iommu_group *group) 1296 { 1297 if (group) 1298 kobject_put(group->devices_kobj); 1299 } 1300 EXPORT_SYMBOL_GPL(iommu_group_put); 1301 1302 /** 1303 * iommu_register_device_fault_handler() - Register a device fault handler 1304 * @dev: the device 1305 * @handler: the fault handler 1306 * @data: private data passed as argument to the handler 1307 * 1308 * When an IOMMU fault event is received, this handler gets called with the 1309 * fault event and data as argument. The handler should return 0 on success. If 1310 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also 1311 * complete the fault by calling iommu_page_response() with one of the following 1312 * response code: 1313 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation 1314 * - IOMMU_PAGE_RESP_INVALID: terminate the fault 1315 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting 1316 * page faults if possible. 1317 * 1318 * Return 0 if the fault handler was installed successfully, or an error. 1319 */ 1320 int iommu_register_device_fault_handler(struct device *dev, 1321 iommu_dev_fault_handler_t handler, 1322 void *data) 1323 { 1324 struct dev_iommu *param = dev->iommu; 1325 int ret = 0; 1326 1327 if (!param) 1328 return -EINVAL; 1329 1330 mutex_lock(¶m->lock); 1331 /* Only allow one fault handler registered for each device */ 1332 if (param->fault_param) { 1333 ret = -EBUSY; 1334 goto done_unlock; 1335 } 1336 1337 get_device(dev); 1338 param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL); 1339 if (!param->fault_param) { 1340 put_device(dev); 1341 ret = -ENOMEM; 1342 goto done_unlock; 1343 } 1344 param->fault_param->handler = handler; 1345 param->fault_param->data = data; 1346 mutex_init(¶m->fault_param->lock); 1347 INIT_LIST_HEAD(¶m->fault_param->faults); 1348 1349 done_unlock: 1350 mutex_unlock(¶m->lock); 1351 1352 return ret; 1353 } 1354 EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler); 1355 1356 /** 1357 * iommu_unregister_device_fault_handler() - Unregister the device fault handler 1358 * @dev: the device 1359 * 1360 * Remove the device fault handler installed with 1361 * iommu_register_device_fault_handler(). 1362 * 1363 * Return 0 on success, or an error. 1364 */ 1365 int iommu_unregister_device_fault_handler(struct device *dev) 1366 { 1367 struct dev_iommu *param = dev->iommu; 1368 int ret = 0; 1369 1370 if (!param) 1371 return -EINVAL; 1372 1373 mutex_lock(¶m->lock); 1374 1375 if (!param->fault_param) 1376 goto unlock; 1377 1378 /* we cannot unregister handler if there are pending faults */ 1379 if (!list_empty(¶m->fault_param->faults)) { 1380 ret = -EBUSY; 1381 goto unlock; 1382 } 1383 1384 kfree(param->fault_param); 1385 param->fault_param = NULL; 1386 put_device(dev); 1387 unlock: 1388 mutex_unlock(¶m->lock); 1389 1390 return ret; 1391 } 1392 EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler); 1393 1394 /** 1395 * iommu_report_device_fault() - Report fault event to device driver 1396 * @dev: the device 1397 * @evt: fault event data 1398 * 1399 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ 1400 * handler. When this function fails and the fault is recoverable, it is the 1401 * caller's responsibility to complete the fault. 1402 * 1403 * Return 0 on success, or an error. 1404 */ 1405 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) 1406 { 1407 struct dev_iommu *param = dev->iommu; 1408 struct iommu_fault_event *evt_pending = NULL; 1409 struct iommu_fault_param *fparam; 1410 int ret = 0; 1411 1412 if (!param || !evt) 1413 return -EINVAL; 1414 1415 /* we only report device fault if there is a handler registered */ 1416 mutex_lock(¶m->lock); 1417 fparam = param->fault_param; 1418 if (!fparam || !fparam->handler) { 1419 ret = -EINVAL; 1420 goto done_unlock; 1421 } 1422 1423 if (evt->fault.type == IOMMU_FAULT_PAGE_REQ && 1424 (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) { 1425 evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event), 1426 GFP_KERNEL); 1427 if (!evt_pending) { 1428 ret = -ENOMEM; 1429 goto done_unlock; 1430 } 1431 mutex_lock(&fparam->lock); 1432 list_add_tail(&evt_pending->list, &fparam->faults); 1433 mutex_unlock(&fparam->lock); 1434 } 1435 1436 ret = fparam->handler(&evt->fault, fparam->data); 1437 if (ret && evt_pending) { 1438 mutex_lock(&fparam->lock); 1439 list_del(&evt_pending->list); 1440 mutex_unlock(&fparam->lock); 1441 kfree(evt_pending); 1442 } 1443 done_unlock: 1444 mutex_unlock(¶m->lock); 1445 return ret; 1446 } 1447 EXPORT_SYMBOL_GPL(iommu_report_device_fault); 1448 1449 int iommu_page_response(struct device *dev, 1450 struct iommu_page_response *msg) 1451 { 1452 bool needs_pasid; 1453 int ret = -EINVAL; 1454 struct iommu_fault_event *evt; 1455 struct iommu_fault_page_request *prm; 1456 struct dev_iommu *param = dev->iommu; 1457 const struct iommu_ops *ops = dev_iommu_ops(dev); 1458 bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID; 1459 1460 if (!ops->page_response) 1461 return -ENODEV; 1462 1463 if (!param || !param->fault_param) 1464 return -EINVAL; 1465 1466 if (msg->version != IOMMU_PAGE_RESP_VERSION_1 || 1467 msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID) 1468 return -EINVAL; 1469 1470 /* Only send response if there is a fault report pending */ 1471 mutex_lock(¶m->fault_param->lock); 1472 if (list_empty(¶m->fault_param->faults)) { 1473 dev_warn_ratelimited(dev, "no pending PRQ, drop response\n"); 1474 goto done_unlock; 1475 } 1476 /* 1477 * Check if we have a matching page request pending to respond, 1478 * otherwise return -EINVAL 1479 */ 1480 list_for_each_entry(evt, ¶m->fault_param->faults, list) { 1481 prm = &evt->fault.prm; 1482 if (prm->grpid != msg->grpid) 1483 continue; 1484 1485 /* 1486 * If the PASID is required, the corresponding request is 1487 * matched using the group ID, the PASID valid bit and the PASID 1488 * value. Otherwise only the group ID matches request and 1489 * response. 1490 */ 1491 needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID; 1492 if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid)) 1493 continue; 1494 1495 if (!needs_pasid && has_pasid) { 1496 /* No big deal, just clear it. */ 1497 msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID; 1498 msg->pasid = 0; 1499 } 1500 1501 ret = ops->page_response(dev, evt, msg); 1502 list_del(&evt->list); 1503 kfree(evt); 1504 break; 1505 } 1506 1507 done_unlock: 1508 mutex_unlock(¶m->fault_param->lock); 1509 return ret; 1510 } 1511 EXPORT_SYMBOL_GPL(iommu_page_response); 1512 1513 /** 1514 * iommu_group_id - Return ID for a group 1515 * @group: the group to ID 1516 * 1517 * Return the unique ID for the group matching the sysfs group number. 1518 */ 1519 int iommu_group_id(struct iommu_group *group) 1520 { 1521 return group->id; 1522 } 1523 EXPORT_SYMBOL_GPL(iommu_group_id); 1524 1525 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1526 unsigned long *devfns); 1527 1528 /* 1529 * To consider a PCI device isolated, we require ACS to support Source 1530 * Validation, Request Redirection, Completer Redirection, and Upstream 1531 * Forwarding. This effectively means that devices cannot spoof their 1532 * requester ID, requests and completions cannot be redirected, and all 1533 * transactions are forwarded upstream, even as it passes through a 1534 * bridge where the target device is downstream. 1535 */ 1536 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) 1537 1538 /* 1539 * For multifunction devices which are not isolated from each other, find 1540 * all the other non-isolated functions and look for existing groups. For 1541 * each function, we also need to look for aliases to or from other devices 1542 * that may already have a group. 1543 */ 1544 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev, 1545 unsigned long *devfns) 1546 { 1547 struct pci_dev *tmp = NULL; 1548 struct iommu_group *group; 1549 1550 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) 1551 return NULL; 1552 1553 for_each_pci_dev(tmp) { 1554 if (tmp == pdev || tmp->bus != pdev->bus || 1555 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || 1556 pci_acs_enabled(tmp, REQ_ACS_FLAGS)) 1557 continue; 1558 1559 group = get_pci_alias_group(tmp, devfns); 1560 if (group) { 1561 pci_dev_put(tmp); 1562 return group; 1563 } 1564 } 1565 1566 return NULL; 1567 } 1568 1569 /* 1570 * Look for aliases to or from the given device for existing groups. DMA 1571 * aliases are only supported on the same bus, therefore the search 1572 * space is quite small (especially since we're really only looking at pcie 1573 * device, and therefore only expect multiple slots on the root complex or 1574 * downstream switch ports). It's conceivable though that a pair of 1575 * multifunction devices could have aliases between them that would cause a 1576 * loop. To prevent this, we use a bitmap to track where we've been. 1577 */ 1578 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1579 unsigned long *devfns) 1580 { 1581 struct pci_dev *tmp = NULL; 1582 struct iommu_group *group; 1583 1584 if (test_and_set_bit(pdev->devfn & 0xff, devfns)) 1585 return NULL; 1586 1587 group = iommu_group_get(&pdev->dev); 1588 if (group) 1589 return group; 1590 1591 for_each_pci_dev(tmp) { 1592 if (tmp == pdev || tmp->bus != pdev->bus) 1593 continue; 1594 1595 /* We alias them or they alias us */ 1596 if (pci_devs_are_dma_aliases(pdev, tmp)) { 1597 group = get_pci_alias_group(tmp, devfns); 1598 if (group) { 1599 pci_dev_put(tmp); 1600 return group; 1601 } 1602 1603 group = get_pci_function_alias_group(tmp, devfns); 1604 if (group) { 1605 pci_dev_put(tmp); 1606 return group; 1607 } 1608 } 1609 } 1610 1611 return NULL; 1612 } 1613 1614 struct group_for_pci_data { 1615 struct pci_dev *pdev; 1616 struct iommu_group *group; 1617 }; 1618 1619 /* 1620 * DMA alias iterator callback, return the last seen device. Stop and return 1621 * the IOMMU group if we find one along the way. 1622 */ 1623 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) 1624 { 1625 struct group_for_pci_data *data = opaque; 1626 1627 data->pdev = pdev; 1628 data->group = iommu_group_get(&pdev->dev); 1629 1630 return data->group != NULL; 1631 } 1632 1633 /* 1634 * Generic device_group call-back function. It just allocates one 1635 * iommu-group per device. 1636 */ 1637 struct iommu_group *generic_device_group(struct device *dev) 1638 { 1639 return iommu_group_alloc(); 1640 } 1641 EXPORT_SYMBOL_GPL(generic_device_group); 1642 1643 /* 1644 * Generic device_group call-back function. It just allocates one 1645 * iommu-group per iommu driver instance shared by every device 1646 * probed by that iommu driver. 1647 */ 1648 struct iommu_group *generic_single_device_group(struct device *dev) 1649 { 1650 struct iommu_device *iommu = dev->iommu->iommu_dev; 1651 1652 if (!iommu->singleton_group) { 1653 struct iommu_group *group; 1654 1655 group = iommu_group_alloc(); 1656 if (IS_ERR(group)) 1657 return group; 1658 iommu->singleton_group = group; 1659 } 1660 return iommu_group_ref_get(iommu->singleton_group); 1661 } 1662 EXPORT_SYMBOL_GPL(generic_single_device_group); 1663 1664 /* 1665 * Use standard PCI bus topology, isolation features, and DMA alias quirks 1666 * to find or create an IOMMU group for a device. 1667 */ 1668 struct iommu_group *pci_device_group(struct device *dev) 1669 { 1670 struct pci_dev *pdev = to_pci_dev(dev); 1671 struct group_for_pci_data data; 1672 struct pci_bus *bus; 1673 struct iommu_group *group = NULL; 1674 u64 devfns[4] = { 0 }; 1675 1676 if (WARN_ON(!dev_is_pci(dev))) 1677 return ERR_PTR(-EINVAL); 1678 1679 /* 1680 * Find the upstream DMA alias for the device. A device must not 1681 * be aliased due to topology in order to have its own IOMMU group. 1682 * If we find an alias along the way that already belongs to a 1683 * group, use it. 1684 */ 1685 if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data)) 1686 return data.group; 1687 1688 pdev = data.pdev; 1689 1690 /* 1691 * Continue upstream from the point of minimum IOMMU granularity 1692 * due to aliases to the point where devices are protected from 1693 * peer-to-peer DMA by PCI ACS. Again, if we find an existing 1694 * group, use it. 1695 */ 1696 for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { 1697 if (!bus->self) 1698 continue; 1699 1700 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) 1701 break; 1702 1703 pdev = bus->self; 1704 1705 group = iommu_group_get(&pdev->dev); 1706 if (group) 1707 return group; 1708 } 1709 1710 /* 1711 * Look for existing groups on device aliases. If we alias another 1712 * device or another device aliases us, use the same group. 1713 */ 1714 group = get_pci_alias_group(pdev, (unsigned long *)devfns); 1715 if (group) 1716 return group; 1717 1718 /* 1719 * Look for existing groups on non-isolated functions on the same 1720 * slot and aliases of those funcions, if any. No need to clear 1721 * the search bitmap, the tested devfns are still valid. 1722 */ 1723 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); 1724 if (group) 1725 return group; 1726 1727 /* No shared group found, allocate new */ 1728 return iommu_group_alloc(); 1729 } 1730 EXPORT_SYMBOL_GPL(pci_device_group); 1731 1732 /* Get the IOMMU group for device on fsl-mc bus */ 1733 struct iommu_group *fsl_mc_device_group(struct device *dev) 1734 { 1735 struct device *cont_dev = fsl_mc_cont_dev(dev); 1736 struct iommu_group *group; 1737 1738 group = iommu_group_get(cont_dev); 1739 if (!group) 1740 group = iommu_group_alloc(); 1741 return group; 1742 } 1743 EXPORT_SYMBOL_GPL(fsl_mc_device_group); 1744 1745 static struct iommu_domain * 1746 __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) 1747 { 1748 if (group->default_domain && group->default_domain->type == req_type) 1749 return group->default_domain; 1750 return __iommu_group_domain_alloc(group, req_type); 1751 } 1752 1753 /* 1754 * Returns the iommu_ops for the devices in an iommu group. 1755 * 1756 * It is assumed that all devices in an iommu group are managed by a single 1757 * IOMMU unit. Therefore, this returns the dev_iommu_ops of the first device 1758 * in the group. 1759 */ 1760 static const struct iommu_ops *group_iommu_ops(struct iommu_group *group) 1761 { 1762 struct group_device *device = 1763 list_first_entry(&group->devices, struct group_device, list); 1764 1765 lockdep_assert_held(&group->mutex); 1766 1767 return dev_iommu_ops(device->dev); 1768 } 1769 1770 /* 1771 * req_type of 0 means "auto" which means to select a domain based on 1772 * iommu_def_domain_type or what the driver actually supports. 1773 */ 1774 static struct iommu_domain * 1775 iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) 1776 { 1777 const struct iommu_ops *ops = group_iommu_ops(group); 1778 struct iommu_domain *dom; 1779 1780 lockdep_assert_held(&group->mutex); 1781 1782 /* 1783 * Allow legacy drivers to specify the domain that will be the default 1784 * domain. This should always be either an IDENTITY/BLOCKED/PLATFORM 1785 * domain. Do not use in new drivers. 1786 */ 1787 if (ops->default_domain) { 1788 if (req_type) 1789 return ERR_PTR(-EINVAL); 1790 return ops->default_domain; 1791 } 1792 1793 if (req_type) 1794 return __iommu_group_alloc_default_domain(group, req_type); 1795 1796 /* The driver gave no guidance on what type to use, try the default */ 1797 dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type); 1798 if (!IS_ERR(dom)) 1799 return dom; 1800 1801 /* Otherwise IDENTITY and DMA_FQ defaults will try DMA */ 1802 if (iommu_def_domain_type == IOMMU_DOMAIN_DMA) 1803 return ERR_PTR(-EINVAL); 1804 dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA); 1805 if (IS_ERR(dom)) 1806 return dom; 1807 1808 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA", 1809 iommu_def_domain_type, group->name); 1810 return dom; 1811 } 1812 1813 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) 1814 { 1815 return group->default_domain; 1816 } 1817 1818 static int probe_iommu_group(struct device *dev, void *data) 1819 { 1820 struct list_head *group_list = data; 1821 int ret; 1822 1823 mutex_lock(&iommu_probe_device_lock); 1824 ret = __iommu_probe_device(dev, group_list); 1825 mutex_unlock(&iommu_probe_device_lock); 1826 if (ret == -ENODEV) 1827 ret = 0; 1828 1829 return ret; 1830 } 1831 1832 static int iommu_bus_notifier(struct notifier_block *nb, 1833 unsigned long action, void *data) 1834 { 1835 struct device *dev = data; 1836 1837 if (action == BUS_NOTIFY_ADD_DEVICE) { 1838 int ret; 1839 1840 ret = iommu_probe_device(dev); 1841 return (ret) ? NOTIFY_DONE : NOTIFY_OK; 1842 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) { 1843 iommu_release_device(dev); 1844 return NOTIFY_OK; 1845 } 1846 1847 return 0; 1848 } 1849 1850 /* 1851 * Combine the driver's chosen def_domain_type across all the devices in a 1852 * group. Drivers must give a consistent result. 1853 */ 1854 static int iommu_get_def_domain_type(struct iommu_group *group, 1855 struct device *dev, int cur_type) 1856 { 1857 const struct iommu_ops *ops = group_iommu_ops(group); 1858 int type; 1859 1860 if (!ops->def_domain_type) 1861 return cur_type; 1862 1863 type = ops->def_domain_type(dev); 1864 if (!type || cur_type == type) 1865 return cur_type; 1866 if (!cur_type) 1867 return type; 1868 1869 dev_err_ratelimited( 1870 dev, 1871 "IOMMU driver error, requesting conflicting def_domain_type, %s and %s, for devices in group %u.\n", 1872 iommu_domain_type_str(cur_type), iommu_domain_type_str(type), 1873 group->id); 1874 1875 /* 1876 * Try to recover, drivers are allowed to force IDENITY or DMA, IDENTITY 1877 * takes precedence. 1878 */ 1879 if (type == IOMMU_DOMAIN_IDENTITY) 1880 return type; 1881 return cur_type; 1882 } 1883 1884 /* 1885 * A target_type of 0 will select the best domain type. 0 can be returned in 1886 * this case meaning the global default should be used. 1887 */ 1888 static int iommu_get_default_domain_type(struct iommu_group *group, 1889 int target_type) 1890 { 1891 struct device *untrusted = NULL; 1892 struct group_device *gdev; 1893 int driver_type = 0; 1894 1895 lockdep_assert_held(&group->mutex); 1896 1897 /* 1898 * ARM32 drivers supporting CONFIG_ARM_DMA_USE_IOMMU can declare an 1899 * identity_domain and it will automatically become their default 1900 * domain. Later on ARM_DMA_USE_IOMMU will install its UNMANAGED domain. 1901 * Override the selection to IDENTITY. 1902 */ 1903 if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { 1904 static_assert(!(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) && 1905 IS_ENABLED(CONFIG_IOMMU_DMA))); 1906 driver_type = IOMMU_DOMAIN_IDENTITY; 1907 } 1908 1909 for_each_group_device(group, gdev) { 1910 driver_type = iommu_get_def_domain_type(group, gdev->dev, 1911 driver_type); 1912 1913 if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->untrusted) { 1914 /* 1915 * No ARM32 using systems will set untrusted, it cannot 1916 * work. 1917 */ 1918 if (WARN_ON(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))) 1919 return -1; 1920 untrusted = gdev->dev; 1921 } 1922 } 1923 1924 /* 1925 * If the common dma ops are not selected in kconfig then we cannot use 1926 * IOMMU_DOMAIN_DMA at all. Force IDENTITY if nothing else has been 1927 * selected. 1928 */ 1929 if (!IS_ENABLED(CONFIG_IOMMU_DMA)) { 1930 if (WARN_ON(driver_type == IOMMU_DOMAIN_DMA)) 1931 return -1; 1932 if (!driver_type) 1933 driver_type = IOMMU_DOMAIN_IDENTITY; 1934 } 1935 1936 if (untrusted) { 1937 if (driver_type && driver_type != IOMMU_DOMAIN_DMA) { 1938 dev_err_ratelimited( 1939 untrusted, 1940 "Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n", 1941 group->id, iommu_domain_type_str(driver_type)); 1942 return -1; 1943 } 1944 driver_type = IOMMU_DOMAIN_DMA; 1945 } 1946 1947 if (target_type) { 1948 if (driver_type && target_type != driver_type) 1949 return -1; 1950 return target_type; 1951 } 1952 return driver_type; 1953 } 1954 1955 static void iommu_group_do_probe_finalize(struct device *dev) 1956 { 1957 const struct iommu_ops *ops = dev_iommu_ops(dev); 1958 1959 if (ops->probe_finalize) 1960 ops->probe_finalize(dev); 1961 } 1962 1963 int bus_iommu_probe(const struct bus_type *bus) 1964 { 1965 struct iommu_group *group, *next; 1966 LIST_HEAD(group_list); 1967 int ret; 1968 1969 ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group); 1970 if (ret) 1971 return ret; 1972 1973 list_for_each_entry_safe(group, next, &group_list, entry) { 1974 struct group_device *gdev; 1975 1976 mutex_lock(&group->mutex); 1977 1978 /* Remove item from the list */ 1979 list_del_init(&group->entry); 1980 1981 /* 1982 * We go to the trouble of deferred default domain creation so 1983 * that the cross-group default domain type and the setup of the 1984 * IOMMU_RESV_DIRECT will work correctly in non-hotpug scenarios. 1985 */ 1986 ret = iommu_setup_default_domain(group, 0); 1987 if (ret) { 1988 mutex_unlock(&group->mutex); 1989 return ret; 1990 } 1991 mutex_unlock(&group->mutex); 1992 1993 /* 1994 * FIXME: Mis-locked because the ops->probe_finalize() call-back 1995 * of some IOMMU drivers calls arm_iommu_attach_device() which 1996 * in-turn might call back into IOMMU core code, where it tries 1997 * to take group->mutex, resulting in a deadlock. 1998 */ 1999 for_each_group_device(group, gdev) 2000 iommu_group_do_probe_finalize(gdev->dev); 2001 } 2002 2003 return 0; 2004 } 2005 2006 bool iommu_present(const struct bus_type *bus) 2007 { 2008 return bus->iommu_ops != NULL; 2009 } 2010 EXPORT_SYMBOL_GPL(iommu_present); 2011 2012 /** 2013 * device_iommu_capable() - check for a general IOMMU capability 2014 * @dev: device to which the capability would be relevant, if available 2015 * @cap: IOMMU capability 2016 * 2017 * Return: true if an IOMMU is present and supports the given capability 2018 * for the given device, otherwise false. 2019 */ 2020 bool device_iommu_capable(struct device *dev, enum iommu_cap cap) 2021 { 2022 const struct iommu_ops *ops; 2023 2024 if (!dev->iommu || !dev->iommu->iommu_dev) 2025 return false; 2026 2027 ops = dev_iommu_ops(dev); 2028 if (!ops->capable) 2029 return false; 2030 2031 return ops->capable(dev, cap); 2032 } 2033 EXPORT_SYMBOL_GPL(device_iommu_capable); 2034 2035 /** 2036 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi() 2037 * for a group 2038 * @group: Group to query 2039 * 2040 * IOMMU groups should not have differing values of 2041 * msi_device_has_isolated_msi() for devices in a group. However nothing 2042 * directly prevents this, so ensure mistakes don't result in isolation failures 2043 * by checking that all the devices are the same. 2044 */ 2045 bool iommu_group_has_isolated_msi(struct iommu_group *group) 2046 { 2047 struct group_device *group_dev; 2048 bool ret = true; 2049 2050 mutex_lock(&group->mutex); 2051 for_each_group_device(group, group_dev) 2052 ret &= msi_device_has_isolated_msi(group_dev->dev); 2053 mutex_unlock(&group->mutex); 2054 return ret; 2055 } 2056 EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi); 2057 2058 /** 2059 * iommu_set_fault_handler() - set a fault handler for an iommu domain 2060 * @domain: iommu domain 2061 * @handler: fault handler 2062 * @token: user data, will be passed back to the fault handler 2063 * 2064 * This function should be used by IOMMU users which want to be notified 2065 * whenever an IOMMU fault happens. 2066 * 2067 * The fault handler itself should return 0 on success, and an appropriate 2068 * error code otherwise. 2069 */ 2070 void iommu_set_fault_handler(struct iommu_domain *domain, 2071 iommu_fault_handler_t handler, 2072 void *token) 2073 { 2074 BUG_ON(!domain); 2075 2076 domain->handler = handler; 2077 domain->handler_token = token; 2078 } 2079 EXPORT_SYMBOL_GPL(iommu_set_fault_handler); 2080 2081 static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops, 2082 struct device *dev, 2083 unsigned int type) 2084 { 2085 struct iommu_domain *domain; 2086 unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS; 2087 2088 if (alloc_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain) 2089 return ops->identity_domain; 2090 else if (alloc_type == IOMMU_DOMAIN_BLOCKED && ops->blocked_domain) 2091 return ops->blocked_domain; 2092 else if (type & __IOMMU_DOMAIN_PAGING && ops->domain_alloc_paging) 2093 domain = ops->domain_alloc_paging(dev); 2094 else if (ops->domain_alloc) 2095 domain = ops->domain_alloc(alloc_type); 2096 else 2097 return ERR_PTR(-EOPNOTSUPP); 2098 2099 /* 2100 * Many domain_alloc ops now return ERR_PTR, make things easier for the 2101 * driver by accepting ERR_PTR from all domain_alloc ops instead of 2102 * having two rules. 2103 */ 2104 if (IS_ERR(domain)) 2105 return domain; 2106 if (!domain) 2107 return ERR_PTR(-ENOMEM); 2108 2109 domain->type = type; 2110 /* 2111 * If not already set, assume all sizes by default; the driver 2112 * may override this later 2113 */ 2114 if (!domain->pgsize_bitmap) 2115 domain->pgsize_bitmap = ops->pgsize_bitmap; 2116 2117 if (!domain->ops) 2118 domain->ops = ops->default_domain_ops; 2119 2120 if (iommu_is_dma_domain(domain)) { 2121 int rc; 2122 2123 rc = iommu_get_dma_cookie(domain); 2124 if (rc) { 2125 iommu_domain_free(domain); 2126 return ERR_PTR(rc); 2127 } 2128 } 2129 return domain; 2130 } 2131 2132 static struct iommu_domain * 2133 __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type) 2134 { 2135 struct device *dev = 2136 list_first_entry(&group->devices, struct group_device, list) 2137 ->dev; 2138 2139 return __iommu_domain_alloc(group_iommu_ops(group), dev, type); 2140 } 2141 2142 struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus) 2143 { 2144 struct iommu_domain *domain; 2145 2146 if (bus == NULL || bus->iommu_ops == NULL) 2147 return NULL; 2148 domain = __iommu_domain_alloc(bus->iommu_ops, NULL, 2149 IOMMU_DOMAIN_UNMANAGED); 2150 if (IS_ERR(domain)) 2151 return NULL; 2152 return domain; 2153 } 2154 EXPORT_SYMBOL_GPL(iommu_domain_alloc); 2155 2156 void iommu_domain_free(struct iommu_domain *domain) 2157 { 2158 if (domain->type == IOMMU_DOMAIN_SVA) 2159 mmdrop(domain->mm); 2160 iommu_put_dma_cookie(domain); 2161 if (domain->ops->free) 2162 domain->ops->free(domain); 2163 } 2164 EXPORT_SYMBOL_GPL(iommu_domain_free); 2165 2166 /* 2167 * Put the group's domain back to the appropriate core-owned domain - either the 2168 * standard kernel-mode DMA configuration or an all-DMA-blocked domain. 2169 */ 2170 static void __iommu_group_set_core_domain(struct iommu_group *group) 2171 { 2172 struct iommu_domain *new_domain; 2173 2174 if (group->owner) 2175 new_domain = group->blocking_domain; 2176 else 2177 new_domain = group->default_domain; 2178 2179 __iommu_group_set_domain_nofail(group, new_domain); 2180 } 2181 2182 static int __iommu_attach_device(struct iommu_domain *domain, 2183 struct device *dev) 2184 { 2185 int ret; 2186 2187 if (unlikely(domain->ops->attach_dev == NULL)) 2188 return -ENODEV; 2189 2190 ret = domain->ops->attach_dev(domain, dev); 2191 if (ret) 2192 return ret; 2193 dev->iommu->attach_deferred = 0; 2194 trace_attach_device_to_domain(dev); 2195 return 0; 2196 } 2197 2198 /** 2199 * iommu_attach_device - Attach an IOMMU domain to a device 2200 * @domain: IOMMU domain to attach 2201 * @dev: Device that will be attached 2202 * 2203 * Returns 0 on success and error code on failure 2204 * 2205 * Note that EINVAL can be treated as a soft failure, indicating 2206 * that certain configuration of the domain is incompatible with 2207 * the device. In this case attaching a different domain to the 2208 * device may succeed. 2209 */ 2210 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 2211 { 2212 /* Caller must be a probed driver on dev */ 2213 struct iommu_group *group = dev->iommu_group; 2214 int ret; 2215 2216 if (!group) 2217 return -ENODEV; 2218 2219 /* 2220 * Lock the group to make sure the device-count doesn't 2221 * change while we are attaching 2222 */ 2223 mutex_lock(&group->mutex); 2224 ret = -EINVAL; 2225 if (list_count_nodes(&group->devices) != 1) 2226 goto out_unlock; 2227 2228 ret = __iommu_attach_group(domain, group); 2229 2230 out_unlock: 2231 mutex_unlock(&group->mutex); 2232 return ret; 2233 } 2234 EXPORT_SYMBOL_GPL(iommu_attach_device); 2235 2236 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 2237 { 2238 if (dev->iommu && dev->iommu->attach_deferred) 2239 return __iommu_attach_device(domain, dev); 2240 2241 return 0; 2242 } 2243 2244 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 2245 { 2246 /* Caller must be a probed driver on dev */ 2247 struct iommu_group *group = dev->iommu_group; 2248 2249 if (!group) 2250 return; 2251 2252 mutex_lock(&group->mutex); 2253 if (WARN_ON(domain != group->domain) || 2254 WARN_ON(list_count_nodes(&group->devices) != 1)) 2255 goto out_unlock; 2256 __iommu_group_set_core_domain(group); 2257 2258 out_unlock: 2259 mutex_unlock(&group->mutex); 2260 } 2261 EXPORT_SYMBOL_GPL(iommu_detach_device); 2262 2263 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 2264 { 2265 /* Caller must be a probed driver on dev */ 2266 struct iommu_group *group = dev->iommu_group; 2267 2268 if (!group) 2269 return NULL; 2270 2271 return group->domain; 2272 } 2273 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 2274 2275 /* 2276 * For IOMMU_DOMAIN_DMA implementations which already provide their own 2277 * guarantees that the group and its default domain are valid and correct. 2278 */ 2279 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 2280 { 2281 return dev->iommu_group->default_domain; 2282 } 2283 2284 static int __iommu_attach_group(struct iommu_domain *domain, 2285 struct iommu_group *group) 2286 { 2287 if (group->domain && group->domain != group->default_domain && 2288 group->domain != group->blocking_domain) 2289 return -EBUSY; 2290 2291 return __iommu_group_set_domain(group, domain); 2292 } 2293 2294 /** 2295 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group 2296 * @domain: IOMMU domain to attach 2297 * @group: IOMMU group that will be attached 2298 * 2299 * Returns 0 on success and error code on failure 2300 * 2301 * Note that EINVAL can be treated as a soft failure, indicating 2302 * that certain configuration of the domain is incompatible with 2303 * the group. In this case attaching a different domain to the 2304 * group may succeed. 2305 */ 2306 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 2307 { 2308 int ret; 2309 2310 mutex_lock(&group->mutex); 2311 ret = __iommu_attach_group(domain, group); 2312 mutex_unlock(&group->mutex); 2313 2314 return ret; 2315 } 2316 EXPORT_SYMBOL_GPL(iommu_attach_group); 2317 2318 /** 2319 * iommu_group_replace_domain - replace the domain that a group is attached to 2320 * @new_domain: new IOMMU domain to replace with 2321 * @group: IOMMU group that will be attached to the new domain 2322 * 2323 * This API allows the group to switch domains without being forced to go to 2324 * the blocking domain in-between. 2325 * 2326 * If the currently attached domain is a core domain (e.g. a default_domain), 2327 * it will act just like the iommu_attach_group(). 2328 */ 2329 int iommu_group_replace_domain(struct iommu_group *group, 2330 struct iommu_domain *new_domain) 2331 { 2332 int ret; 2333 2334 if (!new_domain) 2335 return -EINVAL; 2336 2337 mutex_lock(&group->mutex); 2338 ret = __iommu_group_set_domain(group, new_domain); 2339 mutex_unlock(&group->mutex); 2340 return ret; 2341 } 2342 EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, IOMMUFD_INTERNAL); 2343 2344 static int __iommu_device_set_domain(struct iommu_group *group, 2345 struct device *dev, 2346 struct iommu_domain *new_domain, 2347 unsigned int flags) 2348 { 2349 int ret; 2350 2351 /* 2352 * If the device requires IOMMU_RESV_DIRECT then we cannot allow 2353 * the blocking domain to be attached as it does not contain the 2354 * required 1:1 mapping. This test effectively excludes the device 2355 * being used with iommu_group_claim_dma_owner() which will block 2356 * vfio and iommufd as well. 2357 */ 2358 if (dev->iommu->require_direct && 2359 (new_domain->type == IOMMU_DOMAIN_BLOCKED || 2360 new_domain == group->blocking_domain)) { 2361 dev_warn(dev, 2362 "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n"); 2363 return -EINVAL; 2364 } 2365 2366 if (dev->iommu->attach_deferred) { 2367 if (new_domain == group->default_domain) 2368 return 0; 2369 dev->iommu->attach_deferred = 0; 2370 } 2371 2372 ret = __iommu_attach_device(new_domain, dev); 2373 if (ret) { 2374 /* 2375 * If we have a blocking domain then try to attach that in hopes 2376 * of avoiding a UAF. Modern drivers should implement blocking 2377 * domains as global statics that cannot fail. 2378 */ 2379 if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) && 2380 group->blocking_domain && 2381 group->blocking_domain != new_domain) 2382 __iommu_attach_device(group->blocking_domain, dev); 2383 return ret; 2384 } 2385 return 0; 2386 } 2387 2388 /* 2389 * If 0 is returned the group's domain is new_domain. If an error is returned 2390 * then the group's domain will be set back to the existing domain unless 2391 * IOMMU_SET_DOMAIN_MUST_SUCCEED, otherwise an error is returned and the group's 2392 * domains is left inconsistent. This is a driver bug to fail attach with a 2393 * previously good domain. We try to avoid a kernel UAF because of this. 2394 * 2395 * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU 2396 * API works on domains and devices. Bridge that gap by iterating over the 2397 * devices in a group. Ideally we'd have a single device which represents the 2398 * requestor ID of the group, but we also allow IOMMU drivers to create policy 2399 * defined minimum sets, where the physical hardware may be able to distiguish 2400 * members, but we wish to group them at a higher level (ex. untrusted 2401 * multi-function PCI devices). Thus we attach each device. 2402 */ 2403 static int __iommu_group_set_domain_internal(struct iommu_group *group, 2404 struct iommu_domain *new_domain, 2405 unsigned int flags) 2406 { 2407 struct group_device *last_gdev; 2408 struct group_device *gdev; 2409 int result; 2410 int ret; 2411 2412 lockdep_assert_held(&group->mutex); 2413 2414 if (group->domain == new_domain) 2415 return 0; 2416 2417 if (WARN_ON(!new_domain)) 2418 return -EINVAL; 2419 2420 /* 2421 * Changing the domain is done by calling attach_dev() on the new 2422 * domain. This switch does not have to be atomic and DMA can be 2423 * discarded during the transition. DMA must only be able to access 2424 * either new_domain or group->domain, never something else. 2425 */ 2426 result = 0; 2427 for_each_group_device(group, gdev) { 2428 ret = __iommu_device_set_domain(group, gdev->dev, new_domain, 2429 flags); 2430 if (ret) { 2431 result = ret; 2432 /* 2433 * Keep trying the other devices in the group. If a 2434 * driver fails attach to an otherwise good domain, and 2435 * does not support blocking domains, it should at least 2436 * drop its reference on the current domain so we don't 2437 * UAF. 2438 */ 2439 if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) 2440 continue; 2441 goto err_revert; 2442 } 2443 } 2444 group->domain = new_domain; 2445 return result; 2446 2447 err_revert: 2448 /* 2449 * This is called in error unwind paths. A well behaved driver should 2450 * always allow us to attach to a domain that was already attached. 2451 */ 2452 last_gdev = gdev; 2453 for_each_group_device(group, gdev) { 2454 /* 2455 * A NULL domain can happen only for first probe, in which case 2456 * we leave group->domain as NULL and let release clean 2457 * everything up. 2458 */ 2459 if (group->domain) 2460 WARN_ON(__iommu_device_set_domain( 2461 group, gdev->dev, group->domain, 2462 IOMMU_SET_DOMAIN_MUST_SUCCEED)); 2463 if (gdev == last_gdev) 2464 break; 2465 } 2466 return ret; 2467 } 2468 2469 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2470 { 2471 mutex_lock(&group->mutex); 2472 __iommu_group_set_core_domain(group); 2473 mutex_unlock(&group->mutex); 2474 } 2475 EXPORT_SYMBOL_GPL(iommu_detach_group); 2476 2477 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2478 { 2479 if (domain->type == IOMMU_DOMAIN_IDENTITY) 2480 return iova; 2481 2482 if (domain->type == IOMMU_DOMAIN_BLOCKED) 2483 return 0; 2484 2485 return domain->ops->iova_to_phys(domain, iova); 2486 } 2487 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2488 2489 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, 2490 phys_addr_t paddr, size_t size, size_t *count) 2491 { 2492 unsigned int pgsize_idx, pgsize_idx_next; 2493 unsigned long pgsizes; 2494 size_t offset, pgsize, pgsize_next; 2495 unsigned long addr_merge = paddr | iova; 2496 2497 /* Page sizes supported by the hardware and small enough for @size */ 2498 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); 2499 2500 /* Constrain the page sizes further based on the maximum alignment */ 2501 if (likely(addr_merge)) 2502 pgsizes &= GENMASK(__ffs(addr_merge), 0); 2503 2504 /* Make sure we have at least one suitable page size */ 2505 BUG_ON(!pgsizes); 2506 2507 /* Pick the biggest page size remaining */ 2508 pgsize_idx = __fls(pgsizes); 2509 pgsize = BIT(pgsize_idx); 2510 if (!count) 2511 return pgsize; 2512 2513 /* Find the next biggest support page size, if it exists */ 2514 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); 2515 if (!pgsizes) 2516 goto out_set_count; 2517 2518 pgsize_idx_next = __ffs(pgsizes); 2519 pgsize_next = BIT(pgsize_idx_next); 2520 2521 /* 2522 * There's no point trying a bigger page size unless the virtual 2523 * and physical addresses are similarly offset within the larger page. 2524 */ 2525 if ((iova ^ paddr) & (pgsize_next - 1)) 2526 goto out_set_count; 2527 2528 /* Calculate the offset to the next page size alignment boundary */ 2529 offset = pgsize_next - (addr_merge & (pgsize_next - 1)); 2530 2531 /* 2532 * If size is big enough to accommodate the larger page, reduce 2533 * the number of smaller pages. 2534 */ 2535 if (offset + pgsize_next <= size) 2536 size = offset; 2537 2538 out_set_count: 2539 *count = size >> pgsize_idx; 2540 return pgsize; 2541 } 2542 2543 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2544 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2545 { 2546 const struct iommu_domain_ops *ops = domain->ops; 2547 unsigned long orig_iova = iova; 2548 unsigned int min_pagesz; 2549 size_t orig_size = size; 2550 phys_addr_t orig_paddr = paddr; 2551 int ret = 0; 2552 2553 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2554 return -EINVAL; 2555 2556 if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL)) 2557 return -ENODEV; 2558 2559 /* find out the minimum page size supported */ 2560 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2561 2562 /* 2563 * both the virtual address and the physical one, as well as 2564 * the size of the mapping, must be aligned (at least) to the 2565 * size of the smallest page supported by the hardware 2566 */ 2567 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2568 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2569 iova, &paddr, size, min_pagesz); 2570 return -EINVAL; 2571 } 2572 2573 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2574 2575 while (size) { 2576 size_t pgsize, count, mapped = 0; 2577 2578 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); 2579 2580 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", 2581 iova, &paddr, pgsize, count); 2582 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, 2583 gfp, &mapped); 2584 /* 2585 * Some pages may have been mapped, even if an error occurred, 2586 * so we should account for those so they can be unmapped. 2587 */ 2588 size -= mapped; 2589 2590 if (ret) 2591 break; 2592 2593 iova += mapped; 2594 paddr += mapped; 2595 } 2596 2597 /* unroll mapping in case something went wrong */ 2598 if (ret) 2599 iommu_unmap(domain, orig_iova, orig_size - size); 2600 else 2601 trace_map(orig_iova, orig_paddr, orig_size); 2602 2603 return ret; 2604 } 2605 2606 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2607 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2608 { 2609 const struct iommu_domain_ops *ops = domain->ops; 2610 int ret; 2611 2612 might_sleep_if(gfpflags_allow_blocking(gfp)); 2613 2614 /* Discourage passing strange GFP flags */ 2615 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2616 __GFP_HIGHMEM))) 2617 return -EINVAL; 2618 2619 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); 2620 if (ret == 0 && ops->iotlb_sync_map) { 2621 ret = ops->iotlb_sync_map(domain, iova, size); 2622 if (ret) 2623 goto out_err; 2624 } 2625 2626 return ret; 2627 2628 out_err: 2629 /* undo mappings already done */ 2630 iommu_unmap(domain, iova, size); 2631 2632 return ret; 2633 } 2634 EXPORT_SYMBOL_GPL(iommu_map); 2635 2636 static size_t __iommu_unmap(struct iommu_domain *domain, 2637 unsigned long iova, size_t size, 2638 struct iommu_iotlb_gather *iotlb_gather) 2639 { 2640 const struct iommu_domain_ops *ops = domain->ops; 2641 size_t unmapped_page, unmapped = 0; 2642 unsigned long orig_iova = iova; 2643 unsigned int min_pagesz; 2644 2645 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2646 return 0; 2647 2648 if (WARN_ON(!ops->unmap_pages || domain->pgsize_bitmap == 0UL)) 2649 return 0; 2650 2651 /* find out the minimum page size supported */ 2652 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2653 2654 /* 2655 * The virtual address, as well as the size of the mapping, must be 2656 * aligned (at least) to the size of the smallest page supported 2657 * by the hardware 2658 */ 2659 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2660 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2661 iova, size, min_pagesz); 2662 return 0; 2663 } 2664 2665 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2666 2667 /* 2668 * Keep iterating until we either unmap 'size' bytes (or more) 2669 * or we hit an area that isn't mapped. 2670 */ 2671 while (unmapped < size) { 2672 size_t pgsize, count; 2673 2674 pgsize = iommu_pgsize(domain, iova, iova, size - unmapped, &count); 2675 unmapped_page = ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather); 2676 if (!unmapped_page) 2677 break; 2678 2679 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2680 iova, unmapped_page); 2681 2682 iova += unmapped_page; 2683 unmapped += unmapped_page; 2684 } 2685 2686 trace_unmap(orig_iova, size, unmapped); 2687 return unmapped; 2688 } 2689 2690 size_t iommu_unmap(struct iommu_domain *domain, 2691 unsigned long iova, size_t size) 2692 { 2693 struct iommu_iotlb_gather iotlb_gather; 2694 size_t ret; 2695 2696 iommu_iotlb_gather_init(&iotlb_gather); 2697 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2698 iommu_iotlb_sync(domain, &iotlb_gather); 2699 2700 return ret; 2701 } 2702 EXPORT_SYMBOL_GPL(iommu_unmap); 2703 2704 size_t iommu_unmap_fast(struct iommu_domain *domain, 2705 unsigned long iova, size_t size, 2706 struct iommu_iotlb_gather *iotlb_gather) 2707 { 2708 return __iommu_unmap(domain, iova, size, iotlb_gather); 2709 } 2710 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2711 2712 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2713 struct scatterlist *sg, unsigned int nents, int prot, 2714 gfp_t gfp) 2715 { 2716 const struct iommu_domain_ops *ops = domain->ops; 2717 size_t len = 0, mapped = 0; 2718 phys_addr_t start; 2719 unsigned int i = 0; 2720 int ret; 2721 2722 might_sleep_if(gfpflags_allow_blocking(gfp)); 2723 2724 /* Discourage passing strange GFP flags */ 2725 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2726 __GFP_HIGHMEM))) 2727 return -EINVAL; 2728 2729 while (i <= nents) { 2730 phys_addr_t s_phys = sg_phys(sg); 2731 2732 if (len && s_phys != start + len) { 2733 ret = __iommu_map(domain, iova + mapped, start, 2734 len, prot, gfp); 2735 2736 if (ret) 2737 goto out_err; 2738 2739 mapped += len; 2740 len = 0; 2741 } 2742 2743 if (sg_dma_is_bus_address(sg)) 2744 goto next; 2745 2746 if (len) { 2747 len += sg->length; 2748 } else { 2749 len = sg->length; 2750 start = s_phys; 2751 } 2752 2753 next: 2754 if (++i < nents) 2755 sg = sg_next(sg); 2756 } 2757 2758 if (ops->iotlb_sync_map) { 2759 ret = ops->iotlb_sync_map(domain, iova, mapped); 2760 if (ret) 2761 goto out_err; 2762 } 2763 return mapped; 2764 2765 out_err: 2766 /* undo mappings already done */ 2767 iommu_unmap(domain, iova, mapped); 2768 2769 return ret; 2770 } 2771 EXPORT_SYMBOL_GPL(iommu_map_sg); 2772 2773 /** 2774 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2775 * @domain: the iommu domain where the fault has happened 2776 * @dev: the device where the fault has happened 2777 * @iova: the faulting address 2778 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2779 * 2780 * This function should be called by the low-level IOMMU implementations 2781 * whenever IOMMU faults happen, to allow high-level users, that are 2782 * interested in such events, to know about them. 2783 * 2784 * This event may be useful for several possible use cases: 2785 * - mere logging of the event 2786 * - dynamic TLB/PTE loading 2787 * - if restarting of the faulting device is required 2788 * 2789 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2790 * PTE/TLB loading will one day be supported, implementations will be able 2791 * to tell whether it succeeded or not according to this return value). 2792 * 2793 * Specifically, -ENOSYS is returned if a fault handler isn't installed 2794 * (though fault handlers can also return -ENOSYS, in case they want to 2795 * elicit the default behavior of the IOMMU drivers). 2796 */ 2797 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 2798 unsigned long iova, int flags) 2799 { 2800 int ret = -ENOSYS; 2801 2802 /* 2803 * if upper layers showed interest and installed a fault handler, 2804 * invoke it. 2805 */ 2806 if (domain->handler) 2807 ret = domain->handler(domain, dev, iova, flags, 2808 domain->handler_token); 2809 2810 trace_io_page_fault(dev, iova, flags); 2811 return ret; 2812 } 2813 EXPORT_SYMBOL_GPL(report_iommu_fault); 2814 2815 static int __init iommu_init(void) 2816 { 2817 iommu_group_kset = kset_create_and_add("iommu_groups", 2818 NULL, kernel_kobj); 2819 BUG_ON(!iommu_group_kset); 2820 2821 iommu_debugfs_setup(); 2822 2823 return 0; 2824 } 2825 core_initcall(iommu_init); 2826 2827 int iommu_enable_nesting(struct iommu_domain *domain) 2828 { 2829 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2830 return -EINVAL; 2831 if (!domain->ops->enable_nesting) 2832 return -EINVAL; 2833 return domain->ops->enable_nesting(domain); 2834 } 2835 EXPORT_SYMBOL_GPL(iommu_enable_nesting); 2836 2837 int iommu_set_pgtable_quirks(struct iommu_domain *domain, 2838 unsigned long quirk) 2839 { 2840 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2841 return -EINVAL; 2842 if (!domain->ops->set_pgtable_quirks) 2843 return -EINVAL; 2844 return domain->ops->set_pgtable_quirks(domain, quirk); 2845 } 2846 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); 2847 2848 /** 2849 * iommu_get_resv_regions - get reserved regions 2850 * @dev: device for which to get reserved regions 2851 * @list: reserved region list for device 2852 * 2853 * This returns a list of reserved IOVA regions specific to this device. 2854 * A domain user should not map IOVA in these ranges. 2855 */ 2856 void iommu_get_resv_regions(struct device *dev, struct list_head *list) 2857 { 2858 const struct iommu_ops *ops = dev_iommu_ops(dev); 2859 2860 if (ops->get_resv_regions) 2861 ops->get_resv_regions(dev, list); 2862 } 2863 EXPORT_SYMBOL_GPL(iommu_get_resv_regions); 2864 2865 /** 2866 * iommu_put_resv_regions - release reserved regions 2867 * @dev: device for which to free reserved regions 2868 * @list: reserved region list for device 2869 * 2870 * This releases a reserved region list acquired by iommu_get_resv_regions(). 2871 */ 2872 void iommu_put_resv_regions(struct device *dev, struct list_head *list) 2873 { 2874 struct iommu_resv_region *entry, *next; 2875 2876 list_for_each_entry_safe(entry, next, list, list) { 2877 if (entry->free) 2878 entry->free(dev, entry); 2879 else 2880 kfree(entry); 2881 } 2882 } 2883 EXPORT_SYMBOL(iommu_put_resv_regions); 2884 2885 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 2886 size_t length, int prot, 2887 enum iommu_resv_type type, 2888 gfp_t gfp) 2889 { 2890 struct iommu_resv_region *region; 2891 2892 region = kzalloc(sizeof(*region), gfp); 2893 if (!region) 2894 return NULL; 2895 2896 INIT_LIST_HEAD(®ion->list); 2897 region->start = start; 2898 region->length = length; 2899 region->prot = prot; 2900 region->type = type; 2901 return region; 2902 } 2903 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); 2904 2905 void iommu_set_default_passthrough(bool cmd_line) 2906 { 2907 if (cmd_line) 2908 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2909 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2910 } 2911 2912 void iommu_set_default_translated(bool cmd_line) 2913 { 2914 if (cmd_line) 2915 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2916 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2917 } 2918 2919 bool iommu_default_passthrough(void) 2920 { 2921 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2922 } 2923 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2924 2925 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 2926 { 2927 const struct iommu_ops *ops = NULL; 2928 struct iommu_device *iommu; 2929 2930 spin_lock(&iommu_device_lock); 2931 list_for_each_entry(iommu, &iommu_device_list, list) 2932 if (iommu->fwnode == fwnode) { 2933 ops = iommu->ops; 2934 break; 2935 } 2936 spin_unlock(&iommu_device_lock); 2937 return ops; 2938 } 2939 2940 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, 2941 const struct iommu_ops *ops) 2942 { 2943 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2944 2945 if (fwspec) 2946 return ops == fwspec->ops ? 0 : -EINVAL; 2947 2948 if (!dev_iommu_get(dev)) 2949 return -ENOMEM; 2950 2951 /* Preallocate for the overwhelmingly common case of 1 ID */ 2952 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2953 if (!fwspec) 2954 return -ENOMEM; 2955 2956 of_node_get(to_of_node(iommu_fwnode)); 2957 fwspec->iommu_fwnode = iommu_fwnode; 2958 fwspec->ops = ops; 2959 dev_iommu_fwspec_set(dev, fwspec); 2960 return 0; 2961 } 2962 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2963 2964 void iommu_fwspec_free(struct device *dev) 2965 { 2966 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2967 2968 if (fwspec) { 2969 fwnode_handle_put(fwspec->iommu_fwnode); 2970 kfree(fwspec); 2971 dev_iommu_fwspec_set(dev, NULL); 2972 } 2973 } 2974 EXPORT_SYMBOL_GPL(iommu_fwspec_free); 2975 2976 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) 2977 { 2978 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2979 int i, new_num; 2980 2981 if (!fwspec) 2982 return -EINVAL; 2983 2984 new_num = fwspec->num_ids + num_ids; 2985 if (new_num > 1) { 2986 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2987 GFP_KERNEL); 2988 if (!fwspec) 2989 return -ENOMEM; 2990 2991 dev_iommu_fwspec_set(dev, fwspec); 2992 } 2993 2994 for (i = 0; i < num_ids; i++) 2995 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2996 2997 fwspec->num_ids = new_num; 2998 return 0; 2999 } 3000 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 3001 3002 /* 3003 * Per device IOMMU features. 3004 */ 3005 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 3006 { 3007 if (dev->iommu && dev->iommu->iommu_dev) { 3008 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 3009 3010 if (ops->dev_enable_feat) 3011 return ops->dev_enable_feat(dev, feat); 3012 } 3013 3014 return -ENODEV; 3015 } 3016 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 3017 3018 /* 3019 * The device drivers should do the necessary cleanups before calling this. 3020 */ 3021 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 3022 { 3023 if (dev->iommu && dev->iommu->iommu_dev) { 3024 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 3025 3026 if (ops->dev_disable_feat) 3027 return ops->dev_disable_feat(dev, feat); 3028 } 3029 3030 return -EBUSY; 3031 } 3032 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 3033 3034 /** 3035 * iommu_setup_default_domain - Set the default_domain for the group 3036 * @group: Group to change 3037 * @target_type: Domain type to set as the default_domain 3038 * 3039 * Allocate a default domain and set it as the current domain on the group. If 3040 * the group already has a default domain it will be changed to the target_type. 3041 * When target_type is 0 the default domain is selected based on driver and 3042 * system preferences. 3043 */ 3044 static int iommu_setup_default_domain(struct iommu_group *group, 3045 int target_type) 3046 { 3047 struct iommu_domain *old_dom = group->default_domain; 3048 struct group_device *gdev; 3049 struct iommu_domain *dom; 3050 bool direct_failed; 3051 int req_type; 3052 int ret; 3053 3054 lockdep_assert_held(&group->mutex); 3055 3056 req_type = iommu_get_default_domain_type(group, target_type); 3057 if (req_type < 0) 3058 return -EINVAL; 3059 3060 dom = iommu_group_alloc_default_domain(group, req_type); 3061 if (IS_ERR(dom)) 3062 return PTR_ERR(dom); 3063 3064 if (group->default_domain == dom) 3065 return 0; 3066 3067 /* 3068 * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be 3069 * mapped before their device is attached, in order to guarantee 3070 * continuity with any FW activity 3071 */ 3072 direct_failed = false; 3073 for_each_group_device(group, gdev) { 3074 if (iommu_create_device_direct_mappings(dom, gdev->dev)) { 3075 direct_failed = true; 3076 dev_warn_once( 3077 gdev->dev->iommu->iommu_dev->dev, 3078 "IOMMU driver was not able to establish FW requested direct mapping."); 3079 } 3080 } 3081 3082 /* We must set default_domain early for __iommu_device_set_domain */ 3083 group->default_domain = dom; 3084 if (!group->domain) { 3085 /* 3086 * Drivers are not allowed to fail the first domain attach. 3087 * The only way to recover from this is to fail attaching the 3088 * iommu driver and call ops->release_device. Put the domain 3089 * in group->default_domain so it is freed after. 3090 */ 3091 ret = __iommu_group_set_domain_internal( 3092 group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); 3093 if (WARN_ON(ret)) 3094 goto out_free_old; 3095 } else { 3096 ret = __iommu_group_set_domain(group, dom); 3097 if (ret) 3098 goto err_restore_def_domain; 3099 } 3100 3101 /* 3102 * Drivers are supposed to allow mappings to be installed in a domain 3103 * before device attachment, but some don't. Hack around this defect by 3104 * trying again after attaching. If this happens it means the device 3105 * will not continuously have the IOMMU_RESV_DIRECT map. 3106 */ 3107 if (direct_failed) { 3108 for_each_group_device(group, gdev) { 3109 ret = iommu_create_device_direct_mappings(dom, gdev->dev); 3110 if (ret) 3111 goto err_restore_domain; 3112 } 3113 } 3114 3115 out_free_old: 3116 if (old_dom) 3117 iommu_domain_free(old_dom); 3118 return ret; 3119 3120 err_restore_domain: 3121 if (old_dom) 3122 __iommu_group_set_domain_internal( 3123 group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); 3124 err_restore_def_domain: 3125 if (old_dom) { 3126 iommu_domain_free(dom); 3127 group->default_domain = old_dom; 3128 } 3129 return ret; 3130 } 3131 3132 /* 3133 * Changing the default domain through sysfs requires the users to unbind the 3134 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ 3135 * transition. Return failure if this isn't met. 3136 * 3137 * We need to consider the race between this and the device release path. 3138 * group->mutex is used here to guarantee that the device release path 3139 * will not be entered at the same time. 3140 */ 3141 static ssize_t iommu_group_store_type(struct iommu_group *group, 3142 const char *buf, size_t count) 3143 { 3144 struct group_device *gdev; 3145 int ret, req_type; 3146 3147 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 3148 return -EACCES; 3149 3150 if (WARN_ON(!group) || !group->default_domain) 3151 return -EINVAL; 3152 3153 if (sysfs_streq(buf, "identity")) 3154 req_type = IOMMU_DOMAIN_IDENTITY; 3155 else if (sysfs_streq(buf, "DMA")) 3156 req_type = IOMMU_DOMAIN_DMA; 3157 else if (sysfs_streq(buf, "DMA-FQ")) 3158 req_type = IOMMU_DOMAIN_DMA_FQ; 3159 else if (sysfs_streq(buf, "auto")) 3160 req_type = 0; 3161 else 3162 return -EINVAL; 3163 3164 mutex_lock(&group->mutex); 3165 /* We can bring up a flush queue without tearing down the domain. */ 3166 if (req_type == IOMMU_DOMAIN_DMA_FQ && 3167 group->default_domain->type == IOMMU_DOMAIN_DMA) { 3168 ret = iommu_dma_init_fq(group->default_domain); 3169 if (ret) 3170 goto out_unlock; 3171 3172 group->default_domain->type = IOMMU_DOMAIN_DMA_FQ; 3173 ret = count; 3174 goto out_unlock; 3175 } 3176 3177 /* Otherwise, ensure that device exists and no driver is bound. */ 3178 if (list_empty(&group->devices) || group->owner_cnt) { 3179 ret = -EPERM; 3180 goto out_unlock; 3181 } 3182 3183 ret = iommu_setup_default_domain(group, req_type); 3184 if (ret) 3185 goto out_unlock; 3186 3187 /* 3188 * Release the mutex here because ops->probe_finalize() call-back of 3189 * some vendor IOMMU drivers calls arm_iommu_attach_device() which 3190 * in-turn might call back into IOMMU core code, where it tries to take 3191 * group->mutex, resulting in a deadlock. 3192 */ 3193 mutex_unlock(&group->mutex); 3194 3195 /* Make sure dma_ops is appropriatley set */ 3196 for_each_group_device(group, gdev) 3197 iommu_group_do_probe_finalize(gdev->dev); 3198 return count; 3199 3200 out_unlock: 3201 mutex_unlock(&group->mutex); 3202 return ret ?: count; 3203 } 3204 3205 /** 3206 * iommu_device_use_default_domain() - Device driver wants to handle device 3207 * DMA through the kernel DMA API. 3208 * @dev: The device. 3209 * 3210 * The device driver about to bind @dev wants to do DMA through the kernel 3211 * DMA API. Return 0 if it is allowed, otherwise an error. 3212 */ 3213 int iommu_device_use_default_domain(struct device *dev) 3214 { 3215 /* Caller is the driver core during the pre-probe path */ 3216 struct iommu_group *group = dev->iommu_group; 3217 int ret = 0; 3218 3219 if (!group) 3220 return 0; 3221 3222 mutex_lock(&group->mutex); 3223 if (group->owner_cnt) { 3224 if (group->domain != group->default_domain || group->owner || 3225 !xa_empty(&group->pasid_array)) { 3226 ret = -EBUSY; 3227 goto unlock_out; 3228 } 3229 } 3230 3231 group->owner_cnt++; 3232 3233 unlock_out: 3234 mutex_unlock(&group->mutex); 3235 return ret; 3236 } 3237 3238 /** 3239 * iommu_device_unuse_default_domain() - Device driver stops handling device 3240 * DMA through the kernel DMA API. 3241 * @dev: The device. 3242 * 3243 * The device driver doesn't want to do DMA through kernel DMA API anymore. 3244 * It must be called after iommu_device_use_default_domain(). 3245 */ 3246 void iommu_device_unuse_default_domain(struct device *dev) 3247 { 3248 /* Caller is the driver core during the post-probe path */ 3249 struct iommu_group *group = dev->iommu_group; 3250 3251 if (!group) 3252 return; 3253 3254 mutex_lock(&group->mutex); 3255 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) 3256 group->owner_cnt--; 3257 3258 mutex_unlock(&group->mutex); 3259 } 3260 3261 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) 3262 { 3263 struct iommu_domain *domain; 3264 3265 if (group->blocking_domain) 3266 return 0; 3267 3268 domain = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED); 3269 if (IS_ERR(domain)) { 3270 /* 3271 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED 3272 * create an empty domain instead. 3273 */ 3274 domain = __iommu_group_domain_alloc(group, 3275 IOMMU_DOMAIN_UNMANAGED); 3276 if (IS_ERR(domain)) 3277 return PTR_ERR(domain); 3278 } 3279 group->blocking_domain = domain; 3280 return 0; 3281 } 3282 3283 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) 3284 { 3285 int ret; 3286 3287 if ((group->domain && group->domain != group->default_domain) || 3288 !xa_empty(&group->pasid_array)) 3289 return -EBUSY; 3290 3291 ret = __iommu_group_alloc_blocking_domain(group); 3292 if (ret) 3293 return ret; 3294 ret = __iommu_group_set_domain(group, group->blocking_domain); 3295 if (ret) 3296 return ret; 3297 3298 group->owner = owner; 3299 group->owner_cnt++; 3300 return 0; 3301 } 3302 3303 /** 3304 * iommu_group_claim_dma_owner() - Set DMA ownership of a group 3305 * @group: The group. 3306 * @owner: Caller specified pointer. Used for exclusive ownership. 3307 * 3308 * This is to support backward compatibility for vfio which manages the dma 3309 * ownership in iommu_group level. New invocations on this interface should be 3310 * prohibited. Only a single owner may exist for a group. 3311 */ 3312 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) 3313 { 3314 int ret = 0; 3315 3316 if (WARN_ON(!owner)) 3317 return -EINVAL; 3318 3319 mutex_lock(&group->mutex); 3320 if (group->owner_cnt) { 3321 ret = -EPERM; 3322 goto unlock_out; 3323 } 3324 3325 ret = __iommu_take_dma_ownership(group, owner); 3326 unlock_out: 3327 mutex_unlock(&group->mutex); 3328 3329 return ret; 3330 } 3331 EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner); 3332 3333 /** 3334 * iommu_device_claim_dma_owner() - Set DMA ownership of a device 3335 * @dev: The device. 3336 * @owner: Caller specified pointer. Used for exclusive ownership. 3337 * 3338 * Claim the DMA ownership of a device. Multiple devices in the same group may 3339 * concurrently claim ownership if they present the same owner value. Returns 0 3340 * on success and error code on failure 3341 */ 3342 int iommu_device_claim_dma_owner(struct device *dev, void *owner) 3343 { 3344 /* Caller must be a probed driver on dev */ 3345 struct iommu_group *group = dev->iommu_group; 3346 int ret = 0; 3347 3348 if (WARN_ON(!owner)) 3349 return -EINVAL; 3350 3351 if (!group) 3352 return -ENODEV; 3353 3354 mutex_lock(&group->mutex); 3355 if (group->owner_cnt) { 3356 if (group->owner != owner) { 3357 ret = -EPERM; 3358 goto unlock_out; 3359 } 3360 group->owner_cnt++; 3361 goto unlock_out; 3362 } 3363 3364 ret = __iommu_take_dma_ownership(group, owner); 3365 unlock_out: 3366 mutex_unlock(&group->mutex); 3367 return ret; 3368 } 3369 EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner); 3370 3371 static void __iommu_release_dma_ownership(struct iommu_group *group) 3372 { 3373 if (WARN_ON(!group->owner_cnt || !group->owner || 3374 !xa_empty(&group->pasid_array))) 3375 return; 3376 3377 group->owner_cnt = 0; 3378 group->owner = NULL; 3379 __iommu_group_set_domain_nofail(group, group->default_domain); 3380 } 3381 3382 /** 3383 * iommu_group_release_dma_owner() - Release DMA ownership of a group 3384 * @group: The group 3385 * 3386 * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). 3387 */ 3388 void iommu_group_release_dma_owner(struct iommu_group *group) 3389 { 3390 mutex_lock(&group->mutex); 3391 __iommu_release_dma_ownership(group); 3392 mutex_unlock(&group->mutex); 3393 } 3394 EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner); 3395 3396 /** 3397 * iommu_device_release_dma_owner() - Release DMA ownership of a device 3398 * @dev: The device. 3399 * 3400 * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). 3401 */ 3402 void iommu_device_release_dma_owner(struct device *dev) 3403 { 3404 /* Caller must be a probed driver on dev */ 3405 struct iommu_group *group = dev->iommu_group; 3406 3407 mutex_lock(&group->mutex); 3408 if (group->owner_cnt > 1) 3409 group->owner_cnt--; 3410 else 3411 __iommu_release_dma_ownership(group); 3412 mutex_unlock(&group->mutex); 3413 } 3414 EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner); 3415 3416 /** 3417 * iommu_group_dma_owner_claimed() - Query group dma ownership status 3418 * @group: The group. 3419 * 3420 * This provides status query on a given group. It is racy and only for 3421 * non-binding status reporting. 3422 */ 3423 bool iommu_group_dma_owner_claimed(struct iommu_group *group) 3424 { 3425 unsigned int user; 3426 3427 mutex_lock(&group->mutex); 3428 user = group->owner_cnt; 3429 mutex_unlock(&group->mutex); 3430 3431 return user; 3432 } 3433 EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); 3434 3435 static int __iommu_set_group_pasid(struct iommu_domain *domain, 3436 struct iommu_group *group, ioasid_t pasid) 3437 { 3438 struct group_device *device; 3439 int ret = 0; 3440 3441 for_each_group_device(group, device) { 3442 ret = domain->ops->set_dev_pasid(domain, device->dev, pasid); 3443 if (ret) 3444 break; 3445 } 3446 3447 return ret; 3448 } 3449 3450 static void __iommu_remove_group_pasid(struct iommu_group *group, 3451 ioasid_t pasid) 3452 { 3453 struct group_device *device; 3454 const struct iommu_ops *ops; 3455 3456 for_each_group_device(group, device) { 3457 ops = dev_iommu_ops(device->dev); 3458 ops->remove_dev_pasid(device->dev, pasid); 3459 } 3460 } 3461 3462 /* 3463 * iommu_attach_device_pasid() - Attach a domain to pasid of device 3464 * @domain: the iommu domain. 3465 * @dev: the attached device. 3466 * @pasid: the pasid of the device. 3467 * 3468 * Return: 0 on success, or an error. 3469 */ 3470 int iommu_attach_device_pasid(struct iommu_domain *domain, 3471 struct device *dev, ioasid_t pasid) 3472 { 3473 /* Caller must be a probed driver on dev */ 3474 struct iommu_group *group = dev->iommu_group; 3475 void *curr; 3476 int ret; 3477 3478 if (!domain->ops->set_dev_pasid) 3479 return -EOPNOTSUPP; 3480 3481 if (!group) 3482 return -ENODEV; 3483 3484 mutex_lock(&group->mutex); 3485 curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL); 3486 if (curr) { 3487 ret = xa_err(curr) ? : -EBUSY; 3488 goto out_unlock; 3489 } 3490 3491 ret = __iommu_set_group_pasid(domain, group, pasid); 3492 if (ret) { 3493 __iommu_remove_group_pasid(group, pasid); 3494 xa_erase(&group->pasid_array, pasid); 3495 } 3496 out_unlock: 3497 mutex_unlock(&group->mutex); 3498 return ret; 3499 } 3500 EXPORT_SYMBOL_GPL(iommu_attach_device_pasid); 3501 3502 /* 3503 * iommu_detach_device_pasid() - Detach the domain from pasid of device 3504 * @domain: the iommu domain. 3505 * @dev: the attached device. 3506 * @pasid: the pasid of the device. 3507 * 3508 * The @domain must have been attached to @pasid of the @dev with 3509 * iommu_attach_device_pasid(). 3510 */ 3511 void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, 3512 ioasid_t pasid) 3513 { 3514 /* Caller must be a probed driver on dev */ 3515 struct iommu_group *group = dev->iommu_group; 3516 3517 mutex_lock(&group->mutex); 3518 __iommu_remove_group_pasid(group, pasid); 3519 WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); 3520 mutex_unlock(&group->mutex); 3521 } 3522 EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); 3523 3524 /* 3525 * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev 3526 * @dev: the queried device 3527 * @pasid: the pasid of the device 3528 * @type: matched domain type, 0 for any match 3529 * 3530 * This is a variant of iommu_get_domain_for_dev(). It returns the existing 3531 * domain attached to pasid of a device. Callers must hold a lock around this 3532 * function, and both iommu_attach/detach_dev_pasid() whenever a domain of 3533 * type is being manipulated. This API does not internally resolve races with 3534 * attach/detach. 3535 * 3536 * Return: attached domain on success, NULL otherwise. 3537 */ 3538 struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev, 3539 ioasid_t pasid, 3540 unsigned int type) 3541 { 3542 /* Caller must be a probed driver on dev */ 3543 struct iommu_group *group = dev->iommu_group; 3544 struct iommu_domain *domain; 3545 3546 if (!group) 3547 return NULL; 3548 3549 xa_lock(&group->pasid_array); 3550 domain = xa_load(&group->pasid_array, pasid); 3551 if (type && domain && domain->type != type) 3552 domain = ERR_PTR(-EBUSY); 3553 xa_unlock(&group->pasid_array); 3554 3555 return domain; 3556 } 3557 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid); 3558 3559 struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, 3560 struct mm_struct *mm) 3561 { 3562 const struct iommu_ops *ops = dev_iommu_ops(dev); 3563 struct iommu_domain *domain; 3564 3565 domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); 3566 if (!domain) 3567 return NULL; 3568 3569 domain->type = IOMMU_DOMAIN_SVA; 3570 mmgrab(mm); 3571 domain->mm = mm; 3572 domain->iopf_handler = iommu_sva_handle_iopf; 3573 domain->fault_data = mm; 3574 3575 return domain; 3576 } 3577 3578 ioasid_t iommu_alloc_global_pasid(struct device *dev) 3579 { 3580 int ret; 3581 3582 /* max_pasids == 0 means that the device does not support PASID */ 3583 if (!dev->iommu->max_pasids) 3584 return IOMMU_PASID_INVALID; 3585 3586 /* 3587 * max_pasids is set up by vendor driver based on number of PASID bits 3588 * supported but the IDA allocation is inclusive. 3589 */ 3590 ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID, 3591 dev->iommu->max_pasids - 1, GFP_KERNEL); 3592 return ret < 0 ? IOMMU_PASID_INVALID : ret; 3593 } 3594 EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid); 3595 3596 void iommu_free_global_pasid(ioasid_t pasid) 3597 { 3598 if (WARN_ON(pasid == IOMMU_PASID_INVALID)) 3599 return; 3600 3601 ida_free(&iommu_global_pasid_ida, pasid); 3602 } 3603 EXPORT_SYMBOL_GPL(iommu_free_global_pasid); 3604