// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt) "iommu: " fmt

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <linux/cdx/cdx_bus.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>
#include <linux/msi.h>
#include <uapi/linux/iommufd.h>

#include "dma-iommu.h"
#include "iommu-priv.h"

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static DEFINE_IDA(iommu_global_pasid_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct xarray pasid_array;
	struct mutex mutex;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *blocking_domain;
	struct iommu_domain *domain;
	struct list_head entry;
	unsigned int owner_cnt;
	void *owner;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

/* Iterate over each struct group_device in a struct iommu_group */
#define for_each_group_device(group, pos) \
	list_for_each_entry(pos, &(group)->devices, list)

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]		= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]	= "direct-relaxable",
	[IOMMU_RESV_RESERVED]		= "reserved",
	[IOMMU_RESV_MSI]		= "msi",
	[IOMMU_RESV_SW_MSI]		= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)
#define IOMMU_CMD_LINE_STRICT		BIT(1)

static int bus_iommu_probe(const struct bus_type *bus);
static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data);
static void iommu_release_device(struct device *dev);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static struct iommu_domain *__iommu_paging_domain_alloc_flags(struct device *dev,
							      unsigned int type,
							      unsigned int flags);

enum {
	IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0,
};

static int __iommu_device_set_domain(struct iommu_group *group,
				     struct device *dev,
				     struct iommu_domain *new_domain,
				     unsigned int flags);
static int __iommu_group_set_domain_internal(struct iommu_group *group,
					     struct iommu_domain *new_domain,
					     unsigned int flags);
static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain)
{
	return __iommu_group_set_domain_internal(group, new_domain, 0);
}
static void __iommu_group_set_domain_nofail(struct iommu_group *group,
					    struct iommu_domain *new_domain)
{
	WARN_ON(__iommu_group_set_domain_internal(
		group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED));
}

static int iommu_setup_default_domain(struct iommu_group *group,
				      int target_type);
static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);
static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
						     struct device *dev);
static void __iommu_group_free_device(struct iommu_group *group,
				      struct group_device *grp_dev);
static void iommu_domain_init(struct iommu_domain *domain, unsigned int type,
			      const struct iommu_ops *ops);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

static const struct bus_type * const iommu_buses[] = {
	&platform_bus_type,
#ifdef CONFIG_PCI
	&pci_bus_type,
#endif
#ifdef CONFIG_ARM_AMBA
	&amba_bustype,
#endif
#ifdef CONFIG_FSL_MC_BUS
	&fsl_mc_bus_type,
#endif
#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
	&host1x_context_device_bus_type,
#endif
#ifdef CONFIG_CDX_BUS
	&cdx_bus_type,
#endif
};

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_DMA_FQ:
		return "Translated";
	case IOMMU_DOMAIN_PLATFORM:
		return "Platform";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	struct notifier_block *nb;

	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	if (!iommu_default_passthrough() && !iommu_dma_strict)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

	pr_info("Default domain type: %s%s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			" (set via kernel command line)" : "");

	if (!iommu_default_passthrough())
		pr_info("DMA domain TLB invalidation policy: %s mode%s\n",
			iommu_dma_strict ? "strict" : "lazy",
			(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
				" (set via kernel command line)" : "");

	nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
		nb[i].notifier_call = iommu_bus_notifier;
		bus_register_notifier(iommu_buses[i], &nb[i]);
	}

	return 0;
}
subsys_initcall(iommu_subsys_init);
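
/*
 * Illustrative note (not upstream documentation): the defaults chosen in
 * iommu_subsys_init() can be overridden from the kernel command line via
 * the early parameters handled later in this file, for example:
 *
 *	iommu.passthrough=1	-> default domain type IOMMU_DOMAIN_IDENTITY
 *	iommu.passthrough=0	-> default domain type IOMMU_DOMAIN_DMA(_FQ)
 *	iommu.strict=1		-> strict TLB invalidation (IOMMU_DOMAIN_DMA)
 *	iommu.strict=0		-> lazy TLB invalidation (IOMMU_DOMAIN_DMA_FQ)
 *
 * See iommu_set_def_domain_type() and iommu_dma_setup() below.
 */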
224 " (set via kernel command line)" : ""); 225 226 nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL); 227 if (!nb) 228 return -ENOMEM; 229 230 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { 231 nb[i].notifier_call = iommu_bus_notifier; 232 bus_register_notifier(iommu_buses[i], &nb[i]); 233 } 234 235 return 0; 236 } 237 subsys_initcall(iommu_subsys_init); 238 239 static int remove_iommu_group(struct device *dev, void *data) 240 { 241 if (dev->iommu && dev->iommu->iommu_dev == data) 242 iommu_release_device(dev); 243 244 return 0; 245 } 246 247 /** 248 * iommu_device_register() - Register an IOMMU hardware instance 249 * @iommu: IOMMU handle for the instance 250 * @ops: IOMMU ops to associate with the instance 251 * @hwdev: (optional) actual instance device, used for fwnode lookup 252 * 253 * Return: 0 on success, or an error. 254 */ 255 int iommu_device_register(struct iommu_device *iommu, 256 const struct iommu_ops *ops, struct device *hwdev) 257 { 258 int err = 0; 259 260 /* We need to be able to take module references appropriately */ 261 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) 262 return -EINVAL; 263 264 iommu->ops = ops; 265 if (hwdev) 266 iommu->fwnode = dev_fwnode(hwdev); 267 268 spin_lock(&iommu_device_lock); 269 list_add_tail(&iommu->list, &iommu_device_list); 270 spin_unlock(&iommu_device_lock); 271 272 for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) 273 err = bus_iommu_probe(iommu_buses[i]); 274 if (err) 275 iommu_device_unregister(iommu); 276 return err; 277 } 278 EXPORT_SYMBOL_GPL(iommu_device_register); 279 280 void iommu_device_unregister(struct iommu_device *iommu) 281 { 282 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) 283 bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group); 284 285 spin_lock(&iommu_device_lock); 286 list_del(&iommu->list); 287 spin_unlock(&iommu_device_lock); 288 289 /* Pairs with the alloc in generic_single_device_group() */ 290 iommu_group_put(iommu->singleton_group); 291 iommu->singleton_group = NULL; 292 } 293 EXPORT_SYMBOL_GPL(iommu_device_unregister); 294 295 #if IS_ENABLED(CONFIG_IOMMUFD_TEST) 296 void iommu_device_unregister_bus(struct iommu_device *iommu, 297 const struct bus_type *bus, 298 struct notifier_block *nb) 299 { 300 bus_unregister_notifier(bus, nb); 301 iommu_device_unregister(iommu); 302 } 303 EXPORT_SYMBOL_GPL(iommu_device_unregister_bus); 304 305 /* 306 * Register an iommu driver against a single bus. This is only used by iommufd 307 * selftest to create a mock iommu driver. The caller must provide 308 * some memory to hold a notifier_block. 

#if IS_ENABLED(CONFIG_IOMMUFD_TEST)
void iommu_device_unregister_bus(struct iommu_device *iommu,
				 const struct bus_type *bus,
				 struct notifier_block *nb)
{
	bus_unregister_notifier(bus, nb);
	iommu_device_unregister(iommu);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);

/*
 * Register an iommu driver against a single bus. This is only used by iommufd
 * selftest to create a mock iommu driver. The caller must provide
 * some memory to hold a notifier_block.
 */
int iommu_device_register_bus(struct iommu_device *iommu,
			      const struct iommu_ops *ops,
			      const struct bus_type *bus,
			      struct notifier_block *nb)
{
	int err;

	iommu->ops = ops;
	nb->notifier_call = iommu_bus_notifier;
	err = bus_register_notifier(bus, nb);
	if (err)
		return err;

	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	err = bus_iommu_probe(bus);
	if (err) {
		iommu_device_unregister_bus(iommu, bus, nb);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register_bus);
#endif

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	lockdep_assert_held(&iommu_probe_device_lock);

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	dev->iommu = NULL;
	if (param->fwspec) {
		fwnode_handle_put(param->fwspec->iommu_fwnode);
		kfree(param->fwspec);
	}
	kfree(param);
}

/*
 * Internal equivalent of device_iommu_mapped() for when we care that a device
 * actually has API ops, and don't want false positives from VFIO-only groups.
 */
static bool dev_has_iommu(struct device *dev)
{
	return dev->iommu && dev->iommu->iommu_dev;
}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{
	u32 max_pasids = 0, bits = 0;
	int ret;

	if (dev_is_pci(dev)) {
		ret = pci_max_pasids(to_pci_dev(dev));
		if (ret > 0)
			max_pasids = ret;
	} else {
		ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
		if (!ret)
			max_pasids = 1UL << bits;
	}

	return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}

void dev_iommu_priv_set(struct device *dev, void *priv)
{
	/* FSL_PAMU does something weird */
	if (!IS_ENABLED(CONFIG_FSL_PAMU))
		lockdep_assert_held(&iommu_probe_device_lock);
	dev->iommu->priv = priv;
}
EXPORT_SYMBOL_GPL(dev_iommu_priv_set);
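
/*
 * Example (illustrative sketch only): a driver's ->probe_device() callback
 * can stash its per-device state with dev_iommu_priv_set() and fetch it
 * later with dev_iommu_priv_get(). The "foo" names are hypothetical:
 *
 *	static struct iommu_device *foo_probe_device(struct device *dev)
 *	{
 *		struct foo_master *master = foo_master_lookup(dev);
 *
 *		if (!master)
 *			return ERR_PTR(-ENODEV);
 *
 *		dev_iommu_priv_set(dev, master);
 *		return &master->foo->iommu;
 *	}
 *
 * and later, e.g. in ->attach_dev():
 *
 *	struct foo_master *master = dev_iommu_priv_get(dev);
 */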

/*
 * Init the dev->iommu and dev->iommu_group in the struct device and get the
 * driver probed
 */
static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
{
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	int ret;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto err_module_put;
	}
	dev->iommu->iommu_dev = iommu_dev;

	ret = iommu_device_link(iommu_dev, dev);
	if (ret)
		goto err_release;

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		group = ERR_PTR(-EINVAL);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto err_unlink;
	}
	dev->iommu_group = group;

	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
	if (ops->is_attach_deferred)
		dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
	return 0;

err_unlink:
	iommu_device_unlink(iommu_dev, dev);
err_release:
	if (ops->release_device)
		ops->release_device(dev);
err_module_put:
	module_put(ops->owner);
err_free:
	dev->iommu->iommu_dev = NULL;
	dev_iommu_free(dev);
	return ret;
}

static void iommu_deinit_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	lockdep_assert_held(&group->mutex);

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	/*
	 * release_device() must stop using any attached domain on the device.
	 * If there are still other devices in the group, they are not affected
	 * by this callback.
	 *
	 * If the iommu driver provides release_domain, the core code ensures
	 * that domain is attached prior to calling release_device. Drivers can
	 * use this to enforce a translation on the idle iommu. Typically, the
	 * global static blocked_domain is a good choice.
	 *
	 * Otherwise, the iommu driver must set the device to either an identity
	 * or a blocking translation in release_device() and stop using any
	 * domain pointer, as it is going to be freed.
	 *
	 * Regardless, if a delayed attach never occurred, then the release
	 * should still avoid touching any hardware configuration either.
	 */
	if (!dev->iommu->attach_deferred && ops->release_domain)
		ops->release_domain->ops->attach_dev(ops->release_domain, dev);

	if (ops->release_device)
		ops->release_device(dev);

	/*
	 * If this is the last driver to use the group then we must free the
	 * domains before we do the module_put().
	 */
	if (list_empty(&group->devices)) {
		if (group->default_domain) {
			iommu_domain_free(group->default_domain);
			group->default_domain = NULL;
		}
		if (group->blocking_domain) {
			iommu_domain_free(group->blocking_domain);
			group->blocking_domain = NULL;
		}
		group->domain = NULL;
	}

	/* Caller must put iommu_group */
	dev->iommu_group = NULL;
	module_put(ops->owner);
	dev_iommu_free(dev);
}

DEFINE_MUTEX(iommu_probe_device_lock);

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops;
	struct iommu_group *group;
	struct group_device *gdev;
	int ret;

	/*
	 * For FDT-based systems and ACPI IORT/VIOT, drivers register IOMMU
	 * instances with non-NULL fwnodes, and client devices should have been
	 * identified with a fwspec by this point. Otherwise, we can currently
	 * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can
	 * be present, and that any of their registered instances has suitable
	 * ops for probing, and thus cheekily co-opt the same mechanism.
	 */
	ops = iommu_fwspec_ops(dev_iommu_fwspec_get(dev));
	if (!ops)
		return -ENODEV;
	/*
	 * Serialise to avoid races between IOMMU drivers registering in
	 * parallel and/or the "replay" calls from ACPI/OF code via client
	 * driver probe. Once the latter have been cleaned up we should
	 * probably be able to use device_lock() here to minimise the scope,
	 * but for now enforcing a simple global ordering is fine.
	 */
	lockdep_assert_held(&iommu_probe_device_lock);

	/* Device is probed already if in a group */
	if (dev->iommu_group)
		return 0;

	ret = iommu_init_device(dev, ops);
	if (ret)
		return ret;

	group = dev->iommu_group;
	gdev = iommu_group_alloc_device(group, dev);
	mutex_lock(&group->mutex);
	if (IS_ERR(gdev)) {
		ret = PTR_ERR(gdev);
		goto err_put_group;
	}

	/*
	 * The gdev must be in the list before calling
	 * iommu_setup_default_domain()
	 */
	list_add_tail(&gdev->list, &group->devices);
	WARN_ON(group->default_domain && !group->domain);
	if (group->default_domain)
		iommu_create_device_direct_mappings(group->default_domain, dev);
	if (group->domain) {
		ret = __iommu_device_set_domain(group, dev, group->domain, 0);
		if (ret)
			goto err_remove_gdev;
	} else if (!group->default_domain && !group_list) {
		ret = iommu_setup_default_domain(group, 0);
		if (ret)
			goto err_remove_gdev;
	} else if (!group->default_domain) {
		/*
		 * With a group_list argument we defer the default_domain setup
		 * to the caller by providing a de-duplicated list of groups
		 * that need further setup.
		 */
		if (list_empty(&group->entry))
			list_add_tail(&group->entry, group_list);
	}

	if (group->default_domain)
		iommu_setup_dma_ops(dev);

	mutex_unlock(&group->mutex);

	return 0;

err_remove_gdev:
	list_del(&gdev->list);
	__iommu_group_free_device(group, gdev);
err_put_group:
	iommu_deinit_device(dev);
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops;
	int ret;

	mutex_lock(&iommu_probe_device_lock);
	ret = __iommu_probe_device(dev, NULL);
	mutex_unlock(&iommu_probe_device_lock);
	if (ret)
		return ret;

	ops = dev_iommu_ops(dev);
	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;
}

static void __iommu_group_free_device(struct iommu_group *group,
				      struct group_device *grp_dev)
{
	struct device *dev = grp_dev->dev;

	sysfs_remove_link(group->devices_kobj, grp_dev->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	/*
	 * If the group has become empty then ownership must have been
	 * released, and the current domain must be set back to NULL or
	 * the default domain.
	 */
	if (list_empty(&group->devices))
		WARN_ON(group->owner_cnt ||
			group->domain != group->default_domain);

	kfree(grp_dev->name);
	kfree(grp_dev);
}

/* Remove the iommu_group from the struct device. */
static void __iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		if (device->dev != dev)
			continue;

		list_del(&device->list);
		__iommu_group_free_device(group, device);
		if (dev_has_iommu(dev))
			iommu_deinit_device(dev);
		else
			dev->iommu_group = NULL;
		break;
	}
	mutex_unlock(&group->mutex);

	/*
	 * Pairs with the get in iommu_init_device() or
	 * iommu_group_add_device()
	 */
	iommu_group_put(group);
}

static void iommu_release_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		__iommu_group_remove_device(dev);

	/* Free any fwspec if no iommu_driver was ever attached */
	if (dev->iommu)
		dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
}
early_param("iommu.strict", iommu_dma_setup);

void iommu_set_dma_strict(void)
{
	iommu_dma_strict = true;
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sysfs_emit(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type, GFP_KERNEL);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}
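
/*
 * Worked example (illustrative, not derived from a specific platform):
 * inserting the same-type regions [0x0000;0x0FFF], [0x0800;0x1FFF] and
 * [0x3000;0x3FFF] yields the sorted list
 *
 *	[0x0000;0x1FFF], [0x3000;0x3FFF]
 *
 * because the first two overlap and are merged, while regions of a
 * different type are kept as separate entries even if they overlap.
 */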

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		struct list_head dev_resv_regions;

		/*
		 * Non-API groups still expose reserved_regions in sysfs,
		 * so filter out calls that get here that way.
		 */
		if (!dev_has_iommu(device->dev))
			break;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
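
/*
 * Example (illustrative sketch only): a caller can collect a group's
 * reserved regions into a local list and is responsible for freeing the
 * entries afterwards, as the sysfs show function below also does:
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region, *next;
 *
 *	iommu_get_group_resv_regions(group, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		pr_debug("resv start %pa length %zu\n",
 *			 &region->start, region->length);
 *	list_for_each_entry_safe(region, next, &resv_regions, list)
 *		kfree(region);
 */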

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	int offset = 0;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n",
					(long long)region->start,
					(long long)(region->start +
						    region->length - 1),
					iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return offset;
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown";

	mutex_lock(&group->mutex);
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA";
			break;
		case IOMMU_DOMAIN_DMA_FQ:
			type = "DMA-FQ";
			break;
		}
	}
	mutex_unlock(&group->mutex);

	return sysfs_emit(buf, "%s\n", type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_free(&iommu_group_ida, group->id);

	/* Domains are free'd by iommu_deinit_device() */
	WARN_ON(group->default_domain);
	WARN_ON(group->blocking_domain);

	kfree(group->name);
	kfree(group);
}

static const struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	xa_init(&group->pasid_array);

	ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
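
/*
 * Example (illustrative sketch only): a driver's ->device_group() callback
 * that shares a group between devices behind the same hypothetical stream
 * ID might reuse an existing group and fall back to allocating a new one.
 * foo_find_sibling_group() is a made-up helper:
 *
 *	static struct iommu_group *foo_device_group(struct device *dev)
 *	{
 *		struct iommu_group *group = foo_find_sibling_group(dev);
 *
 *		if (group)
 *			return group;	(already holds a reference)
 *		return iommu_group_alloc();
 *	}
 *
 * fsl_mc_device_group() later in this file follows the same pattern.
 */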

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);
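
/*
 * Example (illustrative sketch only): after allocating a group, a driver
 * can attach private data and a sysfs-visible name. The data structure,
 * release callback and name below are purely illustrative:
 *
 *	struct foo_group_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *	if (data) {
 *		iommu_group_set_iommudata(group, data, foo_group_data_release);
 *		iommu_group_set_name(group, "foo-domain-0");
 *	}
 *
 * The release callback runs from iommu_group_release() once the last
 * group reference is dropped.
 */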

static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev)
{
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0;
	INIT_LIST_HEAD(&mappings);

	if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size))
		return -EINVAL;

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;
		size_t map_size = 0;

		if (entry->type == IOMMU_RESV_DIRECT)
			dev->iommu->require_direct = 1;

		if ((entry->type != IOMMU_RESV_DIRECT &&
		     entry->type != IOMMU_RESV_DIRECT_RELAXABLE) ||
		    !iommu_is_dma_domain(domain))
			continue;

		start = ALIGN(entry->start, pg_size);
		end = ALIGN(entry->start + entry->length, pg_size);

		for (addr = start; addr <= end; addr += pg_size) {
			phys_addr_t phys_addr;

			if (addr == end)
				goto map_end;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (!phys_addr) {
				map_size += pg_size;
				continue;
			}

map_end:
			if (map_size) {
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot, GFP_KERNEL);
				if (ret)
					goto out;
				map_size = 0;
			}
		}

	}
out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

/* This is undone by __iommu_group_free_device() */
static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
						     struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return device;

err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ERR_PTR(ret);
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	struct group_device *gdev;

	gdev = iommu_group_alloc_device(group, dev);
	if (IS_ERR(gdev))
		return PTR_ERR(gdev);

	iommu_group_ref_get(group);
	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&gdev->list, &group->devices);
	mutex_unlock(&group->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	__iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
/**
 * iommu_group_mutex_assert - Check device group mutex lock
 * @dev: the device that has group param set
 *
 * This function is called by an iommu driver to check whether it holds
 * group mutex lock for the given device or not.
 *
 * Note that this function must be called after device group param is set.
 */
void iommu_group_mutex_assert(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	lockdep_assert_held(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_group_mutex_assert);
#endif

static struct device *iommu_group_first_dev(struct iommu_group *group)
{
	lockdep_assert_held(&group->mutex);
	return list_first_entry(&group->devices, struct group_device, list)->dev;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
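
/*
 * Example (illustrative sketch only): counting the devices in a group with
 * iommu_group_for_each_dev(). The callback must not call back into APIs
 * that take group->mutex, since that mutex is already held here:
 *
 *	static int foo_count_one(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, foo_count_one);
 */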

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);
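
/*
 * Example (illustrative sketch only): the usual lookup pattern pairs
 * iommu_group_get() with iommu_group_put() so the reference taken on the
 * devices_kobj is always dropped:
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (!group)
 *		return -ENODEV;
 *	dev_dbg(dev, "member of iommu group %d\n", iommu_group_id(group));
 *	iommu_group_put(group);
 */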

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per iommu driver instance shared by every device
 * probed by that iommu driver.
 */
struct iommu_group *generic_single_device_group(struct device *dev)
{
	struct iommu_device *iommu = dev->iommu->iommu_dev;

	if (!iommu->singleton_group) {
		struct iommu_group *group;

		group = iommu_group_alloc();
		if (IS_ERR(group))
			return group;
		iommu->singleton_group = group;
	}
	return iommu_group_ref_get(iommu->singleton_group);
}
EXPORT_SYMBOL_GPL(generic_single_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);
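
/*
 * Example (illustrative sketch only): an IOMMU driver wires one of the
 * helpers above into its iommu_ops depending on the bus it serves. The
 * "foo" names are hypothetical, but several in-tree drivers use a similar
 * PCI/non-PCI split:
 *
 *	static struct iommu_group *foo_device_group(struct device *dev)
 *	{
 *		if (dev_is_pci(dev))
 *			return pci_device_group(dev);
 *		return generic_device_group(dev);
 *	}
 *
 *	static const struct iommu_ops foo_iommu_ops = {
 *		...
 *		.device_group = foo_device_group,
 *	};
 */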

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	if (ops->identity_domain)
		return ops->identity_domain;

	/* Older drivers create the identity domain via ops->domain_alloc() */
	if (!ops->domain_alloc)
		return ERR_PTR(-EOPNOTSUPP);

	domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
	if (IS_ERR(domain))
		return domain;
	if (!domain)
		return ERR_PTR(-ENOMEM);

	iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops);
	return domain;
}

static struct iommu_domain *
__iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
	struct device *dev = iommu_group_first_dev(group);
	struct iommu_domain *dom;

	if (group->default_domain && group->default_domain->type == req_type)
		return group->default_domain;

	/*
	 * When allocating the DMA API domain assume that the driver is going to
	 * use PASID and make sure the RID's domain is PASID compatible.
	 */
	if (req_type & __IOMMU_DOMAIN_PAGING) {
		dom = __iommu_paging_domain_alloc_flags(dev, req_type,
			   dev->iommu->max_pasids ? IOMMU_HWPT_ALLOC_PASID : 0);

		/*
		 * If driver does not support PASID feature then
		 * try to allocate non-PASID domain
		 */
		if (PTR_ERR(dom) == -EOPNOTSUPP)
			dom = __iommu_paging_domain_alloc_flags(dev, req_type, 0);

		return dom;
	}

	if (req_type == IOMMU_DOMAIN_IDENTITY)
		return __iommu_alloc_identity_domain(dev);

	return ERR_PTR(-EINVAL);
}

/*
 * req_type of 0 means "auto" which means to select a domain based on
 * iommu_def_domain_type or what the driver actually supports.
 */
static struct iommu_domain *
iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
	const struct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group));
	struct iommu_domain *dom;

	lockdep_assert_held(&group->mutex);

	/*
	 * Allow legacy drivers to specify the domain that will be the default
	 * domain. This should always be either an IDENTITY/BLOCKED/PLATFORM
	 * domain. Do not use in new drivers.
	 */
	if (ops->default_domain) {
		if (req_type != ops->default_domain->type)
			return ERR_PTR(-EINVAL);
		return ops->default_domain;
	}

	if (req_type)
		return __iommu_group_alloc_default_domain(group, req_type);

	/* The driver gave no guidance on what type to use, try the default */
	dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type);
	if (!IS_ERR(dom))
		return dom;

	/* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
		return ERR_PTR(-EINVAL);
	dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA);
	if (IS_ERR(dom))
		return dom;

	pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
		iommu_def_domain_type, group->name);
	return dom;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
	struct list_head *group_list = data;
	int ret;

	mutex_lock(&iommu_probe_device_lock);
	ret = __iommu_probe_device(dev, group_list);
	mutex_unlock(&iommu_probe_device_lock);
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	return 0;
}

/*
 * Combine the driver's chosen def_domain_type across all the devices in a
 * group. Drivers must give a consistent result.
 */
static int iommu_get_def_domain_type(struct iommu_group *group,
				     struct device *dev, int cur_type)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	int type;

	if (ops->default_domain) {
		/*
		 * Drivers that declare a global static default_domain will
		 * always choose that.
		 */
		type = ops->default_domain->type;
	} else {
		if (ops->def_domain_type)
			type = ops->def_domain_type(dev);
		else
			return cur_type;
	}
	if (!type || cur_type == type)
		return cur_type;
	if (!cur_type)
		return type;

	dev_err_ratelimited(
		dev,
		"IOMMU driver error, requesting conflicting def_domain_type, %s and %s, for devices in group %u.\n",
		iommu_domain_type_str(cur_type), iommu_domain_type_str(type),
		group->id);

	/*
	 * Try to recover, drivers are allowed to force IDENTITY or DMA, IDENTITY
	 * takes precedence.
	 */
	if (type == IOMMU_DOMAIN_IDENTITY)
		return type;
	return cur_type;
}
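
/*
 * Example (illustrative sketch only): a driver's ->def_domain_type()
 * callback can force a specific default domain type for known-quirky
 * devices and return 0 ("no preference") otherwise. The quirk test below
 * is hypothetical:
 *
 *	static int foo_def_domain_type(struct device *dev)
 *	{
 *		if (foo_device_needs_identity_mapping(dev))
 *			return IOMMU_DOMAIN_IDENTITY;
 *		return 0;
 *	}
 *
 * All devices in a group must report a consistent answer, as enforced by
 * iommu_get_def_domain_type() above.
 */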

/*
 * A target_type of 0 will select the best domain type. 0 can be returned in
 * this case meaning the global default should be used.
 */
static int iommu_get_default_domain_type(struct iommu_group *group,
					 int target_type)
{
	struct device *untrusted = NULL;
	struct group_device *gdev;
	int driver_type = 0;

	lockdep_assert_held(&group->mutex);

	/*
	 * ARM32 drivers supporting CONFIG_ARM_DMA_USE_IOMMU can declare an
	 * identity_domain and it will automatically become their default
	 * domain. Later on ARM_DMA_USE_IOMMU will install its UNMANAGED domain.
	 * Override the selection to IDENTITY.
	 */
	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
		static_assert(!(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) &&
				IS_ENABLED(CONFIG_IOMMU_DMA)));
		driver_type = IOMMU_DOMAIN_IDENTITY;
	}

	for_each_group_device(group, gdev) {
		driver_type = iommu_get_def_domain_type(group, gdev->dev,
							driver_type);

		if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->untrusted) {
			/*
			 * No ARM32 using systems will set untrusted, it cannot
			 * work.
			 */
			if (WARN_ON(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)))
				return -1;
			untrusted = gdev->dev;
		}
	}

	/*
	 * If the common dma ops are not selected in kconfig then we cannot use
	 * IOMMU_DOMAIN_DMA at all. Force IDENTITY if nothing else has been
	 * selected.
	 */
	if (!IS_ENABLED(CONFIG_IOMMU_DMA)) {
		if (WARN_ON(driver_type == IOMMU_DOMAIN_DMA))
			return -1;
		if (!driver_type)
			driver_type = IOMMU_DOMAIN_IDENTITY;
	}

	if (untrusted) {
		if (driver_type && driver_type != IOMMU_DOMAIN_DMA) {
			dev_err_ratelimited(
				untrusted,
				"Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n",
				group->id, iommu_domain_type_str(driver_type));
			return -1;
		}
		driver_type = IOMMU_DOMAIN_DMA;
	}

	if (target_type) {
		if (driver_type && target_type != driver_type)
			return -1;
		return target_type;
	}
	return driver_type;
}

static void iommu_group_do_probe_finalize(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);
}

static int bus_iommu_probe(const struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		struct group_device *gdev;

		mutex_lock(&group->mutex);

		/* Remove item from the list */
		list_del_init(&group->entry);

		/*
		 * We go to the trouble of deferred default domain creation so
		 * that the cross-group default domain type and the setup of the
		 * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios.
		 */
		ret = iommu_setup_default_domain(group, 0);
		if (ret) {
			mutex_unlock(&group->mutex);
			return ret;
		}
		for_each_group_device(group, gdev)
			iommu_setup_dma_ops(gdev->dev);
		mutex_unlock(&group->mutex);

		/*
		 * FIXME: Mis-locked because the ops->probe_finalize() call-back
		 * of some IOMMU drivers calls arm_iommu_attach_device() which
		 * in-turn might call back into IOMMU core code, where it tries
		 * to take group->mutex, resulting in a deadlock.
		 */
		for_each_group_device(group, gdev)
			iommu_group_do_probe_finalize(gdev->dev);
	}

	return 0;
}

/**
 * device_iommu_capable() - check for a general IOMMU capability
 * @dev: device to which the capability would be relevant, if available
 * @cap: IOMMU capability
 *
 * Return: true if an IOMMU is present and supports the given capability
 * for the given device, otherwise false.
 */
bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	const struct iommu_ops *ops;

	if (!dev_has_iommu(dev))
		return false;

	ops = dev_iommu_ops(dev);
	if (!ops->capable)
		return false;

	return ops->capable(dev, cap);
}
EXPORT_SYMBOL_GPL(device_iommu_capable);
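
/*
 * Example (illustrative sketch only): a caller that wants to map memory
 * cache-coherently can probe for the capability first:
 *
 *	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
 *		prot &= ~IOMMU_CACHE;
 *
 * IOMMU_CAP_CACHE_COHERENCY and IOMMU_CACHE come from <linux/iommu.h>;
 * the surrounding policy is the caller's own.
 */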
1948 */ 1949 void iommu_set_fault_handler(struct iommu_domain *domain, 1950 iommu_fault_handler_t handler, 1951 void *token) 1952 { 1953 BUG_ON(!domain); 1954 1955 domain->handler = handler; 1956 domain->handler_token = token; 1957 } 1958 EXPORT_SYMBOL_GPL(iommu_set_fault_handler); 1959 1960 static void iommu_domain_init(struct iommu_domain *domain, unsigned int type, 1961 const struct iommu_ops *ops) 1962 { 1963 domain->type = type; 1964 domain->owner = ops; 1965 if (!domain->ops) 1966 domain->ops = ops->default_domain_ops; 1967 1968 /* 1969 * If not already set, assume all sizes by default; the driver 1970 * may override this later 1971 */ 1972 if (!domain->pgsize_bitmap) 1973 domain->pgsize_bitmap = ops->pgsize_bitmap; 1974 } 1975 1976 static struct iommu_domain * 1977 __iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type, 1978 unsigned int flags) 1979 { 1980 const struct iommu_ops *ops; 1981 struct iommu_domain *domain; 1982 1983 if (!dev_has_iommu(dev)) 1984 return ERR_PTR(-ENODEV); 1985 1986 ops = dev_iommu_ops(dev); 1987 1988 if (ops->domain_alloc_paging && !flags) 1989 domain = ops->domain_alloc_paging(dev); 1990 else if (ops->domain_alloc_paging_flags) 1991 domain = ops->domain_alloc_paging_flags(dev, flags, NULL); 1992 else if (ops->domain_alloc && !flags) 1993 domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); 1994 else 1995 return ERR_PTR(-EOPNOTSUPP); 1996 1997 if (IS_ERR(domain)) 1998 return domain; 1999 if (!domain) 2000 return ERR_PTR(-ENOMEM); 2001 2002 iommu_domain_init(domain, type, ops); 2003 return domain; 2004 } 2005 2006 /** 2007 * iommu_paging_domain_alloc_flags() - Allocate a paging domain 2008 * @dev: device for which the domain is allocated 2009 * @flags: Bitmap of iommufd_hwpt_alloc_flags 2010 * 2011 * Allocate a paging domain which will be managed by a kernel driver. Return 2012 * allocated domain if successful, or an ERR pointer for failure. 2013 */ 2014 struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, 2015 unsigned int flags) 2016 { 2017 return __iommu_paging_domain_alloc_flags(dev, 2018 IOMMU_DOMAIN_UNMANAGED, flags); 2019 } 2020 EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc_flags); 2021 2022 void iommu_domain_free(struct iommu_domain *domain) 2023 { 2024 if (domain->type == IOMMU_DOMAIN_SVA) 2025 mmdrop(domain->mm); 2026 iommu_put_dma_cookie(domain); 2027 if (domain->ops->free) 2028 domain->ops->free(domain); 2029 } 2030 EXPORT_SYMBOL_GPL(iommu_domain_free); 2031 2032 /* 2033 * Put the group's domain back to the appropriate core-owned domain - either the 2034 * standard kernel-mode DMA configuration or an all-DMA-blocked domain. 
2035 */ 2036 static void __iommu_group_set_core_domain(struct iommu_group *group) 2037 { 2038 struct iommu_domain *new_domain; 2039 2040 if (group->owner) 2041 new_domain = group->blocking_domain; 2042 else 2043 new_domain = group->default_domain; 2044 2045 __iommu_group_set_domain_nofail(group, new_domain); 2046 } 2047 2048 static int __iommu_attach_device(struct iommu_domain *domain, 2049 struct device *dev) 2050 { 2051 int ret; 2052 2053 if (unlikely(domain->ops->attach_dev == NULL)) 2054 return -ENODEV; 2055 2056 ret = domain->ops->attach_dev(domain, dev); 2057 if (ret) 2058 return ret; 2059 dev->iommu->attach_deferred = 0; 2060 trace_attach_device_to_domain(dev); 2061 return 0; 2062 } 2063 2064 /** 2065 * iommu_attach_device - Attach an IOMMU domain to a device 2066 * @domain: IOMMU domain to attach 2067 * @dev: Device that will be attached 2068 * 2069 * Returns 0 on success and error code on failure 2070 * 2071 * Note that EINVAL can be treated as a soft failure, indicating 2072 * that certain configuration of the domain is incompatible with 2073 * the device. In this case attaching a different domain to the 2074 * device may succeed. 2075 */ 2076 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 2077 { 2078 /* Caller must be a probed driver on dev */ 2079 struct iommu_group *group = dev->iommu_group; 2080 int ret; 2081 2082 if (!group) 2083 return -ENODEV; 2084 2085 /* 2086 * Lock the group to make sure the device-count doesn't 2087 * change while we are attaching 2088 */ 2089 mutex_lock(&group->mutex); 2090 ret = -EINVAL; 2091 if (list_count_nodes(&group->devices) != 1) 2092 goto out_unlock; 2093 2094 ret = __iommu_attach_group(domain, group); 2095 2096 out_unlock: 2097 mutex_unlock(&group->mutex); 2098 return ret; 2099 } 2100 EXPORT_SYMBOL_GPL(iommu_attach_device); 2101 2102 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 2103 { 2104 if (dev->iommu && dev->iommu->attach_deferred) 2105 return __iommu_attach_device(domain, dev); 2106 2107 return 0; 2108 } 2109 2110 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 2111 { 2112 /* Caller must be a probed driver on dev */ 2113 struct iommu_group *group = dev->iommu_group; 2114 2115 if (!group) 2116 return; 2117 2118 mutex_lock(&group->mutex); 2119 if (WARN_ON(domain != group->domain) || 2120 WARN_ON(list_count_nodes(&group->devices) != 1)) 2121 goto out_unlock; 2122 __iommu_group_set_core_domain(group); 2123 2124 out_unlock: 2125 mutex_unlock(&group->mutex); 2126 } 2127 EXPORT_SYMBOL_GPL(iommu_detach_device); 2128 2129 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 2130 { 2131 /* Caller must be a probed driver on dev */ 2132 struct iommu_group *group = dev->iommu_group; 2133 2134 if (!group) 2135 return NULL; 2136 2137 return group->domain; 2138 } 2139 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 2140 2141 /* 2142 * For IOMMU_DOMAIN_DMA implementations which already provide their own 2143 * guarantees that the group and its default domain are valid and correct. 
2144 */ 2145 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 2146 { 2147 return dev->iommu_group->default_domain; 2148 } 2149 2150 static int __iommu_attach_group(struct iommu_domain *domain, 2151 struct iommu_group *group) 2152 { 2153 struct device *dev; 2154 2155 if (group->domain && group->domain != group->default_domain && 2156 group->domain != group->blocking_domain) 2157 return -EBUSY; 2158 2159 dev = iommu_group_first_dev(group); 2160 if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner) 2161 return -EINVAL; 2162 2163 return __iommu_group_set_domain(group, domain); 2164 } 2165 2166 /** 2167 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group 2168 * @domain: IOMMU domain to attach 2169 * @group: IOMMU group that will be attached 2170 * 2171 * Returns 0 on success and error code on failure 2172 * 2173 * Note that EINVAL can be treated as a soft failure, indicating 2174 * that certain configuration of the domain is incompatible with 2175 * the group. In this case attaching a different domain to the 2176 * group may succeed. 2177 */ 2178 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 2179 { 2180 int ret; 2181 2182 mutex_lock(&group->mutex); 2183 ret = __iommu_attach_group(domain, group); 2184 mutex_unlock(&group->mutex); 2185 2186 return ret; 2187 } 2188 EXPORT_SYMBOL_GPL(iommu_attach_group); 2189 2190 /** 2191 * iommu_group_replace_domain - replace the domain that a group is attached to 2192 * @group: IOMMU group that will be attached to the new domain 2193 * @new_domain: new IOMMU domain to replace with 2194 * 2195 * This API allows the group to switch domains without being forced to go to 2196 * the blocking domain in-between. 2197 * 2198 * If the currently attached domain is a core domain (e.g. a default_domain), 2199 * it will act just like the iommu_attach_group(). 2200 */ 2201 int iommu_group_replace_domain(struct iommu_group *group, 2202 struct iommu_domain *new_domain) 2203 { 2204 int ret; 2205 2206 if (!new_domain) 2207 return -EINVAL; 2208 2209 mutex_lock(&group->mutex); 2210 ret = __iommu_group_set_domain(group, new_domain); 2211 mutex_unlock(&group->mutex); 2212 return ret; 2213 } 2214 EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, "IOMMUFD_INTERNAL"); 2215 2216 static int __iommu_device_set_domain(struct iommu_group *group, 2217 struct device *dev, 2218 struct iommu_domain *new_domain, 2219 unsigned int flags) 2220 { 2221 int ret; 2222 2223 /* 2224 * If the device requires IOMMU_RESV_DIRECT then we cannot allow 2225 * the blocking domain to be attached as it does not contain the 2226 * required 1:1 mapping. This test effectively excludes the device 2227 * being used with iommu_group_claim_dma_owner() which will block 2228 * vfio and iommufd as well. 2229 */ 2230 if (dev->iommu->require_direct && 2231 (new_domain->type == IOMMU_DOMAIN_BLOCKED || 2232 new_domain == group->blocking_domain)) { 2233 dev_warn(dev, 2234 "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n"); 2235 return -EINVAL; 2236 } 2237 2238 if (dev->iommu->attach_deferred) { 2239 if (new_domain == group->default_domain) 2240 return 0; 2241 dev->iommu->attach_deferred = 0; 2242 } 2243 2244 ret = __iommu_attach_device(new_domain, dev); 2245 if (ret) { 2246 /* 2247 * If we have a blocking domain then try to attach that in hopes 2248 * of avoiding a UAF. Modern drivers should implement blocking 2249 * domains as global statics that cannot fail. 
2250 */ 2251 if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) && 2252 group->blocking_domain && 2253 group->blocking_domain != new_domain) 2254 __iommu_attach_device(group->blocking_domain, dev); 2255 return ret; 2256 } 2257 return 0; 2258 } 2259 2260 /* 2261 * If 0 is returned the group's domain is new_domain. If an error is returned 2262 * then the group's domain will be set back to the existing domain unless 2263 * IOMMU_SET_DOMAIN_MUST_SUCCEED is set, in which case the group's 2264 * domain is left inconsistent. It is a driver bug to fail attach with a 2265 * previously good domain. We try to avoid a kernel UAF because of this. 2266 * 2267 * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU 2268 * API works on domains and devices. Bridge that gap by iterating over the 2269 * devices in a group. Ideally we'd have a single device which represents the 2270 * requestor ID of the group, but we also allow IOMMU drivers to create policy 2271 * defined minimum sets, where the physical hardware may be able to distinguish 2272 * members, but we wish to group them at a higher level (ex. untrusted 2273 * multi-function PCI devices). Thus we attach each device. 2274 */ 2275 static int __iommu_group_set_domain_internal(struct iommu_group *group, 2276 struct iommu_domain *new_domain, 2277 unsigned int flags) 2278 { 2279 struct group_device *last_gdev; 2280 struct group_device *gdev; 2281 int result; 2282 int ret; 2283 2284 lockdep_assert_held(&group->mutex); 2285 2286 if (group->domain == new_domain) 2287 return 0; 2288 2289 if (WARN_ON(!new_domain)) 2290 return -EINVAL; 2291 2292 /* 2293 * Changing the domain is done by calling attach_dev() on the new 2294 * domain. This switch does not have to be atomic and DMA can be 2295 * discarded during the transition. DMA must only be able to access 2296 * either new_domain or group->domain, never something else. 2297 */ 2298 result = 0; 2299 for_each_group_device(group, gdev) { 2300 ret = __iommu_device_set_domain(group, gdev->dev, new_domain, 2301 flags); 2302 if (ret) { 2303 result = ret; 2304 /* 2305 * Keep trying the other devices in the group. If a 2306 * driver fails attach to an otherwise good domain, and 2307 * does not support blocking domains, it should at least 2308 * drop its reference on the current domain so we don't 2309 * UAF. 2310 */ 2311 if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) 2312 continue; 2313 goto err_revert; 2314 } 2315 } 2316 group->domain = new_domain; 2317 return result; 2318 2319 err_revert: 2320 /* 2321 * This is called in error unwind paths. A well-behaved driver should 2322 * always allow us to attach to a domain that was already attached. 2323 */ 2324 last_gdev = gdev; 2325 for_each_group_device(group, gdev) { 2326 /* 2327 * A NULL domain can happen only for first probe, in which case 2328 * we leave group->domain as NULL and let release clean 2329 * everything up.
2330 */ 2331 if (group->domain) 2332 WARN_ON(__iommu_device_set_domain( 2333 group, gdev->dev, group->domain, 2334 IOMMU_SET_DOMAIN_MUST_SUCCEED)); 2335 if (gdev == last_gdev) 2336 break; 2337 } 2338 return ret; 2339 } 2340 2341 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2342 { 2343 mutex_lock(&group->mutex); 2344 __iommu_group_set_core_domain(group); 2345 mutex_unlock(&group->mutex); 2346 } 2347 EXPORT_SYMBOL_GPL(iommu_detach_group); 2348 2349 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2350 { 2351 if (domain->type == IOMMU_DOMAIN_IDENTITY) 2352 return iova; 2353 2354 if (domain->type == IOMMU_DOMAIN_BLOCKED) 2355 return 0; 2356 2357 return domain->ops->iova_to_phys(domain, iova); 2358 } 2359 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2360 2361 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, 2362 phys_addr_t paddr, size_t size, size_t *count) 2363 { 2364 unsigned int pgsize_idx, pgsize_idx_next; 2365 unsigned long pgsizes; 2366 size_t offset, pgsize, pgsize_next; 2367 unsigned long addr_merge = paddr | iova; 2368 2369 /* Page sizes supported by the hardware and small enough for @size */ 2370 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); 2371 2372 /* Constrain the page sizes further based on the maximum alignment */ 2373 if (likely(addr_merge)) 2374 pgsizes &= GENMASK(__ffs(addr_merge), 0); 2375 2376 /* Make sure we have at least one suitable page size */ 2377 BUG_ON(!pgsizes); 2378 2379 /* Pick the biggest page size remaining */ 2380 pgsize_idx = __fls(pgsizes); 2381 pgsize = BIT(pgsize_idx); 2382 if (!count) 2383 return pgsize; 2384 2385 /* Find the next biggest supported page size, if it exists */ 2386 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); 2387 if (!pgsizes) 2388 goto out_set_count; 2389 2390 pgsize_idx_next = __ffs(pgsizes); 2391 pgsize_next = BIT(pgsize_idx_next); 2392 2393 /* 2394 * There's no point trying a bigger page size unless the virtual 2395 * and physical addresses are similarly offset within the larger page. 2396 */ 2397 if ((iova ^ paddr) & (pgsize_next - 1)) 2398 goto out_set_count; 2399 2400 /* Calculate the offset to the next page size alignment boundary */ 2401 offset = pgsize_next - (addr_merge & (pgsize_next - 1)); 2402 2403 /* 2404 * If size is big enough to accommodate the larger page, reduce 2405 * the number of smaller pages.
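 *
 * As a worked example (hypothetical values): with a pgsize_bitmap allowing
 * 4K and 2M pages, iova == paddr == 0x1ff000 and size == 0x402000, the
 * alignment constraint selects pgsize == 4K and pgsize_next == 2M, giving
 * offset == 0x1000. Since 0x1000 + 2M <= size, only one 4K page is counted
 * here, so the follow-up call starts on a 2M boundary and can use the
 * larger page size for the remainder.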
2406 */ 2407 if (offset + pgsize_next <= size) 2408 size = offset; 2409 2410 out_set_count: 2411 *count = size >> pgsize_idx; 2412 return pgsize; 2413 } 2414 2415 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2416 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2417 { 2418 const struct iommu_domain_ops *ops = domain->ops; 2419 unsigned long orig_iova = iova; 2420 unsigned int min_pagesz; 2421 size_t orig_size = size; 2422 phys_addr_t orig_paddr = paddr; 2423 int ret = 0; 2424 2425 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2426 return -EINVAL; 2427 2428 if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL)) 2429 return -ENODEV; 2430 2431 /* find out the minimum page size supported */ 2432 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2433 2434 /* 2435 * both the virtual address and the physical one, as well as 2436 * the size of the mapping, must be aligned (at least) to the 2437 * size of the smallest page supported by the hardware 2438 */ 2439 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2440 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2441 iova, &paddr, size, min_pagesz); 2442 return -EINVAL; 2443 } 2444 2445 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2446 2447 while (size) { 2448 size_t pgsize, count, mapped = 0; 2449 2450 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); 2451 2452 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", 2453 iova, &paddr, pgsize, count); 2454 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, 2455 gfp, &mapped); 2456 /* 2457 * Some pages may have been mapped, even if an error occurred, 2458 * so we should account for those so they can be unmapped. 2459 */ 2460 size -= mapped; 2461 2462 if (ret) 2463 break; 2464 2465 iova += mapped; 2466 paddr += mapped; 2467 } 2468 2469 /* unroll mapping in case something went wrong */ 2470 if (ret) 2471 iommu_unmap(domain, orig_iova, orig_size - size); 2472 else 2473 trace_map(orig_iova, orig_paddr, orig_size); 2474 2475 return ret; 2476 } 2477 2478 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2479 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2480 { 2481 const struct iommu_domain_ops *ops = domain->ops; 2482 int ret; 2483 2484 might_sleep_if(gfpflags_allow_blocking(gfp)); 2485 2486 /* Discourage passing strange GFP flags */ 2487 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2488 __GFP_HIGHMEM))) 2489 return -EINVAL; 2490 2491 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); 2492 if (ret == 0 && ops->iotlb_sync_map) { 2493 ret = ops->iotlb_sync_map(domain, iova, size); 2494 if (ret) 2495 goto out_err; 2496 } 2497 2498 return ret; 2499 2500 out_err: 2501 /* undo mappings already done */ 2502 iommu_unmap(domain, iova, size); 2503 2504 return ret; 2505 } 2506 EXPORT_SYMBOL_GPL(iommu_map); 2507 2508 static size_t __iommu_unmap(struct iommu_domain *domain, 2509 unsigned long iova, size_t size, 2510 struct iommu_iotlb_gather *iotlb_gather) 2511 { 2512 const struct iommu_domain_ops *ops = domain->ops; 2513 size_t unmapped_page, unmapped = 0; 2514 unsigned long orig_iova = iova; 2515 unsigned int min_pagesz; 2516 2517 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2518 return 0; 2519 2520 if (WARN_ON(!ops->unmap_pages || domain->pgsize_bitmap == 0UL)) 2521 return 0; 2522 2523 /* find out the minimum page size supported */ 2524 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2525 2526 /* 2527 * The virtual address, as well as the size 
of the mapping, must be 2528 * aligned (at least) to the size of the smallest page supported 2529 * by the hardware 2530 */ 2531 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2532 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2533 iova, size, min_pagesz); 2534 return 0; 2535 } 2536 2537 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2538 2539 /* 2540 * Keep iterating until we either unmap 'size' bytes (or more) 2541 * or we hit an area that isn't mapped. 2542 */ 2543 while (unmapped < size) { 2544 size_t pgsize, count; 2545 2546 pgsize = iommu_pgsize(domain, iova, iova, size - unmapped, &count); 2547 unmapped_page = ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather); 2548 if (!unmapped_page) 2549 break; 2550 2551 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2552 iova, unmapped_page); 2553 2554 iova += unmapped_page; 2555 unmapped += unmapped_page; 2556 } 2557 2558 trace_unmap(orig_iova, size, unmapped); 2559 return unmapped; 2560 } 2561 2562 /** 2563 * iommu_unmap() - Remove mappings from a range of IOVA 2564 * @domain: Domain to manipulate 2565 * @iova: IO virtual address to start 2566 * @size: Length of the range starting from @iova 2567 * 2568 * iommu_unmap() will remove a translation created by iommu_map(). It cannot 2569 * subdivide a mapping created by iommu_map(), so it should be called with IOVA 2570 * ranges that match what was passed to iommu_map(). The range can aggregate 2571 * contiguous iommu_map() calls so long as no individual range is split. 2572 * 2573 * Returns: Number of bytes of IOVA unmapped. iova + res will be the point 2574 * unmapping stopped. 2575 */ 2576 size_t iommu_unmap(struct iommu_domain *domain, 2577 unsigned long iova, size_t size) 2578 { 2579 struct iommu_iotlb_gather iotlb_gather; 2580 size_t ret; 2581 2582 iommu_iotlb_gather_init(&iotlb_gather); 2583 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2584 iommu_iotlb_sync(domain, &iotlb_gather); 2585 2586 return ret; 2587 } 2588 EXPORT_SYMBOL_GPL(iommu_unmap); 2589 2590 size_t iommu_unmap_fast(struct iommu_domain *domain, 2591 unsigned long iova, size_t size, 2592 struct iommu_iotlb_gather *iotlb_gather) 2593 { 2594 return __iommu_unmap(domain, iova, size, iotlb_gather); 2595 } 2596 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2597 2598 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2599 struct scatterlist *sg, unsigned int nents, int prot, 2600 gfp_t gfp) 2601 { 2602 const struct iommu_domain_ops *ops = domain->ops; 2603 size_t len = 0, mapped = 0; 2604 phys_addr_t start; 2605 unsigned int i = 0; 2606 int ret; 2607 2608 might_sleep_if(gfpflags_allow_blocking(gfp)); 2609 2610 /* Discourage passing strange GFP flags */ 2611 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2612 __GFP_HIGHMEM))) 2613 return -EINVAL; 2614 2615 while (i <= nents) { 2616 phys_addr_t s_phys = sg_phys(sg); 2617 2618 if (len && s_phys != start + len) { 2619 ret = __iommu_map(domain, iova + mapped, start, 2620 len, prot, gfp); 2621 2622 if (ret) 2623 goto out_err; 2624 2625 mapped += len; 2626 len = 0; 2627 } 2628 2629 if (sg_dma_is_bus_address(sg)) 2630 goto next; 2631 2632 if (len) { 2633 len += sg->length; 2634 } else { 2635 len = sg->length; 2636 start = s_phys; 2637 } 2638 2639 next: 2640 if (++i < nents) 2641 sg = sg_next(sg); 2642 } 2643 2644 if (ops->iotlb_sync_map) { 2645 ret = ops->iotlb_sync_map(domain, iova, mapped); 2646 if (ret) 2647 goto out_err; 2648 } 2649 return mapped; 2650 2651 out_err: 2652 /* undo mappings already done */ 2653 
iommu_unmap(domain, iova, mapped); 2654 2655 return ret; 2656 } 2657 EXPORT_SYMBOL_GPL(iommu_map_sg); 2658 2659 /** 2660 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2661 * @domain: the iommu domain where the fault has happened 2662 * @dev: the device where the fault has happened 2663 * @iova: the faulting address 2664 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2665 * 2666 * This function should be called by the low-level IOMMU implementations 2667 * whenever IOMMU faults happen, to allow high-level users, that are 2668 * interested in such events, to know about them. 2669 * 2670 * This event may be useful for several possible use cases: 2671 * - mere logging of the event 2672 * - dynamic TLB/PTE loading 2673 * - if restarting of the faulting device is required 2674 * 2675 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2676 * PTE/TLB loading will one day be supported, implementations will be able 2677 * to tell whether it succeeded or not according to this return value). 2678 * 2679 * Specifically, -ENOSYS is returned if a fault handler isn't installed 2680 * (though fault handlers can also return -ENOSYS, in case they want to 2681 * elicit the default behavior of the IOMMU drivers). 2682 */ 2683 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 2684 unsigned long iova, int flags) 2685 { 2686 int ret = -ENOSYS; 2687 2688 /* 2689 * if upper layers showed interest and installed a fault handler, 2690 * invoke it. 2691 */ 2692 if (domain->handler) 2693 ret = domain->handler(domain, dev, iova, flags, 2694 domain->handler_token); 2695 2696 trace_io_page_fault(dev, iova, flags); 2697 return ret; 2698 } 2699 EXPORT_SYMBOL_GPL(report_iommu_fault); 2700 2701 static int __init iommu_init(void) 2702 { 2703 iommu_group_kset = kset_create_and_add("iommu_groups", 2704 NULL, kernel_kobj); 2705 BUG_ON(!iommu_group_kset); 2706 2707 iommu_debugfs_setup(); 2708 2709 return 0; 2710 } 2711 core_initcall(iommu_init); 2712 2713 int iommu_set_pgtable_quirks(struct iommu_domain *domain, 2714 unsigned long quirk) 2715 { 2716 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2717 return -EINVAL; 2718 if (!domain->ops->set_pgtable_quirks) 2719 return -EINVAL; 2720 return domain->ops->set_pgtable_quirks(domain, quirk); 2721 } 2722 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); 2723 2724 /** 2725 * iommu_get_resv_regions - get reserved regions 2726 * @dev: device for which to get reserved regions 2727 * @list: reserved region list for device 2728 * 2729 * This returns a list of reserved IOVA regions specific to this device. 2730 * A domain user should not map IOVA in these ranges. 2731 */ 2732 void iommu_get_resv_regions(struct device *dev, struct list_head *list) 2733 { 2734 const struct iommu_ops *ops = dev_iommu_ops(dev); 2735 2736 if (ops->get_resv_regions) 2737 ops->get_resv_regions(dev, list); 2738 } 2739 EXPORT_SYMBOL_GPL(iommu_get_resv_regions); 2740 2741 /** 2742 * iommu_put_resv_regions - release reserved regions 2743 * @dev: device for which to free reserved regions 2744 * @list: reserved region list for device 2745 * 2746 * This releases a reserved region list acquired by iommu_get_resv_regions(). 
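 *
 * A minimal usage sketch (illustrative; the local list head and what the
 * caller does with each region are assumptions of the example)::
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list) {
 *		// e.g. carve [region->start, region->start + region->length)
 *		// out of the caller's IOVA allocator
 *	}
 *	iommu_put_resv_regions(dev, &resv_regions);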
2747 */ 2748 void iommu_put_resv_regions(struct device *dev, struct list_head *list) 2749 { 2750 struct iommu_resv_region *entry, *next; 2751 2752 list_for_each_entry_safe(entry, next, list, list) { 2753 if (entry->free) 2754 entry->free(dev, entry); 2755 else 2756 kfree(entry); 2757 } 2758 } 2759 EXPORT_SYMBOL(iommu_put_resv_regions); 2760 2761 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 2762 size_t length, int prot, 2763 enum iommu_resv_type type, 2764 gfp_t gfp) 2765 { 2766 struct iommu_resv_region *region; 2767 2768 region = kzalloc(sizeof(*region), gfp); 2769 if (!region) 2770 return NULL; 2771 2772 INIT_LIST_HEAD(&region->list); 2773 region->start = start; 2774 region->length = length; 2775 region->prot = prot; 2776 region->type = type; 2777 return region; 2778 } 2779 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); 2780 2781 void iommu_set_default_passthrough(bool cmd_line) 2782 { 2783 if (cmd_line) 2784 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2785 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2786 } 2787 2788 void iommu_set_default_translated(bool cmd_line) 2789 { 2790 if (cmd_line) 2791 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2792 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2793 } 2794 2795 bool iommu_default_passthrough(void) 2796 { 2797 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2798 } 2799 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2800 2801 const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode) 2802 { 2803 const struct iommu_ops *ops = NULL; 2804 struct iommu_device *iommu; 2805 2806 spin_lock(&iommu_device_lock); 2807 list_for_each_entry(iommu, &iommu_device_list, list) 2808 if (iommu->fwnode == fwnode) { 2809 ops = iommu->ops; 2810 break; 2811 } 2812 spin_unlock(&iommu_device_lock); 2813 return ops; 2814 } 2815 2816 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode) 2817 { 2818 const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode); 2819 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2820 2821 if (!ops) 2822 return -EPROBE_DEFER; 2823 2824 if (fwspec) 2825 return ops == iommu_fwspec_ops(fwspec) ?
0 : -EINVAL; 2826 2827 if (!dev_iommu_get(dev)) 2828 return -ENOMEM; 2829 2830 /* Preallocate for the overwhelmingly common case of 1 ID */ 2831 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2832 if (!fwspec) 2833 return -ENOMEM; 2834 2835 fwnode_handle_get(iommu_fwnode); 2836 fwspec->iommu_fwnode = iommu_fwnode; 2837 dev_iommu_fwspec_set(dev, fwspec); 2838 return 0; 2839 } 2840 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2841 2842 void iommu_fwspec_free(struct device *dev) 2843 { 2844 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2845 2846 if (fwspec) { 2847 fwnode_handle_put(fwspec->iommu_fwnode); 2848 kfree(fwspec); 2849 dev_iommu_fwspec_set(dev, NULL); 2850 } 2851 } 2852 EXPORT_SYMBOL_GPL(iommu_fwspec_free); 2853 2854 int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids) 2855 { 2856 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2857 int i, new_num; 2858 2859 if (!fwspec) 2860 return -EINVAL; 2861 2862 new_num = fwspec->num_ids + num_ids; 2863 if (new_num > 1) { 2864 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2865 GFP_KERNEL); 2866 if (!fwspec) 2867 return -ENOMEM; 2868 2869 dev_iommu_fwspec_set(dev, fwspec); 2870 } 2871 2872 for (i = 0; i < num_ids; i++) 2873 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2874 2875 fwspec->num_ids = new_num; 2876 return 0; 2877 } 2878 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 2879 2880 /* 2881 * Per device IOMMU features. 2882 */ 2883 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2884 { 2885 if (dev_has_iommu(dev)) { 2886 const struct iommu_ops *ops = dev_iommu_ops(dev); 2887 2888 if (ops->dev_enable_feat) 2889 return ops->dev_enable_feat(dev, feat); 2890 } 2891 2892 return -ENODEV; 2893 } 2894 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 2895 2896 /* 2897 * The device drivers should do the necessary cleanups before calling this. 2898 */ 2899 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 2900 { 2901 if (dev_has_iommu(dev)) { 2902 const struct iommu_ops *ops = dev_iommu_ops(dev); 2903 2904 if (ops->dev_disable_feat) 2905 return ops->dev_disable_feat(dev, feat); 2906 } 2907 2908 return -EBUSY; 2909 } 2910 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 2911 2912 /** 2913 * iommu_setup_default_domain - Set the default_domain for the group 2914 * @group: Group to change 2915 * @target_type: Domain type to set as the default_domain 2916 * 2917 * Allocate a default domain and set it as the current domain on the group. If 2918 * the group already has a default domain it will be changed to the target_type. 2919 * When target_type is 0 the default domain is selected based on driver and 2920 * system preferences. 
2921 */ 2922 static int iommu_setup_default_domain(struct iommu_group *group, 2923 int target_type) 2924 { 2925 struct iommu_domain *old_dom = group->default_domain; 2926 struct group_device *gdev; 2927 struct iommu_domain *dom; 2928 bool direct_failed; 2929 int req_type; 2930 int ret; 2931 2932 lockdep_assert_held(&group->mutex); 2933 2934 req_type = iommu_get_default_domain_type(group, target_type); 2935 if (req_type < 0) 2936 return -EINVAL; 2937 2938 dom = iommu_group_alloc_default_domain(group, req_type); 2939 if (IS_ERR(dom)) 2940 return PTR_ERR(dom); 2941 2942 if (group->default_domain == dom) 2943 return 0; 2944 2945 if (iommu_is_dma_domain(dom)) { 2946 ret = iommu_get_dma_cookie(dom); 2947 if (ret) { 2948 iommu_domain_free(dom); 2949 return ret; 2950 } 2951 } 2952 2953 /* 2954 * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be 2955 * mapped before their device is attached, in order to guarantee 2956 * continuity with any FW activity 2957 */ 2958 direct_failed = false; 2959 for_each_group_device(group, gdev) { 2960 if (iommu_create_device_direct_mappings(dom, gdev->dev)) { 2961 direct_failed = true; 2962 dev_warn_once( 2963 gdev->dev->iommu->iommu_dev->dev, 2964 "IOMMU driver was not able to establish FW requested direct mapping."); 2965 } 2966 } 2967 2968 /* We must set default_domain early for __iommu_device_set_domain */ 2969 group->default_domain = dom; 2970 if (!group->domain) { 2971 /* 2972 * Drivers are not allowed to fail the first domain attach. 2973 * The only way to recover from this is to fail attaching the 2974 * iommu driver and call ops->release_device. Put the domain 2975 * in group->default_domain so it is freed after. 2976 */ 2977 ret = __iommu_group_set_domain_internal( 2978 group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); 2979 if (WARN_ON(ret)) 2980 goto out_free_old; 2981 } else { 2982 ret = __iommu_group_set_domain(group, dom); 2983 if (ret) 2984 goto err_restore_def_domain; 2985 } 2986 2987 /* 2988 * Drivers are supposed to allow mappings to be installed in a domain 2989 * before device attachment, but some don't. Hack around this defect by 2990 * trying again after attaching. If this happens it means the device 2991 * will not continuously have the IOMMU_RESV_DIRECT map. 2992 */ 2993 if (direct_failed) { 2994 for_each_group_device(group, gdev) { 2995 ret = iommu_create_device_direct_mappings(dom, gdev->dev); 2996 if (ret) 2997 goto err_restore_domain; 2998 } 2999 } 3000 3001 out_free_old: 3002 if (old_dom) 3003 iommu_domain_free(old_dom); 3004 return ret; 3005 3006 err_restore_domain: 3007 if (old_dom) 3008 __iommu_group_set_domain_internal( 3009 group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); 3010 err_restore_def_domain: 3011 if (old_dom) { 3012 iommu_domain_free(dom); 3013 group->default_domain = old_dom; 3014 } 3015 return ret; 3016 } 3017 3018 /* 3019 * Changing the default domain through sysfs requires the users to unbind the 3020 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ 3021 * transition. Return failure if this isn't met. 3022 * 3023 * We need to consider the race between this and the device release path. 3024 * group->mutex is used here to guarantee that the device release path 3025 * will not be entered at the same time. 
3026 */ 3027 static ssize_t iommu_group_store_type(struct iommu_group *group, 3028 const char *buf, size_t count) 3029 { 3030 struct group_device *gdev; 3031 int ret, req_type; 3032 3033 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 3034 return -EACCES; 3035 3036 if (WARN_ON(!group) || !group->default_domain) 3037 return -EINVAL; 3038 3039 if (sysfs_streq(buf, "identity")) 3040 req_type = IOMMU_DOMAIN_IDENTITY; 3041 else if (sysfs_streq(buf, "DMA")) 3042 req_type = IOMMU_DOMAIN_DMA; 3043 else if (sysfs_streq(buf, "DMA-FQ")) 3044 req_type = IOMMU_DOMAIN_DMA_FQ; 3045 else if (sysfs_streq(buf, "auto")) 3046 req_type = 0; 3047 else 3048 return -EINVAL; 3049 3050 mutex_lock(&group->mutex); 3051 /* We can bring up a flush queue without tearing down the domain. */ 3052 if (req_type == IOMMU_DOMAIN_DMA_FQ && 3053 group->default_domain->type == IOMMU_DOMAIN_DMA) { 3054 ret = iommu_dma_init_fq(group->default_domain); 3055 if (ret) 3056 goto out_unlock; 3057 3058 group->default_domain->type = IOMMU_DOMAIN_DMA_FQ; 3059 ret = count; 3060 goto out_unlock; 3061 } 3062 3063 /* Otherwise, ensure that a device exists and no driver is bound. */ 3064 if (list_empty(&group->devices) || group->owner_cnt) { 3065 ret = -EPERM; 3066 goto out_unlock; 3067 } 3068 3069 ret = iommu_setup_default_domain(group, req_type); 3070 if (ret) 3071 goto out_unlock; 3072 3073 /* Make sure dma_ops is appropriately set */ 3074 for_each_group_device(group, gdev) 3075 iommu_setup_dma_ops(gdev->dev); 3076 3077 out_unlock: 3078 mutex_unlock(&group->mutex); 3079 return ret ?: count; 3080 } 3081 3082 /** 3083 * iommu_device_use_default_domain() - Device driver wants to handle device 3084 * DMA through the kernel DMA API. 3085 * @dev: The device. 3086 * 3087 * The device driver about to bind @dev wants to do DMA through the kernel 3088 * DMA API. Return 0 if it is allowed, otherwise an error. 3089 */ 3090 int iommu_device_use_default_domain(struct device *dev) 3091 { 3092 /* Caller is the driver core during the pre-probe path */ 3093 struct iommu_group *group = dev->iommu_group; 3094 int ret = 0; 3095 3096 if (!group) 3097 return 0; 3098 3099 mutex_lock(&group->mutex); 3100 if (group->owner_cnt) { 3101 if (group->domain != group->default_domain || group->owner || 3102 !xa_empty(&group->pasid_array)) { 3103 ret = -EBUSY; 3104 goto unlock_out; 3105 } 3106 } 3107 3108 group->owner_cnt++; 3109 3110 unlock_out: 3111 mutex_unlock(&group->mutex); 3112 return ret; 3113 } 3114 3115 /** 3116 * iommu_device_unuse_default_domain() - Device driver stops handling device 3117 * DMA through the kernel DMA API. 3118 * @dev: The device. 3119 * 3120 * The device driver doesn't want to do DMA through the kernel DMA API anymore. 3121 * It must be called after iommu_device_use_default_domain().
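 *
 * Sketch of the expected pairing (normally driven by the driver core around
 * probe; "do_driver_probe" is a hypothetical placeholder)::
 *
 *	ret = iommu_device_use_default_domain(dev);
 *	if (ret)
 *		return ret;
 *	ret = do_driver_probe(dev);
 *	if (ret)
 *		iommu_device_unuse_default_domain(dev);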
3122 */ 3123 void iommu_device_unuse_default_domain(struct device *dev) 3124 { 3125 /* Caller is the driver core during the post-probe path */ 3126 struct iommu_group *group = dev->iommu_group; 3127 3128 if (!group) 3129 return; 3130 3131 mutex_lock(&group->mutex); 3132 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) 3133 group->owner_cnt--; 3134 3135 mutex_unlock(&group->mutex); 3136 } 3137 3138 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) 3139 { 3140 struct device *dev = iommu_group_first_dev(group); 3141 const struct iommu_ops *ops = dev_iommu_ops(dev); 3142 struct iommu_domain *domain; 3143 3144 if (group->blocking_domain) 3145 return 0; 3146 3147 if (ops->blocked_domain) { 3148 group->blocking_domain = ops->blocked_domain; 3149 return 0; 3150 } 3151 3152 /* 3153 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED create an 3154 * empty PAGING domain instead. 3155 */ 3156 domain = iommu_paging_domain_alloc(dev); 3157 if (IS_ERR(domain)) 3158 return PTR_ERR(domain); 3159 group->blocking_domain = domain; 3160 return 0; 3161 } 3162 3163 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) 3164 { 3165 int ret; 3166 3167 if ((group->domain && group->domain != group->default_domain) || 3168 !xa_empty(&group->pasid_array)) 3169 return -EBUSY; 3170 3171 ret = __iommu_group_alloc_blocking_domain(group); 3172 if (ret) 3173 return ret; 3174 ret = __iommu_group_set_domain(group, group->blocking_domain); 3175 if (ret) 3176 return ret; 3177 3178 group->owner = owner; 3179 group->owner_cnt++; 3180 return 0; 3181 } 3182 3183 /** 3184 * iommu_group_claim_dma_owner() - Set DMA ownership of a group 3185 * @group: The group. 3186 * @owner: Caller specified pointer. Used for exclusive ownership. 3187 * 3188 * This is to support backward compatibility for vfio which manages the dma 3189 * ownership in iommu_group level. New invocations on this interface should be 3190 * prohibited. Only a single owner may exist for a group. 3191 */ 3192 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) 3193 { 3194 int ret = 0; 3195 3196 if (WARN_ON(!owner)) 3197 return -EINVAL; 3198 3199 mutex_lock(&group->mutex); 3200 if (group->owner_cnt) { 3201 ret = -EPERM; 3202 goto unlock_out; 3203 } 3204 3205 ret = __iommu_take_dma_ownership(group, owner); 3206 unlock_out: 3207 mutex_unlock(&group->mutex); 3208 3209 return ret; 3210 } 3211 EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner); 3212 3213 /** 3214 * iommu_device_claim_dma_owner() - Set DMA ownership of a device 3215 * @dev: The device. 3216 * @owner: Caller specified pointer. Used for exclusive ownership. 3217 * 3218 * Claim the DMA ownership of a device. Multiple devices in the same group may 3219 * concurrently claim ownership if they present the same owner value. 
Returns 0 3220 * on success and error code on failure 3221 */ 3222 int iommu_device_claim_dma_owner(struct device *dev, void *owner) 3223 { 3224 /* Caller must be a probed driver on dev */ 3225 struct iommu_group *group = dev->iommu_group; 3226 int ret = 0; 3227 3228 if (WARN_ON(!owner)) 3229 return -EINVAL; 3230 3231 if (!group) 3232 return -ENODEV; 3233 3234 mutex_lock(&group->mutex); 3235 if (group->owner_cnt) { 3236 if (group->owner != owner) { 3237 ret = -EPERM; 3238 goto unlock_out; 3239 } 3240 group->owner_cnt++; 3241 goto unlock_out; 3242 } 3243 3244 ret = __iommu_take_dma_ownership(group, owner); 3245 unlock_out: 3246 mutex_unlock(&group->mutex); 3247 return ret; 3248 } 3249 EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner); 3250 3251 static void __iommu_release_dma_ownership(struct iommu_group *group) 3252 { 3253 if (WARN_ON(!group->owner_cnt || !group->owner || 3254 !xa_empty(&group->pasid_array))) 3255 return; 3256 3257 group->owner_cnt = 0; 3258 group->owner = NULL; 3259 __iommu_group_set_domain_nofail(group, group->default_domain); 3260 } 3261 3262 /** 3263 * iommu_group_release_dma_owner() - Release DMA ownership of a group 3264 * @group: The group 3265 * 3266 * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). 3267 */ 3268 void iommu_group_release_dma_owner(struct iommu_group *group) 3269 { 3270 mutex_lock(&group->mutex); 3271 __iommu_release_dma_ownership(group); 3272 mutex_unlock(&group->mutex); 3273 } 3274 EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner); 3275 3276 /** 3277 * iommu_device_release_dma_owner() - Release DMA ownership of a device 3278 * @dev: The device. 3279 * 3280 * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). 3281 */ 3282 void iommu_device_release_dma_owner(struct device *dev) 3283 { 3284 /* Caller must be a probed driver on dev */ 3285 struct iommu_group *group = dev->iommu_group; 3286 3287 mutex_lock(&group->mutex); 3288 if (group->owner_cnt > 1) 3289 group->owner_cnt--; 3290 else 3291 __iommu_release_dma_ownership(group); 3292 mutex_unlock(&group->mutex); 3293 } 3294 EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner); 3295 3296 /** 3297 * iommu_group_dma_owner_claimed() - Query group dma ownership status 3298 * @group: The group. 3299 * 3300 * This provides status query on a given group. It is racy and only for 3301 * non-binding status reporting. 
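 *
 * For context, a single-owner claim/use/release sequence looks like this
 * (illustrative sketch; "my_owner_cookie" is an assumption of the example)::
 *
 *	ret = iommu_device_claim_dma_owner(dev, my_owner_cookie);
 *	if (ret)
 *		return ret;
 *	// iommu_group_dma_owner_claimed(dev->iommu_group) now reports true
 *	// and the group sits behind owner-controlled domains
 *	iommu_device_release_dma_owner(dev);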
3302 */ 3303 bool iommu_group_dma_owner_claimed(struct iommu_group *group) 3304 { 3305 unsigned int user; 3306 3307 mutex_lock(&group->mutex); 3308 user = group->owner_cnt; 3309 mutex_unlock(&group->mutex); 3310 3311 return user; 3312 } 3313 EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); 3314 3315 static int __iommu_set_group_pasid(struct iommu_domain *domain, 3316 struct iommu_group *group, ioasid_t pasid) 3317 { 3318 struct group_device *device, *last_gdev; 3319 int ret; 3320 3321 for_each_group_device(group, device) { 3322 ret = domain->ops->set_dev_pasid(domain, device->dev, 3323 pasid, NULL); 3324 if (ret) 3325 goto err_revert; 3326 } 3327 3328 return 0; 3329 3330 err_revert: 3331 last_gdev = device; 3332 for_each_group_device(group, device) { 3333 const struct iommu_ops *ops = dev_iommu_ops(device->dev); 3334 3335 if (device == last_gdev) 3336 break; 3337 ops->remove_dev_pasid(device->dev, pasid, domain); 3338 } 3339 return ret; 3340 } 3341 3342 static void __iommu_remove_group_pasid(struct iommu_group *group, 3343 ioasid_t pasid, 3344 struct iommu_domain *domain) 3345 { 3346 struct group_device *device; 3347 const struct iommu_ops *ops; 3348 3349 for_each_group_device(group, device) { 3350 ops = dev_iommu_ops(device->dev); 3351 ops->remove_dev_pasid(device->dev, pasid, domain); 3352 } 3353 } 3354 3355 /* 3356 * iommu_attach_device_pasid() - Attach a domain to pasid of device 3357 * @domain: the iommu domain. 3358 * @dev: the attached device. 3359 * @pasid: the pasid of the device. 3360 * @handle: the attach handle. 3361 * 3362 * Return: 0 on success, or an error. 3363 */ 3364 int iommu_attach_device_pasid(struct iommu_domain *domain, 3365 struct device *dev, ioasid_t pasid, 3366 struct iommu_attach_handle *handle) 3367 { 3368 /* Caller must be a probed driver on dev */ 3369 struct iommu_group *group = dev->iommu_group; 3370 struct group_device *device; 3371 int ret; 3372 3373 if (!domain->ops->set_dev_pasid) 3374 return -EOPNOTSUPP; 3375 3376 if (!group) 3377 return -ENODEV; 3378 3379 if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner || 3380 pasid == IOMMU_NO_PASID) 3381 return -EINVAL; 3382 3383 mutex_lock(&group->mutex); 3384 for_each_group_device(group, device) { 3385 if (pasid >= device->dev->iommu->max_pasids) { 3386 ret = -EINVAL; 3387 goto out_unlock; 3388 } 3389 } 3390 3391 if (handle) 3392 handle->domain = domain; 3393 3394 ret = xa_insert(&group->pasid_array, pasid, handle, GFP_KERNEL); 3395 if (ret) 3396 goto out_unlock; 3397 3398 ret = __iommu_set_group_pasid(domain, group, pasid); 3399 if (ret) 3400 xa_erase(&group->pasid_array, pasid); 3401 out_unlock: 3402 mutex_unlock(&group->mutex); 3403 return ret; 3404 } 3405 EXPORT_SYMBOL_GPL(iommu_attach_device_pasid); 3406 3407 /* 3408 * iommu_detach_device_pasid() - Detach the domain from pasid of device 3409 * @domain: the iommu domain. 3410 * @dev: the attached device. 3411 * @pasid: the pasid of the device. 3412 * 3413 * The @domain must have been attached to @pasid of the @dev with 3414 * iommu_attach_device_pasid(). 
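 *
 * A minimal attach/detach sketch (illustrative; the pasid value and the NULL
 * attach handle are assumptions of the example)::
 *
 *	ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
 *	if (ret)
 *		return ret;
 *	// DMA tagged with pasid is now translated by domain
 *	iommu_detach_device_pasid(domain, dev, pasid);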
3415 */ 3416 void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, 3417 ioasid_t pasid) 3418 { 3419 /* Caller must be a probed driver on dev */ 3420 struct iommu_group *group = dev->iommu_group; 3421 3422 mutex_lock(&group->mutex); 3423 __iommu_remove_group_pasid(group, pasid, domain); 3424 xa_erase(&group->pasid_array, pasid); 3425 mutex_unlock(&group->mutex); 3426 } 3427 EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); 3428 3429 ioasid_t iommu_alloc_global_pasid(struct device *dev) 3430 { 3431 int ret; 3432 3433 /* max_pasids == 0 means that the device does not support PASID */ 3434 if (!dev->iommu->max_pasids) 3435 return IOMMU_PASID_INVALID; 3436 3437 /* 3438 * max_pasids is set up by vendor driver based on number of PASID bits 3439 * supported but the IDA allocation is inclusive. 3440 */ 3441 ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID, 3442 dev->iommu->max_pasids - 1, GFP_KERNEL); 3443 return ret < 0 ? IOMMU_PASID_INVALID : ret; 3444 } 3445 EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid); 3446 3447 void iommu_free_global_pasid(ioasid_t pasid) 3448 { 3449 if (WARN_ON(pasid == IOMMU_PASID_INVALID)) 3450 return; 3451 3452 ida_free(&iommu_global_pasid_ida, pasid); 3453 } 3454 EXPORT_SYMBOL_GPL(iommu_free_global_pasid); 3455 3456 /** 3457 * iommu_attach_handle_get - Return the attach handle 3458 * @group: the iommu group that domain was attached to 3459 * @pasid: the pasid within the group 3460 * @type: matched domain type, 0 for any match 3461 * 3462 * Return handle or ERR_PTR(-ENOENT) on none, ERR_PTR(-EBUSY) on mismatch. 3463 * 3464 * Return the attach handle to the caller. The life cycle of an iommu attach 3465 * handle is from the time when the domain is attached to the time when the 3466 * domain is detached. Callers are required to synchronize the call of 3467 * iommu_attach_handle_get() with domain attachment and detachment. The attach 3468 * handle can only be used during its life cycle. 3469 */ 3470 struct iommu_attach_handle * 3471 iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type) 3472 { 3473 struct iommu_attach_handle *handle; 3474 3475 xa_lock(&group->pasid_array); 3476 handle = xa_load(&group->pasid_array, pasid); 3477 if (!handle) 3478 handle = ERR_PTR(-ENOENT); 3479 else if (type && handle->domain->type != type) 3480 handle = ERR_PTR(-EBUSY); 3481 xa_unlock(&group->pasid_array); 3482 3483 return handle; 3484 } 3485 EXPORT_SYMBOL_NS_GPL(iommu_attach_handle_get, "IOMMUFD_INTERNAL"); 3486 3487 /** 3488 * iommu_attach_group_handle - Attach an IOMMU domain to an IOMMU group 3489 * @domain: IOMMU domain to attach 3490 * @group: IOMMU group that will be attached 3491 * @handle: attach handle 3492 * 3493 * Returns 0 on success and error code on failure. 3494 * 3495 * This is a variant of iommu_attach_group(). It allows the caller to provide 3496 * an attach handle and use it when the domain is attached. This is currently 3497 * used by IOMMUFD to deliver the I/O page faults. 
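 *
 * Sketch of the expected pairing (illustrative; the handle embedded in a
 * caller-owned "my_obj" is an assumption of the example)::
 *
 *	ret = iommu_attach_group_handle(domain, group, &my_obj->handle);
 *	if (ret)
 *		return ret;
 *	// faults on this attachment can be resolved back to my_obj via the handle
 *	iommu_detach_group_handle(domain, group);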
3498 */ 3499 int iommu_attach_group_handle(struct iommu_domain *domain, 3500 struct iommu_group *group, 3501 struct iommu_attach_handle *handle) 3502 { 3503 int ret; 3504 3505 if (handle) 3506 handle->domain = domain; 3507 3508 mutex_lock(&group->mutex); 3509 ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL); 3510 if (ret) 3511 goto err_unlock; 3512 3513 ret = __iommu_attach_group(domain, group); 3514 if (ret) 3515 goto err_erase; 3516 mutex_unlock(&group->mutex); 3517 3518 return 0; 3519 err_erase: 3520 xa_erase(&group->pasid_array, IOMMU_NO_PASID); 3521 err_unlock: 3522 mutex_unlock(&group->mutex); 3523 return ret; 3524 } 3525 EXPORT_SYMBOL_NS_GPL(iommu_attach_group_handle, "IOMMUFD_INTERNAL"); 3526 3527 /** 3528 * iommu_detach_group_handle - Detach an IOMMU domain from an IOMMU group 3529 * @domain: IOMMU domain to attach 3530 * @group: IOMMU group that will be attached 3531 * 3532 * Detach the specified IOMMU domain from the specified IOMMU group. 3533 * It must be used in conjunction with iommu_attach_group_handle(). 3534 */ 3535 void iommu_detach_group_handle(struct iommu_domain *domain, 3536 struct iommu_group *group) 3537 { 3538 mutex_lock(&group->mutex); 3539 __iommu_group_set_core_domain(group); 3540 xa_erase(&group->pasid_array, IOMMU_NO_PASID); 3541 mutex_unlock(&group->mutex); 3542 } 3543 EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, "IOMMUFD_INTERNAL"); 3544 3545 /** 3546 * iommu_replace_group_handle - replace the domain that a group is attached to 3547 * @group: IOMMU group that will be attached to the new domain 3548 * @new_domain: new IOMMU domain to replace with 3549 * @handle: attach handle 3550 * 3551 * This is a variant of iommu_group_replace_domain(). It allows the caller to 3552 * provide an attach handle for the new domain and use it when the domain is 3553 * attached. 3554 */ 3555 int iommu_replace_group_handle(struct iommu_group *group, 3556 struct iommu_domain *new_domain, 3557 struct iommu_attach_handle *handle) 3558 { 3559 void *curr; 3560 int ret; 3561 3562 if (!new_domain) 3563 return -EINVAL; 3564 3565 mutex_lock(&group->mutex); 3566 if (handle) { 3567 ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL); 3568 if (ret) 3569 goto err_unlock; 3570 handle->domain = new_domain; 3571 } 3572 3573 ret = __iommu_group_set_domain(group, new_domain); 3574 if (ret) 3575 goto err_release; 3576 3577 curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL); 3578 WARN_ON(xa_is_err(curr)); 3579 3580 mutex_unlock(&group->mutex); 3581 3582 return 0; 3583 err_release: 3584 xa_release(&group->pasid_array, IOMMU_NO_PASID); 3585 err_unlock: 3586 mutex_unlock(&group->mutex); 3587 return ret; 3588 } 3589 EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, "IOMMUFD_INTERNAL"); 3590
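/*
 * Illustrative sketch of how a kernel consumer might drive the paging domain
 * API above. The dev, group, iova and page variables, the 2M length and the
 * error labels are assumptions of the example, not requirements of the API.
 *
 *	struct iommu_domain *domain;
 *	int ret;
 *
 *	domain = iommu_paging_domain_alloc_flags(dev, 0);
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 *
 *	ret = iommu_attach_group(domain, group);
 *	if (ret)
 *		goto out_free;
 *
 *	// Map a physically contiguous 2M buffer at a caller-chosen IOVA.
 *	ret = iommu_map(domain, iova, page_to_phys(page), SZ_2M,
 *			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	if (ret)
 *		goto out_detach;
 *
 *	// ... run DMA through the mapping ...
 *
 *	iommu_unmap(domain, iova, SZ_2M);
 * out_detach:
 *	iommu_detach_group(domain, group);
 * out_free:
 *	iommu_domain_free(domain);
 */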