// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <linux/cdx/cdx_bus.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>
#include <linux/msi.h>
#include <uapi/linux/iommufd.h>

#include "dma-iommu.h"
#include "iommu-priv.h"

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static DEFINE_IDA(iommu_global_pasid_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

/* Tags used with xa_tag_pointer() in group->pasid_array */
enum { IOMMU_PASID_ARRAY_DOMAIN = 0, IOMMU_PASID_ARRAY_HANDLE = 1 };

struct iommu_group {
        struct kobject kobj;
        struct kobject *devices_kobj;
        struct list_head devices;
        struct xarray pasid_array;
        struct mutex mutex;
        void *iommu_data;
        void (*iommu_data_release)(void *iommu_data);
        char *name;
        int id;
        struct iommu_domain *default_domain;
        struct iommu_domain *blocking_domain;
        struct iommu_domain *domain;
        struct list_head entry;
        unsigned int owner_cnt;
        void *owner;
};

struct group_device {
        struct list_head list;
        struct device *dev;
        char *name;
};

/* Iterate over each struct group_device in a struct iommu_group */
#define for_each_group_device(group, pos) \
        list_for_each_entry(pos, &(group)->devices, list)

struct iommu_group_attribute {
        struct attribute attr;
        ssize_t (*show)(struct iommu_group *group, char *buf);
        ssize_t (*store)(struct iommu_group *group,
                         const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
        [IOMMU_RESV_DIRECT]             = "direct",
        [IOMMU_RESV_DIRECT_RELAXABLE]   = "direct-relaxable",
        [IOMMU_RESV_RESERVED]           = "reserved",
        [IOMMU_RESV_MSI]                = "msi",
        [IOMMU_RESV_SW_MSI]             = "msi",
};

#define IOMMU_CMD_LINE_DMA_API          BIT(0)
#define IOMMU_CMD_LINE_STRICT           BIT(1)

static int bus_iommu_probe(const struct bus_type *bus);
static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data);
static void iommu_release_device(struct device *dev);
static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group);
static struct iommu_domain *__iommu_paging_domain_alloc_flags(struct device *dev,
                                                              unsigned int type,
                                                              unsigned int flags);

enum {
        IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0,
};

static int __iommu_device_set_domain(struct iommu_group *group,
                                     struct device *dev,
                                     struct
iommu_domain *new_domain, 117 unsigned int flags); 118 static int __iommu_group_set_domain_internal(struct iommu_group *group, 119 struct iommu_domain *new_domain, 120 unsigned int flags); 121 static int __iommu_group_set_domain(struct iommu_group *group, 122 struct iommu_domain *new_domain) 123 { 124 return __iommu_group_set_domain_internal(group, new_domain, 0); 125 } 126 static void __iommu_group_set_domain_nofail(struct iommu_group *group, 127 struct iommu_domain *new_domain) 128 { 129 WARN_ON(__iommu_group_set_domain_internal( 130 group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED)); 131 } 132 133 static int iommu_setup_default_domain(struct iommu_group *group, 134 int target_type); 135 static int iommu_create_device_direct_mappings(struct iommu_domain *domain, 136 struct device *dev); 137 static ssize_t iommu_group_store_type(struct iommu_group *group, 138 const char *buf, size_t count); 139 static struct group_device *iommu_group_alloc_device(struct iommu_group *group, 140 struct device *dev); 141 static void __iommu_group_free_device(struct iommu_group *group, 142 struct group_device *grp_dev); 143 static void iommu_domain_init(struct iommu_domain *domain, unsigned int type, 144 const struct iommu_ops *ops); 145 146 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 147 struct iommu_group_attribute iommu_group_attr_##_name = \ 148 __ATTR(_name, _mode, _show, _store) 149 150 #define to_iommu_group_attr(_attr) \ 151 container_of(_attr, struct iommu_group_attribute, attr) 152 #define to_iommu_group(_kobj) \ 153 container_of(_kobj, struct iommu_group, kobj) 154 155 static LIST_HEAD(iommu_device_list); 156 static DEFINE_SPINLOCK(iommu_device_lock); 157 158 static const struct bus_type * const iommu_buses[] = { 159 &platform_bus_type, 160 #ifdef CONFIG_PCI 161 &pci_bus_type, 162 #endif 163 #ifdef CONFIG_ARM_AMBA 164 &amba_bustype, 165 #endif 166 #ifdef CONFIG_FSL_MC_BUS 167 &fsl_mc_bus_type, 168 #endif 169 #ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS 170 &host1x_context_device_bus_type, 171 #endif 172 #ifdef CONFIG_CDX_BUS 173 &cdx_bus_type, 174 #endif 175 }; 176 177 /* 178 * Use a function instead of an array here because the domain-type is a 179 * bit-field, so an array would waste memory. 180 */ 181 static const char *iommu_domain_type_str(unsigned int t) 182 { 183 switch (t) { 184 case IOMMU_DOMAIN_BLOCKED: 185 return "Blocked"; 186 case IOMMU_DOMAIN_IDENTITY: 187 return "Passthrough"; 188 case IOMMU_DOMAIN_UNMANAGED: 189 return "Unmanaged"; 190 case IOMMU_DOMAIN_DMA: 191 case IOMMU_DOMAIN_DMA_FQ: 192 return "Translated"; 193 case IOMMU_DOMAIN_PLATFORM: 194 return "Platform"; 195 default: 196 return "Unknown"; 197 } 198 } 199 200 static int __init iommu_subsys_init(void) 201 { 202 struct notifier_block *nb; 203 204 if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) { 205 if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH)) 206 iommu_set_default_passthrough(false); 207 else 208 iommu_set_default_translated(false); 209 210 if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { 211 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n"); 212 iommu_set_default_translated(false); 213 } 214 } 215 216 if (!iommu_default_passthrough() && !iommu_dma_strict) 217 iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ; 218 219 pr_info("Default domain type: %s%s\n", 220 iommu_domain_type_str(iommu_def_domain_type), 221 (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ? 
222 " (set via kernel command line)" : ""); 223 224 if (!iommu_default_passthrough()) 225 pr_info("DMA domain TLB invalidation policy: %s mode%s\n", 226 iommu_dma_strict ? "strict" : "lazy", 227 (iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ? 228 " (set via kernel command line)" : ""); 229 230 nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL); 231 if (!nb) 232 return -ENOMEM; 233 234 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { 235 nb[i].notifier_call = iommu_bus_notifier; 236 bus_register_notifier(iommu_buses[i], &nb[i]); 237 } 238 239 return 0; 240 } 241 subsys_initcall(iommu_subsys_init); 242 243 static int remove_iommu_group(struct device *dev, void *data) 244 { 245 if (dev->iommu && dev->iommu->iommu_dev == data) 246 iommu_release_device(dev); 247 248 return 0; 249 } 250 251 /** 252 * iommu_device_register() - Register an IOMMU hardware instance 253 * @iommu: IOMMU handle for the instance 254 * @ops: IOMMU ops to associate with the instance 255 * @hwdev: (optional) actual instance device, used for fwnode lookup 256 * 257 * Return: 0 on success, or an error. 258 */ 259 int iommu_device_register(struct iommu_device *iommu, 260 const struct iommu_ops *ops, struct device *hwdev) 261 { 262 int err = 0; 263 264 /* We need to be able to take module references appropriately */ 265 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) 266 return -EINVAL; 267 268 iommu->ops = ops; 269 if (hwdev) 270 iommu->fwnode = dev_fwnode(hwdev); 271 272 spin_lock(&iommu_device_lock); 273 list_add_tail(&iommu->list, &iommu_device_list); 274 spin_unlock(&iommu_device_lock); 275 276 for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) 277 err = bus_iommu_probe(iommu_buses[i]); 278 if (err) 279 iommu_device_unregister(iommu); 280 else 281 WRITE_ONCE(iommu->ready, true); 282 return err; 283 } 284 EXPORT_SYMBOL_GPL(iommu_device_register); 285 286 void iommu_device_unregister(struct iommu_device *iommu) 287 { 288 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) 289 bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group); 290 291 spin_lock(&iommu_device_lock); 292 list_del(&iommu->list); 293 spin_unlock(&iommu_device_lock); 294 295 /* Pairs with the alloc in generic_single_device_group() */ 296 iommu_group_put(iommu->singleton_group); 297 iommu->singleton_group = NULL; 298 } 299 EXPORT_SYMBOL_GPL(iommu_device_unregister); 300 301 #if IS_ENABLED(CONFIG_IOMMUFD_TEST) 302 void iommu_device_unregister_bus(struct iommu_device *iommu, 303 const struct bus_type *bus, 304 struct notifier_block *nb) 305 { 306 bus_unregister_notifier(bus, nb); 307 iommu_device_unregister(iommu); 308 } 309 EXPORT_SYMBOL_GPL(iommu_device_unregister_bus); 310 311 /* 312 * Register an iommu driver against a single bus. This is only used by iommufd 313 * selftest to create a mock iommu driver. The caller must provide 314 * some memory to hold a notifier_block. 
 */
int iommu_device_register_bus(struct iommu_device *iommu,
                              const struct iommu_ops *ops,
                              const struct bus_type *bus,
                              struct notifier_block *nb)
{
        int err;

        iommu->ops = ops;
        nb->notifier_call = iommu_bus_notifier;
        err = bus_register_notifier(bus, nb);
        if (err)
                return err;

        spin_lock(&iommu_device_lock);
        list_add_tail(&iommu->list, &iommu_device_list);
        spin_unlock(&iommu_device_lock);

        err = bus_iommu_probe(bus);
        if (err) {
                iommu_device_unregister_bus(iommu, bus, nb);
                return err;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register_bus);
#endif

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
        struct dev_iommu *param = dev->iommu;

        lockdep_assert_held(&iommu_probe_device_lock);

        if (param)
                return param;

        param = kzalloc(sizeof(*param), GFP_KERNEL);
        if (!param)
                return NULL;

        mutex_init(&param->lock);
        dev->iommu = param;
        return param;
}

void dev_iommu_free(struct device *dev)
{
        struct dev_iommu *param = dev->iommu;

        dev->iommu = NULL;
        if (param->fwspec) {
                fwnode_handle_put(param->fwspec->iommu_fwnode);
                kfree(param->fwspec);
        }
        kfree(param);
}

/*
 * Internal equivalent of device_iommu_mapped() for when we care that a device
 * actually has API ops, and don't want false positives from VFIO-only groups.
 */
static bool dev_has_iommu(struct device *dev)
{
        return dev->iommu && dev->iommu->iommu_dev;
}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{
        u32 max_pasids = 0, bits = 0;
        int ret;

        if (dev_is_pci(dev)) {
                ret = pci_max_pasids(to_pci_dev(dev));
                if (ret > 0)
                        max_pasids = ret;
        } else {
                ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
                if (!ret)
                        max_pasids = 1UL << bits;
        }

        return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}

void dev_iommu_priv_set(struct device *dev, void *priv)
{
        /* FSL_PAMU does something weird */
        if (!IS_ENABLED(CONFIG_FSL_PAMU))
                lockdep_assert_held(&iommu_probe_device_lock);
        dev->iommu->priv = priv;
}
EXPORT_SYMBOL_GPL(dev_iommu_priv_set);

/*
 * Init the dev->iommu and dev->iommu_group in the struct device and get the
 * driver probed
 */
static int iommu_init_device(struct device *dev)
{
        const struct iommu_ops *ops;
        struct iommu_device *iommu_dev;
        struct iommu_group *group;
        int ret;

        if (!dev_iommu_get(dev))
                return -ENOMEM;
        /*
         * For FDT-based systems and ACPI IORT/VIOT, the common firmware parsing
         * is buried in the bus dma_configure path. Properly unpicking that is
         * still a big job, so for now just invoke the whole thing. The device
         * already having a driver bound means dma_configure has already run and
         * found no IOMMU to wait for, so there's no point calling it again.
428 */ 429 if (!dev->iommu->fwspec && !dev->driver && dev->bus->dma_configure) { 430 mutex_unlock(&iommu_probe_device_lock); 431 dev->bus->dma_configure(dev); 432 mutex_lock(&iommu_probe_device_lock); 433 /* If another instance finished the job for us, skip it */ 434 if (!dev->iommu || dev->iommu_group) 435 return -ENODEV; 436 } 437 /* 438 * At this point, relevant devices either now have a fwspec which will 439 * match ops registered with a non-NULL fwnode, or we can reasonably 440 * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can 441 * be present, and that any of their registered instances has suitable 442 * ops for probing, and thus cheekily co-opt the same mechanism. 443 */ 444 ops = iommu_fwspec_ops(dev->iommu->fwspec); 445 if (!ops) { 446 ret = -ENODEV; 447 goto err_free; 448 } 449 450 if (!try_module_get(ops->owner)) { 451 ret = -EINVAL; 452 goto err_free; 453 } 454 455 iommu_dev = ops->probe_device(dev); 456 if (IS_ERR(iommu_dev)) { 457 ret = PTR_ERR(iommu_dev); 458 goto err_module_put; 459 } 460 dev->iommu->iommu_dev = iommu_dev; 461 462 ret = iommu_device_link(iommu_dev, dev); 463 if (ret) 464 goto err_release; 465 466 group = ops->device_group(dev); 467 if (WARN_ON_ONCE(group == NULL)) 468 group = ERR_PTR(-EINVAL); 469 if (IS_ERR(group)) { 470 ret = PTR_ERR(group); 471 goto err_unlink; 472 } 473 dev->iommu_group = group; 474 475 dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev); 476 if (ops->is_attach_deferred) 477 dev->iommu->attach_deferred = ops->is_attach_deferred(dev); 478 return 0; 479 480 err_unlink: 481 iommu_device_unlink(iommu_dev, dev); 482 err_release: 483 if (ops->release_device) 484 ops->release_device(dev); 485 err_module_put: 486 module_put(ops->owner); 487 err_free: 488 dev->iommu->iommu_dev = NULL; 489 dev_iommu_free(dev); 490 return ret; 491 } 492 493 static void iommu_deinit_device(struct device *dev) 494 { 495 struct iommu_group *group = dev->iommu_group; 496 const struct iommu_ops *ops = dev_iommu_ops(dev); 497 498 lockdep_assert_held(&group->mutex); 499 500 iommu_device_unlink(dev->iommu->iommu_dev, dev); 501 502 /* 503 * release_device() must stop using any attached domain on the device. 504 * If there are still other devices in the group, they are not affected 505 * by this callback. 506 * 507 * If the iommu driver provides release_domain, the core code ensures 508 * that domain is attached prior to calling release_device. Drivers can 509 * use this to enforce a translation on the idle iommu. Typically, the 510 * global static blocked_domain is a good choice. 511 * 512 * Otherwise, the iommu driver must set the device to either an identity 513 * or a blocking translation in release_device() and stop using any 514 * domain pointer, as it is going to be freed. 515 * 516 * Regardless, if a delayed attach never occurred, then the release 517 * should still avoid touching any hardware configuration either. 518 */ 519 if (!dev->iommu->attach_deferred && ops->release_domain) 520 ops->release_domain->ops->attach_dev(ops->release_domain, dev); 521 522 if (ops->release_device) 523 ops->release_device(dev); 524 525 /* 526 * If this is the last driver to use the group then we must free the 527 * domains before we do the module_put(). 
528 */ 529 if (list_empty(&group->devices)) { 530 if (group->default_domain) { 531 iommu_domain_free(group->default_domain); 532 group->default_domain = NULL; 533 } 534 if (group->blocking_domain) { 535 iommu_domain_free(group->blocking_domain); 536 group->blocking_domain = NULL; 537 } 538 group->domain = NULL; 539 } 540 541 /* Caller must put iommu_group */ 542 dev->iommu_group = NULL; 543 module_put(ops->owner); 544 dev_iommu_free(dev); 545 #ifdef CONFIG_IOMMU_DMA 546 dev->dma_iommu = false; 547 #endif 548 } 549 550 static struct iommu_domain *pasid_array_entry_to_domain(void *entry) 551 { 552 if (xa_pointer_tag(entry) == IOMMU_PASID_ARRAY_DOMAIN) 553 return xa_untag_pointer(entry); 554 return ((struct iommu_attach_handle *)xa_untag_pointer(entry))->domain; 555 } 556 557 DEFINE_MUTEX(iommu_probe_device_lock); 558 559 static int __iommu_probe_device(struct device *dev, struct list_head *group_list) 560 { 561 struct iommu_group *group; 562 struct group_device *gdev; 563 int ret; 564 565 /* 566 * Serialise to avoid races between IOMMU drivers registering in 567 * parallel and/or the "replay" calls from ACPI/OF code via client 568 * driver probe. Once the latter have been cleaned up we should 569 * probably be able to use device_lock() here to minimise the scope, 570 * but for now enforcing a simple global ordering is fine. 571 */ 572 lockdep_assert_held(&iommu_probe_device_lock); 573 574 /* Device is probed already if in a group */ 575 if (dev->iommu_group) 576 return 0; 577 578 ret = iommu_init_device(dev); 579 if (ret) 580 return ret; 581 /* 582 * And if we do now see any replay calls, they would indicate someone 583 * misusing the dma_configure path outside bus code. 584 */ 585 if (dev->driver) 586 dev_WARN(dev, "late IOMMU probe at driver bind, something fishy here!\n"); 587 588 group = dev->iommu_group; 589 gdev = iommu_group_alloc_device(group, dev); 590 mutex_lock(&group->mutex); 591 if (IS_ERR(gdev)) { 592 ret = PTR_ERR(gdev); 593 goto err_put_group; 594 } 595 596 /* 597 * The gdev must be in the list before calling 598 * iommu_setup_default_domain() 599 */ 600 list_add_tail(&gdev->list, &group->devices); 601 WARN_ON(group->default_domain && !group->domain); 602 if (group->default_domain) 603 iommu_create_device_direct_mappings(group->default_domain, dev); 604 if (group->domain) { 605 ret = __iommu_device_set_domain(group, dev, group->domain, 0); 606 if (ret) 607 goto err_remove_gdev; 608 } else if (!group->default_domain && !group_list) { 609 ret = iommu_setup_default_domain(group, 0); 610 if (ret) 611 goto err_remove_gdev; 612 } else if (!group->default_domain) { 613 /* 614 * With a group_list argument we defer the default_domain setup 615 * to the caller by providing a de-duplicated list of groups 616 * that need further setup. 
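		 * The only such caller in this file is bus_iommu_probe(),
		 * which walks the returned group_list afterwards and finishes
		 * the default-domain setup for each deferred group.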
617 */ 618 if (list_empty(&group->entry)) 619 list_add_tail(&group->entry, group_list); 620 } 621 622 if (group->default_domain) 623 iommu_setup_dma_ops(dev); 624 625 mutex_unlock(&group->mutex); 626 627 return 0; 628 629 err_remove_gdev: 630 list_del(&gdev->list); 631 __iommu_group_free_device(group, gdev); 632 err_put_group: 633 iommu_deinit_device(dev); 634 mutex_unlock(&group->mutex); 635 iommu_group_put(group); 636 637 return ret; 638 } 639 640 int iommu_probe_device(struct device *dev) 641 { 642 const struct iommu_ops *ops; 643 int ret; 644 645 mutex_lock(&iommu_probe_device_lock); 646 ret = __iommu_probe_device(dev, NULL); 647 mutex_unlock(&iommu_probe_device_lock); 648 if (ret) 649 return ret; 650 651 ops = dev_iommu_ops(dev); 652 if (ops->probe_finalize) 653 ops->probe_finalize(dev); 654 655 return 0; 656 } 657 658 static void __iommu_group_free_device(struct iommu_group *group, 659 struct group_device *grp_dev) 660 { 661 struct device *dev = grp_dev->dev; 662 663 sysfs_remove_link(group->devices_kobj, grp_dev->name); 664 sysfs_remove_link(&dev->kobj, "iommu_group"); 665 666 trace_remove_device_from_group(group->id, dev); 667 668 /* 669 * If the group has become empty then ownership must have been 670 * released, and the current domain must be set back to NULL or 671 * the default domain. 672 */ 673 if (list_empty(&group->devices)) 674 WARN_ON(group->owner_cnt || 675 group->domain != group->default_domain); 676 677 kfree(grp_dev->name); 678 kfree(grp_dev); 679 } 680 681 /* Remove the iommu_group from the struct device. */ 682 static void __iommu_group_remove_device(struct device *dev) 683 { 684 struct iommu_group *group = dev->iommu_group; 685 struct group_device *device; 686 687 mutex_lock(&group->mutex); 688 for_each_group_device(group, device) { 689 if (device->dev != dev) 690 continue; 691 692 list_del(&device->list); 693 __iommu_group_free_device(group, device); 694 if (dev_has_iommu(dev)) 695 iommu_deinit_device(dev); 696 else 697 dev->iommu_group = NULL; 698 break; 699 } 700 mutex_unlock(&group->mutex); 701 702 /* 703 * Pairs with the get in iommu_init_device() or 704 * iommu_group_add_device() 705 */ 706 iommu_group_put(group); 707 } 708 709 static void iommu_release_device(struct device *dev) 710 { 711 struct iommu_group *group = dev->iommu_group; 712 713 if (group) 714 __iommu_group_remove_device(dev); 715 716 /* Free any fwspec if no iommu_driver was ever attached */ 717 if (dev->iommu) 718 dev_iommu_free(dev); 719 } 720 721 static int __init iommu_set_def_domain_type(char *str) 722 { 723 bool pt; 724 int ret; 725 726 ret = kstrtobool(str, &pt); 727 if (ret) 728 return ret; 729 730 if (pt) 731 iommu_set_default_passthrough(true); 732 else 733 iommu_set_default_translated(true); 734 735 return 0; 736 } 737 early_param("iommu.passthrough", iommu_set_def_domain_type); 738 739 static int __init iommu_dma_setup(char *str) 740 { 741 int ret = kstrtobool(str, &iommu_dma_strict); 742 743 if (!ret) 744 iommu_cmd_line |= IOMMU_CMD_LINE_STRICT; 745 return ret; 746 } 747 early_param("iommu.strict", iommu_dma_setup); 748 749 void iommu_set_dma_strict(void) 750 { 751 iommu_dma_strict = true; 752 if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ) 753 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 754 } 755 756 static ssize_t iommu_group_attr_show(struct kobject *kobj, 757 struct attribute *__attr, char *buf) 758 { 759 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 760 struct iommu_group *group = to_iommu_group(kobj); 761 ssize_t ret = -EIO; 762 763 if (attr->show) 764 ret 
= attr->show(group, buf); 765 return ret; 766 } 767 768 static ssize_t iommu_group_attr_store(struct kobject *kobj, 769 struct attribute *__attr, 770 const char *buf, size_t count) 771 { 772 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 773 struct iommu_group *group = to_iommu_group(kobj); 774 ssize_t ret = -EIO; 775 776 if (attr->store) 777 ret = attr->store(group, buf, count); 778 return ret; 779 } 780 781 static const struct sysfs_ops iommu_group_sysfs_ops = { 782 .show = iommu_group_attr_show, 783 .store = iommu_group_attr_store, 784 }; 785 786 static int iommu_group_create_file(struct iommu_group *group, 787 struct iommu_group_attribute *attr) 788 { 789 return sysfs_create_file(&group->kobj, &attr->attr); 790 } 791 792 static void iommu_group_remove_file(struct iommu_group *group, 793 struct iommu_group_attribute *attr) 794 { 795 sysfs_remove_file(&group->kobj, &attr->attr); 796 } 797 798 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) 799 { 800 return sysfs_emit(buf, "%s\n", group->name); 801 } 802 803 /** 804 * iommu_insert_resv_region - Insert a new region in the 805 * list of reserved regions. 806 * @new: new region to insert 807 * @regions: list of regions 808 * 809 * Elements are sorted by start address and overlapping segments 810 * of the same type are merged. 811 */ 812 static int iommu_insert_resv_region(struct iommu_resv_region *new, 813 struct list_head *regions) 814 { 815 struct iommu_resv_region *iter, *tmp, *nr, *top; 816 LIST_HEAD(stack); 817 818 nr = iommu_alloc_resv_region(new->start, new->length, 819 new->prot, new->type, GFP_KERNEL); 820 if (!nr) 821 return -ENOMEM; 822 823 /* First add the new element based on start address sorting */ 824 list_for_each_entry(iter, regions, list) { 825 if (nr->start < iter->start || 826 (nr->start == iter->start && nr->type <= iter->type)) 827 break; 828 } 829 list_add_tail(&nr->list, &iter->list); 830 831 /* Merge overlapping segments of type nr->type in @regions, if any */ 832 list_for_each_entry_safe(iter, tmp, regions, list) { 833 phys_addr_t top_end, iter_end = iter->start + iter->length - 1; 834 835 /* no merge needed on elements of different types than @new */ 836 if (iter->type != new->type) { 837 list_move_tail(&iter->list, &stack); 838 continue; 839 } 840 841 /* look for the last stack element of same type as @iter */ 842 list_for_each_entry_reverse(top, &stack, list) 843 if (top->type == iter->type) 844 goto check_overlap; 845 846 list_move_tail(&iter->list, &stack); 847 continue; 848 849 check_overlap: 850 top_end = top->start + top->length - 1; 851 852 if (iter->start > top_end + 1) { 853 list_move_tail(&iter->list, &stack); 854 } else { 855 top->length = max(top_end, iter_end) - top->start + 1; 856 list_del(&iter->list); 857 kfree(iter); 858 } 859 } 860 list_splice(&stack, regions); 861 return 0; 862 } 863 864 static int 865 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, 866 struct list_head *group_resv_regions) 867 { 868 struct iommu_resv_region *entry; 869 int ret = 0; 870 871 list_for_each_entry(entry, dev_resv_regions, list) { 872 ret = iommu_insert_resv_region(entry, group_resv_regions); 873 if (ret) 874 break; 875 } 876 return ret; 877 } 878 879 int iommu_get_group_resv_regions(struct iommu_group *group, 880 struct list_head *head) 881 { 882 struct group_device *device; 883 int ret = 0; 884 885 mutex_lock(&group->mutex); 886 for_each_group_device(group, device) { 887 struct list_head dev_resv_regions; 888 889 /* 890 * Non-API groups still 
expose reserved_regions in sysfs, 891 * so filter out calls that get here that way. 892 */ 893 if (!dev_has_iommu(device->dev)) 894 break; 895 896 INIT_LIST_HEAD(&dev_resv_regions); 897 iommu_get_resv_regions(device->dev, &dev_resv_regions); 898 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); 899 iommu_put_resv_regions(device->dev, &dev_resv_regions); 900 if (ret) 901 break; 902 } 903 mutex_unlock(&group->mutex); 904 return ret; 905 } 906 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); 907 908 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, 909 char *buf) 910 { 911 struct iommu_resv_region *region, *next; 912 struct list_head group_resv_regions; 913 int offset = 0; 914 915 INIT_LIST_HEAD(&group_resv_regions); 916 iommu_get_group_resv_regions(group, &group_resv_regions); 917 918 list_for_each_entry_safe(region, next, &group_resv_regions, list) { 919 offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n", 920 (long long)region->start, 921 (long long)(region->start + 922 region->length - 1), 923 iommu_group_resv_type_string[region->type]); 924 kfree(region); 925 } 926 927 return offset; 928 } 929 930 static ssize_t iommu_group_show_type(struct iommu_group *group, 931 char *buf) 932 { 933 char *type = "unknown"; 934 935 mutex_lock(&group->mutex); 936 if (group->default_domain) { 937 switch (group->default_domain->type) { 938 case IOMMU_DOMAIN_BLOCKED: 939 type = "blocked"; 940 break; 941 case IOMMU_DOMAIN_IDENTITY: 942 type = "identity"; 943 break; 944 case IOMMU_DOMAIN_UNMANAGED: 945 type = "unmanaged"; 946 break; 947 case IOMMU_DOMAIN_DMA: 948 type = "DMA"; 949 break; 950 case IOMMU_DOMAIN_DMA_FQ: 951 type = "DMA-FQ"; 952 break; 953 } 954 } 955 mutex_unlock(&group->mutex); 956 957 return sysfs_emit(buf, "%s\n", type); 958 } 959 960 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); 961 962 static IOMMU_GROUP_ATTR(reserved_regions, 0444, 963 iommu_group_show_resv_regions, NULL); 964 965 static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type, 966 iommu_group_store_type); 967 968 static void iommu_group_release(struct kobject *kobj) 969 { 970 struct iommu_group *group = to_iommu_group(kobj); 971 972 pr_debug("Releasing group %d\n", group->id); 973 974 if (group->iommu_data_release) 975 group->iommu_data_release(group->iommu_data); 976 977 ida_free(&iommu_group_ida, group->id); 978 979 /* Domains are free'd by iommu_deinit_device() */ 980 WARN_ON(group->default_domain); 981 WARN_ON(group->blocking_domain); 982 983 kfree(group->name); 984 kfree(group); 985 } 986 987 static const struct kobj_type iommu_group_ktype = { 988 .sysfs_ops = &iommu_group_sysfs_ops, 989 .release = iommu_group_release, 990 }; 991 992 /** 993 * iommu_group_alloc - Allocate a new group 994 * 995 * This function is called by an iommu driver to allocate a new iommu 996 * group. The iommu group represents the minimum granularity of the iommu. 997 * Upon successful return, the caller holds a reference to the supplied 998 * group in order to hold the group until devices are added. Use 999 * iommu_group_put() to release this extra reference count, allowing the 1000 * group to be automatically reclaimed once it has no devices or external 1001 * references. 
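 *
 * Illustrative sketch (hypothetical caller, not taken from a real driver):
 * the classic allocate/add/put pattern built on this API looks like
 *
 *	group = iommu_group_alloc();
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	ret = iommu_group_add_device(group, dev);
 *	iommu_group_put(group);
 *	if (ret)
 *		return ret;
 *
 * The put only drops the allocation reference; iommu_group_add_device()
 * takes its own reference for the device.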
1002 */ 1003 struct iommu_group *iommu_group_alloc(void) 1004 { 1005 struct iommu_group *group; 1006 int ret; 1007 1008 group = kzalloc(sizeof(*group), GFP_KERNEL); 1009 if (!group) 1010 return ERR_PTR(-ENOMEM); 1011 1012 group->kobj.kset = iommu_group_kset; 1013 mutex_init(&group->mutex); 1014 INIT_LIST_HEAD(&group->devices); 1015 INIT_LIST_HEAD(&group->entry); 1016 xa_init(&group->pasid_array); 1017 1018 ret = ida_alloc(&iommu_group_ida, GFP_KERNEL); 1019 if (ret < 0) { 1020 kfree(group); 1021 return ERR_PTR(ret); 1022 } 1023 group->id = ret; 1024 1025 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, 1026 NULL, "%d", group->id); 1027 if (ret) { 1028 kobject_put(&group->kobj); 1029 return ERR_PTR(ret); 1030 } 1031 1032 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); 1033 if (!group->devices_kobj) { 1034 kobject_put(&group->kobj); /* triggers .release & free */ 1035 return ERR_PTR(-ENOMEM); 1036 } 1037 1038 /* 1039 * The devices_kobj holds a reference on the group kobject, so 1040 * as long as that exists so will the group. We can therefore 1041 * use the devices_kobj for reference counting. 1042 */ 1043 kobject_put(&group->kobj); 1044 1045 ret = iommu_group_create_file(group, 1046 &iommu_group_attr_reserved_regions); 1047 if (ret) { 1048 kobject_put(group->devices_kobj); 1049 return ERR_PTR(ret); 1050 } 1051 1052 ret = iommu_group_create_file(group, &iommu_group_attr_type); 1053 if (ret) { 1054 kobject_put(group->devices_kobj); 1055 return ERR_PTR(ret); 1056 } 1057 1058 pr_debug("Allocated group %d\n", group->id); 1059 1060 return group; 1061 } 1062 EXPORT_SYMBOL_GPL(iommu_group_alloc); 1063 1064 /** 1065 * iommu_group_get_iommudata - retrieve iommu_data registered for a group 1066 * @group: the group 1067 * 1068 * iommu drivers can store data in the group for use when doing iommu 1069 * operations. This function provides a way to retrieve it. Caller 1070 * should hold a group reference. 1071 */ 1072 void *iommu_group_get_iommudata(struct iommu_group *group) 1073 { 1074 return group->iommu_data; 1075 } 1076 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata); 1077 1078 /** 1079 * iommu_group_set_iommudata - set iommu_data for a group 1080 * @group: the group 1081 * @iommu_data: new data 1082 * @release: release function for iommu_data 1083 * 1084 * iommu drivers can store data in the group for use when doing iommu 1085 * operations. This function provides a way to set the data after 1086 * the group has been allocated. Caller should hold a group reference. 1087 */ 1088 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, 1089 void (*release)(void *iommu_data)) 1090 { 1091 group->iommu_data = iommu_data; 1092 group->iommu_data_release = release; 1093 } 1094 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata); 1095 1096 /** 1097 * iommu_group_set_name - set name for a group 1098 * @group: the group 1099 * @name: name 1100 * 1101 * Allow iommu driver to set a name for a group. When set it will 1102 * appear in a name attribute file under the group in sysfs. 
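 *
 * For example (illustrative name), a caller may label a group it has just
 * allocated:
 *
 *	ret = iommu_group_set_name(group, "my-unity-group");
 *	if (ret)
 *		return ret;
 *
 * The string then appears as the "name" attribute of the group's sysfs
 * directory.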
1103 */ 1104 int iommu_group_set_name(struct iommu_group *group, const char *name) 1105 { 1106 int ret; 1107 1108 if (group->name) { 1109 iommu_group_remove_file(group, &iommu_group_attr_name); 1110 kfree(group->name); 1111 group->name = NULL; 1112 if (!name) 1113 return 0; 1114 } 1115 1116 group->name = kstrdup(name, GFP_KERNEL); 1117 if (!group->name) 1118 return -ENOMEM; 1119 1120 ret = iommu_group_create_file(group, &iommu_group_attr_name); 1121 if (ret) { 1122 kfree(group->name); 1123 group->name = NULL; 1124 return ret; 1125 } 1126 1127 return 0; 1128 } 1129 EXPORT_SYMBOL_GPL(iommu_group_set_name); 1130 1131 static int iommu_create_device_direct_mappings(struct iommu_domain *domain, 1132 struct device *dev) 1133 { 1134 struct iommu_resv_region *entry; 1135 struct list_head mappings; 1136 unsigned long pg_size; 1137 int ret = 0; 1138 1139 pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0; 1140 INIT_LIST_HEAD(&mappings); 1141 1142 if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size)) 1143 return -EINVAL; 1144 1145 iommu_get_resv_regions(dev, &mappings); 1146 1147 /* We need to consider overlapping regions for different devices */ 1148 list_for_each_entry(entry, &mappings, list) { 1149 dma_addr_t start, end, addr; 1150 size_t map_size = 0; 1151 1152 if (entry->type == IOMMU_RESV_DIRECT) 1153 dev->iommu->require_direct = 1; 1154 1155 if ((entry->type != IOMMU_RESV_DIRECT && 1156 entry->type != IOMMU_RESV_DIRECT_RELAXABLE) || 1157 !iommu_is_dma_domain(domain)) 1158 continue; 1159 1160 start = ALIGN(entry->start, pg_size); 1161 end = ALIGN(entry->start + entry->length, pg_size); 1162 1163 for (addr = start; addr <= end; addr += pg_size) { 1164 phys_addr_t phys_addr; 1165 1166 if (addr == end) 1167 goto map_end; 1168 1169 phys_addr = iommu_iova_to_phys(domain, addr); 1170 if (!phys_addr) { 1171 map_size += pg_size; 1172 continue; 1173 } 1174 1175 map_end: 1176 if (map_size) { 1177 ret = iommu_map(domain, addr - map_size, 1178 addr - map_size, map_size, 1179 entry->prot, GFP_KERNEL); 1180 if (ret) 1181 goto out; 1182 map_size = 0; 1183 } 1184 } 1185 1186 } 1187 out: 1188 iommu_put_resv_regions(dev, &mappings); 1189 1190 return ret; 1191 } 1192 1193 /* This is undone by __iommu_group_free_device() */ 1194 static struct group_device *iommu_group_alloc_device(struct iommu_group *group, 1195 struct device *dev) 1196 { 1197 int ret, i = 0; 1198 struct group_device *device; 1199 1200 device = kzalloc(sizeof(*device), GFP_KERNEL); 1201 if (!device) 1202 return ERR_PTR(-ENOMEM); 1203 1204 device->dev = dev; 1205 1206 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); 1207 if (ret) 1208 goto err_free_device; 1209 1210 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); 1211 rename: 1212 if (!device->name) { 1213 ret = -ENOMEM; 1214 goto err_remove_link; 1215 } 1216 1217 ret = sysfs_create_link_nowarn(group->devices_kobj, 1218 &dev->kobj, device->name); 1219 if (ret) { 1220 if (ret == -EEXIST && i >= 0) { 1221 /* 1222 * Account for the slim chance of collision 1223 * and append an instance to the name. 
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return device;

err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ERR_PTR(ret);
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	struct group_device *gdev;

	gdev = iommu_group_alloc_device(group, dev);
	if (IS_ERR(gdev))
		return PTR_ERR(gdev);

	iommu_group_ref_get(group);
	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&gdev->list, &group->devices);
	mutex_unlock(&group->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	__iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
/**
 * iommu_group_mutex_assert - Check device group mutex lock
 * @dev: the device that has group param set
 *
 * This function is called by an iommu driver to check whether it holds the
 * group mutex lock for the given device or not.
 *
 * Note that this function must be called after device group param is set.
 */
void iommu_group_mutex_assert(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	lockdep_assert_held(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_group_mutex_assert);
#endif

static struct device *iommu_group_first_dev(struct iommu_group *group)
{
	lockdep_assert_held(&group->mutex);
	return list_first_entry(&group->devices, struct group_device, list)->dev;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
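 *
 * Illustrative sketch (hypothetical names): counting the devices in a group
 * with this helper could look like
 *
 *	static int count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, count_dev);
 *
 * A non-zero return value from the callback stops the iteration and is
 * propagated back to the caller.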
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
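 *
 * For example (illustrative): if a dual-function device at 02:00.0/02:00.1
 * lacks ACS, the loop below matches the sibling function by PCI_SLOT() and
 * reuses whatever group that function (or one of its aliases) already has.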
1426 */ 1427 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev, 1428 unsigned long *devfns) 1429 { 1430 struct pci_dev *tmp = NULL; 1431 struct iommu_group *group; 1432 1433 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) 1434 return NULL; 1435 1436 for_each_pci_dev(tmp) { 1437 if (tmp == pdev || tmp->bus != pdev->bus || 1438 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || 1439 pci_acs_enabled(tmp, REQ_ACS_FLAGS)) 1440 continue; 1441 1442 group = get_pci_alias_group(tmp, devfns); 1443 if (group) { 1444 pci_dev_put(tmp); 1445 return group; 1446 } 1447 } 1448 1449 return NULL; 1450 } 1451 1452 /* 1453 * Look for aliases to or from the given device for existing groups. DMA 1454 * aliases are only supported on the same bus, therefore the search 1455 * space is quite small (especially since we're really only looking at pcie 1456 * device, and therefore only expect multiple slots on the root complex or 1457 * downstream switch ports). It's conceivable though that a pair of 1458 * multifunction devices could have aliases between them that would cause a 1459 * loop. To prevent this, we use a bitmap to track where we've been. 1460 */ 1461 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1462 unsigned long *devfns) 1463 { 1464 struct pci_dev *tmp = NULL; 1465 struct iommu_group *group; 1466 1467 if (test_and_set_bit(pdev->devfn & 0xff, devfns)) 1468 return NULL; 1469 1470 group = iommu_group_get(&pdev->dev); 1471 if (group) 1472 return group; 1473 1474 for_each_pci_dev(tmp) { 1475 if (tmp == pdev || tmp->bus != pdev->bus) 1476 continue; 1477 1478 /* We alias them or they alias us */ 1479 if (pci_devs_are_dma_aliases(pdev, tmp)) { 1480 group = get_pci_alias_group(tmp, devfns); 1481 if (group) { 1482 pci_dev_put(tmp); 1483 return group; 1484 } 1485 1486 group = get_pci_function_alias_group(tmp, devfns); 1487 if (group) { 1488 pci_dev_put(tmp); 1489 return group; 1490 } 1491 } 1492 } 1493 1494 return NULL; 1495 } 1496 1497 struct group_for_pci_data { 1498 struct pci_dev *pdev; 1499 struct iommu_group *group; 1500 }; 1501 1502 /* 1503 * DMA alias iterator callback, return the last seen device. Stop and return 1504 * the IOMMU group if we find one along the way. 1505 */ 1506 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) 1507 { 1508 struct group_for_pci_data *data = opaque; 1509 1510 data->pdev = pdev; 1511 data->group = iommu_group_get(&pdev->dev); 1512 1513 return data->group != NULL; 1514 } 1515 1516 /* 1517 * Generic device_group call-back function. It just allocates one 1518 * iommu-group per device. 1519 */ 1520 struct iommu_group *generic_device_group(struct device *dev) 1521 { 1522 return iommu_group_alloc(); 1523 } 1524 EXPORT_SYMBOL_GPL(generic_device_group); 1525 1526 /* 1527 * Generic device_group call-back function. It just allocates one 1528 * iommu-group per iommu driver instance shared by every device 1529 * probed by that iommu driver. 
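 *
 * Illustrative sketch (hypothetical ops name, fields abridged): a driver opts
 * in simply by pointing its ops at this helper:
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		...
 *		.device_group = generic_single_device_group,
 *	};
 *
 * The singleton group itself is released in iommu_device_unregister() above.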
 */
struct iommu_group *generic_single_device_group(struct device *dev)
{
	struct iommu_device *iommu = dev->iommu->iommu_dev;

	if (!iommu->singleton_group) {
		struct iommu_group *group;

		group = iommu_group_alloc();
		if (IS_ERR(group))
			return group;
		iommu->singleton_group = group;
	}
	return iommu_group_ref_get(iommu->singleton_group);
}
EXPORT_SYMBOL_GPL(generic_single_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
1605 */ 1606 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); 1607 if (group) 1608 return group; 1609 1610 /* No shared group found, allocate new */ 1611 return iommu_group_alloc(); 1612 } 1613 EXPORT_SYMBOL_GPL(pci_device_group); 1614 1615 /* Get the IOMMU group for device on fsl-mc bus */ 1616 struct iommu_group *fsl_mc_device_group(struct device *dev) 1617 { 1618 struct device *cont_dev = fsl_mc_cont_dev(dev); 1619 struct iommu_group *group; 1620 1621 group = iommu_group_get(cont_dev); 1622 if (!group) 1623 group = iommu_group_alloc(); 1624 return group; 1625 } 1626 EXPORT_SYMBOL_GPL(fsl_mc_device_group); 1627 1628 static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev) 1629 { 1630 const struct iommu_ops *ops = dev_iommu_ops(dev); 1631 struct iommu_domain *domain; 1632 1633 if (ops->identity_domain) 1634 return ops->identity_domain; 1635 1636 if (ops->domain_alloc_identity) { 1637 domain = ops->domain_alloc_identity(dev); 1638 if (IS_ERR(domain)) 1639 return domain; 1640 } else { 1641 return ERR_PTR(-EOPNOTSUPP); 1642 } 1643 1644 iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops); 1645 return domain; 1646 } 1647 1648 static struct iommu_domain * 1649 __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) 1650 { 1651 struct device *dev = iommu_group_first_dev(group); 1652 struct iommu_domain *dom; 1653 1654 if (group->default_domain && group->default_domain->type == req_type) 1655 return group->default_domain; 1656 1657 /* 1658 * When allocating the DMA API domain assume that the driver is going to 1659 * use PASID and make sure the RID's domain is PASID compatible. 1660 */ 1661 if (req_type & __IOMMU_DOMAIN_PAGING) { 1662 dom = __iommu_paging_domain_alloc_flags(dev, req_type, 1663 dev->iommu->max_pasids ? IOMMU_HWPT_ALLOC_PASID : 0); 1664 1665 /* 1666 * If driver does not support PASID feature then 1667 * try to allocate non-PASID domain 1668 */ 1669 if (PTR_ERR(dom) == -EOPNOTSUPP) 1670 dom = __iommu_paging_domain_alloc_flags(dev, req_type, 0); 1671 1672 return dom; 1673 } 1674 1675 if (req_type == IOMMU_DOMAIN_IDENTITY) 1676 return __iommu_alloc_identity_domain(dev); 1677 1678 return ERR_PTR(-EINVAL); 1679 } 1680 1681 /* 1682 * req_type of 0 means "auto" which means to select a domain based on 1683 * iommu_def_domain_type or what the driver actually supports. 1684 */ 1685 static struct iommu_domain * 1686 iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) 1687 { 1688 const struct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group)); 1689 struct iommu_domain *dom; 1690 1691 lockdep_assert_held(&group->mutex); 1692 1693 /* 1694 * Allow legacy drivers to specify the domain that will be the default 1695 * domain. This should always be either an IDENTITY/BLOCKED/PLATFORM 1696 * domain. Do not use in new drivers. 
1697 */ 1698 if (ops->default_domain) { 1699 if (req_type != ops->default_domain->type) 1700 return ERR_PTR(-EINVAL); 1701 return ops->default_domain; 1702 } 1703 1704 if (req_type) 1705 return __iommu_group_alloc_default_domain(group, req_type); 1706 1707 /* The driver gave no guidance on what type to use, try the default */ 1708 dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type); 1709 if (!IS_ERR(dom)) 1710 return dom; 1711 1712 /* Otherwise IDENTITY and DMA_FQ defaults will try DMA */ 1713 if (iommu_def_domain_type == IOMMU_DOMAIN_DMA) 1714 return ERR_PTR(-EINVAL); 1715 dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA); 1716 if (IS_ERR(dom)) 1717 return dom; 1718 1719 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA", 1720 iommu_def_domain_type, group->name); 1721 return dom; 1722 } 1723 1724 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) 1725 { 1726 return group->default_domain; 1727 } 1728 1729 static int probe_iommu_group(struct device *dev, void *data) 1730 { 1731 struct list_head *group_list = data; 1732 int ret; 1733 1734 mutex_lock(&iommu_probe_device_lock); 1735 ret = __iommu_probe_device(dev, group_list); 1736 mutex_unlock(&iommu_probe_device_lock); 1737 if (ret == -ENODEV) 1738 ret = 0; 1739 1740 return ret; 1741 } 1742 1743 static int iommu_bus_notifier(struct notifier_block *nb, 1744 unsigned long action, void *data) 1745 { 1746 struct device *dev = data; 1747 1748 if (action == BUS_NOTIFY_ADD_DEVICE) { 1749 int ret; 1750 1751 ret = iommu_probe_device(dev); 1752 return (ret) ? NOTIFY_DONE : NOTIFY_OK; 1753 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) { 1754 iommu_release_device(dev); 1755 return NOTIFY_OK; 1756 } 1757 1758 return 0; 1759 } 1760 1761 /* 1762 * Combine the driver's chosen def_domain_type across all the devices in a 1763 * group. Drivers must give a consistent result. 1764 */ 1765 static int iommu_get_def_domain_type(struct iommu_group *group, 1766 struct device *dev, int cur_type) 1767 { 1768 const struct iommu_ops *ops = dev_iommu_ops(dev); 1769 int type; 1770 1771 if (ops->default_domain) { 1772 /* 1773 * Drivers that declare a global static default_domain will 1774 * always choose that. 1775 */ 1776 type = ops->default_domain->type; 1777 } else { 1778 if (ops->def_domain_type) 1779 type = ops->def_domain_type(dev); 1780 else 1781 return cur_type; 1782 } 1783 if (!type || cur_type == type) 1784 return cur_type; 1785 if (!cur_type) 1786 return type; 1787 1788 dev_err_ratelimited( 1789 dev, 1790 "IOMMU driver error, requesting conflicting def_domain_type, %s and %s, for devices in group %u.\n", 1791 iommu_domain_type_str(cur_type), iommu_domain_type_str(type), 1792 group->id); 1793 1794 /* 1795 * Try to recover, drivers are allowed to force IDENTITY or DMA, IDENTITY 1796 * takes precedence. 1797 */ 1798 if (type == IOMMU_DOMAIN_IDENTITY) 1799 return type; 1800 return cur_type; 1801 } 1802 1803 /* 1804 * A target_type of 0 will select the best domain type. 0 can be returned in 1805 * this case meaning the global default should be used. 
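 *
 * The per-device input to this selection is the driver's def_domain_type()
 * callback, combined across the group by iommu_get_def_domain_type() above.
 * Illustrative sketch of such a callback (my_* names are hypothetical):
 *
 *	static int my_def_domain_type(struct device *dev)
 *	{
 *		if (my_device_needs_identity_map(dev))
 *			return IOMMU_DOMAIN_IDENTITY;
 *		return 0;
 *	}
 *
 * where returning 0 means the driver has no preference and the core picks
 * the global default.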
 */
static int iommu_get_default_domain_type(struct iommu_group *group,
					 int target_type)
{
	struct device *untrusted = NULL;
	struct group_device *gdev;
	int driver_type = 0;

	lockdep_assert_held(&group->mutex);

	/*
	 * ARM32 drivers supporting CONFIG_ARM_DMA_USE_IOMMU can declare an
	 * identity_domain and it will automatically become their default
	 * domain. Later on ARM_DMA_USE_IOMMU will install its UNMANAGED domain.
	 * Override the selection to IDENTITY.
	 */
	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
		static_assert(!(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) &&
				IS_ENABLED(CONFIG_IOMMU_DMA)));
		driver_type = IOMMU_DOMAIN_IDENTITY;
	}

	for_each_group_device(group, gdev) {
		driver_type = iommu_get_def_domain_type(group, gdev->dev,
							driver_type);

		if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->untrusted) {
			/*
			 * No ARM32 using systems will set untrusted, it cannot
			 * work.
			 */
			if (WARN_ON(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)))
				return -1;
			untrusted = gdev->dev;
		}
	}

	/*
	 * If the common dma ops are not selected in kconfig then we cannot use
	 * IOMMU_DOMAIN_DMA at all. Force IDENTITY if nothing else has been
	 * selected.
	 */
	if (!IS_ENABLED(CONFIG_IOMMU_DMA)) {
		if (WARN_ON(driver_type == IOMMU_DOMAIN_DMA))
			return -1;
		if (!driver_type)
			driver_type = IOMMU_DOMAIN_IDENTITY;
	}

	if (untrusted) {
		if (driver_type && driver_type != IOMMU_DOMAIN_DMA) {
			dev_err_ratelimited(
				untrusted,
				"Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n",
				group->id, iommu_domain_type_str(driver_type));
			return -1;
		}
		driver_type = IOMMU_DOMAIN_DMA;
	}

	if (target_type) {
		if (driver_type && target_type != driver_type)
			return -1;
		return target_type;
	}
	return driver_type;
}

static void iommu_group_do_probe_finalize(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);
}

static int bus_iommu_probe(const struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		struct group_device *gdev;

		mutex_lock(&group->mutex);

		/* Remove item from the list */
		list_del_init(&group->entry);

		/*
		 * We go to the trouble of deferred default domain creation so
		 * that the cross-group default domain type and the setup of the
		 * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios.
		 */
		ret = iommu_setup_default_domain(group, 0);
		if (ret) {
			mutex_unlock(&group->mutex);
			return ret;
		}
		for_each_group_device(group, gdev)
			iommu_setup_dma_ops(gdev->dev);
		mutex_unlock(&group->mutex);

		/*
		 * FIXME: Mis-locked because the ops->probe_finalize() call-back
		 * of some IOMMU drivers calls arm_iommu_attach_device() which
		 * in-turn might call back into IOMMU core code, where it tries
		 * to take group->mutex, resulting in a deadlock.
1919 */ 1920 for_each_group_device(group, gdev) 1921 iommu_group_do_probe_finalize(gdev->dev); 1922 } 1923 1924 return 0; 1925 } 1926 1927 /** 1928 * device_iommu_capable() - check for a general IOMMU capability 1929 * @dev: device to which the capability would be relevant, if available 1930 * @cap: IOMMU capability 1931 * 1932 * Return: true if an IOMMU is present and supports the given capability 1933 * for the given device, otherwise false. 1934 */ 1935 bool device_iommu_capable(struct device *dev, enum iommu_cap cap) 1936 { 1937 const struct iommu_ops *ops; 1938 1939 if (!dev_has_iommu(dev)) 1940 return false; 1941 1942 ops = dev_iommu_ops(dev); 1943 if (!ops->capable) 1944 return false; 1945 1946 return ops->capable(dev, cap); 1947 } 1948 EXPORT_SYMBOL_GPL(device_iommu_capable); 1949 1950 /** 1951 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi() 1952 * for a group 1953 * @group: Group to query 1954 * 1955 * IOMMU groups should not have differing values of 1956 * msi_device_has_isolated_msi() for devices in a group. However nothing 1957 * directly prevents this, so ensure mistakes don't result in isolation failures 1958 * by checking that all the devices are the same. 1959 */ 1960 bool iommu_group_has_isolated_msi(struct iommu_group *group) 1961 { 1962 struct group_device *group_dev; 1963 bool ret = true; 1964 1965 mutex_lock(&group->mutex); 1966 for_each_group_device(group, group_dev) 1967 ret &= msi_device_has_isolated_msi(group_dev->dev); 1968 mutex_unlock(&group->mutex); 1969 return ret; 1970 } 1971 EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi); 1972 1973 /** 1974 * iommu_set_fault_handler() - set a fault handler for an iommu domain 1975 * @domain: iommu domain 1976 * @handler: fault handler 1977 * @token: user data, will be passed back to the fault handler 1978 * 1979 * This function should be used by IOMMU users which want to be notified 1980 * whenever an IOMMU fault happens. 1981 * 1982 * The fault handler itself should return 0 on success, and an appropriate 1983 * error code otherwise. 
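 *
 * Illustrative sketch (hypothetical names): a handler and its registration
 * might look like
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "unhandled fault at IOVA %lx\n", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 *
 * The handler is invoked from report_iommu_fault() when a driver reports an
 * unrecoverable fault on the domain; @token is passed back as the last
 * argument.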
1984 */ 1985 void iommu_set_fault_handler(struct iommu_domain *domain, 1986 iommu_fault_handler_t handler, 1987 void *token) 1988 { 1989 if (WARN_ON(!domain || domain->cookie_type != IOMMU_COOKIE_NONE)) 1990 return; 1991 1992 domain->cookie_type = IOMMU_COOKIE_FAULT_HANDLER; 1993 domain->handler = handler; 1994 domain->handler_token = token; 1995 } 1996 EXPORT_SYMBOL_GPL(iommu_set_fault_handler); 1997 1998 static void iommu_domain_init(struct iommu_domain *domain, unsigned int type, 1999 const struct iommu_ops *ops) 2000 { 2001 domain->type = type; 2002 domain->owner = ops; 2003 if (!domain->ops) 2004 domain->ops = ops->default_domain_ops; 2005 } 2006 2007 static struct iommu_domain * 2008 __iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type, 2009 unsigned int flags) 2010 { 2011 const struct iommu_ops *ops; 2012 struct iommu_domain *domain; 2013 2014 if (!dev_has_iommu(dev)) 2015 return ERR_PTR(-ENODEV); 2016 2017 ops = dev_iommu_ops(dev); 2018 2019 if (ops->domain_alloc_paging && !flags) 2020 domain = ops->domain_alloc_paging(dev); 2021 else if (ops->domain_alloc_paging_flags) 2022 domain = ops->domain_alloc_paging_flags(dev, flags, NULL); 2023 #if IS_ENABLED(CONFIG_FSL_PAMU) 2024 else if (ops->domain_alloc && !flags) 2025 domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); 2026 #endif 2027 else 2028 return ERR_PTR(-EOPNOTSUPP); 2029 2030 if (IS_ERR(domain)) 2031 return domain; 2032 if (!domain) 2033 return ERR_PTR(-ENOMEM); 2034 2035 iommu_domain_init(domain, type, ops); 2036 return domain; 2037 } 2038 2039 /** 2040 * iommu_paging_domain_alloc_flags() - Allocate a paging domain 2041 * @dev: device for which the domain is allocated 2042 * @flags: Bitmap of iommufd_hwpt_alloc_flags 2043 * 2044 * Allocate a paging domain which will be managed by a kernel driver. Return 2045 * allocated domain if successful, or an ERR pointer for failure. 2046 */ 2047 struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, 2048 unsigned int flags) 2049 { 2050 return __iommu_paging_domain_alloc_flags(dev, 2051 IOMMU_DOMAIN_UNMANAGED, flags); 2052 } 2053 EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc_flags); 2054 2055 void iommu_domain_free(struct iommu_domain *domain) 2056 { 2057 switch (domain->cookie_type) { 2058 case IOMMU_COOKIE_DMA_IOVA: 2059 iommu_put_dma_cookie(domain); 2060 break; 2061 case IOMMU_COOKIE_DMA_MSI: 2062 iommu_put_msi_cookie(domain); 2063 break; 2064 case IOMMU_COOKIE_SVA: 2065 mmdrop(domain->mm); 2066 break; 2067 default: 2068 break; 2069 } 2070 if (domain->ops->free) 2071 domain->ops->free(domain); 2072 } 2073 EXPORT_SYMBOL_GPL(iommu_domain_free); 2074 2075 /* 2076 * Put the group's domain back to the appropriate core-owned domain - either the 2077 * standard kernel-mode DMA configuration or an all-DMA-blocked domain. 
2078 */ 2079 static void __iommu_group_set_core_domain(struct iommu_group *group) 2080 { 2081 struct iommu_domain *new_domain; 2082 2083 if (group->owner) 2084 new_domain = group->blocking_domain; 2085 else 2086 new_domain = group->default_domain; 2087 2088 __iommu_group_set_domain_nofail(group, new_domain); 2089 } 2090 2091 static int __iommu_attach_device(struct iommu_domain *domain, 2092 struct device *dev) 2093 { 2094 int ret; 2095 2096 if (unlikely(domain->ops->attach_dev == NULL)) 2097 return -ENODEV; 2098 2099 ret = domain->ops->attach_dev(domain, dev); 2100 if (ret) 2101 return ret; 2102 dev->iommu->attach_deferred = 0; 2103 trace_attach_device_to_domain(dev); 2104 return 0; 2105 } 2106 2107 /** 2108 * iommu_attach_device - Attach an IOMMU domain to a device 2109 * @domain: IOMMU domain to attach 2110 * @dev: Device that will be attached 2111 * 2112 * Returns 0 on success and error code on failure 2113 * 2114 * Note that EINVAL can be treated as a soft failure, indicating 2115 * that certain configuration of the domain is incompatible with 2116 * the device. In this case attaching a different domain to the 2117 * device may succeed. 2118 */ 2119 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 2120 { 2121 /* Caller must be a probed driver on dev */ 2122 struct iommu_group *group = dev->iommu_group; 2123 int ret; 2124 2125 if (!group) 2126 return -ENODEV; 2127 2128 /* 2129 * Lock the group to make sure the device-count doesn't 2130 * change while we are attaching 2131 */ 2132 mutex_lock(&group->mutex); 2133 ret = -EINVAL; 2134 if (list_count_nodes(&group->devices) != 1) 2135 goto out_unlock; 2136 2137 ret = __iommu_attach_group(domain, group); 2138 2139 out_unlock: 2140 mutex_unlock(&group->mutex); 2141 return ret; 2142 } 2143 EXPORT_SYMBOL_GPL(iommu_attach_device); 2144 2145 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 2146 { 2147 if (dev->iommu && dev->iommu->attach_deferred) 2148 return __iommu_attach_device(domain, dev); 2149 2150 return 0; 2151 } 2152 2153 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 2154 { 2155 /* Caller must be a probed driver on dev */ 2156 struct iommu_group *group = dev->iommu_group; 2157 2158 if (!group) 2159 return; 2160 2161 mutex_lock(&group->mutex); 2162 if (WARN_ON(domain != group->domain) || 2163 WARN_ON(list_count_nodes(&group->devices) != 1)) 2164 goto out_unlock; 2165 __iommu_group_set_core_domain(group); 2166 2167 out_unlock: 2168 mutex_unlock(&group->mutex); 2169 } 2170 EXPORT_SYMBOL_GPL(iommu_detach_device); 2171 2172 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 2173 { 2174 /* Caller must be a probed driver on dev */ 2175 struct iommu_group *group = dev->iommu_group; 2176 2177 if (!group) 2178 return NULL; 2179 2180 return group->domain; 2181 } 2182 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 2183 2184 /* 2185 * For IOMMU_DOMAIN_DMA implementations which already provide their own 2186 * guarantees that the group and its default domain are valid and correct. 
2187 */ 2188 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 2189 { 2190 return dev->iommu_group->default_domain; 2191 } 2192 2193 static void *iommu_make_pasid_array_entry(struct iommu_domain *domain, 2194 struct iommu_attach_handle *handle) 2195 { 2196 if (handle) { 2197 handle->domain = domain; 2198 return xa_tag_pointer(handle, IOMMU_PASID_ARRAY_HANDLE); 2199 } 2200 2201 return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN); 2202 } 2203 2204 static bool domain_iommu_ops_compatible(const struct iommu_ops *ops, 2205 struct iommu_domain *domain) 2206 { 2207 if (domain->owner == ops) 2208 return true; 2209 2210 /* For static domains, owner isn't set. */ 2211 if (domain == ops->blocked_domain || domain == ops->identity_domain) 2212 return true; 2213 2214 return false; 2215 } 2216 2217 static int __iommu_attach_group(struct iommu_domain *domain, 2218 struct iommu_group *group) 2219 { 2220 struct device *dev; 2221 2222 if (group->domain && group->domain != group->default_domain && 2223 group->domain != group->blocking_domain) 2224 return -EBUSY; 2225 2226 dev = iommu_group_first_dev(group); 2227 if (!dev_has_iommu(dev) || 2228 !domain_iommu_ops_compatible(dev_iommu_ops(dev), domain)) 2229 return -EINVAL; 2230 2231 return __iommu_group_set_domain(group, domain); 2232 } 2233 2234 /** 2235 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group 2236 * @domain: IOMMU domain to attach 2237 * @group: IOMMU group that will be attached 2238 * 2239 * Returns 0 on success and error code on failure 2240 * 2241 * Note that EINVAL can be treated as a soft failure, indicating 2242 * that certain configuration of the domain is incompatible with 2243 * the group. In this case attaching a different domain to the 2244 * group may succeed. 2245 */ 2246 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 2247 { 2248 int ret; 2249 2250 mutex_lock(&group->mutex); 2251 ret = __iommu_attach_group(domain, group); 2252 mutex_unlock(&group->mutex); 2253 2254 return ret; 2255 } 2256 EXPORT_SYMBOL_GPL(iommu_attach_group); 2257 2258 static int __iommu_device_set_domain(struct iommu_group *group, 2259 struct device *dev, 2260 struct iommu_domain *new_domain, 2261 unsigned int flags) 2262 { 2263 int ret; 2264 2265 /* 2266 * If the device requires IOMMU_RESV_DIRECT then we cannot allow 2267 * the blocking domain to be attached as it does not contain the 2268 * required 1:1 mapping. This test effectively excludes the device 2269 * being used with iommu_group_claim_dma_owner() which will block 2270 * vfio and iommufd as well. 2271 */ 2272 if (dev->iommu->require_direct && 2273 (new_domain->type == IOMMU_DOMAIN_BLOCKED || 2274 new_domain == group->blocking_domain)) { 2275 dev_warn(dev, 2276 "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n"); 2277 return -EINVAL; 2278 } 2279 2280 if (dev->iommu->attach_deferred) { 2281 if (new_domain == group->default_domain) 2282 return 0; 2283 dev->iommu->attach_deferred = 0; 2284 } 2285 2286 ret = __iommu_attach_device(new_domain, dev); 2287 if (ret) { 2288 /* 2289 * If we have a blocking domain then try to attach that in hopes 2290 * of avoiding a UAF. Modern drivers should implement blocking 2291 * domains as global statics that cannot fail. 
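		 *
		 * An illustrative driver-side sketch of such a static blocking
		 * domain (my_blocked_* and my_iommu_ops are hypothetical):
		 *
		 *	static struct iommu_domain my_blocked_domain = {
		 *		.type = IOMMU_DOMAIN_BLOCKED,
		 *		.ops = &my_blocked_domain_ops,	// attach_dev cannot fail
		 *	};
		 *	static const struct iommu_ops my_iommu_ops = {
		 *		.blocked_domain = &my_blocked_domain,
		 *		// ...
		 *	};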
		 */
		if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) &&
		    group->blocking_domain &&
		    group->blocking_domain != new_domain)
			__iommu_attach_device(group->blocking_domain, dev);
		return ret;
	}
	return 0;
}

/*
 * If 0 is returned the group's domain is new_domain. If an error is returned
 * then the group's domain will be set back to the existing domain unless
 * IOMMU_SET_DOMAIN_MUST_SUCCEED, otherwise an error is returned and the group's
 * domain is left inconsistent. It is a driver bug to fail attach with a
 * previously good domain. We try to avoid a kernel UAF because of this.
 *
 * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU
 * API works on domains and devices. Bridge that gap by iterating over the
 * devices in a group. Ideally we'd have a single device which represents the
 * requestor ID of the group, but we also allow IOMMU drivers to create policy
 * defined minimum sets, where the physical hardware may be able to distinguish
 * members, but we wish to group them at a higher level (ex. untrusted
 * multi-function PCI devices). Thus we attach each device.
 */
static int __iommu_group_set_domain_internal(struct iommu_group *group,
					     struct iommu_domain *new_domain,
					     unsigned int flags)
{
	struct group_device *last_gdev;
	struct group_device *gdev;
	int result;
	int ret;

	lockdep_assert_held(&group->mutex);

	if (group->domain == new_domain)
		return 0;

	if (WARN_ON(!new_domain))
		return -EINVAL;

	/*
	 * Changing the domain is done by calling attach_dev() on the new
	 * domain. This switch does not have to be atomic and DMA can be
	 * discarded during the transition. DMA must only be able to access
	 * either new_domain or group->domain, never something else.
	 */
	result = 0;
	for_each_group_device(group, gdev) {
		ret = __iommu_device_set_domain(group, gdev->dev, new_domain,
						flags);
		if (ret) {
			result = ret;
			/*
			 * Keep trying the other devices in the group. If a
			 * driver fails attach to an otherwise good domain, and
			 * does not support blocking domains, it should at least
			 * drop its reference on the current domain so we don't
			 * UAF.
			 */
			if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED)
				continue;
			goto err_revert;
		}
	}
	group->domain = new_domain;
	return result;

err_revert:
	/*
	 * This is called in error unwind paths. A well behaved driver should
	 * always allow us to attach to a domain that was already attached.
	 */
	last_gdev = gdev;
	for_each_group_device(group, gdev) {
		/*
		 * A NULL domain can happen only for first probe, in which case
		 * we leave group->domain as NULL and let release clean
		 * everything up.
2372 */ 2373 if (group->domain) 2374 WARN_ON(__iommu_device_set_domain( 2375 group, gdev->dev, group->domain, 2376 IOMMU_SET_DOMAIN_MUST_SUCCEED)); 2377 if (gdev == last_gdev) 2378 break; 2379 } 2380 return ret; 2381 } 2382 2383 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2384 { 2385 mutex_lock(&group->mutex); 2386 __iommu_group_set_core_domain(group); 2387 mutex_unlock(&group->mutex); 2388 } 2389 EXPORT_SYMBOL_GPL(iommu_detach_group); 2390 2391 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2392 { 2393 if (domain->type == IOMMU_DOMAIN_IDENTITY) 2394 return iova; 2395 2396 if (domain->type == IOMMU_DOMAIN_BLOCKED) 2397 return 0; 2398 2399 return domain->ops->iova_to_phys(domain, iova); 2400 } 2401 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2402 2403 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, 2404 phys_addr_t paddr, size_t size, size_t *count) 2405 { 2406 unsigned int pgsize_idx, pgsize_idx_next; 2407 unsigned long pgsizes; 2408 size_t offset, pgsize, pgsize_next; 2409 size_t offset_end; 2410 unsigned long addr_merge = paddr | iova; 2411 2412 /* Page sizes supported by the hardware and small enough for @size */ 2413 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); 2414 2415 /* Constrain the page sizes further based on the maximum alignment */ 2416 if (likely(addr_merge)) 2417 pgsizes &= GENMASK(__ffs(addr_merge), 0); 2418 2419 /* Make sure we have at least one suitable page size */ 2420 BUG_ON(!pgsizes); 2421 2422 /* Pick the biggest page size remaining */ 2423 pgsize_idx = __fls(pgsizes); 2424 pgsize = BIT(pgsize_idx); 2425 if (!count) 2426 return pgsize; 2427 2428 /* Find the next biggest support page size, if it exists */ 2429 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); 2430 if (!pgsizes) 2431 goto out_set_count; 2432 2433 pgsize_idx_next = __ffs(pgsizes); 2434 pgsize_next = BIT(pgsize_idx_next); 2435 2436 /* 2437 * There's no point trying a bigger page size unless the virtual 2438 * and physical addresses are similarly offset within the larger page. 2439 */ 2440 if ((iova ^ paddr) & (pgsize_next - 1)) 2441 goto out_set_count; 2442 2443 /* Calculate the offset to the next page size alignment boundary */ 2444 offset = pgsize_next - (addr_merge & (pgsize_next - 1)); 2445 2446 /* 2447 * If size is big enough to accommodate the larger page, reduce 2448 * the number of smaller pages. 
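	 *
	 * Worked example (assuming a pgsize_bitmap with 4K and 2M pages): for
	 * iova = paddr = 0x1ff000 and size = 0x400000, only a 4K page is
	 * aligned here, and the next 2M boundary is at 0x200000, so
	 * offset = 0x1000. Since 0x1000 + 2M still fits within 0x400000, size
	 * is cut down to 0x1000 and *count becomes 1: map a single 4K page up
	 * to the 2M boundary so the next iteration can switch to 2M pages.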
2449 */ 2450 if (!check_add_overflow(offset, pgsize_next, &offset_end) && 2451 offset_end <= size) 2452 size = offset; 2453 2454 out_set_count: 2455 *count = size >> pgsize_idx; 2456 return pgsize; 2457 } 2458 2459 int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova, 2460 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2461 { 2462 const struct iommu_domain_ops *ops = domain->ops; 2463 unsigned long orig_iova = iova; 2464 unsigned int min_pagesz; 2465 size_t orig_size = size; 2466 phys_addr_t orig_paddr = paddr; 2467 int ret = 0; 2468 2469 might_sleep_if(gfpflags_allow_blocking(gfp)); 2470 2471 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2472 return -EINVAL; 2473 2474 if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL)) 2475 return -ENODEV; 2476 2477 /* Discourage passing strange GFP flags */ 2478 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2479 __GFP_HIGHMEM))) 2480 return -EINVAL; 2481 2482 /* find out the minimum page size supported */ 2483 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2484 2485 /* 2486 * both the virtual address and the physical one, as well as 2487 * the size of the mapping, must be aligned (at least) to the 2488 * size of the smallest page supported by the hardware 2489 */ 2490 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2491 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2492 iova, &paddr, size, min_pagesz); 2493 return -EINVAL; 2494 } 2495 2496 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2497 2498 while (size) { 2499 size_t pgsize, count, mapped = 0; 2500 2501 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); 2502 2503 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", 2504 iova, &paddr, pgsize, count); 2505 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, 2506 gfp, &mapped); 2507 /* 2508 * Some pages may have been mapped, even if an error occurred, 2509 * so we should account for those so they can be unmapped. 
2510 */ 2511 size -= mapped; 2512 2513 if (ret) 2514 break; 2515 2516 iova += mapped; 2517 paddr += mapped; 2518 } 2519 2520 /* unroll mapping in case something went wrong */ 2521 if (ret) 2522 iommu_unmap(domain, orig_iova, orig_size - size); 2523 else 2524 trace_map(orig_iova, orig_paddr, orig_size); 2525 2526 return ret; 2527 } 2528 2529 int iommu_sync_map(struct iommu_domain *domain, unsigned long iova, size_t size) 2530 { 2531 const struct iommu_domain_ops *ops = domain->ops; 2532 2533 if (!ops->iotlb_sync_map) 2534 return 0; 2535 return ops->iotlb_sync_map(domain, iova, size); 2536 } 2537 2538 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2539 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2540 { 2541 int ret; 2542 2543 ret = iommu_map_nosync(domain, iova, paddr, size, prot, gfp); 2544 if (ret) 2545 return ret; 2546 2547 ret = iommu_sync_map(domain, iova, size); 2548 if (ret) 2549 iommu_unmap(domain, iova, size); 2550 2551 return ret; 2552 } 2553 EXPORT_SYMBOL_GPL(iommu_map); 2554 2555 static size_t __iommu_unmap(struct iommu_domain *domain, 2556 unsigned long iova, size_t size, 2557 struct iommu_iotlb_gather *iotlb_gather) 2558 { 2559 const struct iommu_domain_ops *ops = domain->ops; 2560 size_t unmapped_page, unmapped = 0; 2561 unsigned long orig_iova = iova; 2562 unsigned int min_pagesz; 2563 2564 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2565 return 0; 2566 2567 if (WARN_ON(!ops->unmap_pages || domain->pgsize_bitmap == 0UL)) 2568 return 0; 2569 2570 /* find out the minimum page size supported */ 2571 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2572 2573 /* 2574 * The virtual address, as well as the size of the mapping, must be 2575 * aligned (at least) to the size of the smallest page supported 2576 * by the hardware 2577 */ 2578 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2579 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2580 iova, size, min_pagesz); 2581 return 0; 2582 } 2583 2584 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2585 2586 /* 2587 * Keep iterating until we either unmap 'size' bytes (or more) 2588 * or we hit an area that isn't mapped. 2589 */ 2590 while (unmapped < size) { 2591 size_t pgsize, count; 2592 2593 pgsize = iommu_pgsize(domain, iova, iova, size - unmapped, &count); 2594 unmapped_page = ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather); 2595 if (!unmapped_page) 2596 break; 2597 2598 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2599 iova, unmapped_page); 2600 2601 iova += unmapped_page; 2602 unmapped += unmapped_page; 2603 } 2604 2605 trace_unmap(orig_iova, size, unmapped); 2606 return unmapped; 2607 } 2608 2609 /** 2610 * iommu_unmap() - Remove mappings from a range of IOVA 2611 * @domain: Domain to manipulate 2612 * @iova: IO virtual address to start 2613 * @size: Length of the range starting from @iova 2614 * 2615 * iommu_unmap() will remove a translation created by iommu_map(). It cannot 2616 * subdivide a mapping created by iommu_map(), so it should be called with IOVA 2617 * ranges that match what was passed to iommu_map(). The range can aggregate 2618 * contiguous iommu_map() calls so long as no individual range is split. 2619 * 2620 * Returns: Number of bytes of IOVA unmapped. iova + res will be the point 2621 * unmapping stopped. 
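 *
 * Example (illustrative; dom, iova and page are assumed to already exist):
 *
 *	ret = iommu_map(dom, iova, page_to_phys(page), SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	...
 *	unmapped = iommu_unmap(dom, iova, SZ_4K);	// must cover the whole mapping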
2622 */ 2623 size_t iommu_unmap(struct iommu_domain *domain, 2624 unsigned long iova, size_t size) 2625 { 2626 struct iommu_iotlb_gather iotlb_gather; 2627 size_t ret; 2628 2629 iommu_iotlb_gather_init(&iotlb_gather); 2630 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2631 iommu_iotlb_sync(domain, &iotlb_gather); 2632 2633 return ret; 2634 } 2635 EXPORT_SYMBOL_GPL(iommu_unmap); 2636 2637 /** 2638 * iommu_unmap_fast() - Remove mappings from a range of IOVA without IOTLB sync 2639 * @domain: Domain to manipulate 2640 * @iova: IO virtual address to start 2641 * @size: Length of the range starting from @iova 2642 * @iotlb_gather: range information for a pending IOTLB flush 2643 * 2644 * iommu_unmap_fast() will remove a translation created by iommu_map(). 2645 * It can't subdivide a mapping created by iommu_map(), so it should be 2646 * called with IOVA ranges that match what was passed to iommu_map(). The 2647 * range can aggregate contiguous iommu_map() calls so long as no individual 2648 * range is split. 2649 * 2650 * Basically iommu_unmap_fast() is the same as iommu_unmap() but for callers 2651 * which manage the IOTLB flushing externally to perform a batched sync. 2652 * 2653 * Returns: Number of bytes of IOVA unmapped. iova + res will be the point 2654 * unmapping stopped. 2655 */ 2656 size_t iommu_unmap_fast(struct iommu_domain *domain, 2657 unsigned long iova, size_t size, 2658 struct iommu_iotlb_gather *iotlb_gather) 2659 { 2660 return __iommu_unmap(domain, iova, size, iotlb_gather); 2661 } 2662 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2663 2664 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2665 struct scatterlist *sg, unsigned int nents, int prot, 2666 gfp_t gfp) 2667 { 2668 size_t len = 0, mapped = 0; 2669 phys_addr_t start; 2670 unsigned int i = 0; 2671 int ret; 2672 2673 while (i <= nents) { 2674 phys_addr_t s_phys = sg_phys(sg); 2675 2676 if (len && s_phys != start + len) { 2677 ret = iommu_map_nosync(domain, iova + mapped, start, 2678 len, prot, gfp); 2679 if (ret) 2680 goto out_err; 2681 2682 mapped += len; 2683 len = 0; 2684 } 2685 2686 if (sg_dma_is_bus_address(sg)) 2687 goto next; 2688 2689 if (len) { 2690 len += sg->length; 2691 } else { 2692 len = sg->length; 2693 start = s_phys; 2694 } 2695 2696 next: 2697 if (++i < nents) 2698 sg = sg_next(sg); 2699 } 2700 2701 ret = iommu_sync_map(domain, iova, mapped); 2702 if (ret) 2703 goto out_err; 2704 2705 return mapped; 2706 2707 out_err: 2708 /* undo mappings already done */ 2709 iommu_unmap(domain, iova, mapped); 2710 2711 return ret; 2712 } 2713 EXPORT_SYMBOL_GPL(iommu_map_sg); 2714 2715 /** 2716 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2717 * @domain: the iommu domain where the fault has happened 2718 * @dev: the device where the fault has happened 2719 * @iova: the faulting address 2720 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2721 * 2722 * This function should be called by the low-level IOMMU implementations 2723 * whenever IOMMU faults happen, to allow high-level users, that are 2724 * interested in such events, to know about them. 
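 *
 * For example, a low-level driver might report a read fault from its IRQ
 * handler roughly as below (a sketch only; my_smmu and MY_FAULT_ADDR are
 * hypothetical):
 *
 *	static irqreturn_t my_smmu_fault_irq(int irq, void *cookie)
 *	{
 *		struct my_smmu *smmu = cookie;
 *		unsigned long iova = readq_relaxed(smmu->base + MY_FAULT_ADDR);
 *
 *		if (report_iommu_fault(smmu->domain, smmu->dev, iova,
 *				       IOMMU_FAULT_READ))
 *			dev_err_ratelimited(smmu->dev, "unhandled fault at 0x%lx\n", iova);
 *		return IRQ_HANDLED;
 *	}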
2725 * 2726 * This event may be useful for several possible use cases: 2727 * - mere logging of the event 2728 * - dynamic TLB/PTE loading 2729 * - if restarting of the faulting device is required 2730 * 2731 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2732 * PTE/TLB loading will one day be supported, implementations will be able 2733 * to tell whether it succeeded or not according to this return value). 2734 * 2735 * Specifically, -ENOSYS is returned if a fault handler isn't installed 2736 * (though fault handlers can also return -ENOSYS, in case they want to 2737 * elicit the default behavior of the IOMMU drivers). 2738 */ 2739 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 2740 unsigned long iova, int flags) 2741 { 2742 int ret = -ENOSYS; 2743 2744 /* 2745 * if upper layers showed interest and installed a fault handler, 2746 * invoke it. 2747 */ 2748 if (domain->cookie_type == IOMMU_COOKIE_FAULT_HANDLER && 2749 domain->handler) 2750 ret = domain->handler(domain, dev, iova, flags, 2751 domain->handler_token); 2752 2753 trace_io_page_fault(dev, iova, flags); 2754 return ret; 2755 } 2756 EXPORT_SYMBOL_GPL(report_iommu_fault); 2757 2758 static int __init iommu_init(void) 2759 { 2760 iommu_group_kset = kset_create_and_add("iommu_groups", 2761 NULL, kernel_kobj); 2762 BUG_ON(!iommu_group_kset); 2763 2764 iommu_debugfs_setup(); 2765 2766 return 0; 2767 } 2768 core_initcall(iommu_init); 2769 2770 int iommu_set_pgtable_quirks(struct iommu_domain *domain, 2771 unsigned long quirk) 2772 { 2773 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2774 return -EINVAL; 2775 if (!domain->ops->set_pgtable_quirks) 2776 return -EINVAL; 2777 return domain->ops->set_pgtable_quirks(domain, quirk); 2778 } 2779 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); 2780 2781 /** 2782 * iommu_get_resv_regions - get reserved regions 2783 * @dev: device for which to get reserved regions 2784 * @list: reserved region list for device 2785 * 2786 * This returns a list of reserved IOVA regions specific to this device. 2787 * A domain user should not map IOVA in these ranges. 2788 */ 2789 void iommu_get_resv_regions(struct device *dev, struct list_head *list) 2790 { 2791 const struct iommu_ops *ops = dev_iommu_ops(dev); 2792 2793 if (ops->get_resv_regions) 2794 ops->get_resv_regions(dev, list); 2795 } 2796 EXPORT_SYMBOL_GPL(iommu_get_resv_regions); 2797 2798 /** 2799 * iommu_put_resv_regions - release reserved regions 2800 * @dev: device for which to free reserved regions 2801 * @list: reserved region list for device 2802 * 2803 * This releases a reserved region list acquired by iommu_get_resv_regions(). 
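 *
 * Example (illustrative) of pairing this with iommu_get_resv_regions():
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		pr_info("resv [%pa + 0x%zx] type %d\n",
 *			&region->start, region->length, region->type);
 *	iommu_put_resv_regions(dev, &resv_regions);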
2804 */ 2805 void iommu_put_resv_regions(struct device *dev, struct list_head *list) 2806 { 2807 struct iommu_resv_region *entry, *next; 2808 2809 list_for_each_entry_safe(entry, next, list, list) { 2810 if (entry->free) 2811 entry->free(dev, entry); 2812 else 2813 kfree(entry); 2814 } 2815 } 2816 EXPORT_SYMBOL(iommu_put_resv_regions); 2817 2818 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 2819 size_t length, int prot, 2820 enum iommu_resv_type type, 2821 gfp_t gfp) 2822 { 2823 struct iommu_resv_region *region; 2824 2825 region = kzalloc(sizeof(*region), gfp); 2826 if (!region) 2827 return NULL; 2828 2829 INIT_LIST_HEAD(®ion->list); 2830 region->start = start; 2831 region->length = length; 2832 region->prot = prot; 2833 region->type = type; 2834 return region; 2835 } 2836 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); 2837 2838 void iommu_set_default_passthrough(bool cmd_line) 2839 { 2840 if (cmd_line) 2841 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2842 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2843 } 2844 2845 void iommu_set_default_translated(bool cmd_line) 2846 { 2847 if (cmd_line) 2848 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2849 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2850 } 2851 2852 bool iommu_default_passthrough(void) 2853 { 2854 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2855 } 2856 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2857 2858 static const struct iommu_device *iommu_from_fwnode(const struct fwnode_handle *fwnode) 2859 { 2860 const struct iommu_device *iommu, *ret = NULL; 2861 2862 spin_lock(&iommu_device_lock); 2863 list_for_each_entry(iommu, &iommu_device_list, list) 2864 if (iommu->fwnode == fwnode) { 2865 ret = iommu; 2866 break; 2867 } 2868 spin_unlock(&iommu_device_lock); 2869 return ret; 2870 } 2871 2872 const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode) 2873 { 2874 const struct iommu_device *iommu = iommu_from_fwnode(fwnode); 2875 2876 return iommu ? iommu->ops : NULL; 2877 } 2878 2879 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode) 2880 { 2881 const struct iommu_device *iommu = iommu_from_fwnode(iommu_fwnode); 2882 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2883 2884 if (!iommu) 2885 return driver_deferred_probe_check_state(dev); 2886 if (!dev->iommu && !READ_ONCE(iommu->ready)) 2887 return -EPROBE_DEFER; 2888 2889 if (fwspec) 2890 return iommu->ops == iommu_fwspec_ops(fwspec) ? 
0 : -EINVAL; 2891 2892 if (!dev_iommu_get(dev)) 2893 return -ENOMEM; 2894 2895 /* Preallocate for the overwhelmingly common case of 1 ID */ 2896 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2897 if (!fwspec) 2898 return -ENOMEM; 2899 2900 fwnode_handle_get(iommu_fwnode); 2901 fwspec->iommu_fwnode = iommu_fwnode; 2902 dev_iommu_fwspec_set(dev, fwspec); 2903 return 0; 2904 } 2905 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2906 2907 void iommu_fwspec_free(struct device *dev) 2908 { 2909 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2910 2911 if (fwspec) { 2912 fwnode_handle_put(fwspec->iommu_fwnode); 2913 kfree(fwspec); 2914 dev_iommu_fwspec_set(dev, NULL); 2915 } 2916 } 2917 2918 int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids) 2919 { 2920 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2921 int i, new_num; 2922 2923 if (!fwspec) 2924 return -EINVAL; 2925 2926 new_num = fwspec->num_ids + num_ids; 2927 if (new_num > 1) { 2928 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2929 GFP_KERNEL); 2930 if (!fwspec) 2931 return -ENOMEM; 2932 2933 dev_iommu_fwspec_set(dev, fwspec); 2934 } 2935 2936 for (i = 0; i < num_ids; i++) 2937 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2938 2939 fwspec->num_ids = new_num; 2940 return 0; 2941 } 2942 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 2943 2944 /** 2945 * iommu_setup_default_domain - Set the default_domain for the group 2946 * @group: Group to change 2947 * @target_type: Domain type to set as the default_domain 2948 * 2949 * Allocate a default domain and set it as the current domain on the group. If 2950 * the group already has a default domain it will be changed to the target_type. 2951 * When target_type is 0 the default domain is selected based on driver and 2952 * system preferences. 2953 */ 2954 static int iommu_setup_default_domain(struct iommu_group *group, 2955 int target_type) 2956 { 2957 struct iommu_domain *old_dom = group->default_domain; 2958 struct group_device *gdev; 2959 struct iommu_domain *dom; 2960 bool direct_failed; 2961 int req_type; 2962 int ret; 2963 2964 lockdep_assert_held(&group->mutex); 2965 2966 req_type = iommu_get_default_domain_type(group, target_type); 2967 if (req_type < 0) 2968 return -EINVAL; 2969 2970 dom = iommu_group_alloc_default_domain(group, req_type); 2971 if (IS_ERR(dom)) 2972 return PTR_ERR(dom); 2973 2974 if (group->default_domain == dom) 2975 return 0; 2976 2977 if (iommu_is_dma_domain(dom)) { 2978 ret = iommu_get_dma_cookie(dom); 2979 if (ret) { 2980 iommu_domain_free(dom); 2981 return ret; 2982 } 2983 } 2984 2985 /* 2986 * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be 2987 * mapped before their device is attached, in order to guarantee 2988 * continuity with any FW activity 2989 */ 2990 direct_failed = false; 2991 for_each_group_device(group, gdev) { 2992 if (iommu_create_device_direct_mappings(dom, gdev->dev)) { 2993 direct_failed = true; 2994 dev_warn_once( 2995 gdev->dev->iommu->iommu_dev->dev, 2996 "IOMMU driver was not able to establish FW requested direct mapping."); 2997 } 2998 } 2999 3000 /* We must set default_domain early for __iommu_device_set_domain */ 3001 group->default_domain = dom; 3002 if (!group->domain) { 3003 /* 3004 * Drivers are not allowed to fail the first domain attach. 3005 * The only way to recover from this is to fail attaching the 3006 * iommu driver and call ops->release_device. Put the domain 3007 * in group->default_domain so it is freed after. 
		 */
		ret = __iommu_group_set_domain_internal(
			group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
		if (WARN_ON(ret))
			goto out_free_old;
	} else {
		ret = __iommu_group_set_domain(group, dom);
		if (ret)
			goto err_restore_def_domain;
	}

	/*
	 * Drivers are supposed to allow mappings to be installed in a domain
	 * before device attachment, but some don't. Hack around this defect by
	 * trying again after attaching. If this happens it means the device
	 * will not continuously have the IOMMU_RESV_DIRECT map.
	 */
	if (direct_failed) {
		for_each_group_device(group, gdev) {
			ret = iommu_create_device_direct_mappings(dom, gdev->dev);
			if (ret)
				goto err_restore_domain;
		}
	}

out_free_old:
	if (old_dom)
		iommu_domain_free(old_dom);
	return ret;

err_restore_domain:
	if (old_dom)
		__iommu_group_set_domain_internal(
			group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
err_restore_def_domain:
	if (old_dom) {
		iommu_domain_free(dom);
		group->default_domain = old_dom;
	}
	return ret;
}

/*
 * Changing the default domain through sysfs requires the users to unbind the
 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
 * transition. Return failure if this isn't met.
 *
 * We need to consider the race between this and the device release path.
 * group->mutex is used here to guarantee that the device release path
 * will not be entered at the same time.
 */
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count)
{
	struct group_device *gdev;
	int ret, req_type;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	if (WARN_ON(!group) || !group->default_domain)
		return -EINVAL;

	if (sysfs_streq(buf, "identity"))
		req_type = IOMMU_DOMAIN_IDENTITY;
	else if (sysfs_streq(buf, "DMA"))
		req_type = IOMMU_DOMAIN_DMA;
	else if (sysfs_streq(buf, "DMA-FQ"))
		req_type = IOMMU_DOMAIN_DMA_FQ;
	else if (sysfs_streq(buf, "auto"))
		req_type = 0;
	else
		return -EINVAL;

	mutex_lock(&group->mutex);
	/* We can bring up a flush queue without tearing down the domain. */
	if (req_type == IOMMU_DOMAIN_DMA_FQ &&
	    group->default_domain->type == IOMMU_DOMAIN_DMA) {
		ret = iommu_dma_init_fq(group->default_domain);
		if (ret)
			goto out_unlock;

		group->default_domain->type = IOMMU_DOMAIN_DMA_FQ;
		ret = count;
		goto out_unlock;
	}

	/* Otherwise, ensure that a device exists and no driver is bound. */
	if (list_empty(&group->devices) || group->owner_cnt) {
		ret = -EPERM;
		goto out_unlock;
	}

	ret = iommu_setup_default_domain(group, req_type);
	if (ret)
		goto out_unlock;

	/* Make sure dma_ops is appropriately set */
	for_each_group_device(group, gdev)
		iommu_setup_dma_ops(gdev->dev);

out_unlock:
	mutex_unlock(&group->mutex);
	return ret ?: count;
}

/**
 * iommu_device_use_default_domain() - Device driver wants to handle device
 * DMA through the kernel DMA API.
 * @dev: The device.
 *
 * The device driver about to bind @dev wants to do DMA through the kernel
 * DMA API. Return 0 if it is allowed, otherwise an error.
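 *
 * A simplified sketch of how the driver core pairs this with
 * iommu_device_unuse_default_domain() around probe (not the exact dd.c code;
 * call_driver_probe() stands in for the real probe path):
 *
 *	ret = iommu_device_use_default_domain(dev);
 *	if (ret)
 *		return ret;
 *	ret = call_driver_probe(dev, drv);
 *	if (ret)
 *		iommu_device_unuse_default_domain(dev);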
3121 */ 3122 int iommu_device_use_default_domain(struct device *dev) 3123 { 3124 /* Caller is the driver core during the pre-probe path */ 3125 struct iommu_group *group = dev->iommu_group; 3126 int ret = 0; 3127 3128 if (!group) 3129 return 0; 3130 3131 mutex_lock(&group->mutex); 3132 /* We may race against bus_iommu_probe() finalising groups here */ 3133 if (!group->default_domain) { 3134 ret = -EPROBE_DEFER; 3135 goto unlock_out; 3136 } 3137 if (group->owner_cnt) { 3138 if (group->domain != group->default_domain || group->owner || 3139 !xa_empty(&group->pasid_array)) { 3140 ret = -EBUSY; 3141 goto unlock_out; 3142 } 3143 } 3144 3145 group->owner_cnt++; 3146 3147 unlock_out: 3148 mutex_unlock(&group->mutex); 3149 return ret; 3150 } 3151 3152 /** 3153 * iommu_device_unuse_default_domain() - Device driver stops handling device 3154 * DMA through the kernel DMA API. 3155 * @dev: The device. 3156 * 3157 * The device driver doesn't want to do DMA through kernel DMA API anymore. 3158 * It must be called after iommu_device_use_default_domain(). 3159 */ 3160 void iommu_device_unuse_default_domain(struct device *dev) 3161 { 3162 /* Caller is the driver core during the post-probe path */ 3163 struct iommu_group *group = dev->iommu_group; 3164 3165 if (!group) 3166 return; 3167 3168 mutex_lock(&group->mutex); 3169 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) 3170 group->owner_cnt--; 3171 3172 mutex_unlock(&group->mutex); 3173 } 3174 3175 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) 3176 { 3177 struct device *dev = iommu_group_first_dev(group); 3178 const struct iommu_ops *ops = dev_iommu_ops(dev); 3179 struct iommu_domain *domain; 3180 3181 if (group->blocking_domain) 3182 return 0; 3183 3184 if (ops->blocked_domain) { 3185 group->blocking_domain = ops->blocked_domain; 3186 return 0; 3187 } 3188 3189 /* 3190 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED create an 3191 * empty PAGING domain instead. 3192 */ 3193 domain = iommu_paging_domain_alloc(dev); 3194 if (IS_ERR(domain)) 3195 return PTR_ERR(domain); 3196 group->blocking_domain = domain; 3197 return 0; 3198 } 3199 3200 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) 3201 { 3202 int ret; 3203 3204 if ((group->domain && group->domain != group->default_domain) || 3205 !xa_empty(&group->pasid_array)) 3206 return -EBUSY; 3207 3208 ret = __iommu_group_alloc_blocking_domain(group); 3209 if (ret) 3210 return ret; 3211 ret = __iommu_group_set_domain(group, group->blocking_domain); 3212 if (ret) 3213 return ret; 3214 3215 group->owner = owner; 3216 group->owner_cnt++; 3217 return 0; 3218 } 3219 3220 /** 3221 * iommu_group_claim_dma_owner() - Set DMA ownership of a group 3222 * @group: The group. 3223 * @owner: Caller specified pointer. Used for exclusive ownership. 3224 * 3225 * This is to support backward compatibility for vfio which manages the dma 3226 * ownership in iommu_group level. New invocations on this interface should be 3227 * prohibited. Only a single owner may exist for a group. 
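 *
 * Example (illustrative; the owner cookie only needs to be a unique,
 * non-NULL pointer, e.g. a driver-private structure such as my_vfio_group):
 *
 *	ret = iommu_group_claim_dma_owner(group, my_vfio_group);
 *	if (ret)
 *		return ret;
 *	// ... user-controlled DMA through a caller-attached domain ...
 *	iommu_group_release_dma_owner(group);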
3228 */ 3229 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) 3230 { 3231 int ret = 0; 3232 3233 if (WARN_ON(!owner)) 3234 return -EINVAL; 3235 3236 mutex_lock(&group->mutex); 3237 if (group->owner_cnt) { 3238 ret = -EPERM; 3239 goto unlock_out; 3240 } 3241 3242 ret = __iommu_take_dma_ownership(group, owner); 3243 unlock_out: 3244 mutex_unlock(&group->mutex); 3245 3246 return ret; 3247 } 3248 EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner); 3249 3250 /** 3251 * iommu_device_claim_dma_owner() - Set DMA ownership of a device 3252 * @dev: The device. 3253 * @owner: Caller specified pointer. Used for exclusive ownership. 3254 * 3255 * Claim the DMA ownership of a device. Multiple devices in the same group may 3256 * concurrently claim ownership if they present the same owner value. Returns 0 3257 * on success and error code on failure 3258 */ 3259 int iommu_device_claim_dma_owner(struct device *dev, void *owner) 3260 { 3261 /* Caller must be a probed driver on dev */ 3262 struct iommu_group *group = dev->iommu_group; 3263 int ret = 0; 3264 3265 if (WARN_ON(!owner)) 3266 return -EINVAL; 3267 3268 if (!group) 3269 return -ENODEV; 3270 3271 mutex_lock(&group->mutex); 3272 if (group->owner_cnt) { 3273 if (group->owner != owner) { 3274 ret = -EPERM; 3275 goto unlock_out; 3276 } 3277 group->owner_cnt++; 3278 goto unlock_out; 3279 } 3280 3281 ret = __iommu_take_dma_ownership(group, owner); 3282 unlock_out: 3283 mutex_unlock(&group->mutex); 3284 return ret; 3285 } 3286 EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner); 3287 3288 static void __iommu_release_dma_ownership(struct iommu_group *group) 3289 { 3290 if (WARN_ON(!group->owner_cnt || !group->owner || 3291 !xa_empty(&group->pasid_array))) 3292 return; 3293 3294 group->owner_cnt = 0; 3295 group->owner = NULL; 3296 __iommu_group_set_domain_nofail(group, group->default_domain); 3297 } 3298 3299 /** 3300 * iommu_group_release_dma_owner() - Release DMA ownership of a group 3301 * @group: The group 3302 * 3303 * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). 3304 */ 3305 void iommu_group_release_dma_owner(struct iommu_group *group) 3306 { 3307 mutex_lock(&group->mutex); 3308 __iommu_release_dma_ownership(group); 3309 mutex_unlock(&group->mutex); 3310 } 3311 EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner); 3312 3313 /** 3314 * iommu_device_release_dma_owner() - Release DMA ownership of a device 3315 * @dev: The device. 3316 * 3317 * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). 3318 */ 3319 void iommu_device_release_dma_owner(struct device *dev) 3320 { 3321 /* Caller must be a probed driver on dev */ 3322 struct iommu_group *group = dev->iommu_group; 3323 3324 mutex_lock(&group->mutex); 3325 if (group->owner_cnt > 1) 3326 group->owner_cnt--; 3327 else 3328 __iommu_release_dma_ownership(group); 3329 mutex_unlock(&group->mutex); 3330 } 3331 EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner); 3332 3333 /** 3334 * iommu_group_dma_owner_claimed() - Query group dma ownership status 3335 * @group: The group. 3336 * 3337 * This provides status query on a given group. It is racy and only for 3338 * non-binding status reporting. 
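 *
 * Example (illustrative):
 *
 *	if (iommu_group_dma_owner_claimed(group))
 *		pr_info("iommu group %d already has a DMA owner\n",
 *			iommu_group_id(group));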
3339 */ 3340 bool iommu_group_dma_owner_claimed(struct iommu_group *group) 3341 { 3342 unsigned int user; 3343 3344 mutex_lock(&group->mutex); 3345 user = group->owner_cnt; 3346 mutex_unlock(&group->mutex); 3347 3348 return user; 3349 } 3350 EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); 3351 3352 static void iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, 3353 struct iommu_domain *domain) 3354 { 3355 const struct iommu_ops *ops = dev_iommu_ops(dev); 3356 struct iommu_domain *blocked_domain = ops->blocked_domain; 3357 3358 WARN_ON(blocked_domain->ops->set_dev_pasid(blocked_domain, 3359 dev, pasid, domain)); 3360 } 3361 3362 static int __iommu_set_group_pasid(struct iommu_domain *domain, 3363 struct iommu_group *group, ioasid_t pasid, 3364 struct iommu_domain *old) 3365 { 3366 struct group_device *device, *last_gdev; 3367 int ret; 3368 3369 for_each_group_device(group, device) { 3370 if (device->dev->iommu->max_pasids > 0) { 3371 ret = domain->ops->set_dev_pasid(domain, device->dev, 3372 pasid, old); 3373 if (ret) 3374 goto err_revert; 3375 } 3376 } 3377 3378 return 0; 3379 3380 err_revert: 3381 last_gdev = device; 3382 for_each_group_device(group, device) { 3383 if (device == last_gdev) 3384 break; 3385 if (device->dev->iommu->max_pasids > 0) { 3386 /* 3387 * If no old domain, undo the succeeded devices/pasid. 3388 * Otherwise, rollback the succeeded devices/pasid to 3389 * the old domain. And it is a driver bug to fail 3390 * attaching with a previously good domain. 3391 */ 3392 if (!old || 3393 WARN_ON(old->ops->set_dev_pasid(old, device->dev, 3394 pasid, domain))) 3395 iommu_remove_dev_pasid(device->dev, pasid, domain); 3396 } 3397 } 3398 return ret; 3399 } 3400 3401 static void __iommu_remove_group_pasid(struct iommu_group *group, 3402 ioasid_t pasid, 3403 struct iommu_domain *domain) 3404 { 3405 struct group_device *device; 3406 3407 for_each_group_device(group, device) { 3408 if (device->dev->iommu->max_pasids > 0) 3409 iommu_remove_dev_pasid(device->dev, pasid, domain); 3410 } 3411 } 3412 3413 /* 3414 * iommu_attach_device_pasid() - Attach a domain to pasid of device 3415 * @domain: the iommu domain. 3416 * @dev: the attached device. 3417 * @pasid: the pasid of the device. 3418 * @handle: the attach handle. 3419 * 3420 * Caller should always provide a new handle to avoid race with the paths 3421 * that have lockless reference to handle if it intends to pass a valid handle. 3422 * 3423 * Return: 0 on success, or an error. 3424 */ 3425 int iommu_attach_device_pasid(struct iommu_domain *domain, 3426 struct device *dev, ioasid_t pasid, 3427 struct iommu_attach_handle *handle) 3428 { 3429 /* Caller must be a probed driver on dev */ 3430 struct iommu_group *group = dev->iommu_group; 3431 struct group_device *device; 3432 const struct iommu_ops *ops; 3433 void *entry; 3434 int ret; 3435 3436 if (!group) 3437 return -ENODEV; 3438 3439 ops = dev_iommu_ops(dev); 3440 3441 if (!domain->ops->set_dev_pasid || 3442 !ops->blocked_domain || 3443 !ops->blocked_domain->ops->set_dev_pasid) 3444 return -EOPNOTSUPP; 3445 3446 if (!domain_iommu_ops_compatible(ops, domain) || 3447 pasid == IOMMU_NO_PASID) 3448 return -EINVAL; 3449 3450 mutex_lock(&group->mutex); 3451 for_each_group_device(group, device) { 3452 /* 3453 * Skip PASID validation for devices without PASID support 3454 * (max_pasids = 0). These devices cannot issue transactions 3455 * with PASID, so they don't affect group's PASID usage. 
3456 */ 3457 if ((device->dev->iommu->max_pasids > 0) && 3458 (pasid >= device->dev->iommu->max_pasids)) { 3459 ret = -EINVAL; 3460 goto out_unlock; 3461 } 3462 } 3463 3464 entry = iommu_make_pasid_array_entry(domain, handle); 3465 3466 /* 3467 * Entry present is a failure case. Use xa_insert() instead of 3468 * xa_reserve(). 3469 */ 3470 ret = xa_insert(&group->pasid_array, pasid, XA_ZERO_ENTRY, GFP_KERNEL); 3471 if (ret) 3472 goto out_unlock; 3473 3474 ret = __iommu_set_group_pasid(domain, group, pasid, NULL); 3475 if (ret) { 3476 xa_release(&group->pasid_array, pasid); 3477 goto out_unlock; 3478 } 3479 3480 /* 3481 * The xa_insert() above reserved the memory, and the group->mutex is 3482 * held, this cannot fail. The new domain cannot be visible until the 3483 * operation succeeds as we cannot tolerate PRIs becoming concurrently 3484 * queued and then failing attach. 3485 */ 3486 WARN_ON(xa_is_err(xa_store(&group->pasid_array, 3487 pasid, entry, GFP_KERNEL))); 3488 3489 out_unlock: 3490 mutex_unlock(&group->mutex); 3491 return ret; 3492 } 3493 EXPORT_SYMBOL_GPL(iommu_attach_device_pasid); 3494 3495 /** 3496 * iommu_replace_device_pasid - Replace the domain that a specific pasid 3497 * of the device is attached to 3498 * @domain: the new iommu domain 3499 * @dev: the attached device. 3500 * @pasid: the pasid of the device. 3501 * @handle: the attach handle. 3502 * 3503 * This API allows the pasid to switch domains. The @pasid should have been 3504 * attached. Otherwise, this fails. The pasid will keep the old configuration 3505 * if replacement failed. 3506 * 3507 * Caller should always provide a new handle to avoid race with the paths 3508 * that have lockless reference to handle if it intends to pass a valid handle. 3509 * 3510 * Return 0 on success, or an error. 3511 */ 3512 int iommu_replace_device_pasid(struct iommu_domain *domain, 3513 struct device *dev, ioasid_t pasid, 3514 struct iommu_attach_handle *handle) 3515 { 3516 /* Caller must be a probed driver on dev */ 3517 struct iommu_group *group = dev->iommu_group; 3518 struct iommu_attach_handle *entry; 3519 struct iommu_domain *curr_domain; 3520 void *curr; 3521 int ret; 3522 3523 if (!group) 3524 return -ENODEV; 3525 3526 if (!domain->ops->set_dev_pasid) 3527 return -EOPNOTSUPP; 3528 3529 if (!domain_iommu_ops_compatible(dev_iommu_ops(dev), domain) || 3530 pasid == IOMMU_NO_PASID || !handle) 3531 return -EINVAL; 3532 3533 mutex_lock(&group->mutex); 3534 entry = iommu_make_pasid_array_entry(domain, handle); 3535 curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, 3536 XA_ZERO_ENTRY, GFP_KERNEL); 3537 if (xa_is_err(curr)) { 3538 ret = xa_err(curr); 3539 goto out_unlock; 3540 } 3541 3542 /* 3543 * No domain (with or without handle) attached, hence not 3544 * a replace case. 3545 */ 3546 if (!curr) { 3547 xa_release(&group->pasid_array, pasid); 3548 ret = -EINVAL; 3549 goto out_unlock; 3550 } 3551 3552 /* 3553 * Reusing handle is problematic as there are paths that refers 3554 * the handle without lock. To avoid race, reject the callers that 3555 * attempt it. 3556 */ 3557 if (curr == entry) { 3558 WARN_ON(1); 3559 ret = -EINVAL; 3560 goto out_unlock; 3561 } 3562 3563 curr_domain = pasid_array_entry_to_domain(curr); 3564 ret = 0; 3565 3566 if (curr_domain != domain) { 3567 ret = __iommu_set_group_pasid(domain, group, 3568 pasid, curr_domain); 3569 if (ret) 3570 goto out_unlock; 3571 } 3572 3573 /* 3574 * The above xa_cmpxchg() reserved the memory, and the 3575 * group->mutex is held, this cannot fail. 
3576 */ 3577 WARN_ON(xa_is_err(xa_store(&group->pasid_array, 3578 pasid, entry, GFP_KERNEL))); 3579 3580 out_unlock: 3581 mutex_unlock(&group->mutex); 3582 return ret; 3583 } 3584 EXPORT_SYMBOL_NS_GPL(iommu_replace_device_pasid, "IOMMUFD_INTERNAL"); 3585 3586 /* 3587 * iommu_detach_device_pasid() - Detach the domain from pasid of device 3588 * @domain: the iommu domain. 3589 * @dev: the attached device. 3590 * @pasid: the pasid of the device. 3591 * 3592 * The @domain must have been attached to @pasid of the @dev with 3593 * iommu_attach_device_pasid(). 3594 */ 3595 void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, 3596 ioasid_t pasid) 3597 { 3598 /* Caller must be a probed driver on dev */ 3599 struct iommu_group *group = dev->iommu_group; 3600 3601 mutex_lock(&group->mutex); 3602 __iommu_remove_group_pasid(group, pasid, domain); 3603 xa_erase(&group->pasid_array, pasid); 3604 mutex_unlock(&group->mutex); 3605 } 3606 EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); 3607 3608 ioasid_t iommu_alloc_global_pasid(struct device *dev) 3609 { 3610 int ret; 3611 3612 /* max_pasids == 0 means that the device does not support PASID */ 3613 if (!dev->iommu->max_pasids) 3614 return IOMMU_PASID_INVALID; 3615 3616 /* 3617 * max_pasids is set up by vendor driver based on number of PASID bits 3618 * supported but the IDA allocation is inclusive. 3619 */ 3620 ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID, 3621 dev->iommu->max_pasids - 1, GFP_KERNEL); 3622 return ret < 0 ? IOMMU_PASID_INVALID : ret; 3623 } 3624 EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid); 3625 3626 void iommu_free_global_pasid(ioasid_t pasid) 3627 { 3628 if (WARN_ON(pasid == IOMMU_PASID_INVALID)) 3629 return; 3630 3631 ida_free(&iommu_global_pasid_ida, pasid); 3632 } 3633 EXPORT_SYMBOL_GPL(iommu_free_global_pasid); 3634 3635 /** 3636 * iommu_attach_handle_get - Return the attach handle 3637 * @group: the iommu group that domain was attached to 3638 * @pasid: the pasid within the group 3639 * @type: matched domain type, 0 for any match 3640 * 3641 * Return handle or ERR_PTR(-ENOENT) on none, ERR_PTR(-EBUSY) on mismatch. 3642 * 3643 * Return the attach handle to the caller. The life cycle of an iommu attach 3644 * handle is from the time when the domain is attached to the time when the 3645 * domain is detached. Callers are required to synchronize the call of 3646 * iommu_attach_handle_get() with domain attachment and detachment. The attach 3647 * handle can only be used during its life cycle. 3648 */ 3649 struct iommu_attach_handle * 3650 iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type) 3651 { 3652 struct iommu_attach_handle *handle; 3653 void *entry; 3654 3655 xa_lock(&group->pasid_array); 3656 entry = xa_load(&group->pasid_array, pasid); 3657 if (!entry || xa_pointer_tag(entry) != IOMMU_PASID_ARRAY_HANDLE) { 3658 handle = ERR_PTR(-ENOENT); 3659 } else { 3660 handle = xa_untag_pointer(entry); 3661 if (type && handle->domain->type != type) 3662 handle = ERR_PTR(-EBUSY); 3663 } 3664 xa_unlock(&group->pasid_array); 3665 3666 return handle; 3667 } 3668 EXPORT_SYMBOL_NS_GPL(iommu_attach_handle_get, "IOMMUFD_INTERNAL"); 3669 3670 /** 3671 * iommu_attach_group_handle - Attach an IOMMU domain to an IOMMU group 3672 * @domain: IOMMU domain to attach 3673 * @group: IOMMU group that will be attached 3674 * @handle: attach handle 3675 * 3676 * Returns 0 on success and error code on failure. 3677 * 3678 * This is a variant of iommu_attach_group(). 
It allows the caller to provide 3679 * an attach handle and use it when the domain is attached. This is currently 3680 * used by IOMMUFD to deliver the I/O page faults. 3681 * 3682 * Caller should always provide a new handle to avoid race with the paths 3683 * that have lockless reference to handle. 3684 */ 3685 int iommu_attach_group_handle(struct iommu_domain *domain, 3686 struct iommu_group *group, 3687 struct iommu_attach_handle *handle) 3688 { 3689 void *entry; 3690 int ret; 3691 3692 if (!handle) 3693 return -EINVAL; 3694 3695 mutex_lock(&group->mutex); 3696 entry = iommu_make_pasid_array_entry(domain, handle); 3697 ret = xa_insert(&group->pasid_array, 3698 IOMMU_NO_PASID, XA_ZERO_ENTRY, GFP_KERNEL); 3699 if (ret) 3700 goto out_unlock; 3701 3702 ret = __iommu_attach_group(domain, group); 3703 if (ret) { 3704 xa_release(&group->pasid_array, IOMMU_NO_PASID); 3705 goto out_unlock; 3706 } 3707 3708 /* 3709 * The xa_insert() above reserved the memory, and the group->mutex is 3710 * held, this cannot fail. The new domain cannot be visible until the 3711 * operation succeeds as we cannot tolerate PRIs becoming concurrently 3712 * queued and then failing attach. 3713 */ 3714 WARN_ON(xa_is_err(xa_store(&group->pasid_array, 3715 IOMMU_NO_PASID, entry, GFP_KERNEL))); 3716 3717 out_unlock: 3718 mutex_unlock(&group->mutex); 3719 return ret; 3720 } 3721 EXPORT_SYMBOL_NS_GPL(iommu_attach_group_handle, "IOMMUFD_INTERNAL"); 3722 3723 /** 3724 * iommu_detach_group_handle - Detach an IOMMU domain from an IOMMU group 3725 * @domain: IOMMU domain to attach 3726 * @group: IOMMU group that will be attached 3727 * 3728 * Detach the specified IOMMU domain from the specified IOMMU group. 3729 * It must be used in conjunction with iommu_attach_group_handle(). 3730 */ 3731 void iommu_detach_group_handle(struct iommu_domain *domain, 3732 struct iommu_group *group) 3733 { 3734 mutex_lock(&group->mutex); 3735 __iommu_group_set_core_domain(group); 3736 xa_erase(&group->pasid_array, IOMMU_NO_PASID); 3737 mutex_unlock(&group->mutex); 3738 } 3739 EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, "IOMMUFD_INTERNAL"); 3740 3741 /** 3742 * iommu_replace_group_handle - replace the domain that a group is attached to 3743 * @group: IOMMU group that will be attached to the new domain 3744 * @new_domain: new IOMMU domain to replace with 3745 * @handle: attach handle 3746 * 3747 * This API allows the group to switch domains without being forced to go to 3748 * the blocking domain in-between. It allows the caller to provide an attach 3749 * handle for the new domain and use it when the domain is attached. 3750 * 3751 * If the currently attached domain is a core domain (e.g. a default_domain), 3752 * it will act just like the iommu_attach_group_handle(). 3753 * 3754 * Caller should always provide a new handle to avoid race with the paths 3755 * that have lockless reference to handle. 
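 *
 * A minimal sketch (hwpt_old, hwpt_new and the handles are hypothetical
 * iommufd-style objects):
 *
 *	ret = iommu_attach_group_handle(hwpt_old->domain, group, &old->handle);
 *	...
 *	ret = iommu_replace_group_handle(group, hwpt_new->domain, &new->handle);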
3756 */ 3757 int iommu_replace_group_handle(struct iommu_group *group, 3758 struct iommu_domain *new_domain, 3759 struct iommu_attach_handle *handle) 3760 { 3761 void *curr, *entry; 3762 int ret; 3763 3764 if (!new_domain || !handle) 3765 return -EINVAL; 3766 3767 mutex_lock(&group->mutex); 3768 entry = iommu_make_pasid_array_entry(new_domain, handle); 3769 ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL); 3770 if (ret) 3771 goto err_unlock; 3772 3773 ret = __iommu_group_set_domain(group, new_domain); 3774 if (ret) 3775 goto err_release; 3776 3777 curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL); 3778 WARN_ON(xa_is_err(curr)); 3779 3780 mutex_unlock(&group->mutex); 3781 3782 return 0; 3783 err_release: 3784 xa_release(&group->pasid_array, IOMMU_NO_PASID); 3785 err_unlock: 3786 mutex_unlock(&group->mutex); 3787 return ret; 3788 } 3789 EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, "IOMMUFD_INTERNAL"); 3790 3791 #if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU) 3792 /** 3793 * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain 3794 * @desc: MSI descriptor, will store the MSI page 3795 * @msi_addr: MSI target address to be mapped 3796 * 3797 * The implementation of sw_msi() should take msi_addr and map it to 3798 * an IOVA in the domain and call msi_desc_set_iommu_msi_iova() with the 3799 * mapping information. 3800 * 3801 * Return: 0 on success or negative error code if the mapping failed. 3802 */ 3803 int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) 3804 { 3805 struct device *dev = msi_desc_to_dev(desc); 3806 struct iommu_group *group = dev->iommu_group; 3807 int ret = 0; 3808 3809 if (!group) 3810 return 0; 3811 3812 mutex_lock(&group->mutex); 3813 /* An IDENTITY domain must pass through */ 3814 if (group->domain && group->domain->type != IOMMU_DOMAIN_IDENTITY) { 3815 switch (group->domain->cookie_type) { 3816 case IOMMU_COOKIE_DMA_MSI: 3817 case IOMMU_COOKIE_DMA_IOVA: 3818 ret = iommu_dma_sw_msi(group->domain, desc, msi_addr); 3819 break; 3820 case IOMMU_COOKIE_IOMMUFD: 3821 ret = iommufd_sw_msi(group->domain, desc, msi_addr); 3822 break; 3823 default: 3824 ret = -EOPNOTSUPP; 3825 break; 3826 } 3827 } 3828 mutex_unlock(&group->mutex); 3829 return ret; 3830 } 3831 #endif /* CONFIG_IRQ_MSI_IOMMU */ 3832
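
/*
 * Example (illustrative): an MSI irqchip whose doorbell sits behind the IOMMU
 * would typically call iommu_dma_prepare_msi() while composing the message.
 * doorbell_phys and the surrounding error handling below are hypothetical:
 *
 *	err = iommu_dma_prepare_msi(desc, doorbell_phys);
 *	if (err)
 *		return err;
 */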