1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 4 * Author: Joerg Roedel <jroedel@suse.de> 5 */ 6 7 #define pr_fmt(fmt) "iommu: " fmt 8 9 #include <linux/amba/bus.h> 10 #include <linux/device.h> 11 #include <linux/kernel.h> 12 #include <linux/bits.h> 13 #include <linux/bug.h> 14 #include <linux/types.h> 15 #include <linux/init.h> 16 #include <linux/export.h> 17 #include <linux/slab.h> 18 #include <linux/errno.h> 19 #include <linux/host1x_context_bus.h> 20 #include <linux/iommu.h> 21 #include <linux/idr.h> 22 #include <linux/err.h> 23 #include <linux/pci.h> 24 #include <linux/pci-ats.h> 25 #include <linux/bitops.h> 26 #include <linux/platform_device.h> 27 #include <linux/property.h> 28 #include <linux/fsl/mc.h> 29 #include <linux/module.h> 30 #include <linux/cc_platform.h> 31 #include <linux/cdx/cdx_bus.h> 32 #include <trace/events/iommu.h> 33 #include <linux/sched/mm.h> 34 #include <linux/msi.h> 35 36 #include "dma-iommu.h" 37 #include "iommu-priv.h" 38 39 #include "iommu-sva.h" 40 41 static struct kset *iommu_group_kset; 42 static DEFINE_IDA(iommu_group_ida); 43 static DEFINE_IDA(iommu_global_pasid_ida); 44 45 static unsigned int iommu_def_domain_type __read_mostly; 46 static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT); 47 static u32 iommu_cmd_line __read_mostly; 48 49 struct iommu_group { 50 struct kobject kobj; 51 struct kobject *devices_kobj; 52 struct list_head devices; 53 struct xarray pasid_array; 54 struct mutex mutex; 55 void *iommu_data; 56 void (*iommu_data_release)(void *iommu_data); 57 char *name; 58 int id; 59 struct iommu_domain *default_domain; 60 struct iommu_domain *blocking_domain; 61 struct iommu_domain *domain; 62 struct list_head entry; 63 unsigned int owner_cnt; 64 void *owner; 65 }; 66 67 struct group_device { 68 struct list_head list; 69 struct device *dev; 70 char *name; 71 }; 72 73 /* Iterate over each struct group_device in a struct iommu_group */ 74 #define for_each_group_device(group, pos) \ 75 list_for_each_entry(pos, &(group)->devices, list) 76 77 struct iommu_group_attribute { 78 struct attribute attr; 79 ssize_t (*show)(struct iommu_group *group, char *buf); 80 ssize_t (*store)(struct iommu_group *group, 81 const char *buf, size_t count); 82 }; 83 84 static const char * const iommu_group_resv_type_string[] = { 85 [IOMMU_RESV_DIRECT] = "direct", 86 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable", 87 [IOMMU_RESV_RESERVED] = "reserved", 88 [IOMMU_RESV_MSI] = "msi", 89 [IOMMU_RESV_SW_MSI] = "msi", 90 }; 91 92 #define IOMMU_CMD_LINE_DMA_API BIT(0) 93 #define IOMMU_CMD_LINE_STRICT BIT(1) 94 95 static int iommu_bus_notifier(struct notifier_block *nb, 96 unsigned long action, void *data); 97 static void iommu_release_device(struct device *dev); 98 static struct iommu_domain * 99 __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type); 100 static int __iommu_attach_device(struct iommu_domain *domain, 101 struct device *dev); 102 static int __iommu_attach_group(struct iommu_domain *domain, 103 struct iommu_group *group); 104 105 enum { 106 IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0, 107 }; 108 109 static int __iommu_device_set_domain(struct iommu_group *group, 110 struct device *dev, 111 struct iommu_domain *new_domain, 112 unsigned int flags); 113 static int __iommu_group_set_domain_internal(struct iommu_group *group, 114 struct iommu_domain *new_domain, 115 unsigned int flags); 116 static int __iommu_group_set_domain(struct iommu_group *group, 117 struct 
iommu_domain *new_domain) 118 { 119 return __iommu_group_set_domain_internal(group, new_domain, 0); 120 } 121 static void __iommu_group_set_domain_nofail(struct iommu_group *group, 122 struct iommu_domain *new_domain) 123 { 124 WARN_ON(__iommu_group_set_domain_internal( 125 group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED)); 126 } 127 128 static int iommu_setup_default_domain(struct iommu_group *group, 129 int target_type); 130 static int iommu_create_device_direct_mappings(struct iommu_domain *domain, 131 struct device *dev); 132 static ssize_t iommu_group_store_type(struct iommu_group *group, 133 const char *buf, size_t count); 134 static struct group_device *iommu_group_alloc_device(struct iommu_group *group, 135 struct device *dev); 136 static void __iommu_group_free_device(struct iommu_group *group, 137 struct group_device *grp_dev); 138 139 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 140 struct iommu_group_attribute iommu_group_attr_##_name = \ 141 __ATTR(_name, _mode, _show, _store) 142 143 #define to_iommu_group_attr(_attr) \ 144 container_of(_attr, struct iommu_group_attribute, attr) 145 #define to_iommu_group(_kobj) \ 146 container_of(_kobj, struct iommu_group, kobj) 147 148 static LIST_HEAD(iommu_device_list); 149 static DEFINE_SPINLOCK(iommu_device_lock); 150 151 static struct bus_type * const iommu_buses[] = { 152 &platform_bus_type, 153 #ifdef CONFIG_PCI 154 &pci_bus_type, 155 #endif 156 #ifdef CONFIG_ARM_AMBA 157 &amba_bustype, 158 #endif 159 #ifdef CONFIG_FSL_MC_BUS 160 &fsl_mc_bus_type, 161 #endif 162 #ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS 163 &host1x_context_device_bus_type, 164 #endif 165 #ifdef CONFIG_CDX_BUS 166 &cdx_bus_type, 167 #endif 168 }; 169 170 /* 171 * Use a function instead of an array here because the domain-type is a 172 * bit-field, so an array would waste memory. 173 */ 174 static const char *iommu_domain_type_str(unsigned int t) 175 { 176 switch (t) { 177 case IOMMU_DOMAIN_BLOCKED: 178 return "Blocked"; 179 case IOMMU_DOMAIN_IDENTITY: 180 return "Passthrough"; 181 case IOMMU_DOMAIN_UNMANAGED: 182 return "Unmanaged"; 183 case IOMMU_DOMAIN_DMA: 184 case IOMMU_DOMAIN_DMA_FQ: 185 return "Translated"; 186 case IOMMU_DOMAIN_PLATFORM: 187 return "Platform"; 188 default: 189 return "Unknown"; 190 } 191 } 192 193 static int __init iommu_subsys_init(void) 194 { 195 struct notifier_block *nb; 196 197 if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) { 198 if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH)) 199 iommu_set_default_passthrough(false); 200 else 201 iommu_set_default_translated(false); 202 203 if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { 204 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n"); 205 iommu_set_default_translated(false); 206 } 207 } 208 209 if (!iommu_default_passthrough() && !iommu_dma_strict) 210 iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ; 211 212 pr_info("Default domain type: %s%s\n", 213 iommu_domain_type_str(iommu_def_domain_type), 214 (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ? 215 " (set via kernel command line)" : ""); 216 217 if (!iommu_default_passthrough()) 218 pr_info("DMA domain TLB invalidation policy: %s mode%s\n", 219 iommu_dma_strict ? "strict" : "lazy", 220 (iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ? 
221 " (set via kernel command line)" : ""); 222 223 nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL); 224 if (!nb) 225 return -ENOMEM; 226 227 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { 228 nb[i].notifier_call = iommu_bus_notifier; 229 bus_register_notifier(iommu_buses[i], &nb[i]); 230 } 231 232 return 0; 233 } 234 subsys_initcall(iommu_subsys_init); 235 236 static int remove_iommu_group(struct device *dev, void *data) 237 { 238 if (dev->iommu && dev->iommu->iommu_dev == data) 239 iommu_release_device(dev); 240 241 return 0; 242 } 243 244 /** 245 * iommu_device_register() - Register an IOMMU hardware instance 246 * @iommu: IOMMU handle for the instance 247 * @ops: IOMMU ops to associate with the instance 248 * @hwdev: (optional) actual instance device, used for fwnode lookup 249 * 250 * Return: 0 on success, or an error. 251 */ 252 int iommu_device_register(struct iommu_device *iommu, 253 const struct iommu_ops *ops, struct device *hwdev) 254 { 255 int err = 0; 256 257 /* We need to be able to take module references appropriately */ 258 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) 259 return -EINVAL; 260 /* 261 * Temporarily enforce global restriction to a single driver. This was 262 * already the de-facto behaviour, since any possible combination of 263 * existing drivers would compete for at least the PCI or platform bus. 264 */ 265 if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops) 266 return -EBUSY; 267 268 iommu->ops = ops; 269 if (hwdev) 270 iommu->fwnode = dev_fwnode(hwdev); 271 272 spin_lock(&iommu_device_lock); 273 list_add_tail(&iommu->list, &iommu_device_list); 274 spin_unlock(&iommu_device_lock); 275 276 for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) { 277 iommu_buses[i]->iommu_ops = ops; 278 err = bus_iommu_probe(iommu_buses[i]); 279 } 280 if (err) 281 iommu_device_unregister(iommu); 282 return err; 283 } 284 EXPORT_SYMBOL_GPL(iommu_device_register); 285 286 void iommu_device_unregister(struct iommu_device *iommu) 287 { 288 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) 289 bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group); 290 291 spin_lock(&iommu_device_lock); 292 list_del(&iommu->list); 293 spin_unlock(&iommu_device_lock); 294 295 /* Pairs with the alloc in generic_single_device_group() */ 296 iommu_group_put(iommu->singleton_group); 297 iommu->singleton_group = NULL; 298 } 299 EXPORT_SYMBOL_GPL(iommu_device_unregister); 300 301 #if IS_ENABLED(CONFIG_IOMMUFD_TEST) 302 void iommu_device_unregister_bus(struct iommu_device *iommu, 303 struct bus_type *bus, 304 struct notifier_block *nb) 305 { 306 bus_unregister_notifier(bus, nb); 307 iommu_device_unregister(iommu); 308 } 309 EXPORT_SYMBOL_GPL(iommu_device_unregister_bus); 310 311 /* 312 * Register an iommu driver against a single bus. This is only used by iommufd 313 * selftest to create a mock iommu driver. The caller must provide 314 * some memory to hold a notifier_block. 
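 *
 * A minimal usage sketch (illustrative only, not part of this file): a
 * selftest might pair this with iommu_device_unregister_bus(); "mock_iommu",
 * "mock_ops", "mock_bus" and "nb" are hypothetical selftest-owned objects.
 *
 *	err = iommu_device_register_bus(&mock_iommu, &mock_ops, &mock_bus, &nb);
 *	if (err)
 *		return err;
 *	...
 *	iommu_device_unregister_bus(&mock_iommu, &mock_bus, &nb);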
 */
int iommu_device_register_bus(struct iommu_device *iommu,
			      const struct iommu_ops *ops, struct bus_type *bus,
			      struct notifier_block *nb)
{
	int err;

	iommu->ops = ops;
	nb->notifier_call = iommu_bus_notifier;
	err = bus_register_notifier(bus, nb);
	if (err)
		return err;

	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	bus->iommu_ops = ops;
	err = bus_iommu_probe(bus);
	if (err) {
		iommu_device_unregister_bus(iommu, bus, nb);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register_bus);
#endif

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	dev->iommu = NULL;
	if (param->fwspec) {
		fwnode_handle_put(param->fwspec->iommu_fwnode);
		kfree(param->fwspec);
	}
	kfree(param);
}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{
	u32 max_pasids = 0, bits = 0;
	int ret;

	if (dev_is_pci(dev)) {
		ret = pci_max_pasids(to_pci_dev(dev));
		if (ret > 0)
			max_pasids = ret;
	} else {
		ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
		if (!ret)
			max_pasids = 1UL << bits;
	}

	return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}

/*
 * Init the dev->iommu and dev->iommu_group in the struct device and get the
 * driver probed
 */
static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
{
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	int ret;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto err_module_put;
	}
	dev->iommu->iommu_dev = iommu_dev;

	ret = iommu_device_link(iommu_dev, dev);
	if (ret)
		goto err_release;

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		group = ERR_PTR(-EINVAL);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto err_unlink;
	}
	dev->iommu_group = group;

	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
	if (ops->is_attach_deferred)
		dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
	return 0;

err_unlink:
	iommu_device_unlink(iommu_dev, dev);
err_release:
	if (ops->release_device)
		ops->release_device(dev);
err_module_put:
	module_put(ops->owner);
err_free:
	dev->iommu->iommu_dev = NULL;
	dev_iommu_free(dev);
	return ret;
}

static void iommu_deinit_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	lockdep_assert_held(&group->mutex);

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	/*
	 * release_device() must stop using any attached domain on the device.
	 * If there are still other devices in the group they are not affected
	 * by this callback.
458 * 459 * The IOMMU driver must set the device to either an identity or 460 * blocking translation and stop using any domain pointer, as it is 461 * going to be freed. 462 */ 463 if (ops->release_device) 464 ops->release_device(dev); 465 466 /* 467 * If this is the last driver to use the group then we must free the 468 * domains before we do the module_put(). 469 */ 470 if (list_empty(&group->devices)) { 471 if (group->default_domain) { 472 iommu_domain_free(group->default_domain); 473 group->default_domain = NULL; 474 } 475 if (group->blocking_domain) { 476 iommu_domain_free(group->blocking_domain); 477 group->blocking_domain = NULL; 478 } 479 group->domain = NULL; 480 } 481 482 /* Caller must put iommu_group */ 483 dev->iommu_group = NULL; 484 module_put(ops->owner); 485 dev_iommu_free(dev); 486 } 487 488 static int __iommu_probe_device(struct device *dev, struct list_head *group_list) 489 { 490 const struct iommu_ops *ops = dev->bus->iommu_ops; 491 struct iommu_group *group; 492 static DEFINE_MUTEX(iommu_probe_device_lock); 493 struct group_device *gdev; 494 int ret; 495 496 if (!ops) 497 return -ENODEV; 498 /* 499 * Serialise to avoid races between IOMMU drivers registering in 500 * parallel and/or the "replay" calls from ACPI/OF code via client 501 * driver probe. Once the latter have been cleaned up we should 502 * probably be able to use device_lock() here to minimise the scope, 503 * but for now enforcing a simple global ordering is fine. 504 */ 505 mutex_lock(&iommu_probe_device_lock); 506 507 /* Device is probed already if in a group */ 508 if (dev->iommu_group) { 509 ret = 0; 510 goto out_unlock; 511 } 512 513 ret = iommu_init_device(dev, ops); 514 if (ret) 515 goto out_unlock; 516 517 group = dev->iommu_group; 518 gdev = iommu_group_alloc_device(group, dev); 519 mutex_lock(&group->mutex); 520 if (IS_ERR(gdev)) { 521 ret = PTR_ERR(gdev); 522 goto err_put_group; 523 } 524 525 /* 526 * The gdev must be in the list before calling 527 * iommu_setup_default_domain() 528 */ 529 list_add_tail(&gdev->list, &group->devices); 530 WARN_ON(group->default_domain && !group->domain); 531 if (group->default_domain) 532 iommu_create_device_direct_mappings(group->default_domain, dev); 533 if (group->domain) { 534 ret = __iommu_device_set_domain(group, dev, group->domain, 0); 535 if (ret) 536 goto err_remove_gdev; 537 } else if (!group->default_domain && !group_list) { 538 ret = iommu_setup_default_domain(group, 0); 539 if (ret) 540 goto err_remove_gdev; 541 } else if (!group->default_domain) { 542 /* 543 * With a group_list argument we defer the default_domain setup 544 * to the caller by providing a de-duplicated list of groups 545 * that need further setup. 
546 */ 547 if (list_empty(&group->entry)) 548 list_add_tail(&group->entry, group_list); 549 } 550 mutex_unlock(&group->mutex); 551 mutex_unlock(&iommu_probe_device_lock); 552 553 if (dev_is_pci(dev)) 554 iommu_dma_set_pci_32bit_workaround(dev); 555 556 return 0; 557 558 err_remove_gdev: 559 list_del(&gdev->list); 560 __iommu_group_free_device(group, gdev); 561 err_put_group: 562 iommu_deinit_device(dev); 563 mutex_unlock(&group->mutex); 564 iommu_group_put(group); 565 out_unlock: 566 mutex_unlock(&iommu_probe_device_lock); 567 568 return ret; 569 } 570 571 int iommu_probe_device(struct device *dev) 572 { 573 const struct iommu_ops *ops; 574 int ret; 575 576 ret = __iommu_probe_device(dev, NULL); 577 if (ret) 578 return ret; 579 580 ops = dev_iommu_ops(dev); 581 if (ops->probe_finalize) 582 ops->probe_finalize(dev); 583 584 return 0; 585 } 586 587 static void __iommu_group_free_device(struct iommu_group *group, 588 struct group_device *grp_dev) 589 { 590 struct device *dev = grp_dev->dev; 591 592 sysfs_remove_link(group->devices_kobj, grp_dev->name); 593 sysfs_remove_link(&dev->kobj, "iommu_group"); 594 595 trace_remove_device_from_group(group->id, dev); 596 597 /* 598 * If the group has become empty then ownership must have been 599 * released, and the current domain must be set back to NULL or 600 * the default domain. 601 */ 602 if (list_empty(&group->devices)) 603 WARN_ON(group->owner_cnt || 604 group->domain != group->default_domain); 605 606 kfree(grp_dev->name); 607 kfree(grp_dev); 608 } 609 610 /* Remove the iommu_group from the struct device. */ 611 static void __iommu_group_remove_device(struct device *dev) 612 { 613 struct iommu_group *group = dev->iommu_group; 614 struct group_device *device; 615 616 mutex_lock(&group->mutex); 617 for_each_group_device(group, device) { 618 if (device->dev != dev) 619 continue; 620 621 list_del(&device->list); 622 __iommu_group_free_device(group, device); 623 if (dev->iommu && dev->iommu->iommu_dev) 624 iommu_deinit_device(dev); 625 else 626 dev->iommu_group = NULL; 627 break; 628 } 629 mutex_unlock(&group->mutex); 630 631 /* 632 * Pairs with the get in iommu_init_device() or 633 * iommu_group_add_device() 634 */ 635 iommu_group_put(group); 636 } 637 638 static void iommu_release_device(struct device *dev) 639 { 640 struct iommu_group *group = dev->iommu_group; 641 642 if (group) 643 __iommu_group_remove_device(dev); 644 645 /* Free any fwspec if no iommu_driver was ever attached */ 646 if (dev->iommu) 647 dev_iommu_free(dev); 648 } 649 650 static int __init iommu_set_def_domain_type(char *str) 651 { 652 bool pt; 653 int ret; 654 655 ret = kstrtobool(str, &pt); 656 if (ret) 657 return ret; 658 659 if (pt) 660 iommu_set_default_passthrough(true); 661 else 662 iommu_set_default_translated(true); 663 664 return 0; 665 } 666 early_param("iommu.passthrough", iommu_set_def_domain_type); 667 668 static int __init iommu_dma_setup(char *str) 669 { 670 int ret = kstrtobool(str, &iommu_dma_strict); 671 672 if (!ret) 673 iommu_cmd_line |= IOMMU_CMD_LINE_STRICT; 674 return ret; 675 } 676 early_param("iommu.strict", iommu_dma_setup); 677 678 void iommu_set_dma_strict(void) 679 { 680 iommu_dma_strict = true; 681 if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ) 682 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 683 } 684 685 static ssize_t iommu_group_attr_show(struct kobject *kobj, 686 struct attribute *__attr, char *buf) 687 { 688 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 689 struct iommu_group *group = to_iommu_group(kobj); 690 ssize_t ret 
= -EIO; 691 692 if (attr->show) 693 ret = attr->show(group, buf); 694 return ret; 695 } 696 697 static ssize_t iommu_group_attr_store(struct kobject *kobj, 698 struct attribute *__attr, 699 const char *buf, size_t count) 700 { 701 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 702 struct iommu_group *group = to_iommu_group(kobj); 703 ssize_t ret = -EIO; 704 705 if (attr->store) 706 ret = attr->store(group, buf, count); 707 return ret; 708 } 709 710 static const struct sysfs_ops iommu_group_sysfs_ops = { 711 .show = iommu_group_attr_show, 712 .store = iommu_group_attr_store, 713 }; 714 715 static int iommu_group_create_file(struct iommu_group *group, 716 struct iommu_group_attribute *attr) 717 { 718 return sysfs_create_file(&group->kobj, &attr->attr); 719 } 720 721 static void iommu_group_remove_file(struct iommu_group *group, 722 struct iommu_group_attribute *attr) 723 { 724 sysfs_remove_file(&group->kobj, &attr->attr); 725 } 726 727 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) 728 { 729 return sysfs_emit(buf, "%s\n", group->name); 730 } 731 732 /** 733 * iommu_insert_resv_region - Insert a new region in the 734 * list of reserved regions. 735 * @new: new region to insert 736 * @regions: list of regions 737 * 738 * Elements are sorted by start address and overlapping segments 739 * of the same type are merged. 740 */ 741 static int iommu_insert_resv_region(struct iommu_resv_region *new, 742 struct list_head *regions) 743 { 744 struct iommu_resv_region *iter, *tmp, *nr, *top; 745 LIST_HEAD(stack); 746 747 nr = iommu_alloc_resv_region(new->start, new->length, 748 new->prot, new->type, GFP_KERNEL); 749 if (!nr) 750 return -ENOMEM; 751 752 /* First add the new element based on start address sorting */ 753 list_for_each_entry(iter, regions, list) { 754 if (nr->start < iter->start || 755 (nr->start == iter->start && nr->type <= iter->type)) 756 break; 757 } 758 list_add_tail(&nr->list, &iter->list); 759 760 /* Merge overlapping segments of type nr->type in @regions, if any */ 761 list_for_each_entry_safe(iter, tmp, regions, list) { 762 phys_addr_t top_end, iter_end = iter->start + iter->length - 1; 763 764 /* no merge needed on elements of different types than @new */ 765 if (iter->type != new->type) { 766 list_move_tail(&iter->list, &stack); 767 continue; 768 } 769 770 /* look for the last stack element of same type as @iter */ 771 list_for_each_entry_reverse(top, &stack, list) 772 if (top->type == iter->type) 773 goto check_overlap; 774 775 list_move_tail(&iter->list, &stack); 776 continue; 777 778 check_overlap: 779 top_end = top->start + top->length - 1; 780 781 if (iter->start > top_end + 1) { 782 list_move_tail(&iter->list, &stack); 783 } else { 784 top->length = max(top_end, iter_end) - top->start + 1; 785 list_del(&iter->list); 786 kfree(iter); 787 } 788 } 789 list_splice(&stack, regions); 790 return 0; 791 } 792 793 static int 794 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, 795 struct list_head *group_resv_regions) 796 { 797 struct iommu_resv_region *entry; 798 int ret = 0; 799 800 list_for_each_entry(entry, dev_resv_regions, list) { 801 ret = iommu_insert_resv_region(entry, group_resv_regions); 802 if (ret) 803 break; 804 } 805 return ret; 806 } 807 808 int iommu_get_group_resv_regions(struct iommu_group *group, 809 struct list_head *head) 810 { 811 struct group_device *device; 812 int ret = 0; 813 814 mutex_lock(&group->mutex); 815 for_each_group_device(group, device) { 816 struct list_head dev_resv_regions; 817 
818 /* 819 * Non-API groups still expose reserved_regions in sysfs, 820 * so filter out calls that get here that way. 821 */ 822 if (!device->dev->iommu) 823 break; 824 825 INIT_LIST_HEAD(&dev_resv_regions); 826 iommu_get_resv_regions(device->dev, &dev_resv_regions); 827 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); 828 iommu_put_resv_regions(device->dev, &dev_resv_regions); 829 if (ret) 830 break; 831 } 832 mutex_unlock(&group->mutex); 833 return ret; 834 } 835 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); 836 837 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, 838 char *buf) 839 { 840 struct iommu_resv_region *region, *next; 841 struct list_head group_resv_regions; 842 int offset = 0; 843 844 INIT_LIST_HEAD(&group_resv_regions); 845 iommu_get_group_resv_regions(group, &group_resv_regions); 846 847 list_for_each_entry_safe(region, next, &group_resv_regions, list) { 848 offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n", 849 (long long)region->start, 850 (long long)(region->start + 851 region->length - 1), 852 iommu_group_resv_type_string[region->type]); 853 kfree(region); 854 } 855 856 return offset; 857 } 858 859 static ssize_t iommu_group_show_type(struct iommu_group *group, 860 char *buf) 861 { 862 char *type = "unknown"; 863 864 mutex_lock(&group->mutex); 865 if (group->default_domain) { 866 switch (group->default_domain->type) { 867 case IOMMU_DOMAIN_BLOCKED: 868 type = "blocked"; 869 break; 870 case IOMMU_DOMAIN_IDENTITY: 871 type = "identity"; 872 break; 873 case IOMMU_DOMAIN_UNMANAGED: 874 type = "unmanaged"; 875 break; 876 case IOMMU_DOMAIN_DMA: 877 type = "DMA"; 878 break; 879 case IOMMU_DOMAIN_DMA_FQ: 880 type = "DMA-FQ"; 881 break; 882 } 883 } 884 mutex_unlock(&group->mutex); 885 886 return sysfs_emit(buf, "%s\n", type); 887 } 888 889 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); 890 891 static IOMMU_GROUP_ATTR(reserved_regions, 0444, 892 iommu_group_show_resv_regions, NULL); 893 894 static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type, 895 iommu_group_store_type); 896 897 static void iommu_group_release(struct kobject *kobj) 898 { 899 struct iommu_group *group = to_iommu_group(kobj); 900 901 pr_debug("Releasing group %d\n", group->id); 902 903 if (group->iommu_data_release) 904 group->iommu_data_release(group->iommu_data); 905 906 ida_free(&iommu_group_ida, group->id); 907 908 /* Domains are free'd by iommu_deinit_device() */ 909 WARN_ON(group->default_domain); 910 WARN_ON(group->blocking_domain); 911 912 kfree(group->name); 913 kfree(group); 914 } 915 916 static const struct kobj_type iommu_group_ktype = { 917 .sysfs_ops = &iommu_group_sysfs_ops, 918 .release = iommu_group_release, 919 }; 920 921 /** 922 * iommu_group_alloc - Allocate a new group 923 * 924 * This function is called by an iommu driver to allocate a new iommu 925 * group. The iommu group represents the minimum granularity of the iommu. 926 * Upon successful return, the caller holds a reference to the supplied 927 * group in order to hold the group until devices are added. Use 928 * iommu_group_put() to release this extra reference count, allowing the 929 * group to be automatically reclaimed once it has no devices or external 930 * references. 
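 *
 * A minimal usage sketch (illustrative only): a driver's ->device_group()
 * callback typically just returns the new group; callers that keep the
 * group around longer drop their reference with iommu_group_put() when done.
 *
 *	struct iommu_group *group = iommu_group_alloc();
 *
 *	if (IS_ERR(group))
 *		return group;
 *	...
 *	iommu_group_put(group);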
931 */ 932 struct iommu_group *iommu_group_alloc(void) 933 { 934 struct iommu_group *group; 935 int ret; 936 937 group = kzalloc(sizeof(*group), GFP_KERNEL); 938 if (!group) 939 return ERR_PTR(-ENOMEM); 940 941 group->kobj.kset = iommu_group_kset; 942 mutex_init(&group->mutex); 943 INIT_LIST_HEAD(&group->devices); 944 INIT_LIST_HEAD(&group->entry); 945 xa_init(&group->pasid_array); 946 947 ret = ida_alloc(&iommu_group_ida, GFP_KERNEL); 948 if (ret < 0) { 949 kfree(group); 950 return ERR_PTR(ret); 951 } 952 group->id = ret; 953 954 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, 955 NULL, "%d", group->id); 956 if (ret) { 957 kobject_put(&group->kobj); 958 return ERR_PTR(ret); 959 } 960 961 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); 962 if (!group->devices_kobj) { 963 kobject_put(&group->kobj); /* triggers .release & free */ 964 return ERR_PTR(-ENOMEM); 965 } 966 967 /* 968 * The devices_kobj holds a reference on the group kobject, so 969 * as long as that exists so will the group. We can therefore 970 * use the devices_kobj for reference counting. 971 */ 972 kobject_put(&group->kobj); 973 974 ret = iommu_group_create_file(group, 975 &iommu_group_attr_reserved_regions); 976 if (ret) { 977 kobject_put(group->devices_kobj); 978 return ERR_PTR(ret); 979 } 980 981 ret = iommu_group_create_file(group, &iommu_group_attr_type); 982 if (ret) { 983 kobject_put(group->devices_kobj); 984 return ERR_PTR(ret); 985 } 986 987 pr_debug("Allocated group %d\n", group->id); 988 989 return group; 990 } 991 EXPORT_SYMBOL_GPL(iommu_group_alloc); 992 993 /** 994 * iommu_group_get_iommudata - retrieve iommu_data registered for a group 995 * @group: the group 996 * 997 * iommu drivers can store data in the group for use when doing iommu 998 * operations. This function provides a way to retrieve it. Caller 999 * should hold a group reference. 1000 */ 1001 void *iommu_group_get_iommudata(struct iommu_group *group) 1002 { 1003 return group->iommu_data; 1004 } 1005 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata); 1006 1007 /** 1008 * iommu_group_set_iommudata - set iommu_data for a group 1009 * @group: the group 1010 * @iommu_data: new data 1011 * @release: release function for iommu_data 1012 * 1013 * iommu drivers can store data in the group for use when doing iommu 1014 * operations. This function provides a way to set the data after 1015 * the group has been allocated. Caller should hold a group reference. 1016 */ 1017 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, 1018 void (*release)(void *iommu_data)) 1019 { 1020 group->iommu_data = iommu_data; 1021 group->iommu_data_release = release; 1022 } 1023 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata); 1024 1025 /** 1026 * iommu_group_set_name - set name for a group 1027 * @group: the group 1028 * @name: name 1029 * 1030 * Allow iommu driver to set a name for a group. When set it will 1031 * appear in a name attribute file under the group in sysfs. 
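 *
 * Illustrative sketch (the group pointer and the name chosen here are
 * hypothetical):
 *
 *	ret = iommu_group_set_name(group, "my-iommu-unit");
 *	if (ret)
 *		return ret;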
1032 */ 1033 int iommu_group_set_name(struct iommu_group *group, const char *name) 1034 { 1035 int ret; 1036 1037 if (group->name) { 1038 iommu_group_remove_file(group, &iommu_group_attr_name); 1039 kfree(group->name); 1040 group->name = NULL; 1041 if (!name) 1042 return 0; 1043 } 1044 1045 group->name = kstrdup(name, GFP_KERNEL); 1046 if (!group->name) 1047 return -ENOMEM; 1048 1049 ret = iommu_group_create_file(group, &iommu_group_attr_name); 1050 if (ret) { 1051 kfree(group->name); 1052 group->name = NULL; 1053 return ret; 1054 } 1055 1056 return 0; 1057 } 1058 EXPORT_SYMBOL_GPL(iommu_group_set_name); 1059 1060 static int iommu_create_device_direct_mappings(struct iommu_domain *domain, 1061 struct device *dev) 1062 { 1063 struct iommu_resv_region *entry; 1064 struct list_head mappings; 1065 unsigned long pg_size; 1066 int ret = 0; 1067 1068 pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0; 1069 INIT_LIST_HEAD(&mappings); 1070 1071 if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size)) 1072 return -EINVAL; 1073 1074 iommu_get_resv_regions(dev, &mappings); 1075 1076 /* We need to consider overlapping regions for different devices */ 1077 list_for_each_entry(entry, &mappings, list) { 1078 dma_addr_t start, end, addr; 1079 size_t map_size = 0; 1080 1081 if (entry->type == IOMMU_RESV_DIRECT) 1082 dev->iommu->require_direct = 1; 1083 1084 if ((entry->type != IOMMU_RESV_DIRECT && 1085 entry->type != IOMMU_RESV_DIRECT_RELAXABLE) || 1086 !iommu_is_dma_domain(domain)) 1087 continue; 1088 1089 start = ALIGN(entry->start, pg_size); 1090 end = ALIGN(entry->start + entry->length, pg_size); 1091 1092 for (addr = start; addr <= end; addr += pg_size) { 1093 phys_addr_t phys_addr; 1094 1095 if (addr == end) 1096 goto map_end; 1097 1098 phys_addr = iommu_iova_to_phys(domain, addr); 1099 if (!phys_addr) { 1100 map_size += pg_size; 1101 continue; 1102 } 1103 1104 map_end: 1105 if (map_size) { 1106 ret = iommu_map(domain, addr - map_size, 1107 addr - map_size, map_size, 1108 entry->prot, GFP_KERNEL); 1109 if (ret) 1110 goto out; 1111 map_size = 0; 1112 } 1113 } 1114 1115 } 1116 1117 if (!list_empty(&mappings) && iommu_is_dma_domain(domain)) 1118 iommu_flush_iotlb_all(domain); 1119 1120 out: 1121 iommu_put_resv_regions(dev, &mappings); 1122 1123 return ret; 1124 } 1125 1126 /* This is undone by __iommu_group_free_device() */ 1127 static struct group_device *iommu_group_alloc_device(struct iommu_group *group, 1128 struct device *dev) 1129 { 1130 int ret, i = 0; 1131 struct group_device *device; 1132 1133 device = kzalloc(sizeof(*device), GFP_KERNEL); 1134 if (!device) 1135 return ERR_PTR(-ENOMEM); 1136 1137 device->dev = dev; 1138 1139 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); 1140 if (ret) 1141 goto err_free_device; 1142 1143 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); 1144 rename: 1145 if (!device->name) { 1146 ret = -ENOMEM; 1147 goto err_remove_link; 1148 } 1149 1150 ret = sysfs_create_link_nowarn(group->devices_kobj, 1151 &dev->kobj, device->name); 1152 if (ret) { 1153 if (ret == -EEXIST && i >= 0) { 1154 /* 1155 * Account for the slim chance of collision 1156 * and append an instance to the name. 
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return device;

err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ERR_PTR(ret);
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	struct group_device *gdev;

	gdev = iommu_group_alloc_device(group, dev);
	if (IS_ERR(gdev))
		return PTR_ERR(gdev);

	iommu_group_ref_get(group);
	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&gdev->list, &group->devices);
	mutex_unlock(&group->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	__iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
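 *
 * A typical pairing (illustrative only) releases the reference with
 * iommu_group_put() once the group is no longer needed:
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (!group)
 *		return -ENODEV;
 *	...
 *	iommu_group_put(group);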
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
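 *
 * Illustrative pairing with iommu_register_device_fault_handler() (the
 * "my_handler" and "my_data" symbols are hypothetical):
 *
 *	ret = iommu_register_device_fault_handler(dev, my_handler, my_data);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_unregister_device_fault_handler(dev);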
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (!ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
1579 */ 1580 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1581 unsigned long *devfns) 1582 { 1583 struct pci_dev *tmp = NULL; 1584 struct iommu_group *group; 1585 1586 if (test_and_set_bit(pdev->devfn & 0xff, devfns)) 1587 return NULL; 1588 1589 group = iommu_group_get(&pdev->dev); 1590 if (group) 1591 return group; 1592 1593 for_each_pci_dev(tmp) { 1594 if (tmp == pdev || tmp->bus != pdev->bus) 1595 continue; 1596 1597 /* We alias them or they alias us */ 1598 if (pci_devs_are_dma_aliases(pdev, tmp)) { 1599 group = get_pci_alias_group(tmp, devfns); 1600 if (group) { 1601 pci_dev_put(tmp); 1602 return group; 1603 } 1604 1605 group = get_pci_function_alias_group(tmp, devfns); 1606 if (group) { 1607 pci_dev_put(tmp); 1608 return group; 1609 } 1610 } 1611 } 1612 1613 return NULL; 1614 } 1615 1616 struct group_for_pci_data { 1617 struct pci_dev *pdev; 1618 struct iommu_group *group; 1619 }; 1620 1621 /* 1622 * DMA alias iterator callback, return the last seen device. Stop and return 1623 * the IOMMU group if we find one along the way. 1624 */ 1625 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) 1626 { 1627 struct group_for_pci_data *data = opaque; 1628 1629 data->pdev = pdev; 1630 data->group = iommu_group_get(&pdev->dev); 1631 1632 return data->group != NULL; 1633 } 1634 1635 /* 1636 * Generic device_group call-back function. It just allocates one 1637 * iommu-group per device. 1638 */ 1639 struct iommu_group *generic_device_group(struct device *dev) 1640 { 1641 return iommu_group_alloc(); 1642 } 1643 EXPORT_SYMBOL_GPL(generic_device_group); 1644 1645 /* 1646 * Generic device_group call-back function. It just allocates one 1647 * iommu-group per iommu driver instance shared by every device 1648 * probed by that iommu driver. 1649 */ 1650 struct iommu_group *generic_single_device_group(struct device *dev) 1651 { 1652 struct iommu_device *iommu = dev->iommu->iommu_dev; 1653 1654 if (!iommu->singleton_group) { 1655 struct iommu_group *group; 1656 1657 group = iommu_group_alloc(); 1658 if (IS_ERR(group)) 1659 return group; 1660 iommu->singleton_group = group; 1661 } 1662 return iommu_group_ref_get(iommu->singleton_group); 1663 } 1664 EXPORT_SYMBOL_GPL(generic_single_device_group); 1665 1666 /* 1667 * Use standard PCI bus topology, isolation features, and DMA alias quirks 1668 * to find or create an IOMMU group for a device. 1669 */ 1670 struct iommu_group *pci_device_group(struct device *dev) 1671 { 1672 struct pci_dev *pdev = to_pci_dev(dev); 1673 struct group_for_pci_data data; 1674 struct pci_bus *bus; 1675 struct iommu_group *group = NULL; 1676 u64 devfns[4] = { 0 }; 1677 1678 if (WARN_ON(!dev_is_pci(dev))) 1679 return ERR_PTR(-EINVAL); 1680 1681 /* 1682 * Find the upstream DMA alias for the device. A device must not 1683 * be aliased due to topology in order to have its own IOMMU group. 1684 * If we find an alias along the way that already belongs to a 1685 * group, use it. 1686 */ 1687 if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data)) 1688 return data.group; 1689 1690 pdev = data.pdev; 1691 1692 /* 1693 * Continue upstream from the point of minimum IOMMU granularity 1694 * due to aliases to the point where devices are protected from 1695 * peer-to-peer DMA by PCI ACS. Again, if we find an existing 1696 * group, use it. 
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static struct iommu_domain *
__iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
	if (group->default_domain && group->default_domain->type == req_type)
		return group->default_domain;
	return __iommu_group_domain_alloc(group, req_type);
}

/*
 * Returns the iommu_ops for the devices in an iommu group.
 *
 * It is assumed that all devices in an iommu group are managed by a single
 * IOMMU unit. Therefore, this returns the dev_iommu_ops of the first device
 * in the group.
 */
static const struct iommu_ops *group_iommu_ops(struct iommu_group *group)
{
	struct group_device *device =
		list_first_entry(&group->devices, struct group_device, list);

	lockdep_assert_held(&group->mutex);

	return dev_iommu_ops(device->dev);
}

/*
 * req_type of 0 means "auto" which means to select a domain based on
 * iommu_def_domain_type or what the driver actually supports.
 */
static struct iommu_domain *
iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
	const struct iommu_ops *ops = group_iommu_ops(group);
	struct iommu_domain *dom;

	lockdep_assert_held(&group->mutex);

	/*
	 * Allow legacy drivers to specify the domain that will be the default
	 * domain. This should always be either an IDENTITY/BLOCKED/PLATFORM
	 * domain. Do not use in new drivers.
	 */
	if (ops->default_domain) {
		if (req_type)
			return NULL;
		return ops->default_domain;
	}

	if (req_type)
		return __iommu_group_alloc_default_domain(group, req_type);

	/* The driver gave no guidance on what type to use, try the default */
	dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type);
	if (dom)
		return dom;

	/* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
		return NULL;
	dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA);
	if (!dom)
		return NULL;

	pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
		iommu_def_domain_type, group->name);
	return dom;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
	struct list_head *group_list = data;
	int ret;

	ret = __iommu_probe_device(dev, group_list);
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	return 0;
}

/*
 * Combine the driver's chosen def_domain_type across all the devices in a
 * group. Drivers must give a consistent result.
 */
static int iommu_get_def_domain_type(struct iommu_group *group,
				     struct device *dev, int cur_type)
{
	const struct iommu_ops *ops = group_iommu_ops(group);
	int type;

	if (!ops->def_domain_type)
		return cur_type;

	type = ops->def_domain_type(dev);
	if (!type || cur_type == type)
		return cur_type;
	if (!cur_type)
		return type;

	dev_err_ratelimited(
		dev,
		"IOMMU driver error, requesting conflicting def_domain_type, %s and %s, for devices in group %u.\n",
		iommu_domain_type_str(cur_type), iommu_domain_type_str(type),
		group->id);

	/*
	 * Try to recover, drivers are allowed to force IDENTITY or DMA, IDENTITY
	 * takes precedence.
	 */
	if (type == IOMMU_DOMAIN_IDENTITY)
		return type;
	return cur_type;
}

/*
 * A target_type of 0 will select the best domain type. 0 can be returned in
 * this case meaning the global default should be used.
 */
static int iommu_get_default_domain_type(struct iommu_group *group,
					 int target_type)
{
	struct device *untrusted = NULL;
	struct group_device *gdev;
	int driver_type = 0;

	lockdep_assert_held(&group->mutex);

	/*
	 * ARM32 drivers supporting CONFIG_ARM_DMA_USE_IOMMU can declare an
	 * identity_domain and it will automatically become their default
	 * domain. Later on ARM_DMA_USE_IOMMU will install its UNMANAGED domain.
	 * Override the selection to IDENTITY.
	 */
	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
		static_assert(!(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) &&
				IS_ENABLED(CONFIG_IOMMU_DMA)));
		driver_type = IOMMU_DOMAIN_IDENTITY;
	}

	for_each_group_device(group, gdev) {
		driver_type = iommu_get_def_domain_type(group, gdev->dev,
							driver_type);

		if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->untrusted) {
			/*
			 * No ARM32 using systems will set untrusted, it cannot
			 * work.
			 */
			if (WARN_ON(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)))
				return -1;
			untrusted = gdev->dev;
		}
	}

	/*
	 * If the common dma ops are not selected in kconfig then we cannot use
	 * IOMMU_DOMAIN_DMA at all. Force IDENTITY if nothing else has been
	 * selected.
	 */
	if (!IS_ENABLED(CONFIG_IOMMU_DMA)) {
		if (WARN_ON(driver_type == IOMMU_DOMAIN_DMA))
			return -1;
		if (!driver_type)
			driver_type = IOMMU_DOMAIN_IDENTITY;
	}

	if (untrusted) {
		if (driver_type && driver_type != IOMMU_DOMAIN_DMA) {
			dev_err_ratelimited(
				untrusted,
				"Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n",
				group->id, iommu_domain_type_str(driver_type));
			return -1;
		}
		driver_type = IOMMU_DOMAIN_DMA;
	}

	if (target_type) {
		if (driver_type && target_type != driver_type)
			return -1;
		return target_type;
	}
	return driver_type;
}

static void iommu_group_do_probe_finalize(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);
}

int bus_iommu_probe(const struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		struct group_device *gdev;

		mutex_lock(&group->mutex);

		/* Remove item from the list */
		list_del_init(&group->entry);

		/*
		 * We go to the trouble of deferred default domain creation so
		 * that the cross-group default domain type and the setup of the
		 * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios.
		 */
		ret = iommu_setup_default_domain(group, 0);
		if (ret) {
			mutex_unlock(&group->mutex);
			return ret;
		}
		mutex_unlock(&group->mutex);

		/*
		 * FIXME: Mis-locked because the ops->probe_finalize() call-back
		 * of some IOMMU drivers calls arm_iommu_attach_device() which
		 * in-turn might call back into IOMMU core code, where it tries
		 * to take group->mutex, resulting in a deadlock.
		 */
		for_each_group_device(group, gdev)
			iommu_group_do_probe_finalize(gdev->dev);
	}

	return 0;
}

bool iommu_present(const struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * device_iommu_capable() - check for a general IOMMU capability
 * @dev: device to which the capability would be relevant, if available
 * @cap: IOMMU capability
 *
 * Return: true if an IOMMU is present and supports the given capability
 * for the given device, otherwise false.
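 *
 * Illustrative check (IOMMU_CAP_CACHE_COHERENCY is just an example
 * capability; the helper called on success is hypothetical):
 *
 *	if (device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
 *		enable_coherent_path(dev);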
2019 */ 2020 bool device_iommu_capable(struct device *dev, enum iommu_cap cap) 2021 { 2022 const struct iommu_ops *ops; 2023 2024 if (!dev->iommu || !dev->iommu->iommu_dev) 2025 return false; 2026 2027 ops = dev_iommu_ops(dev); 2028 if (!ops->capable) 2029 return false; 2030 2031 return ops->capable(dev, cap); 2032 } 2033 EXPORT_SYMBOL_GPL(device_iommu_capable); 2034 2035 /** 2036 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi() 2037 * for a group 2038 * @group: Group to query 2039 * 2040 * IOMMU groups should not have differing values of 2041 * msi_device_has_isolated_msi() for devices in a group. However nothing 2042 * directly prevents this, so ensure mistakes don't result in isolation failures 2043 * by checking that all the devices are the same. 2044 */ 2045 bool iommu_group_has_isolated_msi(struct iommu_group *group) 2046 { 2047 struct group_device *group_dev; 2048 bool ret = true; 2049 2050 mutex_lock(&group->mutex); 2051 for_each_group_device(group, group_dev) 2052 ret &= msi_device_has_isolated_msi(group_dev->dev); 2053 mutex_unlock(&group->mutex); 2054 return ret; 2055 } 2056 EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi); 2057 2058 /** 2059 * iommu_set_fault_handler() - set a fault handler for an iommu domain 2060 * @domain: iommu domain 2061 * @handler: fault handler 2062 * @token: user data, will be passed back to the fault handler 2063 * 2064 * This function should be used by IOMMU users which want to be notified 2065 * whenever an IOMMU fault happens. 2066 * 2067 * The fault handler itself should return 0 on success, and an appropriate 2068 * error code otherwise. 2069 */ 2070 void iommu_set_fault_handler(struct iommu_domain *domain, 2071 iommu_fault_handler_t handler, 2072 void *token) 2073 { 2074 BUG_ON(!domain); 2075 2076 domain->handler = handler; 2077 domain->handler_token = token; 2078 } 2079 EXPORT_SYMBOL_GPL(iommu_set_fault_handler); 2080 2081 static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops, 2082 struct device *dev, 2083 unsigned int type) 2084 { 2085 struct iommu_domain *domain; 2086 unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS; 2087 2088 if (alloc_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain) 2089 return ops->identity_domain; 2090 else if (alloc_type == IOMMU_DOMAIN_BLOCKED && ops->blocked_domain) 2091 return ops->blocked_domain; 2092 else if (type & __IOMMU_DOMAIN_PAGING && ops->domain_alloc_paging) 2093 domain = ops->domain_alloc_paging(dev); 2094 else if (ops->domain_alloc) 2095 domain = ops->domain_alloc(alloc_type); 2096 else 2097 return NULL; 2098 2099 if (!domain) 2100 return NULL; 2101 2102 domain->type = type; 2103 /* 2104 * If not already set, assume all sizes by default; the driver 2105 * may override this later 2106 */ 2107 if (!domain->pgsize_bitmap) 2108 domain->pgsize_bitmap = ops->pgsize_bitmap; 2109 2110 if (!domain->ops) 2111 domain->ops = ops->default_domain_ops; 2112 2113 if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) { 2114 iommu_domain_free(domain); 2115 domain = NULL; 2116 } 2117 return domain; 2118 } 2119 2120 static struct iommu_domain * 2121 __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type) 2122 { 2123 struct device *dev = 2124 list_first_entry(&group->devices, struct group_device, list) 2125 ->dev; 2126 2127 return __iommu_domain_alloc(group_iommu_ops(group), dev, type); 2128 } 2129 2130 struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus) 2131 { 2132 if (bus == NULL || bus->iommu_ops == NULL) 2133 return 
NULL; 2134 return __iommu_domain_alloc(bus->iommu_ops, NULL, 2135 IOMMU_DOMAIN_UNMANAGED); 2136 } 2137 EXPORT_SYMBOL_GPL(iommu_domain_alloc); 2138 2139 void iommu_domain_free(struct iommu_domain *domain) 2140 { 2141 if (domain->type == IOMMU_DOMAIN_SVA) 2142 mmdrop(domain->mm); 2143 iommu_put_dma_cookie(domain); 2144 if (domain->ops->free) 2145 domain->ops->free(domain); 2146 } 2147 EXPORT_SYMBOL_GPL(iommu_domain_free); 2148 2149 /* 2150 * Put the group's domain back to the appropriate core-owned domain - either the 2151 * standard kernel-mode DMA configuration or an all-DMA-blocked domain. 2152 */ 2153 static void __iommu_group_set_core_domain(struct iommu_group *group) 2154 { 2155 struct iommu_domain *new_domain; 2156 2157 if (group->owner) 2158 new_domain = group->blocking_domain; 2159 else 2160 new_domain = group->default_domain; 2161 2162 __iommu_group_set_domain_nofail(group, new_domain); 2163 } 2164 2165 static int __iommu_attach_device(struct iommu_domain *domain, 2166 struct device *dev) 2167 { 2168 int ret; 2169 2170 if (unlikely(domain->ops->attach_dev == NULL)) 2171 return -ENODEV; 2172 2173 ret = domain->ops->attach_dev(domain, dev); 2174 if (ret) 2175 return ret; 2176 dev->iommu->attach_deferred = 0; 2177 trace_attach_device_to_domain(dev); 2178 return 0; 2179 } 2180 2181 /** 2182 * iommu_attach_device - Attach an IOMMU domain to a device 2183 * @domain: IOMMU domain to attach 2184 * @dev: Device that will be attached 2185 * 2186 * Returns 0 on success and error code on failure 2187 * 2188 * Note that EINVAL can be treated as a soft failure, indicating 2189 * that certain configuration of the domain is incompatible with 2190 * the device. In this case attaching a different domain to the 2191 * device may succeed. 2192 */ 2193 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 2194 { 2195 /* Caller must be a probed driver on dev */ 2196 struct iommu_group *group = dev->iommu_group; 2197 int ret; 2198 2199 if (!group) 2200 return -ENODEV; 2201 2202 /* 2203 * Lock the group to make sure the device-count doesn't 2204 * change while we are attaching 2205 */ 2206 mutex_lock(&group->mutex); 2207 ret = -EINVAL; 2208 if (list_count_nodes(&group->devices) != 1) 2209 goto out_unlock; 2210 2211 ret = __iommu_attach_group(domain, group); 2212 2213 out_unlock: 2214 mutex_unlock(&group->mutex); 2215 return ret; 2216 } 2217 EXPORT_SYMBOL_GPL(iommu_attach_device); 2218 2219 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 2220 { 2221 if (dev->iommu && dev->iommu->attach_deferred) 2222 return __iommu_attach_device(domain, dev); 2223 2224 return 0; 2225 } 2226 2227 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 2228 { 2229 /* Caller must be a probed driver on dev */ 2230 struct iommu_group *group = dev->iommu_group; 2231 2232 if (!group) 2233 return; 2234 2235 mutex_lock(&group->mutex); 2236 if (WARN_ON(domain != group->domain) || 2237 WARN_ON(list_count_nodes(&group->devices) != 1)) 2238 goto out_unlock; 2239 __iommu_group_set_core_domain(group); 2240 2241 out_unlock: 2242 mutex_unlock(&group->mutex); 2243 } 2244 EXPORT_SYMBOL_GPL(iommu_detach_device); 2245 2246 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 2247 { 2248 /* Caller must be a probed driver on dev */ 2249 struct iommu_group *group = dev->iommu_group; 2250 2251 if (!group) 2252 return NULL; 2253 2254 return group->domain; 2255 } 2256 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 2257 2258 /* 2259 * For IOMMU_DOMAIN_DMA implementations which 
already provide their own 2260 * guarantees that the group and its default domain are valid and correct. 2261 */ 2262 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 2263 { 2264 return dev->iommu_group->default_domain; 2265 } 2266 2267 static int __iommu_attach_group(struct iommu_domain *domain, 2268 struct iommu_group *group) 2269 { 2270 if (group->domain && group->domain != group->default_domain && 2271 group->domain != group->blocking_domain) 2272 return -EBUSY; 2273 2274 return __iommu_group_set_domain(group, domain); 2275 } 2276 2277 /** 2278 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group 2279 * @domain: IOMMU domain to attach 2280 * @group: IOMMU group that will be attached 2281 * 2282 * Returns 0 on success and error code on failure 2283 * 2284 * Note that EINVAL can be treated as a soft failure, indicating 2285 * that certain configuration of the domain is incompatible with 2286 * the group. In this case attaching a different domain to the 2287 * group may succeed. 2288 */ 2289 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 2290 { 2291 int ret; 2292 2293 mutex_lock(&group->mutex); 2294 ret = __iommu_attach_group(domain, group); 2295 mutex_unlock(&group->mutex); 2296 2297 return ret; 2298 } 2299 EXPORT_SYMBOL_GPL(iommu_attach_group); 2300 2301 /** 2302 * iommu_group_replace_domain - replace the domain that a group is attached to 2303 * @new_domain: new IOMMU domain to replace with 2304 * @group: IOMMU group that will be attached to the new domain 2305 * 2306 * This API allows the group to switch domains without being forced to go to 2307 * the blocking domain in-between. 2308 * 2309 * If the currently attached domain is a core domain (e.g. a default_domain), 2310 * it will act just like the iommu_attach_group(). 2311 */ 2312 int iommu_group_replace_domain(struct iommu_group *group, 2313 struct iommu_domain *new_domain) 2314 { 2315 int ret; 2316 2317 if (!new_domain) 2318 return -EINVAL; 2319 2320 mutex_lock(&group->mutex); 2321 ret = __iommu_group_set_domain(group, new_domain); 2322 mutex_unlock(&group->mutex); 2323 return ret; 2324 } 2325 EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, IOMMUFD_INTERNAL); 2326 2327 static int __iommu_device_set_domain(struct iommu_group *group, 2328 struct device *dev, 2329 struct iommu_domain *new_domain, 2330 unsigned int flags) 2331 { 2332 int ret; 2333 2334 /* 2335 * If the device requires IOMMU_RESV_DIRECT then we cannot allow 2336 * the blocking domain to be attached as it does not contain the 2337 * required 1:1 mapping. This test effectively excludes the device 2338 * being used with iommu_group_claim_dma_owner() which will block 2339 * vfio and iommufd as well. 2340 */ 2341 if (dev->iommu->require_direct && 2342 (new_domain->type == IOMMU_DOMAIN_BLOCKED || 2343 new_domain == group->blocking_domain)) { 2344 dev_warn(dev, 2345 "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n"); 2346 return -EINVAL; 2347 } 2348 2349 if (dev->iommu->attach_deferred) { 2350 if (new_domain == group->default_domain) 2351 return 0; 2352 dev->iommu->attach_deferred = 0; 2353 } 2354 2355 ret = __iommu_attach_device(new_domain, dev); 2356 if (ret) { 2357 /* 2358 * If we have a blocking domain then try to attach that in hopes 2359 * of avoiding a UAF. Modern drivers should implement blocking 2360 * domains as global statics that cannot fail. 
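 *
 * (In a driver that usually looks like a file-scope
 *
 *	static struct iommu_domain my_blocked_domain = {
 *		.type = IOMMU_DOMAIN_BLOCKED,
 *		.ops = &my_blocked_domain_ops,
 *	};
 *
 * exposed via ops->blocked_domain; the names here are purely illustrative.)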
 */
		if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) &&
		    group->blocking_domain &&
		    group->blocking_domain != new_domain)
			__iommu_attach_device(group->blocking_domain, dev);
		return ret;
	}
	return 0;
}

/*
 * If 0 is returned the group's domain is new_domain. If an error is returned
 * then the group's domain will be set back to the existing domain unless
 * IOMMU_SET_DOMAIN_MUST_SUCCEED, otherwise an error is returned and the
 * group's domain is left inconsistent. It is a driver bug to fail attach with
 * a previously good domain. We try to avoid a kernel UAF because of this.
 *
 * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU
 * API works on domains and devices. Bridge that gap by iterating over the
 * devices in a group. Ideally we'd have a single device which represents the
 * requestor ID of the group, but we also allow IOMMU drivers to create policy
 * defined minimum sets, where the physical hardware may be able to distinguish
 * members, but we wish to group them at a higher level (ex. untrusted
 * multi-function PCI devices). Thus we attach each device.
 */
static int __iommu_group_set_domain_internal(struct iommu_group *group,
					     struct iommu_domain *new_domain,
					     unsigned int flags)
{
	struct group_device *last_gdev;
	struct group_device *gdev;
	int result;
	int ret;

	lockdep_assert_held(&group->mutex);

	if (group->domain == new_domain)
		return 0;

	if (WARN_ON(!new_domain))
		return -EINVAL;

	/*
	 * Changing the domain is done by calling attach_dev() on the new
	 * domain. This switch does not have to be atomic and DMA can be
	 * discarded during the transition. DMA must only be able to access
	 * either new_domain or group->domain, never something else.
	 */
	result = 0;
	for_each_group_device(group, gdev) {
		ret = __iommu_device_set_domain(group, gdev->dev, new_domain,
						flags);
		if (ret) {
			result = ret;
			/*
			 * Keep trying the other devices in the group. If a
			 * driver fails attach to an otherwise good domain, and
			 * does not support blocking domains, it should at least
			 * drop its reference on the current domain so we don't
			 * UAF.
			 */
			if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED)
				continue;
			goto err_revert;
		}
	}
	group->domain = new_domain;
	return result;

err_revert:
	/*
	 * This is called in error unwind paths. A well behaved driver should
	 * always allow us to attach to a domain that was already attached.
	 */
	last_gdev = gdev;
	for_each_group_device(group, gdev) {
		/*
		 * A NULL domain can happen only for first probe, in which case
		 * we leave group->domain as NULL and let release clean
		 * everything up.
2441 */ 2442 if (group->domain) 2443 WARN_ON(__iommu_device_set_domain( 2444 group, gdev->dev, group->domain, 2445 IOMMU_SET_DOMAIN_MUST_SUCCEED)); 2446 if (gdev == last_gdev) 2447 break; 2448 } 2449 return ret; 2450 } 2451 2452 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2453 { 2454 mutex_lock(&group->mutex); 2455 __iommu_group_set_core_domain(group); 2456 mutex_unlock(&group->mutex); 2457 } 2458 EXPORT_SYMBOL_GPL(iommu_detach_group); 2459 2460 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2461 { 2462 if (domain->type == IOMMU_DOMAIN_IDENTITY) 2463 return iova; 2464 2465 if (domain->type == IOMMU_DOMAIN_BLOCKED) 2466 return 0; 2467 2468 return domain->ops->iova_to_phys(domain, iova); 2469 } 2470 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2471 2472 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, 2473 phys_addr_t paddr, size_t size, size_t *count) 2474 { 2475 unsigned int pgsize_idx, pgsize_idx_next; 2476 unsigned long pgsizes; 2477 size_t offset, pgsize, pgsize_next; 2478 unsigned long addr_merge = paddr | iova; 2479 2480 /* Page sizes supported by the hardware and small enough for @size */ 2481 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); 2482 2483 /* Constrain the page sizes further based on the maximum alignment */ 2484 if (likely(addr_merge)) 2485 pgsizes &= GENMASK(__ffs(addr_merge), 0); 2486 2487 /* Make sure we have at least one suitable page size */ 2488 BUG_ON(!pgsizes); 2489 2490 /* Pick the biggest page size remaining */ 2491 pgsize_idx = __fls(pgsizes); 2492 pgsize = BIT(pgsize_idx); 2493 if (!count) 2494 return pgsize; 2495 2496 /* Find the next biggest support page size, if it exists */ 2497 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); 2498 if (!pgsizes) 2499 goto out_set_count; 2500 2501 pgsize_idx_next = __ffs(pgsizes); 2502 pgsize_next = BIT(pgsize_idx_next); 2503 2504 /* 2505 * There's no point trying a bigger page size unless the virtual 2506 * and physical addresses are similarly offset within the larger page. 2507 */ 2508 if ((iova ^ paddr) & (pgsize_next - 1)) 2509 goto out_set_count; 2510 2511 /* Calculate the offset to the next page size alignment boundary */ 2512 offset = pgsize_next - (addr_merge & (pgsize_next - 1)); 2513 2514 /* 2515 * If size is big enough to accommodate the larger page, reduce 2516 * the number of smaller pages. 
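 *
 * Worked example (assuming 4K and 2M page support): for iova = paddr =
 * 0x1ff000 and size = 0x202000 the largest size usable right away is 4K,
 * and the next 2M boundary is offset = 0x1000 away. Since 0x1000 + 2M still
 * fits within size, only that first 0x1000 is mapped with 4K pages now and
 * the next call can start at the boundary with a single 2M page.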
2517 */ 2518 if (offset + pgsize_next <= size) 2519 size = offset; 2520 2521 out_set_count: 2522 *count = size >> pgsize_idx; 2523 return pgsize; 2524 } 2525 2526 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2527 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2528 { 2529 const struct iommu_domain_ops *ops = domain->ops; 2530 unsigned long orig_iova = iova; 2531 unsigned int min_pagesz; 2532 size_t orig_size = size; 2533 phys_addr_t orig_paddr = paddr; 2534 int ret = 0; 2535 2536 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2537 return -EINVAL; 2538 2539 if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL)) 2540 return -ENODEV; 2541 2542 /* find out the minimum page size supported */ 2543 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2544 2545 /* 2546 * both the virtual address and the physical one, as well as 2547 * the size of the mapping, must be aligned (at least) to the 2548 * size of the smallest page supported by the hardware 2549 */ 2550 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2551 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2552 iova, &paddr, size, min_pagesz); 2553 return -EINVAL; 2554 } 2555 2556 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2557 2558 while (size) { 2559 size_t pgsize, count, mapped = 0; 2560 2561 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); 2562 2563 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", 2564 iova, &paddr, pgsize, count); 2565 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, 2566 gfp, &mapped); 2567 /* 2568 * Some pages may have been mapped, even if an error occurred, 2569 * so we should account for those so they can be unmapped. 2570 */ 2571 size -= mapped; 2572 2573 if (ret) 2574 break; 2575 2576 iova += mapped; 2577 paddr += mapped; 2578 } 2579 2580 /* unroll mapping in case something went wrong */ 2581 if (ret) 2582 iommu_unmap(domain, orig_iova, orig_size - size); 2583 else 2584 trace_map(orig_iova, orig_paddr, orig_size); 2585 2586 return ret; 2587 } 2588 2589 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2590 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2591 { 2592 const struct iommu_domain_ops *ops = domain->ops; 2593 int ret; 2594 2595 might_sleep_if(gfpflags_allow_blocking(gfp)); 2596 2597 /* Discourage passing strange GFP flags */ 2598 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2599 __GFP_HIGHMEM))) 2600 return -EINVAL; 2601 2602 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); 2603 if (ret == 0 && ops->iotlb_sync_map) { 2604 ret = ops->iotlb_sync_map(domain, iova, size); 2605 if (ret) 2606 goto out_err; 2607 } 2608 2609 return ret; 2610 2611 out_err: 2612 /* undo mappings already done */ 2613 iommu_unmap(domain, iova, size); 2614 2615 return ret; 2616 } 2617 EXPORT_SYMBOL_GPL(iommu_map); 2618 2619 static size_t __iommu_unmap(struct iommu_domain *domain, 2620 unsigned long iova, size_t size, 2621 struct iommu_iotlb_gather *iotlb_gather) 2622 { 2623 const struct iommu_domain_ops *ops = domain->ops; 2624 size_t unmapped_page, unmapped = 0; 2625 unsigned long orig_iova = iova; 2626 unsigned int min_pagesz; 2627 2628 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2629 return 0; 2630 2631 if (WARN_ON(!ops->unmap_pages || domain->pgsize_bitmap == 0UL)) 2632 return 0; 2633 2634 /* find out the minimum page size supported */ 2635 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2636 2637 /* 2638 * The virtual address, as well as the size 
of the mapping, must be 2639 * aligned (at least) to the size of the smallest page supported 2640 * by the hardware 2641 */ 2642 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2643 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2644 iova, size, min_pagesz); 2645 return 0; 2646 } 2647 2648 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2649 2650 /* 2651 * Keep iterating until we either unmap 'size' bytes (or more) 2652 * or we hit an area that isn't mapped. 2653 */ 2654 while (unmapped < size) { 2655 size_t pgsize, count; 2656 2657 pgsize = iommu_pgsize(domain, iova, iova, size - unmapped, &count); 2658 unmapped_page = ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather); 2659 if (!unmapped_page) 2660 break; 2661 2662 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2663 iova, unmapped_page); 2664 2665 iova += unmapped_page; 2666 unmapped += unmapped_page; 2667 } 2668 2669 trace_unmap(orig_iova, size, unmapped); 2670 return unmapped; 2671 } 2672 2673 size_t iommu_unmap(struct iommu_domain *domain, 2674 unsigned long iova, size_t size) 2675 { 2676 struct iommu_iotlb_gather iotlb_gather; 2677 size_t ret; 2678 2679 iommu_iotlb_gather_init(&iotlb_gather); 2680 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2681 iommu_iotlb_sync(domain, &iotlb_gather); 2682 2683 return ret; 2684 } 2685 EXPORT_SYMBOL_GPL(iommu_unmap); 2686 2687 size_t iommu_unmap_fast(struct iommu_domain *domain, 2688 unsigned long iova, size_t size, 2689 struct iommu_iotlb_gather *iotlb_gather) 2690 { 2691 return __iommu_unmap(domain, iova, size, iotlb_gather); 2692 } 2693 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2694 2695 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2696 struct scatterlist *sg, unsigned int nents, int prot, 2697 gfp_t gfp) 2698 { 2699 const struct iommu_domain_ops *ops = domain->ops; 2700 size_t len = 0, mapped = 0; 2701 phys_addr_t start; 2702 unsigned int i = 0; 2703 int ret; 2704 2705 might_sleep_if(gfpflags_allow_blocking(gfp)); 2706 2707 /* Discourage passing strange GFP flags */ 2708 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2709 __GFP_HIGHMEM))) 2710 return -EINVAL; 2711 2712 while (i <= nents) { 2713 phys_addr_t s_phys = sg_phys(sg); 2714 2715 if (len && s_phys != start + len) { 2716 ret = __iommu_map(domain, iova + mapped, start, 2717 len, prot, gfp); 2718 2719 if (ret) 2720 goto out_err; 2721 2722 mapped += len; 2723 len = 0; 2724 } 2725 2726 if (sg_dma_is_bus_address(sg)) 2727 goto next; 2728 2729 if (len) { 2730 len += sg->length; 2731 } else { 2732 len = sg->length; 2733 start = s_phys; 2734 } 2735 2736 next: 2737 if (++i < nents) 2738 sg = sg_next(sg); 2739 } 2740 2741 if (ops->iotlb_sync_map) { 2742 ret = ops->iotlb_sync_map(domain, iova, mapped); 2743 if (ret) 2744 goto out_err; 2745 } 2746 return mapped; 2747 2748 out_err: 2749 /* undo mappings already done */ 2750 iommu_unmap(domain, iova, mapped); 2751 2752 return ret; 2753 } 2754 EXPORT_SYMBOL_GPL(iommu_map_sg); 2755 2756 /** 2757 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2758 * @domain: the iommu domain where the fault has happened 2759 * @dev: the device where the fault has happened 2760 * @iova: the faulting address 2761 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2762 * 2763 * This function should be called by the low-level IOMMU implementations 2764 * whenever IOMMU faults happen, to allow high-level users, that are 2765 * interested in such events, to know about them. 
2766 * 2767 * This event may be useful for several possible use cases: 2768 * - mere logging of the event 2769 * - dynamic TLB/PTE loading 2770 * - if restarting of the faulting device is required 2771 * 2772 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2773 * PTE/TLB loading will one day be supported, implementations will be able 2774 * to tell whether it succeeded or not according to this return value). 2775 * 2776 * Specifically, -ENOSYS is returned if a fault handler isn't installed 2777 * (though fault handlers can also return -ENOSYS, in case they want to 2778 * elicit the default behavior of the IOMMU drivers). 2779 */ 2780 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 2781 unsigned long iova, int flags) 2782 { 2783 int ret = -ENOSYS; 2784 2785 /* 2786 * if upper layers showed interest and installed a fault handler, 2787 * invoke it. 2788 */ 2789 if (domain->handler) 2790 ret = domain->handler(domain, dev, iova, flags, 2791 domain->handler_token); 2792 2793 trace_io_page_fault(dev, iova, flags); 2794 return ret; 2795 } 2796 EXPORT_SYMBOL_GPL(report_iommu_fault); 2797 2798 static int __init iommu_init(void) 2799 { 2800 iommu_group_kset = kset_create_and_add("iommu_groups", 2801 NULL, kernel_kobj); 2802 BUG_ON(!iommu_group_kset); 2803 2804 iommu_debugfs_setup(); 2805 2806 return 0; 2807 } 2808 core_initcall(iommu_init); 2809 2810 int iommu_enable_nesting(struct iommu_domain *domain) 2811 { 2812 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2813 return -EINVAL; 2814 if (!domain->ops->enable_nesting) 2815 return -EINVAL; 2816 return domain->ops->enable_nesting(domain); 2817 } 2818 EXPORT_SYMBOL_GPL(iommu_enable_nesting); 2819 2820 int iommu_set_pgtable_quirks(struct iommu_domain *domain, 2821 unsigned long quirk) 2822 { 2823 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2824 return -EINVAL; 2825 if (!domain->ops->set_pgtable_quirks) 2826 return -EINVAL; 2827 return domain->ops->set_pgtable_quirks(domain, quirk); 2828 } 2829 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); 2830 2831 /** 2832 * iommu_get_resv_regions - get reserved regions 2833 * @dev: device for which to get reserved regions 2834 * @list: reserved region list for device 2835 * 2836 * This returns a list of reserved IOVA regions specific to this device. 2837 * A domain user should not map IOVA in these ranges. 2838 */ 2839 void iommu_get_resv_regions(struct device *dev, struct list_head *list) 2840 { 2841 const struct iommu_ops *ops = dev_iommu_ops(dev); 2842 2843 if (ops->get_resv_regions) 2844 ops->get_resv_regions(dev, list); 2845 } 2846 EXPORT_SYMBOL_GPL(iommu_get_resv_regions); 2847 2848 /** 2849 * iommu_put_resv_regions - release reserved regions 2850 * @dev: device for which to free reserved regions 2851 * @list: reserved region list for device 2852 * 2853 * This releases a reserved region list acquired by iommu_get_resv_regions(). 
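 *
 * Typical usage is a get/walk/put sequence (sketch; the list head lives on
 * the caller's stack and reserve_iova_range() stands in for whatever the
 * caller does with each region):
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		reserve_iova_range(region->start, region->length);
 *	iommu_put_resv_regions(dev, &resv_regions);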
 */
void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, list, list) {
		if (entry->free)
			entry->free(dev, entry);
		else
			kfree(entry);
	}
}
EXPORT_SYMBOL(iommu_put_resv_regions);

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type,
						  gfp_t gfp)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), gfp);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);

void iommu_set_default_passthrough(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	/* Preallocate for the overwhelmingly common case of 1 ID */
	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, new_num;

	if (!fwspec)
		return -EINVAL;

	new_num = fwspec->num_ids + num_ids;
	if (new_num > 1) {
		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
				  GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids = new_num;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);

/*
 * Per device IOMMU features.
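 *
 * For example (sketch; IOMMU_DEV_FEAT_SVA is just one of the defined
 * features), a driver wanting Shared Virtual Addressing would bracket its
 * use with:
 *
 *	if (!iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
 *		... use SVA ...
 *		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	}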
2987 */ 2988 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2989 { 2990 if (dev->iommu && dev->iommu->iommu_dev) { 2991 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2992 2993 if (ops->dev_enable_feat) 2994 return ops->dev_enable_feat(dev, feat); 2995 } 2996 2997 return -ENODEV; 2998 } 2999 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 3000 3001 /* 3002 * The device drivers should do the necessary cleanups before calling this. 3003 */ 3004 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 3005 { 3006 if (dev->iommu && dev->iommu->iommu_dev) { 3007 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 3008 3009 if (ops->dev_disable_feat) 3010 return ops->dev_disable_feat(dev, feat); 3011 } 3012 3013 return -EBUSY; 3014 } 3015 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 3016 3017 /** 3018 * iommu_setup_default_domain - Set the default_domain for the group 3019 * @group: Group to change 3020 * @target_type: Domain type to set as the default_domain 3021 * 3022 * Allocate a default domain and set it as the current domain on the group. If 3023 * the group already has a default domain it will be changed to the target_type. 3024 * When target_type is 0 the default domain is selected based on driver and 3025 * system preferences. 3026 */ 3027 static int iommu_setup_default_domain(struct iommu_group *group, 3028 int target_type) 3029 { 3030 struct iommu_domain *old_dom = group->default_domain; 3031 struct group_device *gdev; 3032 struct iommu_domain *dom; 3033 bool direct_failed; 3034 int req_type; 3035 int ret; 3036 3037 lockdep_assert_held(&group->mutex); 3038 3039 req_type = iommu_get_default_domain_type(group, target_type); 3040 if (req_type < 0) 3041 return -EINVAL; 3042 3043 dom = iommu_group_alloc_default_domain(group, req_type); 3044 if (!dom) 3045 return -ENODEV; 3046 3047 if (group->default_domain == dom) 3048 return 0; 3049 3050 /* 3051 * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be 3052 * mapped before their device is attached, in order to guarantee 3053 * continuity with any FW activity 3054 */ 3055 direct_failed = false; 3056 for_each_group_device(group, gdev) { 3057 if (iommu_create_device_direct_mappings(dom, gdev->dev)) { 3058 direct_failed = true; 3059 dev_warn_once( 3060 gdev->dev->iommu->iommu_dev->dev, 3061 "IOMMU driver was not able to establish FW requested direct mapping."); 3062 } 3063 } 3064 3065 /* We must set default_domain early for __iommu_device_set_domain */ 3066 group->default_domain = dom; 3067 if (!group->domain) { 3068 /* 3069 * Drivers are not allowed to fail the first domain attach. 3070 * The only way to recover from this is to fail attaching the 3071 * iommu driver and call ops->release_device. Put the domain 3072 * in group->default_domain so it is freed after. 3073 */ 3074 ret = __iommu_group_set_domain_internal( 3075 group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); 3076 if (WARN_ON(ret)) 3077 goto out_free_old; 3078 } else { 3079 ret = __iommu_group_set_domain(group, dom); 3080 if (ret) 3081 goto err_restore_def_domain; 3082 } 3083 3084 /* 3085 * Drivers are supposed to allow mappings to be installed in a domain 3086 * before device attachment, but some don't. Hack around this defect by 3087 * trying again after attaching. If this happens it means the device 3088 * will not continuously have the IOMMU_RESV_DIRECT map. 
3089 */ 3090 if (direct_failed) { 3091 for_each_group_device(group, gdev) { 3092 ret = iommu_create_device_direct_mappings(dom, gdev->dev); 3093 if (ret) 3094 goto err_restore_domain; 3095 } 3096 } 3097 3098 out_free_old: 3099 if (old_dom) 3100 iommu_domain_free(old_dom); 3101 return ret; 3102 3103 err_restore_domain: 3104 if (old_dom) 3105 __iommu_group_set_domain_internal( 3106 group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); 3107 err_restore_def_domain: 3108 if (old_dom) { 3109 iommu_domain_free(dom); 3110 group->default_domain = old_dom; 3111 } 3112 return ret; 3113 } 3114 3115 /* 3116 * Changing the default domain through sysfs requires the users to unbind the 3117 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ 3118 * transition. Return failure if this isn't met. 3119 * 3120 * We need to consider the race between this and the device release path. 3121 * group->mutex is used here to guarantee that the device release path 3122 * will not be entered at the same time. 3123 */ 3124 static ssize_t iommu_group_store_type(struct iommu_group *group, 3125 const char *buf, size_t count) 3126 { 3127 struct group_device *gdev; 3128 int ret, req_type; 3129 3130 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 3131 return -EACCES; 3132 3133 if (WARN_ON(!group) || !group->default_domain) 3134 return -EINVAL; 3135 3136 if (sysfs_streq(buf, "identity")) 3137 req_type = IOMMU_DOMAIN_IDENTITY; 3138 else if (sysfs_streq(buf, "DMA")) 3139 req_type = IOMMU_DOMAIN_DMA; 3140 else if (sysfs_streq(buf, "DMA-FQ")) 3141 req_type = IOMMU_DOMAIN_DMA_FQ; 3142 else if (sysfs_streq(buf, "auto")) 3143 req_type = 0; 3144 else 3145 return -EINVAL; 3146 3147 mutex_lock(&group->mutex); 3148 /* We can bring up a flush queue without tearing down the domain. */ 3149 if (req_type == IOMMU_DOMAIN_DMA_FQ && 3150 group->default_domain->type == IOMMU_DOMAIN_DMA) { 3151 ret = iommu_dma_init_fq(group->default_domain); 3152 if (ret) 3153 goto out_unlock; 3154 3155 group->default_domain->type = IOMMU_DOMAIN_DMA_FQ; 3156 ret = count; 3157 goto out_unlock; 3158 } 3159 3160 /* Otherwise, ensure that device exists and no driver is bound. */ 3161 if (list_empty(&group->devices) || group->owner_cnt) { 3162 ret = -EPERM; 3163 goto out_unlock; 3164 } 3165 3166 ret = iommu_setup_default_domain(group, req_type); 3167 if (ret) 3168 goto out_unlock; 3169 3170 /* 3171 * Release the mutex here because ops->probe_finalize() call-back of 3172 * some vendor IOMMU drivers calls arm_iommu_attach_device() which 3173 * in-turn might call back into IOMMU core code, where it tries to take 3174 * group->mutex, resulting in a deadlock. 3175 */ 3176 mutex_unlock(&group->mutex); 3177 3178 /* Make sure dma_ops is appropriatley set */ 3179 for_each_group_device(group, gdev) 3180 iommu_group_do_probe_finalize(gdev->dev); 3181 return count; 3182 3183 out_unlock: 3184 mutex_unlock(&group->mutex); 3185 return ret ?: count; 3186 } 3187 3188 /** 3189 * iommu_device_use_default_domain() - Device driver wants to handle device 3190 * DMA through the kernel DMA API. 3191 * @dev: The device. 3192 * 3193 * The device driver about to bind @dev wants to do DMA through the kernel 3194 * DMA API. Return 0 if it is allowed, otherwise an error. 
3195 */ 3196 int iommu_device_use_default_domain(struct device *dev) 3197 { 3198 /* Caller is the driver core during the pre-probe path */ 3199 struct iommu_group *group = dev->iommu_group; 3200 int ret = 0; 3201 3202 if (!group) 3203 return 0; 3204 3205 mutex_lock(&group->mutex); 3206 if (group->owner_cnt) { 3207 if (group->domain != group->default_domain || group->owner || 3208 !xa_empty(&group->pasid_array)) { 3209 ret = -EBUSY; 3210 goto unlock_out; 3211 } 3212 } 3213 3214 group->owner_cnt++; 3215 3216 unlock_out: 3217 mutex_unlock(&group->mutex); 3218 return ret; 3219 } 3220 3221 /** 3222 * iommu_device_unuse_default_domain() - Device driver stops handling device 3223 * DMA through the kernel DMA API. 3224 * @dev: The device. 3225 * 3226 * The device driver doesn't want to do DMA through kernel DMA API anymore. 3227 * It must be called after iommu_device_use_default_domain(). 3228 */ 3229 void iommu_device_unuse_default_domain(struct device *dev) 3230 { 3231 /* Caller is the driver core during the post-probe path */ 3232 struct iommu_group *group = dev->iommu_group; 3233 3234 if (!group) 3235 return; 3236 3237 mutex_lock(&group->mutex); 3238 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) 3239 group->owner_cnt--; 3240 3241 mutex_unlock(&group->mutex); 3242 } 3243 3244 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) 3245 { 3246 if (group->blocking_domain) 3247 return 0; 3248 3249 group->blocking_domain = 3250 __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED); 3251 if (!group->blocking_domain) { 3252 /* 3253 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED 3254 * create an empty domain instead. 3255 */ 3256 group->blocking_domain = __iommu_group_domain_alloc( 3257 group, IOMMU_DOMAIN_UNMANAGED); 3258 if (!group->blocking_domain) 3259 return -EINVAL; 3260 } 3261 return 0; 3262 } 3263 3264 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) 3265 { 3266 int ret; 3267 3268 if ((group->domain && group->domain != group->default_domain) || 3269 !xa_empty(&group->pasid_array)) 3270 return -EBUSY; 3271 3272 ret = __iommu_group_alloc_blocking_domain(group); 3273 if (ret) 3274 return ret; 3275 ret = __iommu_group_set_domain(group, group->blocking_domain); 3276 if (ret) 3277 return ret; 3278 3279 group->owner = owner; 3280 group->owner_cnt++; 3281 return 0; 3282 } 3283 3284 /** 3285 * iommu_group_claim_dma_owner() - Set DMA ownership of a group 3286 * @group: The group. 3287 * @owner: Caller specified pointer. Used for exclusive ownership. 3288 * 3289 * This is to support backward compatibility for vfio which manages the dma 3290 * ownership in iommu_group level. New invocations on this interface should be 3291 * prohibited. Only a single owner may exist for a group. 3292 */ 3293 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) 3294 { 3295 int ret = 0; 3296 3297 if (WARN_ON(!owner)) 3298 return -EINVAL; 3299 3300 mutex_lock(&group->mutex); 3301 if (group->owner_cnt) { 3302 ret = -EPERM; 3303 goto unlock_out; 3304 } 3305 3306 ret = __iommu_take_dma_ownership(group, owner); 3307 unlock_out: 3308 mutex_unlock(&group->mutex); 3309 3310 return ret; 3311 } 3312 EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner); 3313 3314 /** 3315 * iommu_device_claim_dma_owner() - Set DMA ownership of a device 3316 * @dev: The device. 3317 * @owner: Caller specified pointer. Used for exclusive ownership. 3318 * 3319 * Claim the DMA ownership of a device. 
Multiple devices in the same group may 3320 * concurrently claim ownership if they present the same owner value. Returns 0 3321 * on success and error code on failure 3322 */ 3323 int iommu_device_claim_dma_owner(struct device *dev, void *owner) 3324 { 3325 /* Caller must be a probed driver on dev */ 3326 struct iommu_group *group = dev->iommu_group; 3327 int ret = 0; 3328 3329 if (WARN_ON(!owner)) 3330 return -EINVAL; 3331 3332 if (!group) 3333 return -ENODEV; 3334 3335 mutex_lock(&group->mutex); 3336 if (group->owner_cnt) { 3337 if (group->owner != owner) { 3338 ret = -EPERM; 3339 goto unlock_out; 3340 } 3341 group->owner_cnt++; 3342 goto unlock_out; 3343 } 3344 3345 ret = __iommu_take_dma_ownership(group, owner); 3346 unlock_out: 3347 mutex_unlock(&group->mutex); 3348 return ret; 3349 } 3350 EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner); 3351 3352 static void __iommu_release_dma_ownership(struct iommu_group *group) 3353 { 3354 if (WARN_ON(!group->owner_cnt || !group->owner || 3355 !xa_empty(&group->pasid_array))) 3356 return; 3357 3358 group->owner_cnt = 0; 3359 group->owner = NULL; 3360 __iommu_group_set_domain_nofail(group, group->default_domain); 3361 } 3362 3363 /** 3364 * iommu_group_release_dma_owner() - Release DMA ownership of a group 3365 * @group: The group 3366 * 3367 * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). 3368 */ 3369 void iommu_group_release_dma_owner(struct iommu_group *group) 3370 { 3371 mutex_lock(&group->mutex); 3372 __iommu_release_dma_ownership(group); 3373 mutex_unlock(&group->mutex); 3374 } 3375 EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner); 3376 3377 /** 3378 * iommu_device_release_dma_owner() - Release DMA ownership of a device 3379 * @dev: The device. 3380 * 3381 * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). 3382 */ 3383 void iommu_device_release_dma_owner(struct device *dev) 3384 { 3385 /* Caller must be a probed driver on dev */ 3386 struct iommu_group *group = dev->iommu_group; 3387 3388 mutex_lock(&group->mutex); 3389 if (group->owner_cnt > 1) 3390 group->owner_cnt--; 3391 else 3392 __iommu_release_dma_ownership(group); 3393 mutex_unlock(&group->mutex); 3394 } 3395 EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner); 3396 3397 /** 3398 * iommu_group_dma_owner_claimed() - Query group dma ownership status 3399 * @group: The group. 3400 * 3401 * This provides status query on a given group. It is racy and only for 3402 * non-binding status reporting. 
3403 */ 3404 bool iommu_group_dma_owner_claimed(struct iommu_group *group) 3405 { 3406 unsigned int user; 3407 3408 mutex_lock(&group->mutex); 3409 user = group->owner_cnt; 3410 mutex_unlock(&group->mutex); 3411 3412 return user; 3413 } 3414 EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); 3415 3416 static int __iommu_set_group_pasid(struct iommu_domain *domain, 3417 struct iommu_group *group, ioasid_t pasid) 3418 { 3419 struct group_device *device; 3420 int ret = 0; 3421 3422 for_each_group_device(group, device) { 3423 ret = domain->ops->set_dev_pasid(domain, device->dev, pasid); 3424 if (ret) 3425 break; 3426 } 3427 3428 return ret; 3429 } 3430 3431 static void __iommu_remove_group_pasid(struct iommu_group *group, 3432 ioasid_t pasid) 3433 { 3434 struct group_device *device; 3435 const struct iommu_ops *ops; 3436 3437 for_each_group_device(group, device) { 3438 ops = dev_iommu_ops(device->dev); 3439 ops->remove_dev_pasid(device->dev, pasid); 3440 } 3441 } 3442 3443 /* 3444 * iommu_attach_device_pasid() - Attach a domain to pasid of device 3445 * @domain: the iommu domain. 3446 * @dev: the attached device. 3447 * @pasid: the pasid of the device. 3448 * 3449 * Return: 0 on success, or an error. 3450 */ 3451 int iommu_attach_device_pasid(struct iommu_domain *domain, 3452 struct device *dev, ioasid_t pasid) 3453 { 3454 /* Caller must be a probed driver on dev */ 3455 struct iommu_group *group = dev->iommu_group; 3456 void *curr; 3457 int ret; 3458 3459 if (!domain->ops->set_dev_pasid) 3460 return -EOPNOTSUPP; 3461 3462 if (!group) 3463 return -ENODEV; 3464 3465 mutex_lock(&group->mutex); 3466 curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL); 3467 if (curr) { 3468 ret = xa_err(curr) ? : -EBUSY; 3469 goto out_unlock; 3470 } 3471 3472 ret = __iommu_set_group_pasid(domain, group, pasid); 3473 if (ret) { 3474 __iommu_remove_group_pasid(group, pasid); 3475 xa_erase(&group->pasid_array, pasid); 3476 } 3477 out_unlock: 3478 mutex_unlock(&group->mutex); 3479 return ret; 3480 } 3481 EXPORT_SYMBOL_GPL(iommu_attach_device_pasid); 3482 3483 /* 3484 * iommu_detach_device_pasid() - Detach the domain from pasid of device 3485 * @domain: the iommu domain. 3486 * @dev: the attached device. 3487 * @pasid: the pasid of the device. 3488 * 3489 * The @domain must have been attached to @pasid of the @dev with 3490 * iommu_attach_device_pasid(). 3491 */ 3492 void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, 3493 ioasid_t pasid) 3494 { 3495 /* Caller must be a probed driver on dev */ 3496 struct iommu_group *group = dev->iommu_group; 3497 3498 mutex_lock(&group->mutex); 3499 __iommu_remove_group_pasid(group, pasid); 3500 WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); 3501 mutex_unlock(&group->mutex); 3502 } 3503 EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); 3504 3505 /* 3506 * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev 3507 * @dev: the queried device 3508 * @pasid: the pasid of the device 3509 * @type: matched domain type, 0 for any match 3510 * 3511 * This is a variant of iommu_get_domain_for_dev(). It returns the existing 3512 * domain attached to pasid of a device. Callers must hold a lock around this 3513 * function, and both iommu_attach/detach_dev_pasid() whenever a domain of 3514 * type is being manipulated. This API does not internally resolve races with 3515 * attach/detach. 3516 * 3517 * Return: attached domain on success, NULL otherwise. 
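 *
 * A rough lifecycle sketch tying the PASID interfaces together (PASID
 * allocation and error handling omitted):
 *
 *	iommu_attach_device_pasid(domain, dev, pasid);
 *	...
 *	domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
 *	...
 *	iommu_detach_device_pasid(domain, dev, pasid);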
3518 */ 3519 struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev, 3520 ioasid_t pasid, 3521 unsigned int type) 3522 { 3523 /* Caller must be a probed driver on dev */ 3524 struct iommu_group *group = dev->iommu_group; 3525 struct iommu_domain *domain; 3526 3527 if (!group) 3528 return NULL; 3529 3530 xa_lock(&group->pasid_array); 3531 domain = xa_load(&group->pasid_array, pasid); 3532 if (type && domain && domain->type != type) 3533 domain = ERR_PTR(-EBUSY); 3534 xa_unlock(&group->pasid_array); 3535 3536 return domain; 3537 } 3538 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid); 3539 3540 struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, 3541 struct mm_struct *mm) 3542 { 3543 const struct iommu_ops *ops = dev_iommu_ops(dev); 3544 struct iommu_domain *domain; 3545 3546 domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); 3547 if (!domain) 3548 return NULL; 3549 3550 domain->type = IOMMU_DOMAIN_SVA; 3551 mmgrab(mm); 3552 domain->mm = mm; 3553 domain->iopf_handler = iommu_sva_handle_iopf; 3554 domain->fault_data = mm; 3555 3556 return domain; 3557 } 3558 3559 ioasid_t iommu_alloc_global_pasid(struct device *dev) 3560 { 3561 int ret; 3562 3563 /* max_pasids == 0 means that the device does not support PASID */ 3564 if (!dev->iommu->max_pasids) 3565 return IOMMU_PASID_INVALID; 3566 3567 /* 3568 * max_pasids is set up by vendor driver based on number of PASID bits 3569 * supported but the IDA allocation is inclusive. 3570 */ 3571 ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID, 3572 dev->iommu->max_pasids - 1, GFP_KERNEL); 3573 return ret < 0 ? IOMMU_PASID_INVALID : ret; 3574 } 3575 EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid); 3576 3577 void iommu_free_global_pasid(ioasid_t pasid) 3578 { 3579 if (WARN_ON(pasid == IOMMU_PASID_INVALID)) 3580 return; 3581 3582 ida_free(&iommu_global_pasid_ida, pasid); 3583 } 3584 EXPORT_SYMBOL_GPL(iommu_free_global_pasid); 3585
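
/*
 * Illustrative sketch only, not used by the core: how a hypothetical caller
 * could combine the exported API above - allocate an unmanaged domain, attach
 * it to a (singleton-group) device, map one region and tear it all down. The
 * device, IOVA and physical address are assumed to be provided by the caller.
 */
static int __maybe_unused iommu_example_map_one_region(struct device *dev,
						       unsigned long iova,
						       phys_addr_t paddr,
						       size_t size)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENODEV;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE,
			GFP_KERNEL);
	if (ret)
		goto out_detach;

	/* ... the device can now DMA to [iova, iova + size) ... */

	iommu_unmap(domain, iova, size);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}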