1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 4 * Author: Joerg Roedel <jroedel@suse.de> 5 */ 6 7 #define pr_fmt(fmt) "iommu: " fmt 8 9 #include <linux/device.h> 10 #include <linux/kernel.h> 11 #include <linux/bug.h> 12 #include <linux/types.h> 13 #include <linux/init.h> 14 #include <linux/export.h> 15 #include <linux/slab.h> 16 #include <linux/errno.h> 17 #include <linux/iommu.h> 18 #include <linux/idr.h> 19 #include <linux/notifier.h> 20 #include <linux/err.h> 21 #include <linux/pci.h> 22 #include <linux/bitops.h> 23 #include <linux/property.h> 24 #include <linux/fsl/mc.h> 25 #include <linux/module.h> 26 #include <trace/events/iommu.h> 27 28 static struct kset *iommu_group_kset; 29 static DEFINE_IDA(iommu_group_ida); 30 31 static unsigned int iommu_def_domain_type __read_mostly; 32 static bool iommu_dma_strict __read_mostly = true; 33 static u32 iommu_cmd_line __read_mostly; 34 35 struct iommu_group { 36 struct kobject kobj; 37 struct kobject *devices_kobj; 38 struct list_head devices; 39 struct mutex mutex; 40 struct blocking_notifier_head notifier; 41 void *iommu_data; 42 void (*iommu_data_release)(void *iommu_data); 43 char *name; 44 int id; 45 struct iommu_domain *default_domain; 46 struct iommu_domain *domain; 47 struct list_head entry; 48 }; 49 50 struct group_device { 51 struct list_head list; 52 struct device *dev; 53 char *name; 54 }; 55 56 struct iommu_group_attribute { 57 struct attribute attr; 58 ssize_t (*show)(struct iommu_group *group, char *buf); 59 ssize_t (*store)(struct iommu_group *group, 60 const char *buf, size_t count); 61 }; 62 63 static const char * const iommu_group_resv_type_string[] = { 64 [IOMMU_RESV_DIRECT] = "direct", 65 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable", 66 [IOMMU_RESV_RESERVED] = "reserved", 67 [IOMMU_RESV_MSI] = "msi", 68 [IOMMU_RESV_SW_MSI] = "msi", 69 }; 70 71 #define IOMMU_CMD_LINE_DMA_API BIT(0) 72 #define IOMMU_CMD_LINE_STRICT BIT(1) 73 74 static int iommu_alloc_default_domain(struct iommu_group *group, 75 struct device *dev); 76 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, 77 unsigned type); 78 static int __iommu_attach_device(struct iommu_domain *domain, 79 struct device *dev); 80 static int __iommu_attach_group(struct iommu_domain *domain, 81 struct iommu_group *group); 82 static void __iommu_detach_group(struct iommu_domain *domain, 83 struct iommu_group *group); 84 static int iommu_create_device_direct_mappings(struct iommu_group *group, 85 struct device *dev); 86 static struct iommu_group *iommu_group_get_for_dev(struct device *dev); 87 static ssize_t iommu_group_store_type(struct iommu_group *group, 88 const char *buf, size_t count); 89 90 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 91 struct iommu_group_attribute iommu_group_attr_##_name = \ 92 __ATTR(_name, _mode, _show, _store) 93 94 #define to_iommu_group_attr(_attr) \ 95 container_of(_attr, struct iommu_group_attribute, attr) 96 #define to_iommu_group(_kobj) \ 97 container_of(_kobj, struct iommu_group, kobj) 98 99 static LIST_HEAD(iommu_device_list); 100 static DEFINE_SPINLOCK(iommu_device_lock); 101 102 /* 103 * Use a function instead of an array here because the domain-type is a 104 * bit-field, so an array would waste memory. 
105 */ 106 static const char *iommu_domain_type_str(unsigned int t) 107 { 108 switch (t) { 109 case IOMMU_DOMAIN_BLOCKED: 110 return "Blocked"; 111 case IOMMU_DOMAIN_IDENTITY: 112 return "Passthrough"; 113 case IOMMU_DOMAIN_UNMANAGED: 114 return "Unmanaged"; 115 case IOMMU_DOMAIN_DMA: 116 return "Translated"; 117 default: 118 return "Unknown"; 119 } 120 } 121 122 static int __init iommu_subsys_init(void) 123 { 124 if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) { 125 if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH)) 126 iommu_set_default_passthrough(false); 127 else 128 iommu_set_default_translated(false); 129 130 if (iommu_default_passthrough() && mem_encrypt_active()) { 131 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n"); 132 iommu_set_default_translated(false); 133 } 134 } 135 136 pr_info("Default domain type: %s %s\n", 137 iommu_domain_type_str(iommu_def_domain_type), 138 (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ? 139 "(set via kernel command line)" : ""); 140 141 return 0; 142 } 143 subsys_initcall(iommu_subsys_init); 144 145 /** 146 * iommu_device_register() - Register an IOMMU hardware instance 147 * @iommu: IOMMU handle for the instance 148 * @ops: IOMMU ops to associate with the instance 149 * @hwdev: (optional) actual instance device, used for fwnode lookup 150 * 151 * Return: 0 on success, or an error. 152 */ 153 int iommu_device_register(struct iommu_device *iommu, 154 const struct iommu_ops *ops, struct device *hwdev) 155 { 156 /* We need to be able to take module references appropriately */ 157 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) 158 return -EINVAL; 159 160 iommu->ops = ops; 161 if (hwdev) 162 iommu->fwnode = hwdev->fwnode; 163 164 spin_lock(&iommu_device_lock); 165 list_add_tail(&iommu->list, &iommu_device_list); 166 spin_unlock(&iommu_device_lock); 167 return 0; 168 } 169 EXPORT_SYMBOL_GPL(iommu_device_register); 170 171 void iommu_device_unregister(struct iommu_device *iommu) 172 { 173 spin_lock(&iommu_device_lock); 174 list_del(&iommu->list); 175 spin_unlock(&iommu_device_lock); 176 } 177 EXPORT_SYMBOL_GPL(iommu_device_unregister); 178 179 static struct dev_iommu *dev_iommu_get(struct device *dev) 180 { 181 struct dev_iommu *param = dev->iommu; 182 183 if (param) 184 return param; 185 186 param = kzalloc(sizeof(*param), GFP_KERNEL); 187 if (!param) 188 return NULL; 189 190 mutex_init(¶m->lock); 191 dev->iommu = param; 192 return param; 193 } 194 195 static void dev_iommu_free(struct device *dev) 196 { 197 iommu_fwspec_free(dev); 198 kfree(dev->iommu); 199 dev->iommu = NULL; 200 } 201 202 static int __iommu_probe_device(struct device *dev, struct list_head *group_list) 203 { 204 const struct iommu_ops *ops = dev->bus->iommu_ops; 205 struct iommu_device *iommu_dev; 206 struct iommu_group *group; 207 int ret; 208 209 if (!ops) 210 return -ENODEV; 211 212 if (!dev_iommu_get(dev)) 213 return -ENOMEM; 214 215 if (!try_module_get(ops->owner)) { 216 ret = -EINVAL; 217 goto err_free; 218 } 219 220 iommu_dev = ops->probe_device(dev); 221 if (IS_ERR(iommu_dev)) { 222 ret = PTR_ERR(iommu_dev); 223 goto out_module_put; 224 } 225 226 dev->iommu->iommu_dev = iommu_dev; 227 228 group = iommu_group_get_for_dev(dev); 229 if (IS_ERR(group)) { 230 ret = PTR_ERR(group); 231 goto out_release; 232 } 233 iommu_group_put(group); 234 235 if (group_list && !group->default_domain && list_empty(&group->entry)) 236 list_add_tail(&group->entry, group_list); 237 238 iommu_device_link(iommu_dev, dev); 239 240 return 0; 241 242 out_release: 243 
ops->release_device(dev); 244 245 out_module_put: 246 module_put(ops->owner); 247 248 err_free: 249 dev_iommu_free(dev); 250 251 return ret; 252 } 253 254 int iommu_probe_device(struct device *dev) 255 { 256 const struct iommu_ops *ops = dev->bus->iommu_ops; 257 struct iommu_group *group; 258 int ret; 259 260 ret = __iommu_probe_device(dev, NULL); 261 if (ret) 262 goto err_out; 263 264 group = iommu_group_get(dev); 265 if (!group) { 266 ret = -ENODEV; 267 goto err_release; 268 } 269 270 /* 271 * Try to allocate a default domain - needs support from the 272 * IOMMU driver. There are still some drivers which don't 273 * support default domains, so the return value is not yet 274 * checked. 275 */ 276 iommu_alloc_default_domain(group, dev); 277 278 if (group->default_domain) { 279 ret = __iommu_attach_device(group->default_domain, dev); 280 if (ret) { 281 iommu_group_put(group); 282 goto err_release; 283 } 284 } 285 286 iommu_create_device_direct_mappings(group, dev); 287 288 iommu_group_put(group); 289 290 if (ops->probe_finalize) 291 ops->probe_finalize(dev); 292 293 return 0; 294 295 err_release: 296 iommu_release_device(dev); 297 298 err_out: 299 return ret; 300 301 } 302 303 void iommu_release_device(struct device *dev) 304 { 305 const struct iommu_ops *ops = dev->bus->iommu_ops; 306 307 if (!dev->iommu) 308 return; 309 310 iommu_device_unlink(dev->iommu->iommu_dev, dev); 311 312 ops->release_device(dev); 313 314 iommu_group_remove_device(dev); 315 module_put(ops->owner); 316 dev_iommu_free(dev); 317 } 318 319 static int __init iommu_set_def_domain_type(char *str) 320 { 321 bool pt; 322 int ret; 323 324 ret = kstrtobool(str, &pt); 325 if (ret) 326 return ret; 327 328 if (pt) 329 iommu_set_default_passthrough(true); 330 else 331 iommu_set_default_translated(true); 332 333 return 0; 334 } 335 early_param("iommu.passthrough", iommu_set_def_domain_type); 336 337 static int __init iommu_dma_setup(char *str) 338 { 339 int ret = kstrtobool(str, &iommu_dma_strict); 340 341 if (!ret) 342 iommu_cmd_line |= IOMMU_CMD_LINE_STRICT; 343 return ret; 344 } 345 early_param("iommu.strict", iommu_dma_setup); 346 347 void iommu_set_dma_strict(bool strict) 348 { 349 if (strict || !(iommu_cmd_line & IOMMU_CMD_LINE_STRICT)) 350 iommu_dma_strict = strict; 351 } 352 353 bool iommu_get_dma_strict(struct iommu_domain *domain) 354 { 355 /* only allow lazy flushing for DMA domains */ 356 if (domain->type == IOMMU_DOMAIN_DMA) 357 return iommu_dma_strict; 358 return true; 359 } 360 EXPORT_SYMBOL_GPL(iommu_get_dma_strict); 361 362 static ssize_t iommu_group_attr_show(struct kobject *kobj, 363 struct attribute *__attr, char *buf) 364 { 365 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 366 struct iommu_group *group = to_iommu_group(kobj); 367 ssize_t ret = -EIO; 368 369 if (attr->show) 370 ret = attr->show(group, buf); 371 return ret; 372 } 373 374 static ssize_t iommu_group_attr_store(struct kobject *kobj, 375 struct attribute *__attr, 376 const char *buf, size_t count) 377 { 378 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 379 struct iommu_group *group = to_iommu_group(kobj); 380 ssize_t ret = -EIO; 381 382 if (attr->store) 383 ret = attr->store(group, buf, count); 384 return ret; 385 } 386 387 static const struct sysfs_ops iommu_group_sysfs_ops = { 388 .show = iommu_group_attr_show, 389 .store = iommu_group_attr_store, 390 }; 391 392 static int iommu_group_create_file(struct iommu_group *group, 393 struct iommu_group_attribute *attr) 394 { 395 return 
sysfs_create_file(&group->kobj, &attr->attr); 396 } 397 398 static void iommu_group_remove_file(struct iommu_group *group, 399 struct iommu_group_attribute *attr) 400 { 401 sysfs_remove_file(&group->kobj, &attr->attr); 402 } 403 404 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) 405 { 406 return sprintf(buf, "%s\n", group->name); 407 } 408 409 /** 410 * iommu_insert_resv_region - Insert a new region in the 411 * list of reserved regions. 412 * @new: new region to insert 413 * @regions: list of regions 414 * 415 * Elements are sorted by start address and overlapping segments 416 * of the same type are merged. 417 */ 418 static int iommu_insert_resv_region(struct iommu_resv_region *new, 419 struct list_head *regions) 420 { 421 struct iommu_resv_region *iter, *tmp, *nr, *top; 422 LIST_HEAD(stack); 423 424 nr = iommu_alloc_resv_region(new->start, new->length, 425 new->prot, new->type); 426 if (!nr) 427 return -ENOMEM; 428 429 /* First add the new element based on start address sorting */ 430 list_for_each_entry(iter, regions, list) { 431 if (nr->start < iter->start || 432 (nr->start == iter->start && nr->type <= iter->type)) 433 break; 434 } 435 list_add_tail(&nr->list, &iter->list); 436 437 /* Merge overlapping segments of type nr->type in @regions, if any */ 438 list_for_each_entry_safe(iter, tmp, regions, list) { 439 phys_addr_t top_end, iter_end = iter->start + iter->length - 1; 440 441 /* no merge needed on elements of different types than @new */ 442 if (iter->type != new->type) { 443 list_move_tail(&iter->list, &stack); 444 continue; 445 } 446 447 /* look for the last stack element of same type as @iter */ 448 list_for_each_entry_reverse(top, &stack, list) 449 if (top->type == iter->type) 450 goto check_overlap; 451 452 list_move_tail(&iter->list, &stack); 453 continue; 454 455 check_overlap: 456 top_end = top->start + top->length - 1; 457 458 if (iter->start > top_end + 1) { 459 list_move_tail(&iter->list, &stack); 460 } else { 461 top->length = max(top_end, iter_end) - top->start + 1; 462 list_del(&iter->list); 463 kfree(iter); 464 } 465 } 466 list_splice(&stack, regions); 467 return 0; 468 } 469 470 static int 471 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, 472 struct list_head *group_resv_regions) 473 { 474 struct iommu_resv_region *entry; 475 int ret = 0; 476 477 list_for_each_entry(entry, dev_resv_regions, list) { 478 ret = iommu_insert_resv_region(entry, group_resv_regions); 479 if (ret) 480 break; 481 } 482 return ret; 483 } 484 485 int iommu_get_group_resv_regions(struct iommu_group *group, 486 struct list_head *head) 487 { 488 struct group_device *device; 489 int ret = 0; 490 491 mutex_lock(&group->mutex); 492 list_for_each_entry(device, &group->devices, list) { 493 struct list_head dev_resv_regions; 494 495 INIT_LIST_HEAD(&dev_resv_regions); 496 iommu_get_resv_regions(device->dev, &dev_resv_regions); 497 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); 498 iommu_put_resv_regions(device->dev, &dev_resv_regions); 499 if (ret) 500 break; 501 } 502 mutex_unlock(&group->mutex); 503 return ret; 504 } 505 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); 506 507 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, 508 char *buf) 509 { 510 struct iommu_resv_region *region, *next; 511 struct list_head group_resv_regions; 512 char *str = buf; 513 514 INIT_LIST_HEAD(&group_resv_regions); 515 iommu_get_group_resv_regions(group, &group_resv_regions); 516 517 list_for_each_entry_safe(region, next, 
&group_resv_regions, list) { 518 str += sprintf(str, "0x%016llx 0x%016llx %s\n", 519 (long long int)region->start, 520 (long long int)(region->start + 521 region->length - 1), 522 iommu_group_resv_type_string[region->type]); 523 kfree(region); 524 } 525 526 return (str - buf); 527 } 528 529 static ssize_t iommu_group_show_type(struct iommu_group *group, 530 char *buf) 531 { 532 char *type = "unknown\n"; 533 534 mutex_lock(&group->mutex); 535 if (group->default_domain) { 536 switch (group->default_domain->type) { 537 case IOMMU_DOMAIN_BLOCKED: 538 type = "blocked\n"; 539 break; 540 case IOMMU_DOMAIN_IDENTITY: 541 type = "identity\n"; 542 break; 543 case IOMMU_DOMAIN_UNMANAGED: 544 type = "unmanaged\n"; 545 break; 546 case IOMMU_DOMAIN_DMA: 547 type = "DMA\n"; 548 break; 549 } 550 } 551 mutex_unlock(&group->mutex); 552 strcpy(buf, type); 553 554 return strlen(type); 555 } 556 557 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); 558 559 static IOMMU_GROUP_ATTR(reserved_regions, 0444, 560 iommu_group_show_resv_regions, NULL); 561 562 static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type, 563 iommu_group_store_type); 564 565 static void iommu_group_release(struct kobject *kobj) 566 { 567 struct iommu_group *group = to_iommu_group(kobj); 568 569 pr_debug("Releasing group %d\n", group->id); 570 571 if (group->iommu_data_release) 572 group->iommu_data_release(group->iommu_data); 573 574 ida_simple_remove(&iommu_group_ida, group->id); 575 576 if (group->default_domain) 577 iommu_domain_free(group->default_domain); 578 579 kfree(group->name); 580 kfree(group); 581 } 582 583 static struct kobj_type iommu_group_ktype = { 584 .sysfs_ops = &iommu_group_sysfs_ops, 585 .release = iommu_group_release, 586 }; 587 588 /** 589 * iommu_group_alloc - Allocate a new group 590 * 591 * This function is called by an iommu driver to allocate a new iommu 592 * group. The iommu group represents the minimum granularity of the iommu. 593 * Upon successful return, the caller holds a reference to the supplied 594 * group in order to hold the group until devices are added. Use 595 * iommu_group_put() to release this extra reference count, allowing the 596 * group to be automatically reclaimed once it has no devices or external 597 * references. 598 */ 599 struct iommu_group *iommu_group_alloc(void) 600 { 601 struct iommu_group *group; 602 int ret; 603 604 group = kzalloc(sizeof(*group), GFP_KERNEL); 605 if (!group) 606 return ERR_PTR(-ENOMEM); 607 608 group->kobj.kset = iommu_group_kset; 609 mutex_init(&group->mutex); 610 INIT_LIST_HEAD(&group->devices); 611 INIT_LIST_HEAD(&group->entry); 612 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); 613 614 ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL); 615 if (ret < 0) { 616 kfree(group); 617 return ERR_PTR(ret); 618 } 619 group->id = ret; 620 621 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, 622 NULL, "%d", group->id); 623 if (ret) { 624 ida_simple_remove(&iommu_group_ida, group->id); 625 kobject_put(&group->kobj); 626 return ERR_PTR(ret); 627 } 628 629 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); 630 if (!group->devices_kobj) { 631 kobject_put(&group->kobj); /* triggers .release & free */ 632 return ERR_PTR(-ENOMEM); 633 } 634 635 /* 636 * The devices_kobj holds a reference on the group kobject, so 637 * as long as that exists so will the group. We can therefore 638 * use the devices_kobj for reference counting. 
639 */ 640 kobject_put(&group->kobj); 641 642 ret = iommu_group_create_file(group, 643 &iommu_group_attr_reserved_regions); 644 if (ret) 645 return ERR_PTR(ret); 646 647 ret = iommu_group_create_file(group, &iommu_group_attr_type); 648 if (ret) 649 return ERR_PTR(ret); 650 651 pr_debug("Allocated group %d\n", group->id); 652 653 return group; 654 } 655 EXPORT_SYMBOL_GPL(iommu_group_alloc); 656 657 struct iommu_group *iommu_group_get_by_id(int id) 658 { 659 struct kobject *group_kobj; 660 struct iommu_group *group; 661 const char *name; 662 663 if (!iommu_group_kset) 664 return NULL; 665 666 name = kasprintf(GFP_KERNEL, "%d", id); 667 if (!name) 668 return NULL; 669 670 group_kobj = kset_find_obj(iommu_group_kset, name); 671 kfree(name); 672 673 if (!group_kobj) 674 return NULL; 675 676 group = container_of(group_kobj, struct iommu_group, kobj); 677 BUG_ON(group->id != id); 678 679 kobject_get(group->devices_kobj); 680 kobject_put(&group->kobj); 681 682 return group; 683 } 684 EXPORT_SYMBOL_GPL(iommu_group_get_by_id); 685 686 /** 687 * iommu_group_get_iommudata - retrieve iommu_data registered for a group 688 * @group: the group 689 * 690 * iommu drivers can store data in the group for use when doing iommu 691 * operations. This function provides a way to retrieve it. Caller 692 * should hold a group reference. 693 */ 694 void *iommu_group_get_iommudata(struct iommu_group *group) 695 { 696 return group->iommu_data; 697 } 698 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata); 699 700 /** 701 * iommu_group_set_iommudata - set iommu_data for a group 702 * @group: the group 703 * @iommu_data: new data 704 * @release: release function for iommu_data 705 * 706 * iommu drivers can store data in the group for use when doing iommu 707 * operations. This function provides a way to set the data after 708 * the group has been allocated. Caller should hold a group reference. 709 */ 710 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, 711 void (*release)(void *iommu_data)) 712 { 713 group->iommu_data = iommu_data; 714 group->iommu_data_release = release; 715 } 716 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata); 717 718 /** 719 * iommu_group_set_name - set name for a group 720 * @group: the group 721 * @name: name 722 * 723 * Allow iommu driver to set a name for a group. When set it will 724 * appear in a name attribute file under the group in sysfs. 
725 */ 726 int iommu_group_set_name(struct iommu_group *group, const char *name) 727 { 728 int ret; 729 730 if (group->name) { 731 iommu_group_remove_file(group, &iommu_group_attr_name); 732 kfree(group->name); 733 group->name = NULL; 734 if (!name) 735 return 0; 736 } 737 738 group->name = kstrdup(name, GFP_KERNEL); 739 if (!group->name) 740 return -ENOMEM; 741 742 ret = iommu_group_create_file(group, &iommu_group_attr_name); 743 if (ret) { 744 kfree(group->name); 745 group->name = NULL; 746 return ret; 747 } 748 749 return 0; 750 } 751 EXPORT_SYMBOL_GPL(iommu_group_set_name); 752 753 static int iommu_create_device_direct_mappings(struct iommu_group *group, 754 struct device *dev) 755 { 756 struct iommu_domain *domain = group->default_domain; 757 struct iommu_resv_region *entry; 758 struct list_head mappings; 759 unsigned long pg_size; 760 int ret = 0; 761 762 if (!domain || domain->type != IOMMU_DOMAIN_DMA) 763 return 0; 764 765 BUG_ON(!domain->pgsize_bitmap); 766 767 pg_size = 1UL << __ffs(domain->pgsize_bitmap); 768 INIT_LIST_HEAD(&mappings); 769 770 iommu_get_resv_regions(dev, &mappings); 771 772 /* We need to consider overlapping regions for different devices */ 773 list_for_each_entry(entry, &mappings, list) { 774 dma_addr_t start, end, addr; 775 size_t map_size = 0; 776 777 if (domain->ops->apply_resv_region) 778 domain->ops->apply_resv_region(dev, domain, entry); 779 780 start = ALIGN(entry->start, pg_size); 781 end = ALIGN(entry->start + entry->length, pg_size); 782 783 if (entry->type != IOMMU_RESV_DIRECT && 784 entry->type != IOMMU_RESV_DIRECT_RELAXABLE) 785 continue; 786 787 for (addr = start; addr <= end; addr += pg_size) { 788 phys_addr_t phys_addr; 789 790 if (addr == end) 791 goto map_end; 792 793 phys_addr = iommu_iova_to_phys(domain, addr); 794 if (!phys_addr) { 795 map_size += pg_size; 796 continue; 797 } 798 799 map_end: 800 if (map_size) { 801 ret = iommu_map(domain, addr - map_size, 802 addr - map_size, map_size, 803 entry->prot); 804 if (ret) 805 goto out; 806 map_size = 0; 807 } 808 } 809 810 } 811 812 iommu_flush_iotlb_all(domain); 813 814 out: 815 iommu_put_resv_regions(dev, &mappings); 816 817 return ret; 818 } 819 820 static bool iommu_is_attach_deferred(struct iommu_domain *domain, 821 struct device *dev) 822 { 823 if (domain->ops->is_attach_deferred) 824 return domain->ops->is_attach_deferred(domain, dev); 825 826 return false; 827 } 828 829 /** 830 * iommu_group_add_device - add a device to an iommu group 831 * @group: the group into which to add the device (reference should be held) 832 * @dev: the device 833 * 834 * This function is called by an iommu driver to add a device into a 835 * group. Adding a device increments the group reference count. 836 */ 837 int iommu_group_add_device(struct iommu_group *group, struct device *dev) 838 { 839 int ret, i = 0; 840 struct group_device *device; 841 842 device = kzalloc(sizeof(*device), GFP_KERNEL); 843 if (!device) 844 return -ENOMEM; 845 846 device->dev = dev; 847 848 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); 849 if (ret) 850 goto err_free_device; 851 852 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); 853 rename: 854 if (!device->name) { 855 ret = -ENOMEM; 856 goto err_remove_link; 857 } 858 859 ret = sysfs_create_link_nowarn(group->devices_kobj, 860 &dev->kobj, device->name); 861 if (ret) { 862 if (ret == -EEXIST && i >= 0) { 863 /* 864 * Account for the slim chance of collision 865 * and append an instance to the name. 
866 */ 867 kfree(device->name); 868 device->name = kasprintf(GFP_KERNEL, "%s.%d", 869 kobject_name(&dev->kobj), i++); 870 goto rename; 871 } 872 goto err_free_name; 873 } 874 875 kobject_get(group->devices_kobj); 876 877 dev->iommu_group = group; 878 879 mutex_lock(&group->mutex); 880 list_add_tail(&device->list, &group->devices); 881 if (group->domain && !iommu_is_attach_deferred(group->domain, dev)) 882 ret = __iommu_attach_device(group->domain, dev); 883 mutex_unlock(&group->mutex); 884 if (ret) 885 goto err_put_group; 886 887 /* Notify any listeners about change to group. */ 888 blocking_notifier_call_chain(&group->notifier, 889 IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev); 890 891 trace_add_device_to_group(group->id, dev); 892 893 dev_info(dev, "Adding to iommu group %d\n", group->id); 894 895 return 0; 896 897 err_put_group: 898 mutex_lock(&group->mutex); 899 list_del(&device->list); 900 mutex_unlock(&group->mutex); 901 dev->iommu_group = NULL; 902 kobject_put(group->devices_kobj); 903 sysfs_remove_link(group->devices_kobj, device->name); 904 err_free_name: 905 kfree(device->name); 906 err_remove_link: 907 sysfs_remove_link(&dev->kobj, "iommu_group"); 908 err_free_device: 909 kfree(device); 910 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); 911 return ret; 912 } 913 EXPORT_SYMBOL_GPL(iommu_group_add_device); 914 915 /** 916 * iommu_group_remove_device - remove a device from it's current group 917 * @dev: device to be removed 918 * 919 * This function is called by an iommu driver to remove the device from 920 * it's current group. This decrements the iommu group reference count. 921 */ 922 void iommu_group_remove_device(struct device *dev) 923 { 924 struct iommu_group *group = dev->iommu_group; 925 struct group_device *tmp_device, *device = NULL; 926 927 if (!group) 928 return; 929 930 dev_info(dev, "Removing from iommu group %d\n", group->id); 931 932 /* Pre-notify listeners that a device is being removed. */ 933 blocking_notifier_call_chain(&group->notifier, 934 IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev); 935 936 mutex_lock(&group->mutex); 937 list_for_each_entry(tmp_device, &group->devices, list) { 938 if (tmp_device->dev == dev) { 939 device = tmp_device; 940 list_del(&device->list); 941 break; 942 } 943 } 944 mutex_unlock(&group->mutex); 945 946 if (!device) 947 return; 948 949 sysfs_remove_link(group->devices_kobj, device->name); 950 sysfs_remove_link(&dev->kobj, "iommu_group"); 951 952 trace_remove_device_from_group(group->id, dev); 953 954 kfree(device->name); 955 kfree(device); 956 dev->iommu_group = NULL; 957 kobject_put(group->devices_kobj); 958 } 959 EXPORT_SYMBOL_GPL(iommu_group_remove_device); 960 961 static int iommu_group_device_count(struct iommu_group *group) 962 { 963 struct group_device *entry; 964 int ret = 0; 965 966 list_for_each_entry(entry, &group->devices, list) 967 ret++; 968 969 return ret; 970 } 971 972 /** 973 * iommu_group_for_each_dev - iterate over each device in the group 974 * @group: the group 975 * @data: caller opaque data to be passed to callback function 976 * @fn: caller supplied callback function 977 * 978 * This function is called by group users to iterate over group devices. 979 * Callers should hold a reference count to the group during callback. 980 * The group->mutex is held across callbacks, which will block calls to 981 * iommu_group_add/remove_device. 
982 */ 983 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, 984 int (*fn)(struct device *, void *)) 985 { 986 struct group_device *device; 987 int ret = 0; 988 989 list_for_each_entry(device, &group->devices, list) { 990 ret = fn(device->dev, data); 991 if (ret) 992 break; 993 } 994 return ret; 995 } 996 997 998 int iommu_group_for_each_dev(struct iommu_group *group, void *data, 999 int (*fn)(struct device *, void *)) 1000 { 1001 int ret; 1002 1003 mutex_lock(&group->mutex); 1004 ret = __iommu_group_for_each_dev(group, data, fn); 1005 mutex_unlock(&group->mutex); 1006 1007 return ret; 1008 } 1009 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev); 1010 1011 /** 1012 * iommu_group_get - Return the group for a device and increment reference 1013 * @dev: get the group that this device belongs to 1014 * 1015 * This function is called by iommu drivers and users to get the group 1016 * for the specified device. If found, the group is returned and the group 1017 * reference in incremented, else NULL. 1018 */ 1019 struct iommu_group *iommu_group_get(struct device *dev) 1020 { 1021 struct iommu_group *group = dev->iommu_group; 1022 1023 if (group) 1024 kobject_get(group->devices_kobj); 1025 1026 return group; 1027 } 1028 EXPORT_SYMBOL_GPL(iommu_group_get); 1029 1030 /** 1031 * iommu_group_ref_get - Increment reference on a group 1032 * @group: the group to use, must not be NULL 1033 * 1034 * This function is called by iommu drivers to take additional references on an 1035 * existing group. Returns the given group for convenience. 1036 */ 1037 struct iommu_group *iommu_group_ref_get(struct iommu_group *group) 1038 { 1039 kobject_get(group->devices_kobj); 1040 return group; 1041 } 1042 EXPORT_SYMBOL_GPL(iommu_group_ref_get); 1043 1044 /** 1045 * iommu_group_put - Decrement group reference 1046 * @group: the group to use 1047 * 1048 * This function is called by iommu drivers and users to release the 1049 * iommu group. Once the reference count is zero, the group is released. 1050 */ 1051 void iommu_group_put(struct iommu_group *group) 1052 { 1053 if (group) 1054 kobject_put(group->devices_kobj); 1055 } 1056 EXPORT_SYMBOL_GPL(iommu_group_put); 1057 1058 /** 1059 * iommu_group_register_notifier - Register a notifier for group changes 1060 * @group: the group to watch 1061 * @nb: notifier block to signal 1062 * 1063 * This function allows iommu group users to track changes in a group. 1064 * See include/linux/iommu.h for actions sent via this notifier. Caller 1065 * should hold a reference to the group throughout notifier registration. 1066 */ 1067 int iommu_group_register_notifier(struct iommu_group *group, 1068 struct notifier_block *nb) 1069 { 1070 return blocking_notifier_chain_register(&group->notifier, nb); 1071 } 1072 EXPORT_SYMBOL_GPL(iommu_group_register_notifier); 1073 1074 /** 1075 * iommu_group_unregister_notifier - Unregister a notifier 1076 * @group: the group to watch 1077 * @nb: notifier block to signal 1078 * 1079 * Unregister a previously registered group notifier block. 
1080 */ 1081 int iommu_group_unregister_notifier(struct iommu_group *group, 1082 struct notifier_block *nb) 1083 { 1084 return blocking_notifier_chain_unregister(&group->notifier, nb); 1085 } 1086 EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier); 1087 1088 /** 1089 * iommu_register_device_fault_handler() - Register a device fault handler 1090 * @dev: the device 1091 * @handler: the fault handler 1092 * @data: private data passed as argument to the handler 1093 * 1094 * When an IOMMU fault event is received, this handler gets called with the 1095 * fault event and data as argument. The handler should return 0 on success. If 1096 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also 1097 * complete the fault by calling iommu_page_response() with one of the following 1098 * response code: 1099 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation 1100 * - IOMMU_PAGE_RESP_INVALID: terminate the fault 1101 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting 1102 * page faults if possible. 1103 * 1104 * Return 0 if the fault handler was installed successfully, or an error. 1105 */ 1106 int iommu_register_device_fault_handler(struct device *dev, 1107 iommu_dev_fault_handler_t handler, 1108 void *data) 1109 { 1110 struct dev_iommu *param = dev->iommu; 1111 int ret = 0; 1112 1113 if (!param) 1114 return -EINVAL; 1115 1116 mutex_lock(¶m->lock); 1117 /* Only allow one fault handler registered for each device */ 1118 if (param->fault_param) { 1119 ret = -EBUSY; 1120 goto done_unlock; 1121 } 1122 1123 get_device(dev); 1124 param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL); 1125 if (!param->fault_param) { 1126 put_device(dev); 1127 ret = -ENOMEM; 1128 goto done_unlock; 1129 } 1130 param->fault_param->handler = handler; 1131 param->fault_param->data = data; 1132 mutex_init(¶m->fault_param->lock); 1133 INIT_LIST_HEAD(¶m->fault_param->faults); 1134 1135 done_unlock: 1136 mutex_unlock(¶m->lock); 1137 1138 return ret; 1139 } 1140 EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler); 1141 1142 /** 1143 * iommu_unregister_device_fault_handler() - Unregister the device fault handler 1144 * @dev: the device 1145 * 1146 * Remove the device fault handler installed with 1147 * iommu_register_device_fault_handler(). 1148 * 1149 * Return 0 on success, or an error. 1150 */ 1151 int iommu_unregister_device_fault_handler(struct device *dev) 1152 { 1153 struct dev_iommu *param = dev->iommu; 1154 int ret = 0; 1155 1156 if (!param) 1157 return -EINVAL; 1158 1159 mutex_lock(¶m->lock); 1160 1161 if (!param->fault_param) 1162 goto unlock; 1163 1164 /* we cannot unregister handler if there are pending faults */ 1165 if (!list_empty(¶m->fault_param->faults)) { 1166 ret = -EBUSY; 1167 goto unlock; 1168 } 1169 1170 kfree(param->fault_param); 1171 param->fault_param = NULL; 1172 put_device(dev); 1173 unlock: 1174 mutex_unlock(¶m->lock); 1175 1176 return ret; 1177 } 1178 EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler); 1179 1180 /** 1181 * iommu_report_device_fault() - Report fault event to device driver 1182 * @dev: the device 1183 * @evt: fault event data 1184 * 1185 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ 1186 * handler. When this function fails and the fault is recoverable, it is the 1187 * caller's responsibility to complete the fault. 1188 * 1189 * Return 0 on success, or an error. 
1190 */ 1191 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) 1192 { 1193 struct dev_iommu *param = dev->iommu; 1194 struct iommu_fault_event *evt_pending = NULL; 1195 struct iommu_fault_param *fparam; 1196 int ret = 0; 1197 1198 if (!param || !evt) 1199 return -EINVAL; 1200 1201 /* we only report device fault if there is a handler registered */ 1202 mutex_lock(¶m->lock); 1203 fparam = param->fault_param; 1204 if (!fparam || !fparam->handler) { 1205 ret = -EINVAL; 1206 goto done_unlock; 1207 } 1208 1209 if (evt->fault.type == IOMMU_FAULT_PAGE_REQ && 1210 (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) { 1211 evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event), 1212 GFP_KERNEL); 1213 if (!evt_pending) { 1214 ret = -ENOMEM; 1215 goto done_unlock; 1216 } 1217 mutex_lock(&fparam->lock); 1218 list_add_tail(&evt_pending->list, &fparam->faults); 1219 mutex_unlock(&fparam->lock); 1220 } 1221 1222 ret = fparam->handler(&evt->fault, fparam->data); 1223 if (ret && evt_pending) { 1224 mutex_lock(&fparam->lock); 1225 list_del(&evt_pending->list); 1226 mutex_unlock(&fparam->lock); 1227 kfree(evt_pending); 1228 } 1229 done_unlock: 1230 mutex_unlock(¶m->lock); 1231 return ret; 1232 } 1233 EXPORT_SYMBOL_GPL(iommu_report_device_fault); 1234 1235 int iommu_page_response(struct device *dev, 1236 struct iommu_page_response *msg) 1237 { 1238 bool needs_pasid; 1239 int ret = -EINVAL; 1240 struct iommu_fault_event *evt; 1241 struct iommu_fault_page_request *prm; 1242 struct dev_iommu *param = dev->iommu; 1243 bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID; 1244 struct iommu_domain *domain = iommu_get_domain_for_dev(dev); 1245 1246 if (!domain || !domain->ops->page_response) 1247 return -ENODEV; 1248 1249 if (!param || !param->fault_param) 1250 return -EINVAL; 1251 1252 if (msg->version != IOMMU_PAGE_RESP_VERSION_1 || 1253 msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID) 1254 return -EINVAL; 1255 1256 /* Only send response if there is a fault report pending */ 1257 mutex_lock(¶m->fault_param->lock); 1258 if (list_empty(¶m->fault_param->faults)) { 1259 dev_warn_ratelimited(dev, "no pending PRQ, drop response\n"); 1260 goto done_unlock; 1261 } 1262 /* 1263 * Check if we have a matching page request pending to respond, 1264 * otherwise return -EINVAL 1265 */ 1266 list_for_each_entry(evt, ¶m->fault_param->faults, list) { 1267 prm = &evt->fault.prm; 1268 if (prm->grpid != msg->grpid) 1269 continue; 1270 1271 /* 1272 * If the PASID is required, the corresponding request is 1273 * matched using the group ID, the PASID valid bit and the PASID 1274 * value. Otherwise only the group ID matches request and 1275 * response. 1276 */ 1277 needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID; 1278 if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid)) 1279 continue; 1280 1281 if (!needs_pasid && has_pasid) { 1282 /* No big deal, just clear it. */ 1283 msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID; 1284 msg->pasid = 0; 1285 } 1286 1287 ret = domain->ops->page_response(dev, evt, msg); 1288 list_del(&evt->list); 1289 kfree(evt); 1290 break; 1291 } 1292 1293 done_unlock: 1294 mutex_unlock(¶m->fault_param->lock); 1295 return ret; 1296 } 1297 EXPORT_SYMBOL_GPL(iommu_page_response); 1298 1299 /** 1300 * iommu_group_id - Return ID for a group 1301 * @group: the group to ID 1302 * 1303 * Return the unique ID for the group matching the sysfs group number. 
1304 */ 1305 int iommu_group_id(struct iommu_group *group) 1306 { 1307 return group->id; 1308 } 1309 EXPORT_SYMBOL_GPL(iommu_group_id); 1310 1311 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1312 unsigned long *devfns); 1313 1314 /* 1315 * To consider a PCI device isolated, we require ACS to support Source 1316 * Validation, Request Redirection, Completer Redirection, and Upstream 1317 * Forwarding. This effectively means that devices cannot spoof their 1318 * requester ID, requests and completions cannot be redirected, and all 1319 * transactions are forwarded upstream, even as it passes through a 1320 * bridge where the target device is downstream. 1321 */ 1322 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) 1323 1324 /* 1325 * For multifunction devices which are not isolated from each other, find 1326 * all the other non-isolated functions and look for existing groups. For 1327 * each function, we also need to look for aliases to or from other devices 1328 * that may already have a group. 1329 */ 1330 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev, 1331 unsigned long *devfns) 1332 { 1333 struct pci_dev *tmp = NULL; 1334 struct iommu_group *group; 1335 1336 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) 1337 return NULL; 1338 1339 for_each_pci_dev(tmp) { 1340 if (tmp == pdev || tmp->bus != pdev->bus || 1341 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || 1342 pci_acs_enabled(tmp, REQ_ACS_FLAGS)) 1343 continue; 1344 1345 group = get_pci_alias_group(tmp, devfns); 1346 if (group) { 1347 pci_dev_put(tmp); 1348 return group; 1349 } 1350 } 1351 1352 return NULL; 1353 } 1354 1355 /* 1356 * Look for aliases to or from the given device for existing groups. DMA 1357 * aliases are only supported on the same bus, therefore the search 1358 * space is quite small (especially since we're really only looking at pcie 1359 * device, and therefore only expect multiple slots on the root complex or 1360 * downstream switch ports). It's conceivable though that a pair of 1361 * multifunction devices could have aliases between them that would cause a 1362 * loop. To prevent this, we use a bitmap to track where we've been. 1363 */ 1364 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1365 unsigned long *devfns) 1366 { 1367 struct pci_dev *tmp = NULL; 1368 struct iommu_group *group; 1369 1370 if (test_and_set_bit(pdev->devfn & 0xff, devfns)) 1371 return NULL; 1372 1373 group = iommu_group_get(&pdev->dev); 1374 if (group) 1375 return group; 1376 1377 for_each_pci_dev(tmp) { 1378 if (tmp == pdev || tmp->bus != pdev->bus) 1379 continue; 1380 1381 /* We alias them or they alias us */ 1382 if (pci_devs_are_dma_aliases(pdev, tmp)) { 1383 group = get_pci_alias_group(tmp, devfns); 1384 if (group) { 1385 pci_dev_put(tmp); 1386 return group; 1387 } 1388 1389 group = get_pci_function_alias_group(tmp, devfns); 1390 if (group) { 1391 pci_dev_put(tmp); 1392 return group; 1393 } 1394 } 1395 } 1396 1397 return NULL; 1398 } 1399 1400 struct group_for_pci_data { 1401 struct pci_dev *pdev; 1402 struct iommu_group *group; 1403 }; 1404 1405 /* 1406 * DMA alias iterator callback, return the last seen device. Stop and return 1407 * the IOMMU group if we find one along the way. 
1408 */ 1409 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) 1410 { 1411 struct group_for_pci_data *data = opaque; 1412 1413 data->pdev = pdev; 1414 data->group = iommu_group_get(&pdev->dev); 1415 1416 return data->group != NULL; 1417 } 1418 1419 /* 1420 * Generic device_group call-back function. It just allocates one 1421 * iommu-group per device. 1422 */ 1423 struct iommu_group *generic_device_group(struct device *dev) 1424 { 1425 return iommu_group_alloc(); 1426 } 1427 EXPORT_SYMBOL_GPL(generic_device_group); 1428 1429 /* 1430 * Use standard PCI bus topology, isolation features, and DMA alias quirks 1431 * to find or create an IOMMU group for a device. 1432 */ 1433 struct iommu_group *pci_device_group(struct device *dev) 1434 { 1435 struct pci_dev *pdev = to_pci_dev(dev); 1436 struct group_for_pci_data data; 1437 struct pci_bus *bus; 1438 struct iommu_group *group = NULL; 1439 u64 devfns[4] = { 0 }; 1440 1441 if (WARN_ON(!dev_is_pci(dev))) 1442 return ERR_PTR(-EINVAL); 1443 1444 /* 1445 * Find the upstream DMA alias for the device. A device must not 1446 * be aliased due to topology in order to have its own IOMMU group. 1447 * If we find an alias along the way that already belongs to a 1448 * group, use it. 1449 */ 1450 if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data)) 1451 return data.group; 1452 1453 pdev = data.pdev; 1454 1455 /* 1456 * Continue upstream from the point of minimum IOMMU granularity 1457 * due to aliases to the point where devices are protected from 1458 * peer-to-peer DMA by PCI ACS. Again, if we find an existing 1459 * group, use it. 1460 */ 1461 for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { 1462 if (!bus->self) 1463 continue; 1464 1465 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) 1466 break; 1467 1468 pdev = bus->self; 1469 1470 group = iommu_group_get(&pdev->dev); 1471 if (group) 1472 return group; 1473 } 1474 1475 /* 1476 * Look for existing groups on device aliases. If we alias another 1477 * device or another device aliases us, use the same group. 1478 */ 1479 group = get_pci_alias_group(pdev, (unsigned long *)devfns); 1480 if (group) 1481 return group; 1482 1483 /* 1484 * Look for existing groups on non-isolated functions on the same 1485 * slot and aliases of those funcions, if any. No need to clear 1486 * the search bitmap, the tested devfns are still valid. 
1487 */ 1488 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); 1489 if (group) 1490 return group; 1491 1492 /* No shared group found, allocate new */ 1493 return iommu_group_alloc(); 1494 } 1495 EXPORT_SYMBOL_GPL(pci_device_group); 1496 1497 /* Get the IOMMU group for device on fsl-mc bus */ 1498 struct iommu_group *fsl_mc_device_group(struct device *dev) 1499 { 1500 struct device *cont_dev = fsl_mc_cont_dev(dev); 1501 struct iommu_group *group; 1502 1503 group = iommu_group_get(cont_dev); 1504 if (!group) 1505 group = iommu_group_alloc(); 1506 return group; 1507 } 1508 EXPORT_SYMBOL_GPL(fsl_mc_device_group); 1509 1510 static int iommu_get_def_domain_type(struct device *dev) 1511 { 1512 const struct iommu_ops *ops = dev->bus->iommu_ops; 1513 1514 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) 1515 return IOMMU_DOMAIN_DMA; 1516 1517 if (ops->def_domain_type) 1518 return ops->def_domain_type(dev); 1519 1520 return 0; 1521 } 1522 1523 static int iommu_group_alloc_default_domain(struct bus_type *bus, 1524 struct iommu_group *group, 1525 unsigned int type) 1526 { 1527 struct iommu_domain *dom; 1528 1529 dom = __iommu_domain_alloc(bus, type); 1530 if (!dom && type != IOMMU_DOMAIN_DMA) { 1531 dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA); 1532 if (dom) 1533 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA", 1534 type, group->name); 1535 } 1536 1537 if (!dom) 1538 return -ENOMEM; 1539 1540 group->default_domain = dom; 1541 if (!group->domain) 1542 group->domain = dom; 1543 return 0; 1544 } 1545 1546 static int iommu_alloc_default_domain(struct iommu_group *group, 1547 struct device *dev) 1548 { 1549 unsigned int type; 1550 1551 if (group->default_domain) 1552 return 0; 1553 1554 type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type; 1555 1556 return iommu_group_alloc_default_domain(dev->bus, group, type); 1557 } 1558 1559 /** 1560 * iommu_group_get_for_dev - Find or create the IOMMU group for a device 1561 * @dev: target device 1562 * 1563 * This function is intended to be called by IOMMU drivers and extended to 1564 * support common, bus-defined algorithms when determining or creating the 1565 * IOMMU group for a device. On success, the caller will hold a reference 1566 * to the returned IOMMU group, which will already include the provided 1567 * device. The reference should be released with iommu_group_put(). 
1568 */ 1569 static struct iommu_group *iommu_group_get_for_dev(struct device *dev) 1570 { 1571 const struct iommu_ops *ops = dev->bus->iommu_ops; 1572 struct iommu_group *group; 1573 int ret; 1574 1575 group = iommu_group_get(dev); 1576 if (group) 1577 return group; 1578 1579 if (!ops) 1580 return ERR_PTR(-EINVAL); 1581 1582 group = ops->device_group(dev); 1583 if (WARN_ON_ONCE(group == NULL)) 1584 return ERR_PTR(-EINVAL); 1585 1586 if (IS_ERR(group)) 1587 return group; 1588 1589 ret = iommu_group_add_device(group, dev); 1590 if (ret) 1591 goto out_put_group; 1592 1593 return group; 1594 1595 out_put_group: 1596 iommu_group_put(group); 1597 1598 return ERR_PTR(ret); 1599 } 1600 1601 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) 1602 { 1603 return group->default_domain; 1604 } 1605 1606 static int probe_iommu_group(struct device *dev, void *data) 1607 { 1608 struct list_head *group_list = data; 1609 struct iommu_group *group; 1610 int ret; 1611 1612 /* Device is probed already if in a group */ 1613 group = iommu_group_get(dev); 1614 if (group) { 1615 iommu_group_put(group); 1616 return 0; 1617 } 1618 1619 ret = __iommu_probe_device(dev, group_list); 1620 if (ret == -ENODEV) 1621 ret = 0; 1622 1623 return ret; 1624 } 1625 1626 static int remove_iommu_group(struct device *dev, void *data) 1627 { 1628 iommu_release_device(dev); 1629 1630 return 0; 1631 } 1632 1633 static int iommu_bus_notifier(struct notifier_block *nb, 1634 unsigned long action, void *data) 1635 { 1636 unsigned long group_action = 0; 1637 struct device *dev = data; 1638 struct iommu_group *group; 1639 1640 /* 1641 * ADD/DEL call into iommu driver ops if provided, which may 1642 * result in ADD/DEL notifiers to group->notifier 1643 */ 1644 if (action == BUS_NOTIFY_ADD_DEVICE) { 1645 int ret; 1646 1647 ret = iommu_probe_device(dev); 1648 return (ret) ? 
NOTIFY_DONE : NOTIFY_OK; 1649 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) { 1650 iommu_release_device(dev); 1651 return NOTIFY_OK; 1652 } 1653 1654 /* 1655 * Remaining BUS_NOTIFYs get filtered and republished to the 1656 * group, if anyone is listening 1657 */ 1658 group = iommu_group_get(dev); 1659 if (!group) 1660 return 0; 1661 1662 switch (action) { 1663 case BUS_NOTIFY_BIND_DRIVER: 1664 group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER; 1665 break; 1666 case BUS_NOTIFY_BOUND_DRIVER: 1667 group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER; 1668 break; 1669 case BUS_NOTIFY_UNBIND_DRIVER: 1670 group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER; 1671 break; 1672 case BUS_NOTIFY_UNBOUND_DRIVER: 1673 group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER; 1674 break; 1675 } 1676 1677 if (group_action) 1678 blocking_notifier_call_chain(&group->notifier, 1679 group_action, dev); 1680 1681 iommu_group_put(group); 1682 return 0; 1683 } 1684 1685 struct __group_domain_type { 1686 struct device *dev; 1687 unsigned int type; 1688 }; 1689 1690 static int probe_get_default_domain_type(struct device *dev, void *data) 1691 { 1692 struct __group_domain_type *gtype = data; 1693 unsigned int type = iommu_get_def_domain_type(dev); 1694 1695 if (type) { 1696 if (gtype->type && gtype->type != type) { 1697 dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n", 1698 iommu_domain_type_str(type), 1699 dev_name(gtype->dev), 1700 iommu_domain_type_str(gtype->type)); 1701 gtype->type = 0; 1702 } 1703 1704 if (!gtype->dev) { 1705 gtype->dev = dev; 1706 gtype->type = type; 1707 } 1708 } 1709 1710 return 0; 1711 } 1712 1713 static void probe_alloc_default_domain(struct bus_type *bus, 1714 struct iommu_group *group) 1715 { 1716 struct __group_domain_type gtype; 1717 1718 memset(>ype, 0, sizeof(gtype)); 1719 1720 /* Ask for default domain requirements of all devices in the group */ 1721 __iommu_group_for_each_dev(group, >ype, 1722 probe_get_default_domain_type); 1723 1724 if (!gtype.type) 1725 gtype.type = iommu_def_domain_type; 1726 1727 iommu_group_alloc_default_domain(bus, group, gtype.type); 1728 1729 } 1730 1731 static int iommu_group_do_dma_attach(struct device *dev, void *data) 1732 { 1733 struct iommu_domain *domain = data; 1734 int ret = 0; 1735 1736 if (!iommu_is_attach_deferred(domain, dev)) 1737 ret = __iommu_attach_device(domain, dev); 1738 1739 return ret; 1740 } 1741 1742 static int __iommu_group_dma_attach(struct iommu_group *group) 1743 { 1744 return __iommu_group_for_each_dev(group, group->default_domain, 1745 iommu_group_do_dma_attach); 1746 } 1747 1748 static int iommu_group_do_probe_finalize(struct device *dev, void *data) 1749 { 1750 struct iommu_domain *domain = data; 1751 1752 if (domain->ops->probe_finalize) 1753 domain->ops->probe_finalize(dev); 1754 1755 return 0; 1756 } 1757 1758 static void __iommu_group_dma_finalize(struct iommu_group *group) 1759 { 1760 __iommu_group_for_each_dev(group, group->default_domain, 1761 iommu_group_do_probe_finalize); 1762 } 1763 1764 static int iommu_do_create_direct_mappings(struct device *dev, void *data) 1765 { 1766 struct iommu_group *group = data; 1767 1768 iommu_create_device_direct_mappings(group, dev); 1769 1770 return 0; 1771 } 1772 1773 static int iommu_group_create_direct_mappings(struct iommu_group *group) 1774 { 1775 return __iommu_group_for_each_dev(group, group, 1776 iommu_do_create_direct_mappings); 1777 } 1778 1779 int bus_iommu_probe(struct bus_type *bus) 1780 { 1781 struct iommu_group 
*group, *next; 1782 LIST_HEAD(group_list); 1783 int ret; 1784 1785 /* 1786 * This code-path does not allocate the default domain when 1787 * creating the iommu group, so do it after the groups are 1788 * created. 1789 */ 1790 ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group); 1791 if (ret) 1792 return ret; 1793 1794 list_for_each_entry_safe(group, next, &group_list, entry) { 1795 /* Remove item from the list */ 1796 list_del_init(&group->entry); 1797 1798 mutex_lock(&group->mutex); 1799 1800 /* Try to allocate default domain */ 1801 probe_alloc_default_domain(bus, group); 1802 1803 if (!group->default_domain) { 1804 mutex_unlock(&group->mutex); 1805 continue; 1806 } 1807 1808 iommu_group_create_direct_mappings(group); 1809 1810 ret = __iommu_group_dma_attach(group); 1811 1812 mutex_unlock(&group->mutex); 1813 1814 if (ret) 1815 break; 1816 1817 __iommu_group_dma_finalize(group); 1818 } 1819 1820 return ret; 1821 } 1822 1823 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) 1824 { 1825 struct notifier_block *nb; 1826 int err; 1827 1828 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); 1829 if (!nb) 1830 return -ENOMEM; 1831 1832 nb->notifier_call = iommu_bus_notifier; 1833 1834 err = bus_register_notifier(bus, nb); 1835 if (err) 1836 goto out_free; 1837 1838 err = bus_iommu_probe(bus); 1839 if (err) 1840 goto out_err; 1841 1842 1843 return 0; 1844 1845 out_err: 1846 /* Clean up */ 1847 bus_for_each_dev(bus, NULL, NULL, remove_iommu_group); 1848 bus_unregister_notifier(bus, nb); 1849 1850 out_free: 1851 kfree(nb); 1852 1853 return err; 1854 } 1855 1856 /** 1857 * bus_set_iommu - set iommu-callbacks for the bus 1858 * @bus: bus. 1859 * @ops: the callbacks provided by the iommu-driver 1860 * 1861 * This function is called by an iommu driver to set the iommu methods 1862 * used for a particular bus. Drivers for devices on that bus can use 1863 * the iommu-api after these ops are registered. 1864 * This special function is needed because IOMMUs are usually devices on 1865 * the bus itself, so the iommu drivers are not initialized when the bus 1866 * is set up. With this function the iommu-driver can set the iommu-ops 1867 * afterwards. 1868 */ 1869 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) 1870 { 1871 int err; 1872 1873 if (ops == NULL) { 1874 bus->iommu_ops = NULL; 1875 return 0; 1876 } 1877 1878 if (bus->iommu_ops != NULL) 1879 return -EBUSY; 1880 1881 bus->iommu_ops = ops; 1882 1883 /* Do IOMMU specific setup for this bus-type */ 1884 err = iommu_bus_init(bus, ops); 1885 if (err) 1886 bus->iommu_ops = NULL; 1887 1888 return err; 1889 } 1890 EXPORT_SYMBOL_GPL(bus_set_iommu); 1891 1892 bool iommu_present(struct bus_type *bus) 1893 { 1894 return bus->iommu_ops != NULL; 1895 } 1896 EXPORT_SYMBOL_GPL(iommu_present); 1897 1898 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) 1899 { 1900 if (!bus->iommu_ops || !bus->iommu_ops->capable) 1901 return false; 1902 1903 return bus->iommu_ops->capable(cap); 1904 } 1905 EXPORT_SYMBOL_GPL(iommu_capable); 1906 1907 /** 1908 * iommu_set_fault_handler() - set a fault handler for an iommu domain 1909 * @domain: iommu domain 1910 * @handler: fault handler 1911 * @token: user data, will be passed back to the fault handler 1912 * 1913 * This function should be used by IOMMU users which want to be notified 1914 * whenever an IOMMU fault happens. 1915 * 1916 * The fault handler itself should return 0 on success, and an appropriate 1917 * error code otherwise. 
1918 */ 1919 void iommu_set_fault_handler(struct iommu_domain *domain, 1920 iommu_fault_handler_t handler, 1921 void *token) 1922 { 1923 BUG_ON(!domain); 1924 1925 domain->handler = handler; 1926 domain->handler_token = token; 1927 } 1928 EXPORT_SYMBOL_GPL(iommu_set_fault_handler); 1929 1930 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, 1931 unsigned type) 1932 { 1933 struct iommu_domain *domain; 1934 1935 if (bus == NULL || bus->iommu_ops == NULL) 1936 return NULL; 1937 1938 domain = bus->iommu_ops->domain_alloc(type); 1939 if (!domain) 1940 return NULL; 1941 1942 domain->ops = bus->iommu_ops; 1943 domain->type = type; 1944 /* Assume all sizes by default; the driver may override this later */ 1945 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; 1946 1947 return domain; 1948 } 1949 1950 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) 1951 { 1952 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED); 1953 } 1954 EXPORT_SYMBOL_GPL(iommu_domain_alloc); 1955 1956 void iommu_domain_free(struct iommu_domain *domain) 1957 { 1958 domain->ops->domain_free(domain); 1959 } 1960 EXPORT_SYMBOL_GPL(iommu_domain_free); 1961 1962 static int __iommu_attach_device(struct iommu_domain *domain, 1963 struct device *dev) 1964 { 1965 int ret; 1966 1967 if (unlikely(domain->ops->attach_dev == NULL)) 1968 return -ENODEV; 1969 1970 ret = domain->ops->attach_dev(domain, dev); 1971 if (!ret) 1972 trace_attach_device_to_domain(dev); 1973 return ret; 1974 } 1975 1976 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 1977 { 1978 struct iommu_group *group; 1979 int ret; 1980 1981 group = iommu_group_get(dev); 1982 if (!group) 1983 return -ENODEV; 1984 1985 /* 1986 * Lock the group to make sure the device-count doesn't 1987 * change while we are attaching 1988 */ 1989 mutex_lock(&group->mutex); 1990 ret = -EINVAL; 1991 if (iommu_group_device_count(group) != 1) 1992 goto out_unlock; 1993 1994 ret = __iommu_attach_group(domain, group); 1995 1996 out_unlock: 1997 mutex_unlock(&group->mutex); 1998 iommu_group_put(group); 1999 2000 return ret; 2001 } 2002 EXPORT_SYMBOL_GPL(iommu_attach_device); 2003 2004 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 2005 { 2006 const struct iommu_ops *ops = domain->ops; 2007 2008 if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev)) 2009 return __iommu_attach_device(domain, dev); 2010 2011 return 0; 2012 } 2013 2014 /* 2015 * Check flags and other user provided data for valid combinations. We also 2016 * make sure no reserved fields or unused flags are set. This is to ensure 2017 * not breaking userspace in the future when these fields or flags are used. 
2018 */ 2019 static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info) 2020 { 2021 u32 mask; 2022 int i; 2023 2024 if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1) 2025 return -EINVAL; 2026 2027 mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1; 2028 if (info->cache & ~mask) 2029 return -EINVAL; 2030 2031 if (info->granularity >= IOMMU_INV_GRANU_NR) 2032 return -EINVAL; 2033 2034 switch (info->granularity) { 2035 case IOMMU_INV_GRANU_ADDR: 2036 if (info->cache & IOMMU_CACHE_INV_TYPE_PASID) 2037 return -EINVAL; 2038 2039 mask = IOMMU_INV_ADDR_FLAGS_PASID | 2040 IOMMU_INV_ADDR_FLAGS_ARCHID | 2041 IOMMU_INV_ADDR_FLAGS_LEAF; 2042 2043 if (info->granu.addr_info.flags & ~mask) 2044 return -EINVAL; 2045 break; 2046 case IOMMU_INV_GRANU_PASID: 2047 mask = IOMMU_INV_PASID_FLAGS_PASID | 2048 IOMMU_INV_PASID_FLAGS_ARCHID; 2049 if (info->granu.pasid_info.flags & ~mask) 2050 return -EINVAL; 2051 2052 break; 2053 case IOMMU_INV_GRANU_DOMAIN: 2054 if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB) 2055 return -EINVAL; 2056 break; 2057 default: 2058 return -EINVAL; 2059 } 2060 2061 /* Check reserved padding fields */ 2062 for (i = 0; i < sizeof(info->padding); i++) { 2063 if (info->padding[i]) 2064 return -EINVAL; 2065 } 2066 2067 return 0; 2068 } 2069 2070 int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev, 2071 void __user *uinfo) 2072 { 2073 struct iommu_cache_invalidate_info inv_info = { 0 }; 2074 u32 minsz; 2075 int ret; 2076 2077 if (unlikely(!domain->ops->cache_invalidate)) 2078 return -ENODEV; 2079 2080 /* 2081 * No new spaces can be added before the variable sized union, the 2082 * minimum size is the offset to the union. 2083 */ 2084 minsz = offsetof(struct iommu_cache_invalidate_info, granu); 2085 2086 /* Copy minsz from user to get flags and argsz */ 2087 if (copy_from_user(&inv_info, uinfo, minsz)) 2088 return -EFAULT; 2089 2090 /* Fields before the variable size union are mandatory */ 2091 if (inv_info.argsz < minsz) 2092 return -EINVAL; 2093 2094 /* PASID and address granu require additional info beyond minsz */ 2095 if (inv_info.granularity == IOMMU_INV_GRANU_PASID && 2096 inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info)) 2097 return -EINVAL; 2098 2099 if (inv_info.granularity == IOMMU_INV_GRANU_ADDR && 2100 inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info)) 2101 return -EINVAL; 2102 2103 /* 2104 * User might be using a newer UAPI header which has a larger data 2105 * size, we shall support the existing flags within the current 2106 * size. Copy the remaining user data _after_ minsz but not more 2107 * than the current kernel supported size. 
2108 */ 2109 if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz, 2110 min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz)) 2111 return -EFAULT; 2112 2113 /* Now the argsz is validated, check the content */ 2114 ret = iommu_check_cache_invl_data(&inv_info); 2115 if (ret) 2116 return ret; 2117 2118 return domain->ops->cache_invalidate(domain, dev, &inv_info); 2119 } 2120 EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate); 2121 2122 static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data) 2123 { 2124 u64 mask; 2125 int i; 2126 2127 if (data->version != IOMMU_GPASID_BIND_VERSION_1) 2128 return -EINVAL; 2129 2130 /* Check the range of supported formats */ 2131 if (data->format >= IOMMU_PASID_FORMAT_LAST) 2132 return -EINVAL; 2133 2134 /* Check all flags */ 2135 mask = IOMMU_SVA_GPASID_VAL; 2136 if (data->flags & ~mask) 2137 return -EINVAL; 2138 2139 /* Check reserved padding fields */ 2140 for (i = 0; i < sizeof(data->padding); i++) { 2141 if (data->padding[i]) 2142 return -EINVAL; 2143 } 2144 2145 return 0; 2146 } 2147 2148 static int iommu_sva_prepare_bind_data(void __user *udata, 2149 struct iommu_gpasid_bind_data *data) 2150 { 2151 u32 minsz; 2152 2153 /* 2154 * No new spaces can be added before the variable sized union, the 2155 * minimum size is the offset to the union. 2156 */ 2157 minsz = offsetof(struct iommu_gpasid_bind_data, vendor); 2158 2159 /* Copy minsz from user to get flags and argsz */ 2160 if (copy_from_user(data, udata, minsz)) 2161 return -EFAULT; 2162 2163 /* Fields before the variable size union are mandatory */ 2164 if (data->argsz < minsz) 2165 return -EINVAL; 2166 /* 2167 * User might be using a newer UAPI header, we shall let IOMMU vendor 2168 * driver decide on what size it needs. Since the guest PASID bind data 2169 * can be vendor specific, larger argsz could be the result of extension 2170 * for one vendor but it should not affect another vendor. 
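 *
 * For illustration only, a minimal request accepted by the checks in
 * iommu_check_bind_data() might look like this on the user-space side
 * (the PASID values are hypothetical):
 *
 *	struct iommu_gpasid_bind_data data = {
 *		.argsz		= sizeof(data),
 *		.version	= IOMMU_GPASID_BIND_VERSION_1,
 *		.format		= IOMMU_PASID_FORMAT_INTEL_VTD,
 *		.flags		= IOMMU_SVA_GPASID_VAL,
 *		.hpasid		= 5,
 *		.gpasid		= 5,
 *	};
 *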
2171 * Copy the remaining user data _after_ minsz 2172 */ 2173 if (copy_from_user((void *)data + minsz, udata + minsz, 2174 min_t(u32, data->argsz, sizeof(*data)) - minsz)) 2175 return -EFAULT; 2176 2177 return iommu_check_bind_data(data); 2178 } 2179 2180 int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, 2181 void __user *udata) 2182 { 2183 struct iommu_gpasid_bind_data data = { 0 }; 2184 int ret; 2185 2186 if (unlikely(!domain->ops->sva_bind_gpasid)) 2187 return -ENODEV; 2188 2189 ret = iommu_sva_prepare_bind_data(udata, &data); 2190 if (ret) 2191 return ret; 2192 2193 return domain->ops->sva_bind_gpasid(domain, dev, &data); 2194 } 2195 EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid); 2196 2197 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, 2198 ioasid_t pasid) 2199 { 2200 if (unlikely(!domain->ops->sva_unbind_gpasid)) 2201 return -ENODEV; 2202 2203 return domain->ops->sva_unbind_gpasid(dev, pasid); 2204 } 2205 EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid); 2206 2207 int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, 2208 void __user *udata) 2209 { 2210 struct iommu_gpasid_bind_data data = { 0 }; 2211 int ret; 2212 2213 if (unlikely(!domain->ops->sva_bind_gpasid)) 2214 return -ENODEV; 2215 2216 ret = iommu_sva_prepare_bind_data(udata, &data); 2217 if (ret) 2218 return ret; 2219 2220 return iommu_sva_unbind_gpasid(domain, dev, data.hpasid); 2221 } 2222 EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid); 2223 2224 static void __iommu_detach_device(struct iommu_domain *domain, 2225 struct device *dev) 2226 { 2227 if (iommu_is_attach_deferred(domain, dev)) 2228 return; 2229 2230 if (unlikely(domain->ops->detach_dev == NULL)) 2231 return; 2232 2233 domain->ops->detach_dev(domain, dev); 2234 trace_detach_device_from_domain(dev); 2235 } 2236 2237 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 2238 { 2239 struct iommu_group *group; 2240 2241 group = iommu_group_get(dev); 2242 if (!group) 2243 return; 2244 2245 mutex_lock(&group->mutex); 2246 if (iommu_group_device_count(group) != 1) { 2247 WARN_ON(1); 2248 goto out_unlock; 2249 } 2250 2251 __iommu_detach_group(domain, group); 2252 2253 out_unlock: 2254 mutex_unlock(&group->mutex); 2255 iommu_group_put(group); 2256 } 2257 EXPORT_SYMBOL_GPL(iommu_detach_device); 2258 2259 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 2260 { 2261 struct iommu_domain *domain; 2262 struct iommu_group *group; 2263 2264 group = iommu_group_get(dev); 2265 if (!group) 2266 return NULL; 2267 2268 domain = group->domain; 2269 2270 iommu_group_put(group); 2271 2272 return domain; 2273 } 2274 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 2275 2276 /* 2277 * For IOMMU_DOMAIN_DMA implementations which already provide their own 2278 * guarantees that the group and its default domain are valid and correct. 2279 */ 2280 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 2281 { 2282 return dev->iommu_group->default_domain; 2283 } 2284 2285 /* 2286 * IOMMU groups are really the natural working unit of the IOMMU, but 2287 * the IOMMU API works on domains and devices. Bridge that gap by 2288 * iterating over the devices in a group. Ideally we'd have a single 2289 * device which represents the requestor ID of the group, but we also 2290 * allow IOMMU drivers to create policy-defined minimum sets, where 2291 * the physical hardware may be able to distinguish members, but we 2292 * wish to group them at a higher level (ex.
untrusted multi-function 2293 * PCI devices). Thus we attach each device. 2294 */ 2295 static int iommu_group_do_attach_device(struct device *dev, void *data) 2296 { 2297 struct iommu_domain *domain = data; 2298 2299 return __iommu_attach_device(domain, dev); 2300 } 2301 2302 static int __iommu_attach_group(struct iommu_domain *domain, 2303 struct iommu_group *group) 2304 { 2305 int ret; 2306 2307 if (group->default_domain && group->domain != group->default_domain) 2308 return -EBUSY; 2309 2310 ret = __iommu_group_for_each_dev(group, domain, 2311 iommu_group_do_attach_device); 2312 if (ret == 0) 2313 group->domain = domain; 2314 2315 return ret; 2316 } 2317 2318 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 2319 { 2320 int ret; 2321 2322 mutex_lock(&group->mutex); 2323 ret = __iommu_attach_group(domain, group); 2324 mutex_unlock(&group->mutex); 2325 2326 return ret; 2327 } 2328 EXPORT_SYMBOL_GPL(iommu_attach_group); 2329 2330 static int iommu_group_do_detach_device(struct device *dev, void *data) 2331 { 2332 struct iommu_domain *domain = data; 2333 2334 __iommu_detach_device(domain, dev); 2335 2336 return 0; 2337 } 2338 2339 static void __iommu_detach_group(struct iommu_domain *domain, 2340 struct iommu_group *group) 2341 { 2342 int ret; 2343 2344 if (!group->default_domain) { 2345 __iommu_group_for_each_dev(group, domain, 2346 iommu_group_do_detach_device); 2347 group->domain = NULL; 2348 return; 2349 } 2350 2351 if (group->domain == group->default_domain) 2352 return; 2353 2354 /* Detach by re-attaching to the default domain */ 2355 ret = __iommu_group_for_each_dev(group, group->default_domain, 2356 iommu_group_do_attach_device); 2357 if (ret != 0) 2358 WARN_ON(1); 2359 else 2360 group->domain = group->default_domain; 2361 } 2362 2363 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2364 { 2365 mutex_lock(&group->mutex); 2366 __iommu_detach_group(domain, group); 2367 mutex_unlock(&group->mutex); 2368 } 2369 EXPORT_SYMBOL_GPL(iommu_detach_group); 2370 2371 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2372 { 2373 if (unlikely(domain->ops->iova_to_phys == NULL)) 2374 return 0; 2375 2376 return domain->ops->iova_to_phys(domain, iova); 2377 } 2378 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2379 2380 static size_t iommu_pgsize(struct iommu_domain *domain, 2381 unsigned long addr_merge, size_t size) 2382 { 2383 unsigned int pgsize_idx; 2384 size_t pgsize; 2385 2386 /* Max page size that still fits into 'size' */ 2387 pgsize_idx = __fls(size); 2388 2389 /* need to consider alignment requirements ? 
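 *
 * Worked example with illustrative numbers: for addr_merge == 0x200000 and
 * size == 0x400000, __fls(size) alone would allow up to a 4MB page, but
 * __ffs(addr_merge) caps the choice at 2MB; with a pgsize_bitmap of
 * 4K | 2M | 1G the code below therefore picks a 2MB page for this call.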
*/ 2390 if (likely(addr_merge)) { 2391 /* Max page size allowed by address */ 2392 unsigned int align_pgsize_idx = __ffs(addr_merge); 2393 pgsize_idx = min(pgsize_idx, align_pgsize_idx); 2394 } 2395 2396 /* build a mask of acceptable page sizes */ 2397 pgsize = (1UL << (pgsize_idx + 1)) - 1; 2398 2399 /* throw away page sizes not supported by the hardware */ 2400 pgsize &= domain->pgsize_bitmap; 2401 2402 /* make sure we're still sane */ 2403 BUG_ON(!pgsize); 2404 2405 /* pick the biggest page */ 2406 pgsize_idx = __fls(pgsize); 2407 pgsize = 1UL << pgsize_idx; 2408 2409 return pgsize; 2410 } 2411 2412 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2413 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2414 { 2415 const struct iommu_ops *ops = domain->ops; 2416 unsigned long orig_iova = iova; 2417 unsigned int min_pagesz; 2418 size_t orig_size = size; 2419 phys_addr_t orig_paddr = paddr; 2420 int ret = 0; 2421 2422 if (unlikely(ops->map == NULL || 2423 domain->pgsize_bitmap == 0UL)) 2424 return -ENODEV; 2425 2426 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2427 return -EINVAL; 2428 2429 /* find out the minimum page size supported */ 2430 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2431 2432 /* 2433 * both the virtual address and the physical one, as well as 2434 * the size of the mapping, must be aligned (at least) to the 2435 * size of the smallest page supported by the hardware 2436 */ 2437 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2438 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2439 iova, &paddr, size, min_pagesz); 2440 return -EINVAL; 2441 } 2442 2443 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2444 2445 while (size) { 2446 size_t pgsize = iommu_pgsize(domain, iova | paddr, size); 2447 2448 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", 2449 iova, &paddr, pgsize); 2450 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); 2451 2452 if (ret) 2453 break; 2454 2455 iova += pgsize; 2456 paddr += pgsize; 2457 size -= pgsize; 2458 } 2459 2460 /* unroll mapping in case something went wrong */ 2461 if (ret) 2462 iommu_unmap(domain, orig_iova, orig_size - size); 2463 else 2464 trace_map(orig_iova, orig_paddr, orig_size); 2465 2466 return ret; 2467 } 2468 2469 static int _iommu_map(struct iommu_domain *domain, unsigned long iova, 2470 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2471 { 2472 const struct iommu_ops *ops = domain->ops; 2473 int ret; 2474 2475 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); 2476 if (ret == 0 && ops->iotlb_sync_map) 2477 ops->iotlb_sync_map(domain, iova, size); 2478 2479 return ret; 2480 } 2481 2482 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2483 phys_addr_t paddr, size_t size, int prot) 2484 { 2485 might_sleep(); 2486 return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL); 2487 } 2488 EXPORT_SYMBOL_GPL(iommu_map); 2489 2490 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, 2491 phys_addr_t paddr, size_t size, int prot) 2492 { 2493 return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC); 2494 } 2495 EXPORT_SYMBOL_GPL(iommu_map_atomic); 2496 2497 static size_t __iommu_unmap(struct iommu_domain *domain, 2498 unsigned long iova, size_t size, 2499 struct iommu_iotlb_gather *iotlb_gather) 2500 { 2501 const struct iommu_ops *ops = domain->ops; 2502 size_t unmapped_page, unmapped = 0; 2503 unsigned long orig_iova = iova; 2504 unsigned int min_pagesz; 2505 2506 if (unlikely(ops->unmap == NULL 
|| 2507 domain->pgsize_bitmap == 0UL)) 2508 return 0; 2509 2510 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2511 return 0; 2512 2513 /* find out the minimum page size supported */ 2514 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2515 2516 /* 2517 * The virtual address, as well as the size of the mapping, must be 2518 * aligned (at least) to the size of the smallest page supported 2519 * by the hardware 2520 */ 2521 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2522 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2523 iova, size, min_pagesz); 2524 return 0; 2525 } 2526 2527 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2528 2529 /* 2530 * Keep iterating until we either unmap 'size' bytes (or more) 2531 * or we hit an area that isn't mapped. 2532 */ 2533 while (unmapped < size) { 2534 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); 2535 2536 unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather); 2537 if (!unmapped_page) 2538 break; 2539 2540 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2541 iova, unmapped_page); 2542 2543 iova += unmapped_page; 2544 unmapped += unmapped_page; 2545 } 2546 2547 trace_unmap(orig_iova, size, unmapped); 2548 return unmapped; 2549 } 2550 2551 size_t iommu_unmap(struct iommu_domain *domain, 2552 unsigned long iova, size_t size) 2553 { 2554 struct iommu_iotlb_gather iotlb_gather; 2555 size_t ret; 2556 2557 iommu_iotlb_gather_init(&iotlb_gather); 2558 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2559 iommu_iotlb_sync(domain, &iotlb_gather); 2560 2561 return ret; 2562 } 2563 EXPORT_SYMBOL_GPL(iommu_unmap); 2564 2565 size_t iommu_unmap_fast(struct iommu_domain *domain, 2566 unsigned long iova, size_t size, 2567 struct iommu_iotlb_gather *iotlb_gather) 2568 { 2569 return __iommu_unmap(domain, iova, size, iotlb_gather); 2570 } 2571 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2572 2573 static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2574 struct scatterlist *sg, unsigned int nents, int prot, 2575 gfp_t gfp) 2576 { 2577 const struct iommu_ops *ops = domain->ops; 2578 size_t len = 0, mapped = 0; 2579 phys_addr_t start; 2580 unsigned int i = 0; 2581 int ret; 2582 2583 while (i <= nents) { 2584 phys_addr_t s_phys = sg_phys(sg); 2585 2586 if (len && s_phys != start + len) { 2587 ret = __iommu_map(domain, iova + mapped, start, 2588 len, prot, gfp); 2589 2590 if (ret) 2591 goto out_err; 2592 2593 mapped += len; 2594 len = 0; 2595 } 2596 2597 if (len) { 2598 len += sg->length; 2599 } else { 2600 len = sg->length; 2601 start = s_phys; 2602 } 2603 2604 if (++i < nents) 2605 sg = sg_next(sg); 2606 } 2607 2608 if (ops->iotlb_sync_map) 2609 ops->iotlb_sync_map(domain, iova, mapped); 2610 return mapped; 2611 2612 out_err: 2613 /* undo mappings already done */ 2614 iommu_unmap(domain, iova, mapped); 2615 2616 return 0; 2617 2618 } 2619 2620 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2621 struct scatterlist *sg, unsigned int nents, int prot) 2622 { 2623 might_sleep(); 2624 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL); 2625 } 2626 EXPORT_SYMBOL_GPL(iommu_map_sg); 2627 2628 size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, 2629 struct scatterlist *sg, unsigned int nents, int prot) 2630 { 2631 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); 2632 } 2633 2634 /** 2635 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2636 * @domain: the iommu domain where the fault has happened 2637 
* @dev: the device where the fault has happened 2638 * @iova: the faulting address 2639 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2640 * 2641 * This function should be called by the low-level IOMMU implementations 2642 * whenever IOMMU faults happen, to allow high-level users, that are 2643 * interested in such events, to know about them. 2644 * 2645 * This event may be useful for several possible use cases: 2646 * - mere logging of the event 2647 * - dynamic TLB/PTE loading 2648 * - if restarting of the faulting device is required 2649 * 2650 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2651 * PTE/TLB loading will one day be supported, implementations will be able 2652 * to tell whether it succeeded or not according to this return value). 2653 * 2654 * Specifically, -ENOSYS is returned if a fault handler isn't installed 2655 * (though fault handlers can also return -ENOSYS, in case they want to 2656 * elicit the default behavior of the IOMMU drivers). 2657 */ 2658 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 2659 unsigned long iova, int flags) 2660 { 2661 int ret = -ENOSYS; 2662 2663 /* 2664 * if upper layers showed interest and installed a fault handler, 2665 * invoke it. 2666 */ 2667 if (domain->handler) 2668 ret = domain->handler(domain, dev, iova, flags, 2669 domain->handler_token); 2670 2671 trace_io_page_fault(dev, iova, flags); 2672 return ret; 2673 } 2674 EXPORT_SYMBOL_GPL(report_iommu_fault); 2675 2676 static int __init iommu_init(void) 2677 { 2678 iommu_group_kset = kset_create_and_add("iommu_groups", 2679 NULL, kernel_kobj); 2680 BUG_ON(!iommu_group_kset); 2681 2682 iommu_debugfs_setup(); 2683 2684 return 0; 2685 } 2686 core_initcall(iommu_init); 2687 2688 int iommu_enable_nesting(struct iommu_domain *domain) 2689 { 2690 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2691 return -EINVAL; 2692 if (!domain->ops->enable_nesting) 2693 return -EINVAL; 2694 return domain->ops->enable_nesting(domain); 2695 } 2696 EXPORT_SYMBOL_GPL(iommu_enable_nesting); 2697 2698 int iommu_set_pgtable_quirks(struct iommu_domain *domain, 2699 unsigned long quirk) 2700 { 2701 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2702 return -EINVAL; 2703 if (!domain->ops->set_pgtable_quirks) 2704 return -EINVAL; 2705 return domain->ops->set_pgtable_quirks(domain, quirk); 2706 } 2707 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); 2708 2709 void iommu_get_resv_regions(struct device *dev, struct list_head *list) 2710 { 2711 const struct iommu_ops *ops = dev->bus->iommu_ops; 2712 2713 if (ops && ops->get_resv_regions) 2714 ops->get_resv_regions(dev, list); 2715 } 2716 2717 void iommu_put_resv_regions(struct device *dev, struct list_head *list) 2718 { 2719 const struct iommu_ops *ops = dev->bus->iommu_ops; 2720 2721 if (ops && ops->put_resv_regions) 2722 ops->put_resv_regions(dev, list); 2723 } 2724 2725 /** 2726 * generic_iommu_put_resv_regions - Reserved region driver helper 2727 * @dev: device for which to free reserved regions 2728 * @list: reserved region list for device 2729 * 2730 * IOMMU drivers can use this to implement their .put_resv_regions() callback 2731 * for simple reservations. Memory allocated for each reserved region will be 2732 * freed. If an IOMMU driver allocates additional resources per region, it is 2733 * going to have to implement a custom callback. 
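 *
 * An illustrative sketch of how a driver without per-region resources might
 * wire this up ("my_iommu_ops" and "my_get_resv_regions" are hypothetical
 * names):
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		...
 *		.get_resv_regions	= my_get_resv_regions,
 *		.put_resv_regions	= generic_iommu_put_resv_regions,
 *	};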
2734 */ 2735 void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list) 2736 { 2737 struct iommu_resv_region *entry, *next; 2738 2739 list_for_each_entry_safe(entry, next, list, list) 2740 kfree(entry); 2741 } 2742 EXPORT_SYMBOL(generic_iommu_put_resv_regions); 2743 2744 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 2745 size_t length, int prot, 2746 enum iommu_resv_type type) 2747 { 2748 struct iommu_resv_region *region; 2749 2750 region = kzalloc(sizeof(*region), GFP_KERNEL); 2751 if (!region) 2752 return NULL; 2753 2754 INIT_LIST_HEAD(®ion->list); 2755 region->start = start; 2756 region->length = length; 2757 region->prot = prot; 2758 region->type = type; 2759 return region; 2760 } 2761 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); 2762 2763 void iommu_set_default_passthrough(bool cmd_line) 2764 { 2765 if (cmd_line) 2766 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2767 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2768 } 2769 2770 void iommu_set_default_translated(bool cmd_line) 2771 { 2772 if (cmd_line) 2773 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2774 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2775 } 2776 2777 bool iommu_default_passthrough(void) 2778 { 2779 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2780 } 2781 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2782 2783 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 2784 { 2785 const struct iommu_ops *ops = NULL; 2786 struct iommu_device *iommu; 2787 2788 spin_lock(&iommu_device_lock); 2789 list_for_each_entry(iommu, &iommu_device_list, list) 2790 if (iommu->fwnode == fwnode) { 2791 ops = iommu->ops; 2792 break; 2793 } 2794 spin_unlock(&iommu_device_lock); 2795 return ops; 2796 } 2797 2798 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, 2799 const struct iommu_ops *ops) 2800 { 2801 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2802 2803 if (fwspec) 2804 return ops == fwspec->ops ? 0 : -EINVAL; 2805 2806 if (!dev_iommu_get(dev)) 2807 return -ENOMEM; 2808 2809 /* Preallocate for the overwhelmingly common case of 1 ID */ 2810 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2811 if (!fwspec) 2812 return -ENOMEM; 2813 2814 of_node_get(to_of_node(iommu_fwnode)); 2815 fwspec->iommu_fwnode = iommu_fwnode; 2816 fwspec->ops = ops; 2817 dev_iommu_fwspec_set(dev, fwspec); 2818 return 0; 2819 } 2820 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2821 2822 void iommu_fwspec_free(struct device *dev) 2823 { 2824 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2825 2826 if (fwspec) { 2827 fwnode_handle_put(fwspec->iommu_fwnode); 2828 kfree(fwspec); 2829 dev_iommu_fwspec_set(dev, NULL); 2830 } 2831 } 2832 EXPORT_SYMBOL_GPL(iommu_fwspec_free); 2833 2834 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) 2835 { 2836 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2837 int i, new_num; 2838 2839 if (!fwspec) 2840 return -EINVAL; 2841 2842 new_num = fwspec->num_ids + num_ids; 2843 if (new_num > 1) { 2844 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2845 GFP_KERNEL); 2846 if (!fwspec) 2847 return -ENOMEM; 2848 2849 dev_iommu_fwspec_set(dev, fwspec); 2850 } 2851 2852 for (i = 0; i < num_ids; i++) 2853 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2854 2855 fwspec->num_ids = new_num; 2856 return 0; 2857 } 2858 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 2859 2860 /* 2861 * Per device IOMMU features. 
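 *
 * An illustrative calling pattern from a device driver (the feature choice
 * and error handling are assumptions of this sketch):
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);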
2862 */ 2863 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2864 { 2865 if (dev->iommu && dev->iommu->iommu_dev) { 2866 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2867 2868 if (ops->dev_enable_feat) 2869 return ops->dev_enable_feat(dev, feat); 2870 } 2871 2872 return -ENODEV; 2873 } 2874 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 2875 2876 /* 2877 * The device drivers should do the necessary cleanups before calling this. 2878 * For example, before disabling the aux-domain feature, the device driver 2879 * should detach all aux-domains. Otherwise, this will return -EBUSY. 2880 */ 2881 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 2882 { 2883 if (dev->iommu && dev->iommu->iommu_dev) { 2884 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2885 2886 if (ops->dev_disable_feat) 2887 return ops->dev_disable_feat(dev, feat); 2888 } 2889 2890 return -EBUSY; 2891 } 2892 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 2893 2894 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) 2895 { 2896 if (dev->iommu && dev->iommu->iommu_dev) { 2897 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2898 2899 if (ops->dev_feat_enabled) 2900 return ops->dev_feat_enabled(dev, feat); 2901 } 2902 2903 return false; 2904 } 2905 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled); 2906 2907 /* 2908 * Aux-domain specific attach/detach. 2909 * 2910 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns 2911 * true. Also, as long as domains are attached to a device through this 2912 * interface, any tries to call iommu_attach_device() should fail 2913 * (iommu_detach_device() can't fail, so we fail when trying to re-attach). 2914 * This should make us safe against a device being attached to a guest as a 2915 * whole while there are still pasid users on it (aux and sva). 2916 */ 2917 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) 2918 { 2919 int ret = -ENODEV; 2920 2921 if (domain->ops->aux_attach_dev) 2922 ret = domain->ops->aux_attach_dev(domain, dev); 2923 2924 if (!ret) 2925 trace_attach_device_to_domain(dev); 2926 2927 return ret; 2928 } 2929 EXPORT_SYMBOL_GPL(iommu_aux_attach_device); 2930 2931 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) 2932 { 2933 if (domain->ops->aux_detach_dev) { 2934 domain->ops->aux_detach_dev(domain, dev); 2935 trace_detach_device_from_domain(dev); 2936 } 2937 } 2938 EXPORT_SYMBOL_GPL(iommu_aux_detach_device); 2939 2940 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) 2941 { 2942 int ret = -ENODEV; 2943 2944 if (domain->ops->aux_get_pasid) 2945 ret = domain->ops->aux_get_pasid(domain, dev); 2946 2947 return ret; 2948 } 2949 EXPORT_SYMBOL_GPL(iommu_aux_get_pasid); 2950 2951 /** 2952 * iommu_sva_bind_device() - Bind a process address space to a device 2953 * @dev: the device 2954 * @mm: the mm to bind, caller must hold a reference to it 2955 * 2956 * Create a bond between device and address space, allowing the device to access 2957 * the mm using the returned PASID. If a bond already exists between @device and 2958 * @mm, it is returned and an additional reference is taken. Caller must call 2959 * iommu_sva_unbind_device() to release each reference. 2960 * 2961 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to 2962 * initialize the required SVA features. 2963 * 2964 * On error, returns an ERR_PTR value. 
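 *
 * Illustrative usage from a device driver; the names and the error handling
 * are assumptions of this sketch, not part of the API contract:
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	pasid = iommu_sva_get_pasid(handle);
 *	if (pasid != IOMMU_PASID_INVALID) {
 *		... program the PASID into the device and issue DMA ...
 *	}
 *
 *	iommu_sva_unbind_device(handle);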
2965 */ 2966 struct iommu_sva * 2967 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) 2968 { 2969 struct iommu_group *group; 2970 struct iommu_sva *handle = ERR_PTR(-EINVAL); 2971 const struct iommu_ops *ops = dev->bus->iommu_ops; 2972 2973 if (!ops || !ops->sva_bind) 2974 return ERR_PTR(-ENODEV); 2975 2976 group = iommu_group_get(dev); 2977 if (!group) 2978 return ERR_PTR(-ENODEV); 2979 2980 /* Ensure device count and domain don't change while we're binding */ 2981 mutex_lock(&group->mutex); 2982 2983 /* 2984 * To keep things simple, SVA currently doesn't support IOMMU groups 2985 * with more than one device. Existing SVA-capable systems are not 2986 * affected by the problems that required IOMMU groups (lack of ACS 2987 * isolation, device ID aliasing and other hardware issues). 2988 */ 2989 if (iommu_group_device_count(group) != 1) 2990 goto out_unlock; 2991 2992 handle = ops->sva_bind(dev, mm, drvdata); 2993 2994 out_unlock: 2995 mutex_unlock(&group->mutex); 2996 iommu_group_put(group); 2997 2998 return handle; 2999 } 3000 EXPORT_SYMBOL_GPL(iommu_sva_bind_device); 3001 3002 /** 3003 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device 3004 * @handle: the handle returned by iommu_sva_bind_device() 3005 * 3006 * Put reference to a bond between device and address space. The device should 3007 * not be issuing any more transaction for this PASID. All outstanding page 3008 * requests for this PASID must have been flushed to the IOMMU. 3009 */ 3010 void iommu_sva_unbind_device(struct iommu_sva *handle) 3011 { 3012 struct iommu_group *group; 3013 struct device *dev = handle->dev; 3014 const struct iommu_ops *ops = dev->bus->iommu_ops; 3015 3016 if (!ops || !ops->sva_unbind) 3017 return; 3018 3019 group = iommu_group_get(dev); 3020 if (!group) 3021 return; 3022 3023 mutex_lock(&group->mutex); 3024 ops->sva_unbind(handle); 3025 mutex_unlock(&group->mutex); 3026 3027 iommu_group_put(group); 3028 } 3029 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); 3030 3031 u32 iommu_sva_get_pasid(struct iommu_sva *handle) 3032 { 3033 const struct iommu_ops *ops = handle->dev->bus->iommu_ops; 3034 3035 if (!ops || !ops->sva_get_pasid) 3036 return IOMMU_PASID_INVALID; 3037 3038 return ops->sva_get_pasid(handle); 3039 } 3040 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); 3041 3042 /* 3043 * Changes the default domain of an iommu group that has *only* one device 3044 * 3045 * @group: The group for which the default domain should be changed 3046 * @prev_dev: The device in the group (this is used to make sure that the device 3047 * hasn't changed after the caller has called this function) 3048 * @type: The type of the new default domain that gets associated with the group 3049 * 3050 * Returns 0 on success and error code on failure 3051 * 3052 * Note: 3053 * 1. Presently, this function is called only when user requests to change the 3054 * group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type 3055 * Please take a closer look if intended to use for other purposes. 
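 * 2. For reference, that sysfs request is typically issued as below; the
 *    group number is illustrative and the device's driver must have been
 *    unbound beforehand:
 *
 *	# echo identity > /sys/kernel/iommu_groups/42/type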
3056 */ 3057 static int iommu_change_dev_def_domain(struct iommu_group *group, 3058 struct device *prev_dev, int type) 3059 { 3060 struct iommu_domain *prev_dom; 3061 struct group_device *grp_dev; 3062 int ret, dev_def_dom; 3063 struct device *dev; 3064 3065 mutex_lock(&group->mutex); 3066 3067 if (group->default_domain != group->domain) { 3068 dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n"); 3069 ret = -EBUSY; 3070 goto out; 3071 } 3072 3073 /* 3074 * iommu group wasn't locked while acquiring device lock in 3075 * iommu_group_store_type(). So, make sure that the device count hasn't 3076 * changed while acquiring device lock. 3077 * 3078 * Changing default domain of an iommu group with two or more devices 3079 * isn't supported because there could be a potential deadlock. Consider 3080 * the following scenario. T1 is trying to acquire device locks of all 3081 * the devices in the group and before it could acquire all of them, 3082 * there could be another thread T2 (from different sub-system and use 3083 * case) that has already acquired some of the device locks and might be 3084 * waiting for T1 to release other device locks. 3085 */ 3086 if (iommu_group_device_count(group) != 1) { 3087 dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n"); 3088 ret = -EINVAL; 3089 goto out; 3090 } 3091 3092 /* Since group has only one device */ 3093 grp_dev = list_first_entry(&group->devices, struct group_device, list); 3094 dev = grp_dev->dev; 3095 3096 if (prev_dev != dev) { 3097 dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n"); 3098 ret = -EBUSY; 3099 goto out; 3100 } 3101 3102 prev_dom = group->default_domain; 3103 if (!prev_dom) { 3104 ret = -EINVAL; 3105 goto out; 3106 } 3107 3108 dev_def_dom = iommu_get_def_domain_type(dev); 3109 if (!type) { 3110 /* 3111 * If the user hasn't requested any specific type of domain and 3112 * if the device supports both the domains, then default to the 3113 * domain the device was booted with 3114 */ 3115 type = dev_def_dom ? : iommu_def_domain_type; 3116 } else if (dev_def_dom && type != dev_def_dom) { 3117 dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n", 3118 iommu_domain_type_str(type)); 3119 ret = -EINVAL; 3120 goto out; 3121 } 3122 3123 /* 3124 * Switch to a new domain only if the requested domain type is different 3125 * from the existing default domain type 3126 */ 3127 if (prev_dom->type == type) { 3128 ret = 0; 3129 goto out; 3130 } 3131 3132 /* Sets group->default_domain to the newly allocated domain */ 3133 ret = iommu_group_alloc_default_domain(dev->bus, group, type); 3134 if (ret) 3135 goto out; 3136 3137 ret = iommu_create_device_direct_mappings(group, dev); 3138 if (ret) 3139 goto free_new_domain; 3140 3141 ret = __iommu_attach_device(group->default_domain, dev); 3142 if (ret) 3143 goto free_new_domain; 3144 3145 group->domain = group->default_domain; 3146 3147 /* 3148 * Release the mutex here because ops->probe_finalize() call-back of 3149 * some vendor IOMMU drivers calls arm_iommu_attach_device() which 3150 * in-turn might call back into IOMMU core code, where it tries to take 3151 * group->mutex, resulting in a deadlock. 
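 *
 * I.e. the call chain to avoid would look roughly like this (illustrative,
 * based on the note above):
 *
 *	ops->probe_finalize()
 *	  -> arm_iommu_attach_device()
 *	      -> iommu_attach_device()
 *	          -> mutex_lock(&group->mutex)	<- still held: deadlock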
3152 */ 3153 mutex_unlock(&group->mutex); 3154 3155 /* Make sure dma_ops is appropriately set */ 3156 iommu_group_do_probe_finalize(dev, group->default_domain); 3157 iommu_domain_free(prev_dom); 3158 return 0; 3159 3160 free_new_domain: 3161 iommu_domain_free(group->default_domain); 3162 group->default_domain = prev_dom; 3163 group->domain = prev_dom; 3164 3165 out: 3166 mutex_unlock(&group->mutex); 3167 3168 return ret; 3169 } 3170 3171 /* 3172 * Changing the default domain through sysfs requires the users to unbind the 3173 * drivers from the devices in the iommu group. Return failure if this 3174 * requirement is not met. 3175 * 3176 * We need to consider the race between this and the device release path. 3177 * device_lock(dev) is used here to guarantee that the device release path 3178 * will not be entered at the same time. 3179 */ 3180 static ssize_t iommu_group_store_type(struct iommu_group *group, 3181 const char *buf, size_t count) 3182 { 3183 struct group_device *grp_dev; 3184 struct device *dev; 3185 int ret, req_type; 3186 3187 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 3188 return -EACCES; 3189 3190 if (WARN_ON(!group)) 3191 return -EINVAL; 3192 3193 if (sysfs_streq(buf, "identity")) 3194 req_type = IOMMU_DOMAIN_IDENTITY; 3195 else if (sysfs_streq(buf, "DMA")) 3196 req_type = IOMMU_DOMAIN_DMA; 3197 else if (sysfs_streq(buf, "auto")) 3198 req_type = 0; 3199 else 3200 return -EINVAL; 3201 3202 /* 3203 * Lock/Unlock the group mutex here before device lock to 3204 * 1. Make sure that the iommu group has only one device (this is a 3205 * prerequisite for step 2) 3206 * 2. Get struct *dev which is needed to lock device 3207 */ 3208 mutex_lock(&group->mutex); 3209 if (iommu_group_device_count(group) != 1) { 3210 mutex_unlock(&group->mutex); 3211 pr_err_ratelimited("Cannot change default domain: Group has more than one device\n"); 3212 return -EINVAL; 3213 } 3214 3215 /* Since group has only one device */ 3216 grp_dev = list_first_entry(&group->devices, struct group_device, list); 3217 dev = grp_dev->dev; 3218 get_device(dev); 3219 3220 /* 3221 * Don't hold the group mutex because taking group mutex first and then 3222 * the device lock could potentially cause a deadlock as below. Assume 3223 * two threads T1 and T2. T1 is trying to change default domain of an 3224 * iommu group and T2 is trying to hot unplug a device or release [1] VF 3225 * of a PCIe device which is in the same iommu group. T1 takes group 3226 * mutex and before it could take device lock assume T2 has taken device 3227 * lock and is yet to take group mutex. Now, both the threads will be 3228 * waiting for the other thread to release lock. Below, lock order was 3229 * suggested.
3230 * device_lock(dev); 3231 * mutex_lock(&group->mutex); 3232 * iommu_change_dev_def_domain(); 3233 * mutex_unlock(&group->mutex); 3234 * device_unlock(dev); 3235 * 3236 * [1] Typical device release path 3237 * device_lock() from device/driver core code 3238 * -> bus_notifier() 3239 * -> iommu_bus_notifier() 3240 * -> iommu_release_device() 3241 * -> ops->release_device() vendor driver calls back iommu core code 3242 * -> mutex_lock() from iommu core code 3243 */ 3244 mutex_unlock(&group->mutex); 3245 3246 /* Check if the device in the group still has a driver bound to it */ 3247 device_lock(dev); 3248 if (device_is_bound(dev)) { 3249 pr_err_ratelimited("Device is still bound to driver\n"); 3250 ret = -EBUSY; 3251 goto out; 3252 } 3253 3254 ret = iommu_change_dev_def_domain(group, dev, req_type); 3255 ret = ret ?: count; 3256 3257 out: 3258 device_unlock(dev); 3259 put_device(dev); 3260 3261 return ret; 3262 } 3263
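
/*
 * Illustrative, non-authoritative sketch of how a caller such as VFIO might
 * drive the unmanaged-domain API above; "dev", "group", "iova" and "paddr"
 * as well as the error handling are assumptions of this example only:
 *
 *	struct iommu_domain *domain;
 *	int ret;
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *
 *	ret = iommu_attach_group(domain, group);
 *	if (ret)
 *		goto out_free;
 *
 *	ret = iommu_map(domain, iova, paddr, SZ_2M, IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		goto out_detach;
 *
 *	... the device may now DMA to [iova, iova + SZ_2M) ...
 *
 *	iommu_unmap(domain, iova, SZ_2M);
 * out_detach:
 *	iommu_detach_group(domain, group);
 * out_free:
 *	iommu_domain_free(domain);
 */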