// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt) "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = true;
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
	struct list_head entry;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]		= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]	= "direct-relaxable",
	[IOMMU_RESV_RESERVED]		= "reserved",
	[IOMMU_RESV_MSI]		= "msi",
	[IOMMU_RESV_SW_MSI]		= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)

static void iommu_set_cmd_line_dma_api(void)
{
	iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
}

static bool iommu_cmd_line_dma_api(void)
{
	return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
}

static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);
static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	bool cmd_line = iommu_cmd_line_dma_api();

	if (!cmd_line) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && mem_encrypt_active()) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		cmd_line ? "(set via kernel command line)" : "");

	return 0;
}
subsys_initcall(iommu_subsys_init);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	iommu_fwspec_free(dev);
	kfree(dev->iommu);
	dev->iommu = NULL;
}

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	int ret;

	if (!ops)
		return -ENODEV;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto out_module_put;
	}

	dev->iommu->iommu_dev = iommu_dev;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}
	iommu_group_put(group);

	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);

	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	ops->release_device(dev);

out_module_put:
	module_put(ops->owner);

err_free:
	dev_iommu_free(dev);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	ret = __iommu_probe_device(dev, NULL);
	if (ret)
		goto err_out;

	group = iommu_group_get(dev);
	if (!group) {
		ret = -ENODEV;
		goto err_release;
	}

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 * There are still some drivers which don't
	 * support default domains, so the return value is not yet
	 * checked.
	 */
	iommu_alloc_default_domain(group, dev);

	if (group->default_domain) {
		ret = __iommu_attach_device(group->default_domain, dev);
		if (ret) {
			iommu_group_put(group);
			goto err_release;
		}
	}

	iommu_create_device_direct_mappings(group, dev);

	iommu_group_put(group);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_release:
	iommu_release_device(dev);

err_out:
	return ret;

}

void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!dev->iommu)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	ops->release_device(dev);

	iommu_group_remove_device(dev);
	module_put(ops->owner);
	dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	return kstrtobool(str, &iommu_dma_strict);
}
early_param("iommu.strict", iommu_dma_setup);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
					       region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	mutex_lock(&group->mutex);
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		}
	}
	mutex_unlock(&group->mutex);
	strcpy(buf, type);

	return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
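 *
 * A minimal usage sketch (illustrative only; the group variable and the
 * name string below are hypothetical, not taken from this file):
 *
 *	group = iommu_group_alloc();
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	if (iommu_group_set_name(group, "my-iommu-unit"))
 *		pr_warn("failed to name iommu group\n");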
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;
		size_t map_size = 0;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr <= end; addr += pg_size) {
			phys_addr_t phys_addr;

			if (addr == end)
				goto map_end;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (!phys_addr) {
				map_size += pg_size;
				continue;
			}

map_end:
			if (map_size) {
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot);
				if (ret)
					goto out;
				map_size = 0;
			}
		}

	}

	iommu_flush_iotlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

static bool iommu_is_attach_deferred(struct iommu_domain *domain,
				     struct device *dev)
{
	if (domain->ops->is_attach_deferred)
		return domain->ops->is_attach_deferred(domain, dev);

	return false;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
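 *
 * A minimal usage sketch, counting the devices in a group (the callback and
 * the count variable are hypothetical, not part of this file; returning
 * non-zero from the callback stops the iteration):
 *
 *	static int count_device(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, count_device);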
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}


int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier. Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
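 *
 * A minimal register/unregister sketch (the callback and notifier_block
 * names are hypothetical; the notifier data is the struct device being
 * added to or removed from the group):
 *
 *	static int my_group_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(data, "added to watched group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notify,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 *	...
 *	iommu_group_unregister_notifier(group, &my_nb);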
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
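 *
 * A minimal driver-side sketch (the IRQ thread function is hypothetical,
 * and the fault event is assumed to have been decoded from hardware into
 * the local evt before this point):
 *
 *	static irqreturn_t my_evt_thread(int irq, void *cookie)
 *	{
 *		struct device *dev = cookie;
 *		struct iommu_fault_event evt;
 *
 *		... fill evt.fault from the hardware event queue ...
 *		if (iommu_report_device_fault(dev, &evt))
 *			... no handler took it, complete or abort the fault here ...
 *		return IRQ_HANDLED;
 *	}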
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || !domain->ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = domain->ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at pcie
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static int iommu_get_def_domain_type(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
		return IOMMU_DOMAIN_DMA;

	if (ops->def_domain_type)
		return ops->def_domain_type(dev);

	return 0;
}

static int iommu_group_alloc_default_domain(struct bus_type *bus,
					    struct iommu_group *group,
					    unsigned int type)
{
	struct iommu_domain *dom;

	dom = __iommu_domain_alloc(bus, type);
	if (!dom && type != IOMMU_DOMAIN_DMA) {
		dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
		if (dom)
			pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
				type, group->name);
	}

	if (!dom)
		return -ENOMEM;

	group->default_domain = dom;
	if (!group->domain)
		group->domain = dom;

	if (!iommu_dma_strict) {
		int attr = 1;
		iommu_domain_set_attr(dom,
				      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
				      &attr);
	}

	return 0;
}

static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev)
{
	unsigned int type;

	if (group->default_domain)
		return 0;

	type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;

	return iommu_group_alloc_default_domain(dev->bus, group, type);
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device. On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device. The reference should be released with iommu_group_put().
 */
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (!ops)
		return ERR_PTR(-EINVAL);

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto out_put_group;

	return group;

out_put_group:
	iommu_group_put(group);

	return ERR_PTR(ret);
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
	struct list_head *group_list = data;
	struct iommu_group *group;
	int ret;

	/* Device is probed already if in a group */
	group = iommu_group_get(dev);
	if (group) {
		iommu_group_put(group);
		return 0;
	}

	ret = __iommu_probe_device(dev, group_list);
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	iommu_release_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	unsigned long group_action = 0;
	struct device *dev = data;
	struct iommu_group *group;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

struct __group_domain_type {
	struct device *dev;
	unsigned int type;
};

static int probe_get_default_domain_type(struct device *dev, void *data)
{
	struct __group_domain_type *gtype = data;
	unsigned int type = iommu_get_def_domain_type(dev);

	if (type) {
		if (gtype->type && gtype->type != type) {
			dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
				 iommu_domain_type_str(type),
				 dev_name(gtype->dev),
				 iommu_domain_type_str(gtype->type));
			gtype->type = 0;
		}

		if (!gtype->dev) {
			gtype->dev = dev;
			gtype->type = type;
		}
	}

	return 0;
}

static void probe_alloc_default_domain(struct bus_type *bus,
				       struct iommu_group *group)
{
	struct __group_domain_type gtype;

	memset(&gtype, 0, sizeof(gtype));

	/* Ask for default domain requirements of all devices in the group */
	__iommu_group_for_each_dev(group, &gtype,
				   probe_get_default_domain_type);

	if (!gtype.type)
		gtype.type = iommu_def_domain_type;

	iommu_group_alloc_default_domain(bus, group, gtype.type);

}

static int iommu_group_do_dma_attach(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;
	int ret = 0;

	if (!iommu_is_attach_deferred(domain, dev))
		ret = __iommu_attach_device(domain, dev);

	return ret;
}

static int __iommu_group_dma_attach(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group->default_domain,
					  iommu_group_do_dma_attach);
}

static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	if (domain->ops->probe_finalize)
		domain->ops->probe_finalize(dev);

	return 0;
}

static void __iommu_group_dma_finalize(struct iommu_group *group)
{
	__iommu_group_for_each_dev(group, group->default_domain,
				   iommu_group_do_probe_finalize);
}

static int iommu_do_create_direct_mappings(struct device *dev, void *data)
{
	struct iommu_group *group = data;

	iommu_create_device_direct_mappings(group, dev);

	return 0;
}

static int iommu_group_create_direct_mappings(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group,
					  iommu_do_create_direct_mappings);
}

int bus_iommu_probe(struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	/*
	 * This code-path does not allocate the default domain when
	 * creating the iommu group, so do it after the groups are
	 * created.
	 */
	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		/* Remove item from the list */
		list_del_init(&group->entry);

		mutex_lock(&group->mutex);

		/* Try to allocate default domain */
		probe_alloc_default_domain(bus, group);

		if (!group->default_domain) {
			mutex_unlock(&group->mutex);
			continue;
		}

		iommu_group_create_direct_mappings(group);

		ret = __iommu_group_dma_attach(group);

		mutex_unlock(&group->mutex);

		if (ret)
			break;

		__iommu_group_dma_finalize(group);
	}

	return ret;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	struct notifier_block *nb;
	int err;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_iommu_probe(bus);
	if (err)
		goto out_err;


	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (ops == NULL) {
		bus->iommu_ops = NULL;
		return 0;
	}

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
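 *
 * A minimal usage sketch (the handler name is hypothetical); the handler
 * has the iommu_fault_handler_t signature and returns non-zero to report
 * the fault as unhandled:
 *
 *	static int my_fault_handler(struct iommu_domain *dom, struct device *dev,
 *				    unsigned long iova, int flags, void *token)
 *	{
 *		dev_err(dev, "unexpected fault at IOVA %lx\n", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);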
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

/*
 * Check flags and other user provided data for valid combinations. We also
 * make sure no reserved fields or unused flags are set. This is to ensure
 * not breaking userspace in the future when these fields or flags are used.
 */
static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
{
	u32 mask;
	int i;

	if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
		return -EINVAL;

	mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
	if (info->cache & ~mask)
		return -EINVAL;

	if (info->granularity >= IOMMU_INV_GRANU_NR)
		return -EINVAL;

	switch (info->granularity) {
	case IOMMU_INV_GRANU_ADDR:
		if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
			return -EINVAL;

		mask = IOMMU_INV_ADDR_FLAGS_PASID |
			IOMMU_INV_ADDR_FLAGS_ARCHID |
			IOMMU_INV_ADDR_FLAGS_LEAF;

		if (info->granu.addr_info.flags & ~mask)
			return -EINVAL;
		break;
	case IOMMU_INV_GRANU_PASID:
		mask = IOMMU_INV_PASID_FLAGS_PASID |
			IOMMU_INV_PASID_FLAGS_ARCHID;
		if (info->granu.pasid_info.flags & ~mask)
			return -EINVAL;

		break;
	case IOMMU_INV_GRANU_DOMAIN:
		if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* Check reserved padding fields */
	for (i = 0; i < sizeof(info->padding); i++) {
		if (info->padding[i])
			return -EINVAL;
	}

	return 0;
}

int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
				void __user *uinfo)
{
	struct iommu_cache_invalidate_info inv_info = { 0 };
	u32 minsz;
	int ret;

	if (unlikely(!domain->ops->cache_invalidate))
		return -ENODEV;

	/*
	 * No new spaces can be added before the variable sized union, the
	 * minimum size is the offset to the union.
	 */
	minsz = offsetof(struct iommu_cache_invalidate_info, granu);

	/* Copy minsz from user to get flags and argsz */
	if (copy_from_user(&inv_info, uinfo, minsz))
		return -EFAULT;

	/* Fields before the variable size union are mandatory */
	if (inv_info.argsz < minsz)
		return -EINVAL;

	/* PASID and address granu require additional info beyond minsz */
	if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
		return -EINVAL;

	if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
		return -EINVAL;

	/*
	 * User might be using a newer UAPI header which has a larger data
	 * size, we shall support the existing flags within the current
	 * size. Copy the remaining user data _after_ minsz but not more
	 * than the current kernel supported size.
2077 */ 2078 if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz, 2079 min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz)) 2080 return -EFAULT; 2081 2082 /* Now the argsz is validated, check the content */ 2083 ret = iommu_check_cache_invl_data(&inv_info); 2084 if (ret) 2085 return ret; 2086 2087 return domain->ops->cache_invalidate(domain, dev, &inv_info); 2088 } 2089 EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate); 2090 2091 static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data) 2092 { 2093 u64 mask; 2094 int i; 2095 2096 if (data->version != IOMMU_GPASID_BIND_VERSION_1) 2097 return -EINVAL; 2098 2099 /* Check the range of supported formats */ 2100 if (data->format >= IOMMU_PASID_FORMAT_LAST) 2101 return -EINVAL; 2102 2103 /* Check all flags */ 2104 mask = IOMMU_SVA_GPASID_VAL; 2105 if (data->flags & ~mask) 2106 return -EINVAL; 2107 2108 /* Check reserved padding fields */ 2109 for (i = 0; i < sizeof(data->padding); i++) { 2110 if (data->padding[i]) 2111 return -EINVAL; 2112 } 2113 2114 return 0; 2115 } 2116 2117 static int iommu_sva_prepare_bind_data(void __user *udata, 2118 struct iommu_gpasid_bind_data *data) 2119 { 2120 u32 minsz; 2121 2122 /* 2123 * No new spaces can be added before the variable sized union, the 2124 * minimum size is the offset to the union. 2125 */ 2126 minsz = offsetof(struct iommu_gpasid_bind_data, vendor); 2127 2128 /* Copy minsz from user to get flags and argsz */ 2129 if (copy_from_user(data, udata, minsz)) 2130 return -EFAULT; 2131 2132 /* Fields before the variable size union are mandatory */ 2133 if (data->argsz < minsz) 2134 return -EINVAL; 2135 /* 2136 * User might be using a newer UAPI header, we shall let IOMMU vendor 2137 * driver decide on what size it needs. Since the guest PASID bind data 2138 * can be vendor specific, larger argsz could be the result of extension 2139 * for one vendor but it should not affect another vendor. 
* Copy the remaining user data _after_ minsz 2141 */ 2142 if (copy_from_user((void *)data + minsz, udata + minsz, 2143 min_t(u32, data->argsz, sizeof(*data)) - minsz)) 2144 return -EFAULT; 2145 2146 return iommu_check_bind_data(data); 2147 } 2148 2149 int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, 2150 void __user *udata) 2151 { 2152 struct iommu_gpasid_bind_data data = { 0 }; 2153 int ret; 2154 2155 if (unlikely(!domain->ops->sva_bind_gpasid)) 2156 return -ENODEV; 2157 2158 ret = iommu_sva_prepare_bind_data(udata, &data); 2159 if (ret) 2160 return ret; 2161 2162 return domain->ops->sva_bind_gpasid(domain, dev, &data); 2163 } 2164 EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid); 2165 2166 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, 2167 ioasid_t pasid) 2168 { 2169 if (unlikely(!domain->ops->sva_unbind_gpasid)) 2170 return -ENODEV; 2171 2172 return domain->ops->sva_unbind_gpasid(dev, pasid); 2173 } 2174 EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid); 2175 2176 int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, 2177 void __user *udata) 2178 { 2179 struct iommu_gpasid_bind_data data = { 0 }; 2180 int ret; 2181 2182 if (unlikely(!domain->ops->sva_bind_gpasid)) 2183 return -ENODEV; 2184 2185 ret = iommu_sva_prepare_bind_data(udata, &data); 2186 if (ret) 2187 return ret; 2188 2189 return iommu_sva_unbind_gpasid(domain, dev, data.hpasid); 2190 } 2191 EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid); 2192 2193 static void __iommu_detach_device(struct iommu_domain *domain, 2194 struct device *dev) 2195 { 2196 if (iommu_is_attach_deferred(domain, dev)) 2197 return; 2198 2199 if (unlikely(domain->ops->detach_dev == NULL)) 2200 return; 2201 2202 domain->ops->detach_dev(domain, dev); 2203 trace_detach_device_from_domain(dev); 2204 } 2205 2206 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 2207 { 2208 struct iommu_group *group; 2209 2210 group = iommu_group_get(dev); 2211 if (!group) 2212 return; 2213 2214 mutex_lock(&group->mutex); 2215 if (iommu_group_device_count(group) != 1) { 2216 WARN_ON(1); 2217 goto out_unlock; 2218 } 2219 2220 __iommu_detach_group(domain, group); 2221 2222 out_unlock: 2223 mutex_unlock(&group->mutex); 2224 iommu_group_put(group); 2225 } 2226 EXPORT_SYMBOL_GPL(iommu_detach_device); 2227 2228 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 2229 { 2230 struct iommu_domain *domain; 2231 struct iommu_group *group; 2232 2233 group = iommu_group_get(dev); 2234 if (!group) 2235 return NULL; 2236 2237 domain = group->domain; 2238 2239 iommu_group_put(group); 2240 2241 return domain; 2242 } 2243 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 2244 2245 /* 2246 * For use by IOMMU_DOMAIN_DMA implementations which already provide their own 2247 * guarantees that the group and its default domain are valid and correct. 2248 */ 2249 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 2250 { 2251 return dev->iommu_group->default_domain; 2252 } 2253 2254 /* 2255 * IOMMU groups are really the natural working unit of the IOMMU, but 2256 * the IOMMU API works on domains and devices. Bridge that gap by 2257 * iterating over the devices in a group. Ideally we'd have a single 2258 * device which represents the requestor ID of the group, but we also 2259 * allow IOMMU drivers to create policy defined minimum sets, where 2260 * the physical hardware may be able to distinguish members, but we 2261 * wish to group them at a higher level (ex.
untrusted multi-function 2262 * PCI devices). Thus we attach each device. 2263 */ 2264 static int iommu_group_do_attach_device(struct device *dev, void *data) 2265 { 2266 struct iommu_domain *domain = data; 2267 2268 return __iommu_attach_device(domain, dev); 2269 } 2270 2271 static int __iommu_attach_group(struct iommu_domain *domain, 2272 struct iommu_group *group) 2273 { 2274 int ret; 2275 2276 if (group->default_domain && group->domain != group->default_domain) 2277 return -EBUSY; 2278 2279 ret = __iommu_group_for_each_dev(group, domain, 2280 iommu_group_do_attach_device); 2281 if (ret == 0) 2282 group->domain = domain; 2283 2284 return ret; 2285 } 2286 2287 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 2288 { 2289 int ret; 2290 2291 mutex_lock(&group->mutex); 2292 ret = __iommu_attach_group(domain, group); 2293 mutex_unlock(&group->mutex); 2294 2295 return ret; 2296 } 2297 EXPORT_SYMBOL_GPL(iommu_attach_group); 2298 2299 static int iommu_group_do_detach_device(struct device *dev, void *data) 2300 { 2301 struct iommu_domain *domain = data; 2302 2303 __iommu_detach_device(domain, dev); 2304 2305 return 0; 2306 } 2307 2308 static void __iommu_detach_group(struct iommu_domain *domain, 2309 struct iommu_group *group) 2310 { 2311 int ret; 2312 2313 if (!group->default_domain) { 2314 __iommu_group_for_each_dev(group, domain, 2315 iommu_group_do_detach_device); 2316 group->domain = NULL; 2317 return; 2318 } 2319 2320 if (group->domain == group->default_domain) 2321 return; 2322 2323 /* Detach by re-attaching to the default domain */ 2324 ret = __iommu_group_for_each_dev(group, group->default_domain, 2325 iommu_group_do_attach_device); 2326 if (ret != 0) 2327 WARN_ON(1); 2328 else 2329 group->domain = group->default_domain; 2330 } 2331 2332 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2333 { 2334 mutex_lock(&group->mutex); 2335 __iommu_detach_group(domain, group); 2336 mutex_unlock(&group->mutex); 2337 } 2338 EXPORT_SYMBOL_GPL(iommu_detach_group); 2339 2340 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2341 { 2342 if (unlikely(domain->ops->iova_to_phys == NULL)) 2343 return 0; 2344 2345 return domain->ops->iova_to_phys(domain, iova); 2346 } 2347 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2348 2349 static size_t iommu_pgsize(struct iommu_domain *domain, 2350 unsigned long addr_merge, size_t size) 2351 { 2352 unsigned int pgsize_idx; 2353 size_t pgsize; 2354 2355 /* Max page size that still fits into 'size' */ 2356 pgsize_idx = __fls(size); 2357 2358 /* need to consider alignment requirements ? 
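 *
 * Illustrative example (made-up values): with pgsize_bitmap = SZ_4K |
 * SZ_2M | SZ_1G, addr_merge = 0x3200000 and size = 0x410000, __fls(size)
 * gives index 22 and __ffs(addr_merge) gives index 21, so pgsize_idx is
 * clamped to 21; the mask (1UL << 22) - 1 ANDed with the bitmap leaves
 * SZ_4K | SZ_2M, and the highest remaining bit selects a 2MiB page.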
*/ 2359 if (likely(addr_merge)) { 2360 /* Max page size allowed by address */ 2361 unsigned int align_pgsize_idx = __ffs(addr_merge); 2362 pgsize_idx = min(pgsize_idx, align_pgsize_idx); 2363 } 2364 2365 /* build a mask of acceptable page sizes */ 2366 pgsize = (1UL << (pgsize_idx + 1)) - 1; 2367 2368 /* throw away page sizes not supported by the hardware */ 2369 pgsize &= domain->pgsize_bitmap; 2370 2371 /* make sure we're still sane */ 2372 BUG_ON(!pgsize); 2373 2374 /* pick the biggest page */ 2375 pgsize_idx = __fls(pgsize); 2376 pgsize = 1UL << pgsize_idx; 2377 2378 return pgsize; 2379 } 2380 2381 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2382 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2383 { 2384 const struct iommu_ops *ops = domain->ops; 2385 unsigned long orig_iova = iova; 2386 unsigned int min_pagesz; 2387 size_t orig_size = size; 2388 phys_addr_t orig_paddr = paddr; 2389 int ret = 0; 2390 2391 if (unlikely(ops->map == NULL || 2392 domain->pgsize_bitmap == 0UL)) 2393 return -ENODEV; 2394 2395 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2396 return -EINVAL; 2397 2398 /* find out the minimum page size supported */ 2399 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2400 2401 /* 2402 * both the virtual address and the physical one, as well as 2403 * the size of the mapping, must be aligned (at least) to the 2404 * size of the smallest page supported by the hardware 2405 */ 2406 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2407 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2408 iova, &paddr, size, min_pagesz); 2409 return -EINVAL; 2410 } 2411 2412 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2413 2414 while (size) { 2415 size_t pgsize = iommu_pgsize(domain, iova | paddr, size); 2416 2417 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", 2418 iova, &paddr, pgsize); 2419 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); 2420 2421 if (ret) 2422 break; 2423 2424 iova += pgsize; 2425 paddr += pgsize; 2426 size -= pgsize; 2427 } 2428 2429 if (ops->iotlb_sync_map) 2430 ops->iotlb_sync_map(domain); 2431 2432 /* unroll mapping in case something went wrong */ 2433 if (ret) 2434 iommu_unmap(domain, orig_iova, orig_size - size); 2435 else 2436 trace_map(orig_iova, orig_paddr, orig_size); 2437 2438 return ret; 2439 } 2440 2441 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2442 phys_addr_t paddr, size_t size, int prot) 2443 { 2444 might_sleep(); 2445 return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL); 2446 } 2447 EXPORT_SYMBOL_GPL(iommu_map); 2448 2449 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, 2450 phys_addr_t paddr, size_t size, int prot) 2451 { 2452 return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC); 2453 } 2454 EXPORT_SYMBOL_GPL(iommu_map_atomic); 2455 2456 static size_t __iommu_unmap(struct iommu_domain *domain, 2457 unsigned long iova, size_t size, 2458 struct iommu_iotlb_gather *iotlb_gather) 2459 { 2460 const struct iommu_ops *ops = domain->ops; 2461 size_t unmapped_page, unmapped = 0; 2462 unsigned long orig_iova = iova; 2463 unsigned int min_pagesz; 2464 2465 if (unlikely(ops->unmap == NULL || 2466 domain->pgsize_bitmap == 0UL)) 2467 return 0; 2468 2469 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2470 return 0; 2471 2472 /* find out the minimum page size supported */ 2473 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2474 2475 /* 2476 * The virtual address, as well as the size of the mapping, 
must be 2477 * aligned (at least) to the size of the smallest page supported 2478 * by the hardware 2479 */ 2480 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2481 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2482 iova, size, min_pagesz); 2483 return 0; 2484 } 2485 2486 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2487 2488 /* 2489 * Keep iterating until we either unmap 'size' bytes (or more) 2490 * or we hit an area that isn't mapped. 2491 */ 2492 while (unmapped < size) { 2493 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); 2494 2495 unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather); 2496 if (!unmapped_page) 2497 break; 2498 2499 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2500 iova, unmapped_page); 2501 2502 iova += unmapped_page; 2503 unmapped += unmapped_page; 2504 } 2505 2506 trace_unmap(orig_iova, size, unmapped); 2507 return unmapped; 2508 } 2509 2510 size_t iommu_unmap(struct iommu_domain *domain, 2511 unsigned long iova, size_t size) 2512 { 2513 struct iommu_iotlb_gather iotlb_gather; 2514 size_t ret; 2515 2516 iommu_iotlb_gather_init(&iotlb_gather); 2517 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2518 iommu_iotlb_sync(domain, &iotlb_gather); 2519 2520 return ret; 2521 } 2522 EXPORT_SYMBOL_GPL(iommu_unmap); 2523 2524 size_t iommu_unmap_fast(struct iommu_domain *domain, 2525 unsigned long iova, size_t size, 2526 struct iommu_iotlb_gather *iotlb_gather) 2527 { 2528 return __iommu_unmap(domain, iova, size, iotlb_gather); 2529 } 2530 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2531 2532 static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2533 struct scatterlist *sg, unsigned int nents, int prot, 2534 gfp_t gfp) 2535 { 2536 size_t len = 0, mapped = 0; 2537 phys_addr_t start; 2538 unsigned int i = 0; 2539 int ret; 2540 2541 while (i <= nents) { 2542 phys_addr_t s_phys = sg_phys(sg); 2543 2544 if (len && s_phys != start + len) { 2545 ret = __iommu_map(domain, iova + mapped, start, 2546 len, prot, gfp); 2547 2548 if (ret) 2549 goto out_err; 2550 2551 mapped += len; 2552 len = 0; 2553 } 2554 2555 if (len) { 2556 len += sg->length; 2557 } else { 2558 len = sg->length; 2559 start = s_phys; 2560 } 2561 2562 if (++i < nents) 2563 sg = sg_next(sg); 2564 } 2565 2566 return mapped; 2567 2568 out_err: 2569 /* undo mappings already done */ 2570 iommu_unmap(domain, iova, mapped); 2571 2572 return 0; 2573 2574 } 2575 2576 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2577 struct scatterlist *sg, unsigned int nents, int prot) 2578 { 2579 might_sleep(); 2580 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL); 2581 } 2582 EXPORT_SYMBOL_GPL(iommu_map_sg); 2583 2584 size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, 2585 struct scatterlist *sg, unsigned int nents, int prot) 2586 { 2587 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); 2588 } 2589 EXPORT_SYMBOL_GPL(iommu_map_sg_atomic); 2590 2591 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, 2592 phys_addr_t paddr, u64 size, int prot) 2593 { 2594 if (unlikely(domain->ops->domain_window_enable == NULL)) 2595 return -ENODEV; 2596 2597 return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size, 2598 prot); 2599 } 2600 EXPORT_SYMBOL_GPL(iommu_domain_window_enable); 2601 2602 void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr) 2603 { 2604 if (unlikely(domain->ops->domain_window_disable == NULL)) 2605 return; 2606 2607 return 
domain->ops->domain_window_disable(domain, wnd_nr); 2608 } 2609 EXPORT_SYMBOL_GPL(iommu_domain_window_disable); 2610 2611 /** 2612 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2613 * @domain: the iommu domain where the fault has happened 2614 * @dev: the device where the fault has happened 2615 * @iova: the faulting address 2616 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2617 * 2618 * This function should be called by the low-level IOMMU implementations 2619 * whenever IOMMU faults happen, to allow high-level users, that are 2620 * interested in such events, to know about them. 2621 * 2622 * This event may be useful for several possible use cases: 2623 * - mere logging of the event 2624 * - dynamic TLB/PTE loading 2625 * - if restarting of the faulting device is required 2626 * 2627 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2628 * PTE/TLB loading will one day be supported, implementations will be able 2629 * to tell whether it succeeded or not according to this return value). 2630 * 2631 * Specifically, -ENOSYS is returned if a fault handler isn't installed 2632 * (though fault handlers can also return -ENOSYS, in case they want to 2633 * elicit the default behavior of the IOMMU drivers). 2634 */ 2635 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 2636 unsigned long iova, int flags) 2637 { 2638 int ret = -ENOSYS; 2639 2640 /* 2641 * if upper layers showed interest and installed a fault handler, 2642 * invoke it. 2643 */ 2644 if (domain->handler) 2645 ret = domain->handler(domain, dev, iova, flags, 2646 domain->handler_token); 2647 2648 trace_io_page_fault(dev, iova, flags); 2649 return ret; 2650 } 2651 EXPORT_SYMBOL_GPL(report_iommu_fault); 2652 2653 static int __init iommu_init(void) 2654 { 2655 iommu_group_kset = kset_create_and_add("iommu_groups", 2656 NULL, kernel_kobj); 2657 BUG_ON(!iommu_group_kset); 2658 2659 iommu_debugfs_setup(); 2660 2661 return 0; 2662 } 2663 core_initcall(iommu_init); 2664 2665 int iommu_domain_get_attr(struct iommu_domain *domain, 2666 enum iommu_attr attr, void *data) 2667 { 2668 struct iommu_domain_geometry *geometry; 2669 bool *paging; 2670 int ret = 0; 2671 2672 switch (attr) { 2673 case DOMAIN_ATTR_GEOMETRY: 2674 geometry = data; 2675 *geometry = domain->geometry; 2676 2677 break; 2678 case DOMAIN_ATTR_PAGING: 2679 paging = data; 2680 *paging = (domain->pgsize_bitmap != 0UL); 2681 break; 2682 default: 2683 if (!domain->ops->domain_get_attr) 2684 return -EINVAL; 2685 2686 ret = domain->ops->domain_get_attr(domain, attr, data); 2687 } 2688 2689 return ret; 2690 } 2691 EXPORT_SYMBOL_GPL(iommu_domain_get_attr); 2692 2693 int iommu_domain_set_attr(struct iommu_domain *domain, 2694 enum iommu_attr attr, void *data) 2695 { 2696 int ret = 0; 2697 2698 switch (attr) { 2699 default: 2700 if (domain->ops->domain_set_attr == NULL) 2701 return -EINVAL; 2702 2703 ret = domain->ops->domain_set_attr(domain, attr, data); 2704 } 2705 2706 return ret; 2707 } 2708 EXPORT_SYMBOL_GPL(iommu_domain_set_attr); 2709 2710 void iommu_get_resv_regions(struct device *dev, struct list_head *list) 2711 { 2712 const struct iommu_ops *ops = dev->bus->iommu_ops; 2713 2714 if (ops && ops->get_resv_regions) 2715 ops->get_resv_regions(dev, list); 2716 } 2717 2718 void iommu_put_resv_regions(struct device *dev, struct list_head *list) 2719 { 2720 const struct iommu_ops *ops = dev->bus->iommu_ops; 2721 2722 if (ops && ops->put_resv_regions) 2723 ops->put_resv_regions(dev, list); 
2724 } 2725 2726 /** 2727 * generic_iommu_put_resv_regions - Reserved region driver helper 2728 * @dev: device for which to free reserved regions 2729 * @list: reserved region list for device 2730 * 2731 * IOMMU drivers can use this to implement their .put_resv_regions() callback 2732 * for simple reservations. Memory allocated for each reserved region will be 2733 * freed. If an IOMMU driver allocates additional resources per region, it is 2734 * going to have to implement a custom callback. 2735 */ 2736 void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list) 2737 { 2738 struct iommu_resv_region *entry, *next; 2739 2740 list_for_each_entry_safe(entry, next, list, list) 2741 kfree(entry); 2742 } 2743 EXPORT_SYMBOL(generic_iommu_put_resv_regions); 2744 2745 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 2746 size_t length, int prot, 2747 enum iommu_resv_type type) 2748 { 2749 struct iommu_resv_region *region; 2750 2751 region = kzalloc(sizeof(*region), GFP_KERNEL); 2752 if (!region) 2753 return NULL; 2754 2755 INIT_LIST_HEAD(®ion->list); 2756 region->start = start; 2757 region->length = length; 2758 region->prot = prot; 2759 region->type = type; 2760 return region; 2761 } 2762 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); 2763 2764 void iommu_set_default_passthrough(bool cmd_line) 2765 { 2766 if (cmd_line) 2767 iommu_set_cmd_line_dma_api(); 2768 2769 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2770 } 2771 2772 void iommu_set_default_translated(bool cmd_line) 2773 { 2774 if (cmd_line) 2775 iommu_set_cmd_line_dma_api(); 2776 2777 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2778 } 2779 2780 bool iommu_default_passthrough(void) 2781 { 2782 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2783 } 2784 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2785 2786 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 2787 { 2788 const struct iommu_ops *ops = NULL; 2789 struct iommu_device *iommu; 2790 2791 spin_lock(&iommu_device_lock); 2792 list_for_each_entry(iommu, &iommu_device_list, list) 2793 if (iommu->fwnode == fwnode) { 2794 ops = iommu->ops; 2795 break; 2796 } 2797 spin_unlock(&iommu_device_lock); 2798 return ops; 2799 } 2800 2801 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, 2802 const struct iommu_ops *ops) 2803 { 2804 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2805 2806 if (fwspec) 2807 return ops == fwspec->ops ? 
0 : -EINVAL; 2808 2809 if (!dev_iommu_get(dev)) 2810 return -ENOMEM; 2811 2812 /* Preallocate for the overwhelmingly common case of 1 ID */ 2813 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2814 if (!fwspec) 2815 return -ENOMEM; 2816 2817 of_node_get(to_of_node(iommu_fwnode)); 2818 fwspec->iommu_fwnode = iommu_fwnode; 2819 fwspec->ops = ops; 2820 dev_iommu_fwspec_set(dev, fwspec); 2821 return 0; 2822 } 2823 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2824 2825 void iommu_fwspec_free(struct device *dev) 2826 { 2827 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2828 2829 if (fwspec) { 2830 fwnode_handle_put(fwspec->iommu_fwnode); 2831 kfree(fwspec); 2832 dev_iommu_fwspec_set(dev, NULL); 2833 } 2834 } 2835 EXPORT_SYMBOL_GPL(iommu_fwspec_free); 2836 2837 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) 2838 { 2839 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2840 int i, new_num; 2841 2842 if (!fwspec) 2843 return -EINVAL; 2844 2845 new_num = fwspec->num_ids + num_ids; 2846 if (new_num > 1) { 2847 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2848 GFP_KERNEL); 2849 if (!fwspec) 2850 return -ENOMEM; 2851 2852 dev_iommu_fwspec_set(dev, fwspec); 2853 } 2854 2855 for (i = 0; i < num_ids; i++) 2856 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2857 2858 fwspec->num_ids = new_num; 2859 return 0; 2860 } 2861 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 2862 2863 /* 2864 * Per device IOMMU features. 2865 */ 2866 bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat) 2867 { 2868 const struct iommu_ops *ops = dev->bus->iommu_ops; 2869 2870 if (ops && ops->dev_has_feat) 2871 return ops->dev_has_feat(dev, feat); 2872 2873 return false; 2874 } 2875 EXPORT_SYMBOL_GPL(iommu_dev_has_feature); 2876 2877 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2878 { 2879 const struct iommu_ops *ops = dev->bus->iommu_ops; 2880 2881 if (ops && ops->dev_enable_feat) 2882 return ops->dev_enable_feat(dev, feat); 2883 2884 return -ENODEV; 2885 } 2886 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 2887 2888 /* 2889 * The device drivers should do the necessary cleanups before calling this. 2890 * For example, before disabling the aux-domain feature, the device driver 2891 * should detach all aux-domains. Otherwise, this will return -EBUSY. 2892 */ 2893 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 2894 { 2895 const struct iommu_ops *ops = dev->bus->iommu_ops; 2896 2897 if (ops && ops->dev_disable_feat) 2898 return ops->dev_disable_feat(dev, feat); 2899 2900 return -EBUSY; 2901 } 2902 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 2903 2904 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) 2905 { 2906 const struct iommu_ops *ops = dev->bus->iommu_ops; 2907 2908 if (ops && ops->dev_feat_enabled) 2909 return ops->dev_feat_enabled(dev, feat); 2910 2911 return false; 2912 } 2913 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled); 2914 2915 /* 2916 * Aux-domain specific attach/detach. 2917 * 2918 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns 2919 * true. Also, as long as domains are attached to a device through this 2920 * interface, any tries to call iommu_attach_device() should fail 2921 * (iommu_detach_device() can't fail, so we fail when trying to re-attach). 2922 * This should make us safe against a device being attached to a guest as a 2923 * whole while there are still pasid users on it (aux and sva). 
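 *
 * A rough usage sketch (illustrative only, error handling trimmed; assumes
 * the driver advertises IOMMU_DEV_FEAT_AUX):
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
 *	domain = iommu_domain_alloc(dev->bus);
 *	ret = iommu_aux_attach_device(domain, dev);
 *	pasid = iommu_aux_get_pasid(domain, dev);
 *	... program the device to tag its DMA with pasid ...
 *	iommu_aux_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);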
2924 */ 2925 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) 2926 { 2927 int ret = -ENODEV; 2928 2929 if (domain->ops->aux_attach_dev) 2930 ret = domain->ops->aux_attach_dev(domain, dev); 2931 2932 if (!ret) 2933 trace_attach_device_to_domain(dev); 2934 2935 return ret; 2936 } 2937 EXPORT_SYMBOL_GPL(iommu_aux_attach_device); 2938 2939 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) 2940 { 2941 if (domain->ops->aux_detach_dev) { 2942 domain->ops->aux_detach_dev(domain, dev); 2943 trace_detach_device_from_domain(dev); 2944 } 2945 } 2946 EXPORT_SYMBOL_GPL(iommu_aux_detach_device); 2947 2948 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) 2949 { 2950 int ret = -ENODEV; 2951 2952 if (domain->ops->aux_get_pasid) 2953 ret = domain->ops->aux_get_pasid(domain, dev); 2954 2955 return ret; 2956 } 2957 EXPORT_SYMBOL_GPL(iommu_aux_get_pasid); 2958 2959 /** 2960 * iommu_sva_bind_device() - Bind a process address space to a device 2961 * @dev: the device 2962 * @mm: the mm to bind, caller must hold a reference to it * @drvdata: opaque driver data, passed through to the IOMMU driver's sva_bind() callback 2963 * 2964 * Create a bond between device and address space, allowing the device to access 2965 * the mm using the returned PASID. If a bond already exists between @dev and 2966 * @mm, it is returned and an additional reference is taken. Caller must call 2967 * iommu_sva_unbind_device() to release each reference. 2968 * 2969 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to 2970 * initialize the required SVA features. 2971 * 2972 * On error, returns an ERR_PTR value. 2973 */ 2974 struct iommu_sva * 2975 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) 2976 { 2977 struct iommu_group *group; 2978 struct iommu_sva *handle = ERR_PTR(-EINVAL); 2979 const struct iommu_ops *ops = dev->bus->iommu_ops; 2980 2981 if (!ops || !ops->sva_bind) 2982 return ERR_PTR(-ENODEV); 2983 2984 group = iommu_group_get(dev); 2985 if (!group) 2986 return ERR_PTR(-ENODEV); 2987 2988 /* Ensure device count and domain don't change while we're binding */ 2989 mutex_lock(&group->mutex); 2990 2991 /* 2992 * To keep things simple, SVA currently doesn't support IOMMU groups 2993 * with more than one device. Existing SVA-capable systems are not 2994 * affected by the problems that required IOMMU groups (lack of ACS 2995 * isolation, device ID aliasing and other hardware issues). 2996 */ 2997 if (iommu_group_device_count(group) != 1) 2998 goto out_unlock; 2999 3000 handle = ops->sva_bind(dev, mm, drvdata); 3001 3002 out_unlock: 3003 mutex_unlock(&group->mutex); 3004 iommu_group_put(group); 3005 3006 return handle; 3007 } 3008 EXPORT_SYMBOL_GPL(iommu_sva_bind_device); 3009 3010 /** 3011 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device 3012 * @handle: the handle returned by iommu_sva_bind_device() 3013 * 3014 * Put reference to a bond between device and address space. The device should 3015 * not be issuing any more transactions for this PASID. All outstanding page 3016 * requests for this PASID must have been flushed to the IOMMU.
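 *
 * A minimal lifecycle sketch (illustrative only; assumes the driver supports
 * IOMMU_DEV_FEAT_SVA and the caller holds a reference on the mm):
 *
 *	handle = iommu_sva_bind_device(dev, mm, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	... issue DMA tagged with pasid ...
 *	iommu_sva_unbind_device(handle);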
3017 */ 3018 void iommu_sva_unbind_device(struct iommu_sva *handle) 3019 { 3020 struct iommu_group *group; 3021 struct device *dev = handle->dev; 3022 const struct iommu_ops *ops = dev->bus->iommu_ops; 3023 3024 if (!ops || !ops->sva_unbind) 3025 return; 3026 3027 group = iommu_group_get(dev); 3028 if (!group) 3029 return; 3030 3031 mutex_lock(&group->mutex); 3032 ops->sva_unbind(handle); 3033 mutex_unlock(&group->mutex); 3034 3035 iommu_group_put(group); 3036 } 3037 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); 3038 3039 u32 iommu_sva_get_pasid(struct iommu_sva *handle) 3040 { 3041 const struct iommu_ops *ops = handle->dev->bus->iommu_ops; 3042 3043 if (!ops || !ops->sva_get_pasid) 3044 return IOMMU_PASID_INVALID; 3045 3046 return ops->sva_get_pasid(handle); 3047 } 3048 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); 3049 3050 /* 3051 * Changes the default domain of an iommu group that has *only* one device 3052 * 3053 * @group: The group for which the default domain should be changed 3054 * @prev_dev: The device in the group (this is used to make sure that the device 3055 * hasn't changed after the caller has called this function) 3056 * @type: The type of the new default domain that gets associated with the group 3057 * 3058 * Returns 0 on success and error code on failure 3059 * 3060 * Note: 3061 * 1. Presently, this function is called only when user requests to change the 3062 * group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type 3063 * Please take a closer look if intended to use for other purposes. 3064 */ 3065 static int iommu_change_dev_def_domain(struct iommu_group *group, 3066 struct device *prev_dev, int type) 3067 { 3068 struct iommu_domain *prev_dom; 3069 struct group_device *grp_dev; 3070 int ret, dev_def_dom; 3071 struct device *dev; 3072 3073 if (!group) 3074 return -EINVAL; 3075 3076 mutex_lock(&group->mutex); 3077 3078 if (group->default_domain != group->domain) { 3079 dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n"); 3080 ret = -EBUSY; 3081 goto out; 3082 } 3083 3084 /* 3085 * iommu group wasn't locked while acquiring device lock in 3086 * iommu_group_store_type(). So, make sure that the device count hasn't 3087 * changed while acquiring device lock. 3088 * 3089 * Changing default domain of an iommu group with two or more devices 3090 * isn't supported because there could be a potential deadlock. Consider 3091 * the following scenario. T1 is trying to acquire device locks of all 3092 * the devices in the group and before it could acquire all of them, 3093 * there could be another thread T2 (from different sub-system and use 3094 * case) that has already acquired some of the device locks and might be 3095 * waiting for T1 to release other device locks. 
3096 */ 3097 if (iommu_group_device_count(group) != 1) { 3098 dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n"); 3099 ret = -EINVAL; 3100 goto out; 3101 } 3102 3103 /* Since group has only one device */ 3104 grp_dev = list_first_entry(&group->devices, struct group_device, list); 3105 dev = grp_dev->dev; 3106 3107 if (prev_dev != dev) { 3108 dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n"); 3109 ret = -EBUSY; 3110 goto out; 3111 } 3112 3113 prev_dom = group->default_domain; 3114 if (!prev_dom) { 3115 ret = -EINVAL; 3116 goto out; 3117 } 3118 3119 dev_def_dom = iommu_get_def_domain_type(dev); 3120 if (!type) { 3121 /* 3122 * If the user hasn't requested any specific type of domain and 3123 * if the device supports both the domains, then default to the 3124 * domain the device was booted with 3125 */ 3126 type = dev_def_dom ? : iommu_def_domain_type; 3127 } else if (dev_def_dom && type != dev_def_dom) { 3128 dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n", 3129 iommu_domain_type_str(type)); 3130 ret = -EINVAL; 3131 goto out; 3132 } 3133 3134 /* 3135 * Switch to a new domain only if the requested domain type is different 3136 * from the existing default domain type 3137 */ 3138 if (prev_dom->type == type) { 3139 ret = 0; 3140 goto out; 3141 } 3142 3143 /* Sets group->default_domain to the newly allocated domain */ 3144 ret = iommu_group_alloc_default_domain(dev->bus, group, type); 3145 if (ret) 3146 goto out; 3147 3148 ret = iommu_create_device_direct_mappings(group, dev); 3149 if (ret) 3150 goto free_new_domain; 3151 3152 ret = __iommu_attach_device(group->default_domain, dev); 3153 if (ret) 3154 goto free_new_domain; 3155 3156 group->domain = group->default_domain; 3157 3158 /* 3159 * Release the mutex here because ops->probe_finalize() call-back of 3160 * some vendor IOMMU drivers calls arm_iommu_attach_device() which 3161 * in-turn might call back into IOMMU core code, where it tries to take 3162 * group->mutex, resulting in a deadlock. 3163 */ 3164 mutex_unlock(&group->mutex); 3165 3166 /* Make sure dma_ops is appropriately set */ 3167 iommu_group_do_probe_finalize(dev, group->default_domain); 3168 iommu_domain_free(prev_dom); 3169 return 0; 3170 3171 free_new_domain: 3172 iommu_domain_free(group->default_domain); 3173 group->default_domain = prev_dom; 3174 group->domain = prev_dom; 3175 3176 out: 3177 mutex_unlock(&group->mutex); 3178 3179 return ret; 3180 } 3181 3182 /* 3183 * Changing the default domain through sysfs requires the user to unbind the 3184 * drivers from the devices in the iommu group. Return failure if this 3185 * condition is not met. 3186 * 3187 * We need to consider the race between this and the device release path. 3188 * device_lock(dev) is used here to guarantee that the device release path 3189 * will not be entered at the same time.
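 *
 * For example, an administrator who has unbound the device's driver can
 * switch the group to a passthrough default domain with:
 *
 *	echo identity > /sys/kernel/iommu_groups/<grp_id>/type
 *
 * "DMA" and "auto" are the other accepted values, as parsed below.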
3190 */ 3191 static ssize_t iommu_group_store_type(struct iommu_group *group, 3192 const char *buf, size_t count) 3193 { 3194 struct group_device *grp_dev; 3195 struct device *dev; 3196 int ret, req_type; 3197 3198 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 3199 return -EACCES; 3200 3201 if (WARN_ON(!group)) 3202 return -EINVAL; 3203 3204 if (sysfs_streq(buf, "identity")) 3205 req_type = IOMMU_DOMAIN_IDENTITY; 3206 else if (sysfs_streq(buf, "DMA")) 3207 req_type = IOMMU_DOMAIN_DMA; 3208 else if (sysfs_streq(buf, "auto")) 3209 req_type = 0; 3210 else 3211 return -EINVAL; 3212 3213 /* 3214 * Lock/Unlock the group mutex here before device lock to 3215 * 1. Make sure that the iommu group has only one device (this is a 3216 * prerequisite for step 2) 3217 * 2. Get struct *dev which is needed to lock device 3218 */ 3219 mutex_lock(&group->mutex); 3220 if (iommu_group_device_count(group) != 1) { 3221 mutex_unlock(&group->mutex); 3222 pr_err_ratelimited("Cannot change default domain: Group has more than one device\n"); 3223 return -EINVAL; 3224 } 3225 3226 /* Since group has only one device */ 3227 grp_dev = list_first_entry(&group->devices, struct group_device, list); 3228 dev = grp_dev->dev; 3229 get_device(dev); 3230 3231 /* 3232 * Don't hold the group mutex because taking group mutex first and then 3233 * the device lock could potentially cause a deadlock as below. Assume 3234 * two threads T1 and T2. T1 is trying to change default domain of an 3235 * iommu group and T2 is trying to hot unplug a device or release [1] VF 3236 * of a PCIe device which is in the same iommu group. T1 takes group 3237 * mutex and before it could take device lock assume T2 has taken device 3238 * lock and is yet to take group mutex. Now, both the threads will be 3239 * waiting for the other thread to release lock. Below, lock order was 3240 * suggested. 3241 * device_lock(dev); 3242 * mutex_lock(&group->mutex); 3243 * iommu_change_dev_def_domain(); 3244 * mutex_unlock(&group->mutex); 3245 * device_unlock(dev); 3246 * 3247 * [1] Typical device release path 3248 * device_lock() from device/driver core code 3249 * -> bus_notifier() 3250 * -> iommu_bus_notifier() 3251 * -> iommu_release_device() 3252 * -> ops->release_device() vendor driver calls back iommu core code 3253 * -> mutex_lock() from iommu core code 3254 */ 3255 mutex_unlock(&group->mutex); 3256 3257 /* Check if the device in the group still has a driver bound to it */ 3258 device_lock(dev); 3259 if (device_is_bound(dev)) { 3260 pr_err_ratelimited("Device is still bound to driver\n"); 3261 ret = -EBUSY; 3262 goto out; 3263 } 3264 3265 ret = iommu_change_dev_def_domain(group, dev, req_type); 3266 ret = ret ?: count; 3267 3268 out: 3269 device_unlock(dev); 3270 put_device(dev); 3271 3272 return ret; 3273 } 3274