// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/sha2.h>
#include <crypto/utils.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
			     const struct tb_service *svc)
{
	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
		if (strcmp(id->protocol_key, svc->key))
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
		if (id->protocol_id != svc->prtcid)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
		if (id->protocol_version != svc->prtcvers)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
		if (id->protocol_revision != svc->prtcrevs)
			return false;
	}

	return true;
}

static const struct tb_service_id *__tb_service_match(struct device *dev,
						      const struct device_driver *drv)
{
	const struct tb_service_driver *driver;
	const struct tb_service_id *ids;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return NULL;

	driver = container_of_const(drv, struct tb_service_driver, driver);
	if (!driver->id_table)
		return NULL;

	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
		if (match_service_id(ids, svc))
			return ids;
	}

	return NULL;
}

static int tb_service_match(struct device *dev, const struct device_driver *drv)
{
	return !!__tb_service_match(dev, drv);
}
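
/*
 * An illustrative sketch (not part of this file) of the id table a
 * service driver declares so that tb_service_match() above can bind it.
 * The "network" key and protocol ID 1 are the values the Thunderbolt
 * networking service uses; the array name is hypothetical:
 *
 *	static const struct tb_service_id example_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, example_ids);
 *
 * TB_SERVICE() sets TBSVC_MATCH_PROTOCOL_KEY | TBSVC_MATCH_PROTOCOL_ID,
 * so such an entry matches any protocol version and revision.
 */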

static int tb_service_probe(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;
	const struct tb_service_id *id;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	id = __tb_service_match(dev, &driver->driver);

	return driver->probe(svc, id);
}

static void tb_service_remove(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->remove)
		driver->remove(svc);
}

static void tb_service_shutdown(struct device *dev)
{
	struct tb_service_driver *driver;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc || !dev->driver)
		return;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->shutdown)
		driver->shutdown(svc);
}

static const char * const tb_security_names[] = {
	[TB_SECURITY_NONE] = "none",
	[TB_SECURITY_USER] = "user",
	[TB_SECURITY_SECURE] = "secure",
	[TB_SECURITY_DPONLY] = "dponly",
	[TB_SECURITY_USBONLY] = "usbonly",
	[TB_SECURITY_NOPCIE] = "nopcie",
};

static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	uuid_t *uuids;
	ssize_t ret;
	int i;

	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!uuids)
		return -ENOMEM;

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}
	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
	if (ret) {
		mutex_unlock(&tb->lock);
		goto out;
	}
	mutex_unlock(&tb->lock);

	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
		if (!uuid_is_null(&uuids[i]))
			ret += sysfs_emit_at(buf, ret, "%pUb", &uuids[i]);

		ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n");
	}

out:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
	kfree(uuids);

	return ret;
}

static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	char *str, *s, *uuid_str;
	ssize_t ret = 0;
	uuid_t *acl;
	int i = 0;

	/*
	 * Make sure the value is not bigger than tb->nboot_acl * UUID
	 * length + commas and optional "\n". Also the smallest allowable
	 * string is tb->nboot_acl - 1 commas (all entries left empty).
	 */
	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
		return -EINVAL;
	if (count < tb->nboot_acl - 1)
		return -EINVAL;

	str = kstrdup(buf, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!acl) {
		ret = -ENOMEM;
		goto err_free_str;
	}

	uuid_str = strim(str);
	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
		size_t len = strlen(s);

		if (len) {
			if (len != UUID_STRING_LEN) {
				ret = -EINVAL;
				goto err_free_acl;
			}
			ret = uuid_parse(s, &acl[i]);
			if (ret)
				goto err_free_acl;
		}

		i++;
	}

	if (s || i < tb->nboot_acl) {
		ret = -EINVAL;
		goto err_free_acl;
	}

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto err_rpm_put;
	}
	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
	if (!ret) {
		/* Notify userspace about the change */
		tb_domain_event(tb, NULL);
	}
	mutex_unlock(&tb->lock);

err_rpm_put:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
	kfree(acl);
err_free_str:
	kfree(str);

	return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);
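
/*
 * Illustration of the accepted boot_acl write format (assuming,
 * hypothetically, that tb->nboot_acl == 4): exactly four comma-separated
 * slots, each either a full UUID string or empty. Two populated slots
 * followed by two empty ones would be written as
 *
 *	"00000000-0000-0000-0000-000000000001,00000000-0000-0000-0000-000000000002,,"
 *
 * The UUID values above are dummies. Writing fewer or more slots than
 * tb->nboot_acl, or a malformed UUID, fails with -EINVAL.
 */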

static ssize_t deauthorization_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	const struct tb *tb = container_of(dev, struct tb, dev);
	bool deauthorization = false;

	/* Only meaningful if authorization is supported */
	if (tb->security_level == TB_SECURITY_USER ||
	    tb->security_level == TB_SECURITY_SECURE)
		deauthorization = !!tb->cm_ops->disapprove_switch;

	return sysfs_emit(buf, "%d\n", deauthorization);
}
static DEVICE_ATTR_RO(deauthorization);

static ssize_t iommu_dma_protection_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection);
}
static DEVICE_ATTR_RO(iommu_dma_protection);

static ssize_t security_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	const char *name = "unknown";

	if (tb->security_level < ARRAY_SIZE(tb_security_names))
		name = tb_security_names[tb->security_level];

	return sysfs_emit(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

static struct attribute *domain_attrs[] = {
	&dev_attr_boot_acl.attr,
	&dev_attr_deauthorization.attr,
	&dev_attr_iommu_dma_protection.attr,
	&dev_attr_security.attr,
	NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb *tb = container_of(dev, struct tb, dev);

	if (attr == &dev_attr_boot_acl.attr) {
		if (tb->nboot_acl &&
		    tb->cm_ops->get_boot_acl &&
		    tb->cm_ops->set_boot_acl)
			return attr->mode;
		return 0;
	}

	return attr->mode;
}

static const struct attribute_group domain_attr_group = {
	.is_visible = domain_attr_is_visible,
	.attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
	&domain_attr_group,
	NULL,
};

const struct bus_type tb_bus_type = {
	.name = "thunderbolt",
	.match = tb_service_match,
	.probe = tb_service_probe,
	.remove = tb_service_remove,
	.shutdown = tb_service_shutdown,
};

static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_free(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}

const struct device_type tb_domain_type = {
	.name = "thunderbolt_domain",
	.release = tb_domain_release,
};

static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	struct tb *tb = data;

	if (!tb->cm_ops->handle_event) {
		tb_warn(tb, "domain does not have event handler\n");
		return true;
	}

	switch (type) {
	case TB_CFG_PKG_XDOMAIN_REQ:
	case TB_CFG_PKG_XDOMAIN_RESP:
		if (tb_is_xdomain_enabled())
			return tb_xdomain_handle_request(tb, type, buf, size);
		break;

	default:
		tb->cm_ops->handle_event(tb, type, buf, size);
	}

	return true;
}

/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @timeout_msec: Control channel timeout for non-raw messages
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: Pointer to &struct tb or %NULL in case of error.
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes match what the hardware
	 * expects because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_alloc(&tb_domain_ida, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->ctl = tb_ctl_alloc(nhi, tb->index, timeout_msec, tb_domain_event_cb, tb);
	if (!tb->ctl)
		goto err_destroy_wq;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_destroy_wq:
	destroy_workqueue(tb->wq);
err_remove_ida:
	ida_free(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}
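
/*
 * An illustrative sketch of how a connection manager is expected to use
 * the two functions around this comment: allocate the domain, fill in
 * @cm_ops and then add it to the system. "struct my_cm", "my_cm_ops"
 * and "timeout_msec" are placeholders, not names from this driver:
 *
 *	tb = tb_domain_alloc(nhi, timeout_msec, sizeof(struct my_cm));
 *	if (!tb)
 *		return NULL;
 *	tb->cm_ops = &my_cm_ops;
 *	if (tb_domain_add(tb, false)) {
 *		tb_domain_put(tb);
 *		return NULL;
 *	}
 */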

/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 * @reset: Issue reset to the host router
 *
 * Starts the domain and adds it to the system. Hot plugging of devices
 * works after this function has returned successfully. In order to
 * remove and release the domain after this function has been called,
 * call tb_domain_remove().
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_domain_add(struct tb *tb, bool reset)
{
	int ret;

	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);
	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	tb_dbg(tb, "security level set to %s\n",
	       tb_security_names[tb->security_level]);

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb, reset);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	device_init_wakeup(&tb->dev, true);

	pm_runtime_no_callbacks(&tb->dev);
	pm_runtime_set_active(&tb->dev);
	pm_runtime_enable(&tb->dev);
	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_use_autosuspend(&tb->dev);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->stop)
		tb->cm_ops->stop(tb);
	/* Stop the domain control traffic */
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	flush_workqueue(tb->wq);

	if (tb->cm_ops->deinit)
		tb->cm_ops->deinit(tb);

	device_unregister(&tb->dev);
}

/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
	int ret = 0;

	/*
	 * The control channel interrupt is left enabled during suspend
	 * and taking the lock here prevents any events happening before
	 * we actually have stopped the domain and the control channel.
	 */
	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend_noirq)
		ret = tb->cm_ops->suspend_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel and resumes all devices connected to
 * the domain.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->resume_noirq)
		ret = tb->cm_ops->resume_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_suspend(struct tb *tb)
{
	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

int tb_domain_freeze_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	if (tb->cm_ops->freeze_noirq)
		ret = tb->cm_ops->freeze_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_thaw_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->thaw_noirq)
		ret = tb->cm_ops->thaw_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

void tb_domain_complete(struct tb *tb)
{
	if (tb->cm_ops->complete)
		tb->cm_ops->complete(tb);
}

int tb_domain_runtime_suspend(struct tb *tb)
{
	if (tb->cm_ops->runtime_suspend) {
		int ret = tb->cm_ops->runtime_suspend(tb);

		if (ret)
			return ret;
	}
	tb_ctl_stop(tb->ctl);
	return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->runtime_resume) {
		int ret = tb->cm_ops->runtime_resume(tb);

		if (ret)
			return ret;
	}
	return 0;
}

/**
 * tb_domain_disapprove_switch() - Disapprove switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to disapprove
 *
 * This will disconnect the PCIe tunnel from the parent to @sw.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
{
	if (!tb->cm_ops->disapprove_switch)
		return -EPERM;

	return tb->cm_ops->disapprove_switch(tb, sw);
}

/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means. In
 * case of success the connection manager will create a PCIe tunnel from
 * the parent to @sw.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	if (!tb->cm_ops->approve_switch)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds the
 * key to the switch NVM using connection manager specific means. If
 * adding the key is successful, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	ret = tb->cm_ops->add_switch_key(tb, sw);
	if (ret)
		return ret;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds to
 * this and if the response matches the HMAC we compute over the same
 * challenge with the stored key, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	static_assert(sizeof(hmac) == SHA256_DIGEST_SIZE);
	hmac_sha256_usingrawkey(sw->key, TB_SWITCH_KEY_SIZE,
				challenge, sizeof(challenge), hmac);

	/* The returned HMAC must match the one we calculated */
	if (crypto_memneq(response, hmac, sizeof(hmac)))
		return -EKEYREJECTED;

	return tb->cm_ops->approve_switch(tb, sw);
}
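
/*
 * In other words, the switch is accepted above only when
 *
 *	response == HMAC-SHA256(sw->key, challenge)
 *
 * for a freshly generated 32-byte challenge. Userspace reaches this path
 * by writing "2" to the switch's "authorized" sysfs attribute once a key
 * has been stored; see Documentation/admin-guide/thunderbolt.rst.
 */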

/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
	if (!tb->cm_ops->disconnect_pcie_paths)
		return -EPERM;

	return tb->cm_ops->disconnect_pcie_paths(tb);
}

/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return:
 * * %0 - On success.
 * * %-ENOTSUPP - If the connection manager implementation does not support
 *		  XDomains.
 * * Negative errno - An error occurred.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
{
	if (!tb->cm_ops->approve_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,
			transmit_ring, receive_path, receive_ring);
}
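
/*
 * Service drivers do not normally call the function above directly; the
 * usual entry point is tb_xdomain_enable_paths(), which takes the same
 * path/ring arguments and ends up here. A minimal sketch, with the HopID
 * and ring values as negotiated/allocated by the service driver:
 *
 *	ret = tb_xdomain_enable_paths(xd, transmit_path, transmit_ring,
 *				      receive_path, receive_ring);
 *	if (ret)
 *		return ret;
 */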

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return:
 * * %0 - On success.
 * * %-ENOTSUPP - If the connection manager implementation does not support
 *		  XDomains.
 * * Negative errno - An error occurred.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	if (!tb->cm_ops->disconnect_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
			transmit_ring, receive_path, receive_ring);
}

static int disconnect_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;
	struct tb *tb = data;
	int ret = 0;

	xd = tb_to_xdomain(dev);
	if (xd && xd->tb == tb)
		ret = tb_xdomain_disable_all_paths(xd);

	return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain) for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
	int ret;

	ret = tb_domain_disconnect_pcie_paths(tb);
	if (ret)
		return ret;

	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}

int tb_domain_init(void)
{
	int ret;

	tb_debugfs_init();
	tb_acpi_init();

	ret = tb_xdomain_init();
	if (ret)
		goto err_acpi;
	ret = bus_register(&tb_bus_type);
	if (ret)
		goto err_xdomain;

	return 0;

err_xdomain:
	tb_xdomain_exit();
err_acpi:
	tb_acpi_exit();
	tb_debugfs_exit();

	return ret;
}

void tb_domain_exit(void)
{
	bus_unregister(&tb_bus_type);
	ida_destroy(&tb_domain_ida);
	tb_nvm_exit();
	tb_xdomain_exit();
	tb_acpi_exit();
	tb_debugfs_exit();
}