1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */ 3 #include <linux/platform_device.h> 4 #include <linux/memregion.h> 5 #include <linux/workqueue.h> 6 #include <linux/debugfs.h> 7 #include <linux/device.h> 8 #include <linux/module.h> 9 #include <linux/pci.h> 10 #include <linux/slab.h> 11 #include <linux/idr.h> 12 #include <linux/node.h> 13 #include <cxl/einj.h> 14 #include <cxlmem.h> 15 #include <cxlpci.h> 16 #include <cxl.h> 17 #include "core.h" 18 19 /** 20 * DOC: cxl core 21 * 22 * The CXL core provides a set of interfaces that can be consumed by CXL aware 23 * drivers. The interfaces allow for creation, modification, and destruction of 24 * regions, memory devices, ports, and decoders. CXL aware drivers must register 25 * with the CXL core via these interfaces in order to be able to participate in 26 * cross-device interleave coordination. The CXL core also establishes and 27 * maintains the bridge to the nvdimm subsystem. 28 * 29 * CXL core introduces sysfs hierarchy to control the devices that are 30 * instantiated by the core. 31 */ 32 33 /* 34 * All changes to the interleave configuration occur with this lock held 35 * for write. 36 */ 37 DECLARE_RWSEM(cxl_region_rwsem); 38 39 static DEFINE_IDA(cxl_port_ida); 40 static DEFINE_XARRAY(cxl_root_buses); 41 42 int cxl_num_decoders_committed(struct cxl_port *port) 43 { 44 lockdep_assert_held(&cxl_region_rwsem); 45 46 return port->commit_end + 1; 47 } 48 49 static ssize_t devtype_show(struct device *dev, struct device_attribute *attr, 50 char *buf) 51 { 52 return sysfs_emit(buf, "%s\n", dev->type->name); 53 } 54 static DEVICE_ATTR_RO(devtype); 55 56 static int cxl_device_id(const struct device *dev) 57 { 58 if (dev->type == &cxl_nvdimm_bridge_type) 59 return CXL_DEVICE_NVDIMM_BRIDGE; 60 if (dev->type == &cxl_nvdimm_type) 61 return CXL_DEVICE_NVDIMM; 62 if (dev->type == CXL_PMEM_REGION_TYPE()) 63 return CXL_DEVICE_PMEM_REGION; 64 if (dev->type == CXL_DAX_REGION_TYPE()) 65 return CXL_DEVICE_DAX_REGION; 66 if (is_cxl_port(dev)) { 67 if (is_cxl_root(to_cxl_port(dev))) 68 return CXL_DEVICE_ROOT; 69 return CXL_DEVICE_PORT; 70 } 71 if (is_cxl_memdev(dev)) 72 return CXL_DEVICE_MEMORY_EXPANDER; 73 if (dev->type == CXL_REGION_TYPE()) 74 return CXL_DEVICE_REGION; 75 if (dev->type == &cxl_pmu_type) 76 return CXL_DEVICE_PMU; 77 return 0; 78 } 79 80 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, 81 char *buf) 82 { 83 return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev)); 84 } 85 static DEVICE_ATTR_RO(modalias); 86 87 static struct attribute *cxl_base_attributes[] = { 88 &dev_attr_devtype.attr, 89 &dev_attr_modalias.attr, 90 NULL, 91 }; 92 93 struct attribute_group cxl_base_attribute_group = { 94 .attrs = cxl_base_attributes, 95 }; 96 97 static ssize_t start_show(struct device *dev, struct device_attribute *attr, 98 char *buf) 99 { 100 struct cxl_decoder *cxld = to_cxl_decoder(dev); 101 102 return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start); 103 } 104 static DEVICE_ATTR_ADMIN_RO(start); 105 106 static ssize_t size_show(struct device *dev, struct device_attribute *attr, 107 char *buf) 108 { 109 struct cxl_decoder *cxld = to_cxl_decoder(dev); 110 111 return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range)); 112 } 113 static DEVICE_ATTR_RO(size); 114 115 #define CXL_DECODER_FLAG_ATTR(name, flag) \ 116 static ssize_t name##_show(struct device *dev, \ 117 struct device_attribute *attr, char *buf) \ 118 { \ 119 struct cxl_decoder *cxld = 
to_cxl_decoder(dev); \ 120 \ 121 return sysfs_emit(buf, "%s\n", \ 122 (cxld->flags & (flag)) ? "1" : "0"); \ 123 } \ 124 static DEVICE_ATTR_RO(name) 125 126 CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM); 127 CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM); 128 CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2); 129 CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3); 130 CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK); 131 132 static ssize_t target_type_show(struct device *dev, 133 struct device_attribute *attr, char *buf) 134 { 135 struct cxl_decoder *cxld = to_cxl_decoder(dev); 136 137 switch (cxld->target_type) { 138 case CXL_DECODER_DEVMEM: 139 return sysfs_emit(buf, "accelerator\n"); 140 case CXL_DECODER_HOSTONLYMEM: 141 return sysfs_emit(buf, "expander\n"); 142 } 143 return -ENXIO; 144 } 145 static DEVICE_ATTR_RO(target_type); 146 147 static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf) 148 { 149 struct cxl_decoder *cxld = &cxlsd->cxld; 150 ssize_t offset = 0; 151 int i, rc = 0; 152 153 for (i = 0; i < cxld->interleave_ways; i++) { 154 struct cxl_dport *dport = cxlsd->target[i]; 155 struct cxl_dport *next = NULL; 156 157 if (!dport) 158 break; 159 160 if (i + 1 < cxld->interleave_ways) 161 next = cxlsd->target[i + 1]; 162 rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id, 163 next ? "," : ""); 164 if (rc < 0) 165 return rc; 166 offset += rc; 167 } 168 169 return offset; 170 } 171 172 static ssize_t target_list_show(struct device *dev, 173 struct device_attribute *attr, char *buf) 174 { 175 struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); 176 ssize_t offset; 177 int rc; 178 179 guard(rwsem_read)(&cxl_region_rwsem); 180 rc = emit_target_list(cxlsd, buf); 181 if (rc < 0) 182 return rc; 183 offset = rc; 184 185 rc = sysfs_emit_at(buf, offset, "\n"); 186 if (rc < 0) 187 return rc; 188 189 return offset + rc; 190 } 191 static DEVICE_ATTR_RO(target_list); 192 193 static ssize_t mode_show(struct device *dev, struct device_attribute *attr, 194 char *buf) 195 { 196 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 197 198 return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode)); 199 } 200 201 static ssize_t mode_store(struct device *dev, struct device_attribute *attr, 202 const char *buf, size_t len) 203 { 204 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 205 enum cxl_decoder_mode mode; 206 ssize_t rc; 207 208 if (sysfs_streq(buf, "pmem")) 209 mode = CXL_DECODER_PMEM; 210 else if (sysfs_streq(buf, "ram")) 211 mode = CXL_DECODER_RAM; 212 else 213 return -EINVAL; 214 215 rc = cxl_dpa_set_mode(cxled, mode); 216 if (rc) 217 return rc; 218 219 return len; 220 } 221 static DEVICE_ATTR_RW(mode); 222 223 static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr, 224 char *buf) 225 { 226 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 227 228 guard(rwsem_read)(&cxl_dpa_rwsem); 229 return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled)); 230 } 231 static DEVICE_ATTR_RO(dpa_resource); 232 233 static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr, 234 char *buf) 235 { 236 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 237 resource_size_t size = cxl_dpa_size(cxled); 238 239 return sysfs_emit(buf, "%pa\n", &size); 240 } 241 242 static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr, 243 const char *buf, size_t len) 244 { 245 struct cxl_endpoint_decoder *cxled = 
to_cxl_endpoint_decoder(dev); 246 unsigned long long size; 247 ssize_t rc; 248 249 rc = kstrtoull(buf, 0, &size); 250 if (rc) 251 return rc; 252 253 if (!IS_ALIGNED(size, SZ_256M)) 254 return -EINVAL; 255 256 rc = cxl_dpa_free(cxled); 257 if (rc) 258 return rc; 259 260 if (size == 0) 261 return len; 262 263 rc = cxl_dpa_alloc(cxled, size); 264 if (rc) 265 return rc; 266 267 return len; 268 } 269 static DEVICE_ATTR_RW(dpa_size); 270 271 static ssize_t interleave_granularity_show(struct device *dev, 272 struct device_attribute *attr, 273 char *buf) 274 { 275 struct cxl_decoder *cxld = to_cxl_decoder(dev); 276 277 return sysfs_emit(buf, "%d\n", cxld->interleave_granularity); 278 } 279 280 static DEVICE_ATTR_RO(interleave_granularity); 281 282 static ssize_t interleave_ways_show(struct device *dev, 283 struct device_attribute *attr, char *buf) 284 { 285 struct cxl_decoder *cxld = to_cxl_decoder(dev); 286 287 return sysfs_emit(buf, "%d\n", cxld->interleave_ways); 288 } 289 290 static DEVICE_ATTR_RO(interleave_ways); 291 292 static ssize_t qos_class_show(struct device *dev, 293 struct device_attribute *attr, char *buf) 294 { 295 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 296 297 return sysfs_emit(buf, "%d\n", cxlrd->qos_class); 298 } 299 static DEVICE_ATTR_RO(qos_class); 300 301 static struct attribute *cxl_decoder_base_attrs[] = { 302 &dev_attr_start.attr, 303 &dev_attr_size.attr, 304 &dev_attr_locked.attr, 305 &dev_attr_interleave_granularity.attr, 306 &dev_attr_interleave_ways.attr, 307 NULL, 308 }; 309 310 static struct attribute_group cxl_decoder_base_attribute_group = { 311 .attrs = cxl_decoder_base_attrs, 312 }; 313 314 static struct attribute *cxl_decoder_root_attrs[] = { 315 &dev_attr_cap_pmem.attr, 316 &dev_attr_cap_ram.attr, 317 &dev_attr_cap_type2.attr, 318 &dev_attr_cap_type3.attr, 319 &dev_attr_target_list.attr, 320 &dev_attr_qos_class.attr, 321 SET_CXL_REGION_ATTR(create_pmem_region) 322 SET_CXL_REGION_ATTR(create_ram_region) 323 SET_CXL_REGION_ATTR(delete_region) 324 NULL, 325 }; 326 327 static bool can_create_pmem(struct cxl_root_decoder *cxlrd) 328 { 329 unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM; 330 331 return (cxlrd->cxlsd.cxld.flags & flags) == flags; 332 } 333 334 static bool can_create_ram(struct cxl_root_decoder *cxlrd) 335 { 336 unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_RAM; 337 338 return (cxlrd->cxlsd.cxld.flags & flags) == flags; 339 } 340 341 static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n) 342 { 343 struct device *dev = kobj_to_dev(kobj); 344 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 345 346 if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd)) 347 return 0; 348 349 if (a == CXL_REGION_ATTR(create_ram_region) && !can_create_ram(cxlrd)) 350 return 0; 351 352 if (a == CXL_REGION_ATTR(delete_region) && 353 !(can_create_pmem(cxlrd) || can_create_ram(cxlrd))) 354 return 0; 355 356 return a->mode; 357 } 358 359 static struct attribute_group cxl_decoder_root_attribute_group = { 360 .attrs = cxl_decoder_root_attrs, 361 .is_visible = cxl_root_decoder_visible, 362 }; 363 364 static const struct attribute_group *cxl_decoder_root_attribute_groups[] = { 365 &cxl_decoder_root_attribute_group, 366 &cxl_decoder_base_attribute_group, 367 &cxl_base_attribute_group, 368 NULL, 369 }; 370 371 static struct attribute *cxl_decoder_switch_attrs[] = { 372 &dev_attr_target_type.attr, 373 &dev_attr_target_list.attr, 374 SET_CXL_REGION_ATTR(region) 375 NULL, 376 }; 
377 378 static struct attribute_group cxl_decoder_switch_attribute_group = { 379 .attrs = cxl_decoder_switch_attrs, 380 }; 381 382 static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = { 383 &cxl_decoder_switch_attribute_group, 384 &cxl_decoder_base_attribute_group, 385 &cxl_base_attribute_group, 386 NULL, 387 }; 388 389 static struct attribute *cxl_decoder_endpoint_attrs[] = { 390 &dev_attr_target_type.attr, 391 &dev_attr_mode.attr, 392 &dev_attr_dpa_size.attr, 393 &dev_attr_dpa_resource.attr, 394 SET_CXL_REGION_ATTR(region) 395 NULL, 396 }; 397 398 static struct attribute_group cxl_decoder_endpoint_attribute_group = { 399 .attrs = cxl_decoder_endpoint_attrs, 400 }; 401 402 static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = { 403 &cxl_decoder_base_attribute_group, 404 &cxl_decoder_endpoint_attribute_group, 405 &cxl_base_attribute_group, 406 NULL, 407 }; 408 409 static void __cxl_decoder_release(struct cxl_decoder *cxld) 410 { 411 struct cxl_port *port = to_cxl_port(cxld->dev.parent); 412 413 ida_free(&port->decoder_ida, cxld->id); 414 put_device(&port->dev); 415 } 416 417 static void cxl_endpoint_decoder_release(struct device *dev) 418 { 419 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 420 421 __cxl_decoder_release(&cxled->cxld); 422 kfree(cxled); 423 } 424 425 static void cxl_switch_decoder_release(struct device *dev) 426 { 427 struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); 428 429 __cxl_decoder_release(&cxlsd->cxld); 430 kfree(cxlsd); 431 } 432 433 struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev) 434 { 435 if (dev_WARN_ONCE(dev, !is_root_decoder(dev), 436 "not a cxl_root_decoder device\n")) 437 return NULL; 438 return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev); 439 } 440 EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL); 441 442 static void cxl_root_decoder_release(struct device *dev) 443 { 444 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 445 446 if (atomic_read(&cxlrd->region_id) >= 0) 447 memregion_free(atomic_read(&cxlrd->region_id)); 448 __cxl_decoder_release(&cxlrd->cxlsd.cxld); 449 kfree(cxlrd); 450 } 451 452 static const struct device_type cxl_decoder_endpoint_type = { 453 .name = "cxl_decoder_endpoint", 454 .release = cxl_endpoint_decoder_release, 455 .groups = cxl_decoder_endpoint_attribute_groups, 456 }; 457 458 static const struct device_type cxl_decoder_switch_type = { 459 .name = "cxl_decoder_switch", 460 .release = cxl_switch_decoder_release, 461 .groups = cxl_decoder_switch_attribute_groups, 462 }; 463 464 static const struct device_type cxl_decoder_root_type = { 465 .name = "cxl_decoder_root", 466 .release = cxl_root_decoder_release, 467 .groups = cxl_decoder_root_attribute_groups, 468 }; 469 470 bool is_endpoint_decoder(struct device *dev) 471 { 472 return dev->type == &cxl_decoder_endpoint_type; 473 } 474 EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL); 475 476 bool is_root_decoder(struct device *dev) 477 { 478 return dev->type == &cxl_decoder_root_type; 479 } 480 EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL); 481 482 bool is_switch_decoder(struct device *dev) 483 { 484 return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type; 485 } 486 EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL); 487 488 struct cxl_decoder *to_cxl_decoder(struct device *dev) 489 { 490 if (dev_WARN_ONCE(dev, 491 !is_switch_decoder(dev) && !is_endpoint_decoder(dev), 492 "not a cxl_decoder device\n")) 493 return NULL; 494 return container_of(dev, struct cxl_decoder, 
dev); 495 } 496 EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL); 497 498 struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev) 499 { 500 if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev), 501 "not a cxl_endpoint_decoder device\n")) 502 return NULL; 503 return container_of(dev, struct cxl_endpoint_decoder, cxld.dev); 504 } 505 EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL); 506 507 struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev) 508 { 509 if (dev_WARN_ONCE(dev, !is_switch_decoder(dev), 510 "not a cxl_switch_decoder device\n")) 511 return NULL; 512 return container_of(dev, struct cxl_switch_decoder, cxld.dev); 513 } 514 EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL); 515 516 static void cxl_ep_release(struct cxl_ep *ep) 517 { 518 put_device(ep->ep); 519 kfree(ep); 520 } 521 522 static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep) 523 { 524 if (!ep) 525 return; 526 xa_erase(&port->endpoints, (unsigned long) ep->ep); 527 cxl_ep_release(ep); 528 } 529 530 static void cxl_port_release(struct device *dev) 531 { 532 struct cxl_port *port = to_cxl_port(dev); 533 unsigned long index; 534 struct cxl_ep *ep; 535 536 xa_for_each(&port->endpoints, index, ep) 537 cxl_ep_remove(port, ep); 538 xa_destroy(&port->endpoints); 539 xa_destroy(&port->dports); 540 xa_destroy(&port->regions); 541 ida_free(&cxl_port_ida, port->id); 542 if (is_cxl_root(port)) 543 kfree(to_cxl_root(port)); 544 else 545 kfree(port); 546 } 547 548 static ssize_t decoders_committed_show(struct device *dev, 549 struct device_attribute *attr, char *buf) 550 { 551 struct cxl_port *port = to_cxl_port(dev); 552 int rc; 553 554 down_read(&cxl_region_rwsem); 555 rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port)); 556 up_read(&cxl_region_rwsem); 557 558 return rc; 559 } 560 561 static DEVICE_ATTR_RO(decoders_committed); 562 563 static struct attribute *cxl_port_attrs[] = { 564 &dev_attr_decoders_committed.attr, 565 NULL, 566 }; 567 568 static struct attribute_group cxl_port_attribute_group = { 569 .attrs = cxl_port_attrs, 570 }; 571 572 static const struct attribute_group *cxl_port_attribute_groups[] = { 573 &cxl_base_attribute_group, 574 &cxl_port_attribute_group, 575 NULL, 576 }; 577 578 static const struct device_type cxl_port_type = { 579 .name = "cxl_port", 580 .release = cxl_port_release, 581 .groups = cxl_port_attribute_groups, 582 }; 583 584 bool is_cxl_port(const struct device *dev) 585 { 586 return dev->type == &cxl_port_type; 587 } 588 EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL); 589 590 struct cxl_port *to_cxl_port(const struct device *dev) 591 { 592 if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type, 593 "not a cxl_port device\n")) 594 return NULL; 595 return container_of(dev, struct cxl_port, dev); 596 } 597 EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL); 598 599 static void unregister_port(void *_port) 600 { 601 struct cxl_port *port = _port; 602 struct cxl_port *parent; 603 struct device *lock_dev; 604 605 if (is_cxl_root(port)) 606 parent = NULL; 607 else 608 parent = to_cxl_port(port->dev.parent); 609 610 /* 611 * CXL root port's and the first level of ports are unregistered 612 * under the platform firmware device lock, all other ports are 613 * unregistered while holding their parent port lock. 
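 *
 * Summarized (an editorial aid derived from the selection below, not new
 * policy):
 *
 *	root cxl_port		-> lock port->uport_dev
 *	first-level port	-> lock parent->uport_dev (platform firmware device)
 *	deeper switch port	-> lock &parent->dev (the parent cxl_port)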
614 */ 615 if (!parent) 616 lock_dev = port->uport_dev; 617 else if (is_cxl_root(parent)) 618 lock_dev = parent->uport_dev; 619 else 620 lock_dev = &parent->dev; 621 622 device_lock_assert(lock_dev); 623 port->dead = true; 624 device_unregister(&port->dev); 625 } 626 627 static void cxl_unlink_uport(void *_port) 628 { 629 struct cxl_port *port = _port; 630 631 sysfs_remove_link(&port->dev.kobj, "uport"); 632 } 633 634 static int devm_cxl_link_uport(struct device *host, struct cxl_port *port) 635 { 636 int rc; 637 638 rc = sysfs_create_link(&port->dev.kobj, &port->uport_dev->kobj, 639 "uport"); 640 if (rc) 641 return rc; 642 return devm_add_action_or_reset(host, cxl_unlink_uport, port); 643 } 644 645 static void cxl_unlink_parent_dport(void *_port) 646 { 647 struct cxl_port *port = _port; 648 649 sysfs_remove_link(&port->dev.kobj, "parent_dport"); 650 } 651 652 static int devm_cxl_link_parent_dport(struct device *host, 653 struct cxl_port *port, 654 struct cxl_dport *parent_dport) 655 { 656 int rc; 657 658 if (!parent_dport) 659 return 0; 660 661 rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport_dev->kobj, 662 "parent_dport"); 663 if (rc) 664 return rc; 665 return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port); 666 } 667 668 static struct lock_class_key cxl_port_key; 669 670 static struct cxl_port *cxl_port_alloc(struct device *uport_dev, 671 struct cxl_dport *parent_dport) 672 { 673 struct cxl_root *cxl_root __free(kfree) = NULL; 674 struct cxl_port *port, *_port __free(kfree) = NULL; 675 struct device *dev; 676 int rc; 677 678 /* No parent_dport, root cxl_port */ 679 if (!parent_dport) { 680 cxl_root = kzalloc(sizeof(*cxl_root), GFP_KERNEL); 681 if (!cxl_root) 682 return ERR_PTR(-ENOMEM); 683 } else { 684 _port = kzalloc(sizeof(*port), GFP_KERNEL); 685 if (!_port) 686 return ERR_PTR(-ENOMEM); 687 } 688 689 rc = ida_alloc(&cxl_port_ida, GFP_KERNEL); 690 if (rc < 0) 691 return ERR_PTR(rc); 692 693 if (cxl_root) 694 port = &no_free_ptr(cxl_root)->port; 695 else 696 port = no_free_ptr(_port); 697 698 port->id = rc; 699 port->uport_dev = uport_dev; 700 701 /* 702 * The top-level cxl_port "cxl_root" does not have a cxl_port as 703 * its parent and it does not have any corresponding component 704 * registers as its decode is described by a fixed platform 705 * description. 
706 */ 707 dev = &port->dev; 708 if (parent_dport) { 709 struct cxl_port *parent_port = parent_dport->port; 710 struct cxl_port *iter; 711 712 dev->parent = &parent_port->dev; 713 port->depth = parent_port->depth + 1; 714 port->parent_dport = parent_dport; 715 716 /* 717 * walk to the host bridge, or the first ancestor that knows 718 * the host bridge 719 */ 720 iter = port; 721 while (!iter->host_bridge && 722 !is_cxl_root(to_cxl_port(iter->dev.parent))) 723 iter = to_cxl_port(iter->dev.parent); 724 if (iter->host_bridge) 725 port->host_bridge = iter->host_bridge; 726 else if (parent_dport->rch) 727 port->host_bridge = parent_dport->dport_dev; 728 else 729 port->host_bridge = iter->uport_dev; 730 dev_dbg(uport_dev, "host-bridge: %s\n", 731 dev_name(port->host_bridge)); 732 } else 733 dev->parent = uport_dev; 734 735 ida_init(&port->decoder_ida); 736 port->hdm_end = -1; 737 port->commit_end = -1; 738 xa_init(&port->dports); 739 xa_init(&port->endpoints); 740 xa_init(&port->regions); 741 742 device_initialize(dev); 743 lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth); 744 device_set_pm_not_required(dev); 745 dev->bus = &cxl_bus_type; 746 dev->type = &cxl_port_type; 747 748 return port; 749 } 750 751 static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map, 752 resource_size_t component_reg_phys) 753 { 754 *map = (struct cxl_register_map) { 755 .host = host, 756 .reg_type = CXL_REGLOC_RBI_EMPTY, 757 .resource = component_reg_phys, 758 }; 759 760 if (component_reg_phys == CXL_RESOURCE_NONE) 761 return 0; 762 763 map->reg_type = CXL_REGLOC_RBI_COMPONENT; 764 map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE; 765 766 return cxl_setup_regs(map); 767 } 768 769 static int cxl_port_setup_regs(struct cxl_port *port, 770 resource_size_t component_reg_phys) 771 { 772 if (dev_is_platform(port->uport_dev)) 773 return 0; 774 return cxl_setup_comp_regs(&port->dev, &port->reg_map, 775 component_reg_phys); 776 } 777 778 static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport, 779 resource_size_t component_reg_phys) 780 { 781 int rc; 782 783 if (dev_is_platform(dport->dport_dev)) 784 return 0; 785 786 /* 787 * use @dport->dport_dev for the context for error messages during 788 * register probing, and fixup @host after the fact, since @host may be 789 * NULL. 790 */ 791 rc = cxl_setup_comp_regs(dport->dport_dev, &dport->reg_map, 792 component_reg_phys); 793 dport->reg_map.host = host; 794 return rc; 795 } 796 797 DEFINE_SHOW_ATTRIBUTE(einj_cxl_available_error_type); 798 799 static int cxl_einj_inject(void *data, u64 type) 800 { 801 struct cxl_dport *dport = data; 802 803 if (dport->rch) 804 return einj_cxl_inject_rch_error(dport->rcrb.base, type); 805 806 return einj_cxl_inject_error(to_pci_dev(dport->dport_dev), type); 807 } 808 DEFINE_DEBUGFS_ATTRIBUTE(cxl_einj_inject_fops, NULL, cxl_einj_inject, 809 "0x%llx\n"); 810 811 static void cxl_debugfs_create_dport_dir(struct cxl_dport *dport) 812 { 813 struct dentry *dir; 814 815 if (!einj_cxl_is_initialized()) 816 return; 817 818 /* 819 * dport_dev needs to be a PCIe port for CXL 2.0+ ports because 820 * EINJ expects a dport SBDF to be specified for 2.0 error injection. 
821 */ 822 if (!dport->rch && !dev_is_pci(dport->dport_dev)) 823 return; 824 825 dir = cxl_debugfs_create_dir(dev_name(dport->dport_dev)); 826 827 debugfs_create_file("einj_inject", 0200, dir, dport, 828 &cxl_einj_inject_fops); 829 } 830 831 static int cxl_port_add(struct cxl_port *port, 832 resource_size_t component_reg_phys, 833 struct cxl_dport *parent_dport) 834 { 835 struct device *dev __free(put_device) = &port->dev; 836 int rc; 837 838 if (is_cxl_memdev(port->uport_dev)) { 839 struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev); 840 struct cxl_dev_state *cxlds = cxlmd->cxlds; 841 842 rc = dev_set_name(dev, "endpoint%d", port->id); 843 if (rc) 844 return rc; 845 846 /* 847 * The endpoint driver already enumerated the component and RAS 848 * registers. Reuse that enumeration while prepping them to be 849 * mapped by the cxl_port driver. 850 */ 851 port->reg_map = cxlds->reg_map; 852 port->reg_map.host = &port->dev; 853 cxlmd->endpoint = port; 854 } else if (parent_dport) { 855 rc = dev_set_name(dev, "port%d", port->id); 856 if (rc) 857 return rc; 858 859 rc = cxl_port_setup_regs(port, component_reg_phys); 860 if (rc) 861 return rc; 862 } else { 863 rc = dev_set_name(dev, "root%d", port->id); 864 if (rc) 865 return rc; 866 } 867 868 rc = device_add(dev); 869 if (rc) 870 return rc; 871 872 /* Inhibit the cleanup function invoked */ 873 dev = NULL; 874 return 0; 875 } 876 877 static struct cxl_port *__devm_cxl_add_port(struct device *host, 878 struct device *uport_dev, 879 resource_size_t component_reg_phys, 880 struct cxl_dport *parent_dport) 881 { 882 struct cxl_port *port; 883 int rc; 884 885 port = cxl_port_alloc(uport_dev, parent_dport); 886 if (IS_ERR(port)) 887 return port; 888 889 rc = cxl_port_add(port, component_reg_phys, parent_dport); 890 if (rc) 891 return ERR_PTR(rc); 892 893 rc = devm_add_action_or_reset(host, unregister_port, port); 894 if (rc) 895 return ERR_PTR(rc); 896 897 rc = devm_cxl_link_uport(host, port); 898 if (rc) 899 return ERR_PTR(rc); 900 901 rc = devm_cxl_link_parent_dport(host, port, parent_dport); 902 if (rc) 903 return ERR_PTR(rc); 904 905 if (parent_dport && dev_is_pci(uport_dev)) 906 port->pci_latency = cxl_pci_get_latency(to_pci_dev(uport_dev)); 907 908 return port; 909 } 910 911 /** 912 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy 913 * @host: host device for devm operations 914 * @uport_dev: "physical" device implementing this upstream port 915 * @component_reg_phys: (optional) for configurable cxl_port instances 916 * @parent_dport: next hop up in the CXL memory decode hierarchy 917 */ 918 struct cxl_port *devm_cxl_add_port(struct device *host, 919 struct device *uport_dev, 920 resource_size_t component_reg_phys, 921 struct cxl_dport *parent_dport) 922 { 923 struct cxl_port *port, *parent_port; 924 925 port = __devm_cxl_add_port(host, uport_dev, component_reg_phys, 926 parent_dport); 927 928 parent_port = parent_dport ? parent_dport->port : NULL; 929 if (IS_ERR(port)) { 930 dev_dbg(uport_dev, "Failed to add%s%s%s: %ld\n", 931 parent_port ? " port to " : "", 932 parent_port ? dev_name(&parent_port->dev) : "", 933 parent_port ? "" : " root port", 934 PTR_ERR(port)); 935 } else { 936 dev_dbg(uport_dev, "%s added%s%s%s\n", 937 dev_name(&port->dev), 938 parent_port ? " to " : "", 939 parent_port ? dev_name(&parent_port->dev) : "", 940 parent_port ? 
"" : " (root port)"); 941 } 942 943 return port; 944 } 945 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL); 946 947 struct cxl_root *devm_cxl_add_root(struct device *host, 948 const struct cxl_root_ops *ops) 949 { 950 struct cxl_root *cxl_root; 951 struct cxl_port *port; 952 953 port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL); 954 if (IS_ERR(port)) 955 return ERR_CAST(port); 956 957 cxl_root = to_cxl_root(port); 958 cxl_root->ops = ops; 959 return cxl_root; 960 } 961 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, CXL); 962 963 struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port) 964 { 965 /* There is no pci_bus associated with a CXL platform-root port */ 966 if (is_cxl_root(port)) 967 return NULL; 968 969 if (dev_is_pci(port->uport_dev)) { 970 struct pci_dev *pdev = to_pci_dev(port->uport_dev); 971 972 return pdev->subordinate; 973 } 974 975 return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev); 976 } 977 EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL); 978 979 static void unregister_pci_bus(void *uport_dev) 980 { 981 xa_erase(&cxl_root_buses, (unsigned long)uport_dev); 982 } 983 984 int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev, 985 struct pci_bus *bus) 986 { 987 int rc; 988 989 if (dev_is_pci(uport_dev)) 990 return -EINVAL; 991 992 rc = xa_insert(&cxl_root_buses, (unsigned long)uport_dev, bus, 993 GFP_KERNEL); 994 if (rc) 995 return rc; 996 return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev); 997 } 998 EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL); 999 1000 static bool dev_is_cxl_root_child(struct device *dev) 1001 { 1002 struct cxl_port *port, *parent; 1003 1004 if (!is_cxl_port(dev)) 1005 return false; 1006 1007 port = to_cxl_port(dev); 1008 if (is_cxl_root(port)) 1009 return false; 1010 1011 parent = to_cxl_port(port->dev.parent); 1012 if (is_cxl_root(parent)) 1013 return true; 1014 1015 return false; 1016 } 1017 1018 struct cxl_root *find_cxl_root(struct cxl_port *port) 1019 { 1020 struct cxl_port *iter = port; 1021 1022 while (iter && !is_cxl_root(iter)) 1023 iter = to_cxl_port(iter->dev.parent); 1024 1025 if (!iter) 1026 return NULL; 1027 get_device(&iter->dev); 1028 return to_cxl_root(iter); 1029 } 1030 EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL); 1031 1032 void put_cxl_root(struct cxl_root *cxl_root) 1033 { 1034 if (!cxl_root) 1035 return; 1036 1037 put_device(&cxl_root->port.dev); 1038 } 1039 EXPORT_SYMBOL_NS_GPL(put_cxl_root, CXL); 1040 1041 static struct cxl_dport *find_dport(struct cxl_port *port, int id) 1042 { 1043 struct cxl_dport *dport; 1044 unsigned long index; 1045 1046 device_lock_assert(&port->dev); 1047 xa_for_each(&port->dports, index, dport) 1048 if (dport->port_id == id) 1049 return dport; 1050 return NULL; 1051 } 1052 1053 static int add_dport(struct cxl_port *port, struct cxl_dport *dport) 1054 { 1055 struct cxl_dport *dup; 1056 int rc; 1057 1058 device_lock_assert(&port->dev); 1059 dup = find_dport(port, dport->port_id); 1060 if (dup) { 1061 dev_err(&port->dev, 1062 "unable to add dport%d-%s non-unique port id (%s)\n", 1063 dport->port_id, dev_name(dport->dport_dev), 1064 dev_name(dup->dport_dev)); 1065 return -EBUSY; 1066 } 1067 1068 rc = xa_insert(&port->dports, (unsigned long)dport->dport_dev, dport, 1069 GFP_KERNEL); 1070 if (rc) 1071 return rc; 1072 1073 port->nr_dports++; 1074 return 0; 1075 } 1076 1077 /* 1078 * Since root-level CXL dports cannot be enumerated by PCI they are not 1079 * enumerated by the common port driver that acquires the port lock over 1080 * dport add/remove. 
Instead, root dports are manually added by a 1081 * platform driver and cond_cxl_root_lock() is used to take the missing 1082 * port lock in that case. 1083 */ 1084 static void cond_cxl_root_lock(struct cxl_port *port) 1085 { 1086 if (is_cxl_root(port)) 1087 device_lock(&port->dev); 1088 } 1089 1090 static void cond_cxl_root_unlock(struct cxl_port *port) 1091 { 1092 if (is_cxl_root(port)) 1093 device_unlock(&port->dev); 1094 } 1095 1096 static void cxl_dport_remove(void *data) 1097 { 1098 struct cxl_dport *dport = data; 1099 struct cxl_port *port = dport->port; 1100 1101 xa_erase(&port->dports, (unsigned long) dport->dport_dev); 1102 put_device(dport->dport_dev); 1103 } 1104 1105 static void cxl_dport_unlink(void *data) 1106 { 1107 struct cxl_dport *dport = data; 1108 struct cxl_port *port = dport->port; 1109 char link_name[CXL_TARGET_STRLEN]; 1110 1111 sprintf(link_name, "dport%d", dport->port_id); 1112 sysfs_remove_link(&port->dev.kobj, link_name); 1113 } 1114 1115 static struct cxl_dport * 1116 __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev, 1117 int port_id, resource_size_t component_reg_phys, 1118 resource_size_t rcrb) 1119 { 1120 char link_name[CXL_TARGET_STRLEN]; 1121 struct cxl_dport *dport; 1122 struct device *host; 1123 int rc; 1124 1125 if (is_cxl_root(port)) 1126 host = port->uport_dev; 1127 else 1128 host = &port->dev; 1129 1130 if (!host->driver) { 1131 dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n", 1132 dev_name(dport_dev)); 1133 return ERR_PTR(-ENXIO); 1134 } 1135 1136 if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >= 1137 CXL_TARGET_STRLEN) 1138 return ERR_PTR(-EINVAL); 1139 1140 dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL); 1141 if (!dport) 1142 return ERR_PTR(-ENOMEM); 1143 1144 dport->dport_dev = dport_dev; 1145 dport->port_id = port_id; 1146 dport->port = port; 1147 1148 if (rcrb == CXL_RESOURCE_NONE) { 1149 rc = cxl_dport_setup_regs(&port->dev, dport, 1150 component_reg_phys); 1151 if (rc) 1152 return ERR_PTR(rc); 1153 } else { 1154 dport->rcrb.base = rcrb; 1155 component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb, 1156 CXL_RCRB_DOWNSTREAM); 1157 if (component_reg_phys == CXL_RESOURCE_NONE) { 1158 dev_warn(dport_dev, "Invalid Component Registers in RCRB"); 1159 return ERR_PTR(-ENXIO); 1160 } 1161 1162 /* 1163 * RCH @dport is not ready to map until associated with its 1164 * memdev 1165 */ 1166 rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys); 1167 if (rc) 1168 return ERR_PTR(rc); 1169 1170 dport->rch = true; 1171 } 1172 1173 if (component_reg_phys != CXL_RESOURCE_NONE) 1174 dev_dbg(dport_dev, "Component Registers found for dport: %pa\n", 1175 &component_reg_phys); 1176 1177 cond_cxl_root_lock(port); 1178 rc = add_dport(port, dport); 1179 cond_cxl_root_unlock(port); 1180 if (rc) 1181 return ERR_PTR(rc); 1182 1183 get_device(dport_dev); 1184 rc = devm_add_action_or_reset(host, cxl_dport_remove, dport); 1185 if (rc) 1186 return ERR_PTR(rc); 1187 1188 rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name); 1189 if (rc) 1190 return ERR_PTR(rc); 1191 1192 rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport); 1193 if (rc) 1194 return ERR_PTR(rc); 1195 1196 if (dev_is_pci(dport_dev)) 1197 dport->link_latency = cxl_pci_get_latency(to_pci_dev(dport_dev)); 1198 1199 cxl_debugfs_create_dport_dir(dport); 1200 1201 return dport; 1202 } 1203 1204 /** 1205 * devm_cxl_add_dport - append VH downstream port data to a cxl_port 1206 * @port: the cxl_port that references this dport 1207 
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release actions of either
 * the port's host (for root ports), or the port itself (for switch
 * ports)
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{
	struct cxl_dport *dport;

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     component_reg_phys, CXL_RESOURCE_NONE);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);

/**
 * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @rcrb: mandatory location of a Root Complex Register Block
 *
 * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH
 */
struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
					 struct device *dport_dev, int port_id,
					 resource_size_t rcrb)
{
	struct cxl_dport *dport;

	if (rcrb == CXL_RESOURCE_NONE) {
		dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n");
		return ERR_PTR(-EINVAL);
	}

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     CXL_RESOURCE_NONE, rcrb);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "RCH dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);

static int add_ep(struct cxl_ep *new)
{
	struct cxl_port *port = new->dport->port;

	guard(device)(&port->dev);
	if (port->dead)
		return -ENXIO;

	return xa_insert(&port->endpoints, (unsigned long)new->ep,
			 new, GFP_KERNEL);
}

/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @dport: the dport that routes to @ep_dev
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart, the port can be destroyed once all
 * endpoints that care about that port have been removed.
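 *
 * For reference, the callers below are add_port_attach_ep() and
 * devm_cxl_enumerate_ports(), both driven by memdev arrival.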
1289 */ 1290 static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev) 1291 { 1292 struct cxl_ep *ep; 1293 int rc; 1294 1295 ep = kzalloc(sizeof(*ep), GFP_KERNEL); 1296 if (!ep) 1297 return -ENOMEM; 1298 1299 ep->ep = get_device(ep_dev); 1300 ep->dport = dport; 1301 1302 rc = add_ep(ep); 1303 if (rc) 1304 cxl_ep_release(ep); 1305 return rc; 1306 } 1307 1308 struct cxl_find_port_ctx { 1309 const struct device *dport_dev; 1310 const struct cxl_port *parent_port; 1311 struct cxl_dport **dport; 1312 }; 1313 1314 static int match_port_by_dport(struct device *dev, const void *data) 1315 { 1316 const struct cxl_find_port_ctx *ctx = data; 1317 struct cxl_dport *dport; 1318 struct cxl_port *port; 1319 1320 if (!is_cxl_port(dev)) 1321 return 0; 1322 if (ctx->parent_port && dev->parent != &ctx->parent_port->dev) 1323 return 0; 1324 1325 port = to_cxl_port(dev); 1326 dport = cxl_find_dport_by_dev(port, ctx->dport_dev); 1327 if (ctx->dport) 1328 *ctx->dport = dport; 1329 return dport != NULL; 1330 } 1331 1332 static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx) 1333 { 1334 struct device *dev; 1335 1336 if (!ctx->dport_dev) 1337 return NULL; 1338 1339 dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport); 1340 if (dev) 1341 return to_cxl_port(dev); 1342 return NULL; 1343 } 1344 1345 static struct cxl_port *find_cxl_port(struct device *dport_dev, 1346 struct cxl_dport **dport) 1347 { 1348 struct cxl_find_port_ctx ctx = { 1349 .dport_dev = dport_dev, 1350 .dport = dport, 1351 }; 1352 struct cxl_port *port; 1353 1354 port = __find_cxl_port(&ctx); 1355 return port; 1356 } 1357 1358 static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port, 1359 struct device *dport_dev, 1360 struct cxl_dport **dport) 1361 { 1362 struct cxl_find_port_ctx ctx = { 1363 .dport_dev = dport_dev, 1364 .parent_port = parent_port, 1365 .dport = dport, 1366 }; 1367 struct cxl_port *port; 1368 1369 port = __find_cxl_port(&ctx); 1370 return port; 1371 } 1372 1373 /* 1374 * All users of grandparent() are using it to walk PCIe-like switch port 1375 * hierarchy. A PCIe switch is comprised of a bridge device representing the 1376 * upstream switch port and N bridges representing downstream switch ports. When 1377 * bridges stack the grand-parent of a downstream switch port is another 1378 * downstream switch port in the immediate ancestor switch. 
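 *
 * For example (an illustrative topology, not a requirement of this
 * helper), walking up from a CXL memdev:
 *
 *	memdev -> PCI endpoint -> switch DSP -> switch USP -> root port
 *
 * grandparent(&cxlmd->dev) resolves to the switch DSP (the endpoint's
 * dport), and grandparent() of that DSP resolves to the root port, i.e.
 * the next dport up the hierarchy.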
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}

static struct device *endpoint_host(struct cxl_port *endpoint)
{
	struct cxl_port *port = to_cxl_port(endpoint->dev.parent);

	if (is_cxl_root(port))
		return port->uport_dev;
	return &port->dev;
}

static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = cxlmd->endpoint;
	struct device *host = endpoint_host(endpoint);

	scoped_guard(device, host) {
		if (host->driver && !endpoint->dead) {
			devm_release_action(host, cxl_unlink_parent_dport, endpoint);
			devm_release_action(host, cxl_unlink_uport, endpoint);
			devm_release_action(host, unregister_port, endpoint);
		}
		cxlmd->endpoint = NULL;
	}
	put_device(&endpoint->dev);
	put_device(host);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *host = endpoint_host(endpoint);
	struct device *dev = &cxlmd->dev;

	get_device(host);
	get_device(&endpoint->dev);
	cxlmd->depth = endpoint->depth;
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
 * devm action registration order, and for dports to have already been
 * destroyed by reap_dports().
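 *
 * For reference, the devm actions registered by __devm_cxl_add_port()
 * that this ordering relies on are, in registration order:
 *
 *	1. unregister_port
 *	2. cxl_unlink_uport
 *	3. cxl_unlink_parent_dport
 *
 * delete_switch_port() below releases them in the reverse order.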
1434 */ 1435 static void delete_switch_port(struct cxl_port *port) 1436 { 1437 devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port); 1438 devm_release_action(port->dev.parent, cxl_unlink_uport, port); 1439 devm_release_action(port->dev.parent, unregister_port, port); 1440 } 1441 1442 static void reap_dports(struct cxl_port *port) 1443 { 1444 struct cxl_dport *dport; 1445 unsigned long index; 1446 1447 device_lock_assert(&port->dev); 1448 1449 xa_for_each(&port->dports, index, dport) { 1450 devm_release_action(&port->dev, cxl_dport_unlink, dport); 1451 devm_release_action(&port->dev, cxl_dport_remove, dport); 1452 devm_kfree(&port->dev, dport); 1453 } 1454 } 1455 1456 struct detach_ctx { 1457 struct cxl_memdev *cxlmd; 1458 int depth; 1459 }; 1460 1461 static int port_has_memdev(struct device *dev, const void *data) 1462 { 1463 const struct detach_ctx *ctx = data; 1464 struct cxl_port *port; 1465 1466 if (!is_cxl_port(dev)) 1467 return 0; 1468 1469 port = to_cxl_port(dev); 1470 if (port->depth != ctx->depth) 1471 return 0; 1472 1473 return !!cxl_ep_load(port, ctx->cxlmd); 1474 } 1475 1476 static void cxl_detach_ep(void *data) 1477 { 1478 struct cxl_memdev *cxlmd = data; 1479 1480 for (int i = cxlmd->depth - 1; i >= 1; i--) { 1481 struct cxl_port *port, *parent_port; 1482 struct detach_ctx ctx = { 1483 .cxlmd = cxlmd, 1484 .depth = i, 1485 }; 1486 struct cxl_ep *ep; 1487 bool died = false; 1488 1489 struct device *dev __free(put_device) = 1490 bus_find_device(&cxl_bus_type, NULL, &ctx, port_has_memdev); 1491 if (!dev) 1492 continue; 1493 port = to_cxl_port(dev); 1494 1495 parent_port = to_cxl_port(port->dev.parent); 1496 device_lock(&parent_port->dev); 1497 device_lock(&port->dev); 1498 ep = cxl_ep_load(port, cxlmd); 1499 dev_dbg(&cxlmd->dev, "disconnect %s from %s\n", 1500 ep ? dev_name(ep->ep) : "", dev_name(&port->dev)); 1501 cxl_ep_remove(port, ep); 1502 if (ep && !port->dead && xa_empty(&port->endpoints) && 1503 !is_cxl_root(parent_port) && parent_port->dev.driver) { 1504 /* 1505 * This was the last ep attached to a dynamically 1506 * enumerated port. Block new cxl_add_ep() and garbage 1507 * collect the port. 1508 */ 1509 died = true; 1510 port->dead = true; 1511 reap_dports(port); 1512 } 1513 device_unlock(&port->dev); 1514 1515 if (died) { 1516 dev_dbg(&cxlmd->dev, "delete %s\n", 1517 dev_name(&port->dev)); 1518 delete_switch_port(port); 1519 } 1520 device_unlock(&parent_port->dev); 1521 } 1522 } 1523 1524 static resource_size_t find_component_registers(struct device *dev) 1525 { 1526 struct cxl_register_map map; 1527 struct pci_dev *pdev; 1528 1529 /* 1530 * Theoretically, CXL component registers can be hosted on a 1531 * non-PCI device, in practice, only cxl_test hits this case. 1532 */ 1533 if (!dev_is_pci(dev)) 1534 return CXL_RESOURCE_NONE; 1535 1536 pdev = to_pci_dev(dev); 1537 1538 cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map); 1539 return map.resource; 1540 } 1541 1542 static int add_port_attach_ep(struct cxl_memdev *cxlmd, 1543 struct device *uport_dev, 1544 struct device *dport_dev) 1545 { 1546 struct device *dparent = grandparent(dport_dev); 1547 struct cxl_dport *dport, *parent_dport; 1548 resource_size_t component_reg_phys; 1549 int rc; 1550 1551 if (!dparent) { 1552 /* 1553 * The iteration reached the topology root without finding the 1554 * CXL-root 'cxl_port' on a previous iteration, fail for now to 1555 * be re-probed after platform driver attaches. 
1556 */ 1557 dev_dbg(&cxlmd->dev, "%s is a root dport\n", 1558 dev_name(dport_dev)); 1559 return -ENXIO; 1560 } 1561 1562 struct cxl_port *parent_port __free(put_cxl_port) = 1563 find_cxl_port(dparent, &parent_dport); 1564 if (!parent_port) { 1565 /* iterate to create this parent_port */ 1566 return -EAGAIN; 1567 } 1568 1569 /* 1570 * Definition with __free() here to keep the sequence of 1571 * dereferencing the device of the port before the parent_port releasing. 1572 */ 1573 struct cxl_port *port __free(put_cxl_port) = NULL; 1574 scoped_guard(device, &parent_port->dev) { 1575 if (!parent_port->dev.driver) { 1576 dev_warn(&cxlmd->dev, 1577 "port %s:%s disabled, failed to enumerate CXL.mem\n", 1578 dev_name(&parent_port->dev), dev_name(uport_dev)); 1579 return -ENXIO; 1580 } 1581 1582 port = find_cxl_port_at(parent_port, dport_dev, &dport); 1583 if (!port) { 1584 component_reg_phys = find_component_registers(uport_dev); 1585 port = devm_cxl_add_port(&parent_port->dev, uport_dev, 1586 component_reg_phys, parent_dport); 1587 if (IS_ERR(port)) 1588 return PTR_ERR(port); 1589 1590 /* retry find to pick up the new dport information */ 1591 port = find_cxl_port_at(parent_port, dport_dev, &dport); 1592 if (!port) 1593 return -ENXIO; 1594 } 1595 } 1596 1597 dev_dbg(&cxlmd->dev, "add to new port %s:%s\n", 1598 dev_name(&port->dev), dev_name(port->uport_dev)); 1599 rc = cxl_add_ep(dport, &cxlmd->dev); 1600 if (rc == -EBUSY) { 1601 /* 1602 * "can't" happen, but this error code means 1603 * something to the caller, so translate it. 1604 */ 1605 rc = -ENXIO; 1606 } 1607 1608 return rc; 1609 } 1610 1611 int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd) 1612 { 1613 struct device *dev = &cxlmd->dev; 1614 struct device *iter; 1615 int rc; 1616 1617 /* 1618 * Skip intermediate port enumeration in the RCH case, there 1619 * are no ports in between a host bridge and an endpoint. 1620 */ 1621 if (cxlmd->cxlds->rcd) 1622 return 0; 1623 1624 rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd); 1625 if (rc) 1626 return rc; 1627 1628 /* 1629 * Scan for and add all cxl_ports in this device's ancestry. 1630 * Repeat until no more ports are added. Abort if a port add 1631 * attempt fails. 1632 */ 1633 retry: 1634 for (iter = dev; iter; iter = grandparent(iter)) { 1635 struct device *dport_dev = grandparent(iter); 1636 struct device *uport_dev; 1637 struct cxl_dport *dport; 1638 1639 /* 1640 * The terminal "grandparent" in PCI is NULL and @platform_bus 1641 * for platform devices 1642 */ 1643 if (!dport_dev || dport_dev == &platform_bus) 1644 return 0; 1645 1646 uport_dev = dport_dev->parent; 1647 if (!uport_dev) { 1648 dev_warn(dev, "at %s no parent for dport: %s\n", 1649 dev_name(iter), dev_name(dport_dev)); 1650 return -ENXIO; 1651 } 1652 1653 dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n", 1654 dev_name(iter), dev_name(dport_dev), 1655 dev_name(uport_dev)); 1656 struct cxl_port *port __free(put_cxl_port) = 1657 find_cxl_port(dport_dev, &dport); 1658 if (port) { 1659 dev_dbg(&cxlmd->dev, 1660 "found already registered port %s:%s\n", 1661 dev_name(&port->dev), 1662 dev_name(port->uport_dev)); 1663 rc = cxl_add_ep(dport, &cxlmd->dev); 1664 1665 /* 1666 * If the endpoint already exists in the port's list, 1667 * that's ok, it was added on a previous pass. 1668 * Otherwise, retry in add_port_attach_ep() after taking 1669 * the parent_port lock as the current port may be being 1670 * reaped. 
1671 */ 1672 if (rc && rc != -EBUSY) 1673 return rc; 1674 1675 /* Any more ports to add between this one and the root? */ 1676 if (!dev_is_cxl_root_child(&port->dev)) 1677 continue; 1678 1679 return 0; 1680 } 1681 1682 rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev); 1683 /* port missing, try to add parent */ 1684 if (rc == -EAGAIN) 1685 continue; 1686 /* failed to add ep or port */ 1687 if (rc) 1688 return rc; 1689 /* port added, new descendants possible, start over */ 1690 goto retry; 1691 } 1692 1693 return 0; 1694 } 1695 EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL); 1696 1697 struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev, 1698 struct cxl_dport **dport) 1699 { 1700 return find_cxl_port(pdev->dev.parent, dport); 1701 } 1702 EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL); 1703 1704 struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd, 1705 struct cxl_dport **dport) 1706 { 1707 return find_cxl_port(grandparent(&cxlmd->dev), dport); 1708 } 1709 EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL); 1710 1711 static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd, 1712 struct cxl_port *port, int *target_map) 1713 { 1714 int i; 1715 1716 if (!target_map) 1717 return 0; 1718 1719 device_lock_assert(&port->dev); 1720 1721 if (xa_empty(&port->dports)) 1722 return -EINVAL; 1723 1724 guard(rwsem_write)(&cxl_region_rwsem); 1725 for (i = 0; i < cxlsd->cxld.interleave_ways; i++) { 1726 struct cxl_dport *dport = find_dport(port, target_map[i]); 1727 1728 if (!dport) 1729 return -ENXIO; 1730 cxlsd->target[i] = dport; 1731 } 1732 1733 return 0; 1734 } 1735 1736 static struct lock_class_key cxl_decoder_key; 1737 1738 /** 1739 * cxl_decoder_init - Common decoder setup / initialization 1740 * @port: owning port of this decoder 1741 * @cxld: common decoder properties to initialize 1742 * 1743 * A port may contain one or more decoders. Each of those decoders 1744 * enable some address space for CXL.mem utilization. A decoder is 1745 * expected to be configured by the caller before registering via 1746 * cxl_decoder_add() 1747 */ 1748 static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld) 1749 { 1750 struct device *dev; 1751 int rc; 1752 1753 rc = ida_alloc(&port->decoder_ida, GFP_KERNEL); 1754 if (rc < 0) 1755 return rc; 1756 1757 /* need parent to stick around to release the id */ 1758 get_device(&port->dev); 1759 cxld->id = rc; 1760 1761 dev = &cxld->dev; 1762 device_initialize(dev); 1763 lockdep_set_class(&dev->mutex, &cxl_decoder_key); 1764 device_set_pm_not_required(dev); 1765 dev->parent = &port->dev; 1766 dev->bus = &cxl_bus_type; 1767 1768 /* Pre initialize an "empty" decoder */ 1769 cxld->interleave_ways = 1; 1770 cxld->interleave_granularity = PAGE_SIZE; 1771 cxld->target_type = CXL_DECODER_HOSTONLYMEM; 1772 cxld->hpa_range = (struct range) { 1773 .start = 0, 1774 .end = -1, 1775 }; 1776 1777 return 0; 1778 } 1779 1780 static int cxl_switch_decoder_init(struct cxl_port *port, 1781 struct cxl_switch_decoder *cxlsd, 1782 int nr_targets) 1783 { 1784 if (nr_targets > CXL_DECODER_MAX_INTERLEAVE) 1785 return -EINVAL; 1786 1787 cxlsd->nr_targets = nr_targets; 1788 return cxl_decoder_init(port, &cxlsd->cxld); 1789 } 1790 1791 /** 1792 * cxl_root_decoder_alloc - Allocate a root level decoder 1793 * @port: owning CXL root of this decoder 1794 * @nr_targets: static number of downstream targets 1795 * 1796 * Return: A new cxl decoder to be registered by cxl_decoder_add(). 
A 1797 * 'CXL root' decoder is one that decodes from a top-level / static platform 1798 * firmware description of CXL resources into a CXL standard decode 1799 * topology. 1800 */ 1801 struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, 1802 unsigned int nr_targets) 1803 { 1804 struct cxl_root_decoder *cxlrd; 1805 struct cxl_switch_decoder *cxlsd; 1806 struct cxl_decoder *cxld; 1807 int rc; 1808 1809 if (!is_cxl_root(port)) 1810 return ERR_PTR(-EINVAL); 1811 1812 cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets), 1813 GFP_KERNEL); 1814 if (!cxlrd) 1815 return ERR_PTR(-ENOMEM); 1816 1817 cxlsd = &cxlrd->cxlsd; 1818 rc = cxl_switch_decoder_init(port, cxlsd, nr_targets); 1819 if (rc) { 1820 kfree(cxlrd); 1821 return ERR_PTR(rc); 1822 } 1823 1824 mutex_init(&cxlrd->range_lock); 1825 1826 cxld = &cxlsd->cxld; 1827 cxld->dev.type = &cxl_decoder_root_type; 1828 /* 1829 * cxl_root_decoder_release() special cases negative ids to 1830 * detect memregion_alloc() failures. 1831 */ 1832 atomic_set(&cxlrd->region_id, -1); 1833 rc = memregion_alloc(GFP_KERNEL); 1834 if (rc < 0) { 1835 put_device(&cxld->dev); 1836 return ERR_PTR(rc); 1837 } 1838 1839 atomic_set(&cxlrd->region_id, rc); 1840 cxlrd->qos_class = CXL_QOS_CLASS_INVALID; 1841 return cxlrd; 1842 } 1843 EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL); 1844 1845 /** 1846 * cxl_switch_decoder_alloc - Allocate a switch level decoder 1847 * @port: owning CXL switch port of this decoder 1848 * @nr_targets: max number of dynamically addressable downstream targets 1849 * 1850 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A 1851 * 'switch' decoder is any decoder that can be enumerated by PCIe 1852 * topology and the HDM Decoder Capability. This includes the decoders 1853 * that sit between Switch Upstream Ports / Switch Downstream Ports and 1854 * Host Bridges / Root Ports. 
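 *
 * Illustrative usage (a sketch only; @host, @port, the interleave settings
 * and the target_map contents are assumptions of the example, not
 * requirements of this helper):
 *
 *	struct cxl_switch_decoder *cxlsd;
 *	int target_map[2] = { 0, 1 };	/* hypothetical dport ids */
 *	int rc;
 *
 *	cxlsd = cxl_switch_decoder_alloc(port, ARRAY_SIZE(target_map));
 *	if (IS_ERR(cxlsd))
 *		return PTR_ERR(cxlsd);
 *	cxlsd->cxld.interleave_ways = 2;
 *	rc = cxl_decoder_add(&cxlsd->cxld, target_map);
 *	if (rc) {
 *		put_device(&cxlsd->cxld.dev);
 *		return rc;
 *	}
 *	return cxl_decoder_autoremove(host, &cxlsd->cxld);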
1855 */ 1856 struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, 1857 unsigned int nr_targets) 1858 { 1859 struct cxl_switch_decoder *cxlsd; 1860 struct cxl_decoder *cxld; 1861 int rc; 1862 1863 if (is_cxl_root(port) || is_cxl_endpoint(port)) 1864 return ERR_PTR(-EINVAL); 1865 1866 cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL); 1867 if (!cxlsd) 1868 return ERR_PTR(-ENOMEM); 1869 1870 rc = cxl_switch_decoder_init(port, cxlsd, nr_targets); 1871 if (rc) { 1872 kfree(cxlsd); 1873 return ERR_PTR(rc); 1874 } 1875 1876 cxld = &cxlsd->cxld; 1877 cxld->dev.type = &cxl_decoder_switch_type; 1878 return cxlsd; 1879 } 1880 EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL); 1881 1882 /** 1883 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder 1884 * @port: owning port of this decoder 1885 * 1886 * Return: A new cxl decoder to be registered by cxl_decoder_add() 1887 */ 1888 struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port) 1889 { 1890 struct cxl_endpoint_decoder *cxled; 1891 struct cxl_decoder *cxld; 1892 int rc; 1893 1894 if (!is_cxl_endpoint(port)) 1895 return ERR_PTR(-EINVAL); 1896 1897 cxled = kzalloc(sizeof(*cxled), GFP_KERNEL); 1898 if (!cxled) 1899 return ERR_PTR(-ENOMEM); 1900 1901 cxled->pos = -1; 1902 cxld = &cxled->cxld; 1903 rc = cxl_decoder_init(port, cxld); 1904 if (rc) { 1905 kfree(cxled); 1906 return ERR_PTR(rc); 1907 } 1908 1909 cxld->dev.type = &cxl_decoder_endpoint_type; 1910 return cxled; 1911 } 1912 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL); 1913 1914 /** 1915 * cxl_decoder_add_locked - Add a decoder with targets 1916 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc() 1917 * @target_map: A list of downstream ports that this decoder can direct memory 1918 * traffic to. These numbers should correspond with the port number 1919 * in the PCIe Link Capabilities structure. 1920 * 1921 * Certain types of decoders may not have any targets. The main example of this 1922 * is an endpoint device. A more awkward example is a hostbridge whose root 1923 * ports get hot added (technically possible, though unlikely). 1924 * 1925 * This is the locked variant of cxl_decoder_add(). 1926 * 1927 * Context: Process context. Expects the device lock of the port that owns the 1928 * @cxld to be held. 1929 * 1930 * Return: Negative error code if the decoder wasn't properly configured; else 1931 * returns 0. 
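 *
 * For example (hypothetical ids), a 2-way decoder whose first interleave
 * target is the dport registered with port_id 0 and whose second target
 * is the dport registered with port_id 1 would pass:
 *
 *	int target_map[] = { 0, 1 };
 *
 * Each entry is looked up against the dports previously added to the
 * port via devm_cxl_add_dport(), and a missing dport fails the add.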
1932 */ 1933 int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map) 1934 { 1935 struct cxl_port *port; 1936 struct device *dev; 1937 int rc; 1938 1939 if (WARN_ON_ONCE(!cxld)) 1940 return -EINVAL; 1941 1942 if (WARN_ON_ONCE(IS_ERR(cxld))) 1943 return PTR_ERR(cxld); 1944 1945 if (cxld->interleave_ways < 1) 1946 return -EINVAL; 1947 1948 dev = &cxld->dev; 1949 1950 port = to_cxl_port(cxld->dev.parent); 1951 if (!is_endpoint_decoder(dev)) { 1952 struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); 1953 1954 rc = decoder_populate_targets(cxlsd, port, target_map); 1955 if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) { 1956 dev_err(&port->dev, 1957 "Failed to populate active decoder targets\n"); 1958 return rc; 1959 } 1960 } 1961 1962 rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id); 1963 if (rc) 1964 return rc; 1965 1966 return device_add(dev); 1967 } 1968 EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL); 1969 1970 /** 1971 * cxl_decoder_add - Add a decoder with targets 1972 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc() 1973 * @target_map: A list of downstream ports that this decoder can direct memory 1974 * traffic to. These numbers should correspond with the port number 1975 * in the PCIe Link Capabilities structure. 1976 * 1977 * This is the unlocked variant of cxl_decoder_add_locked(). 1978 * See cxl_decoder_add_locked(). 1979 * 1980 * Context: Process context. Takes and releases the device lock of the port that 1981 * owns the @cxld. 1982 */ 1983 int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) 1984 { 1985 struct cxl_port *port; 1986 1987 if (WARN_ON_ONCE(!cxld)) 1988 return -EINVAL; 1989 1990 if (WARN_ON_ONCE(IS_ERR(cxld))) 1991 return PTR_ERR(cxld); 1992 1993 port = to_cxl_port(cxld->dev.parent); 1994 1995 guard(device)(&port->dev); 1996 return cxl_decoder_add_locked(cxld, target_map); 1997 } 1998 EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL); 1999 2000 static void cxld_unregister(void *dev) 2001 { 2002 struct cxl_endpoint_decoder *cxled; 2003 2004 if (is_endpoint_decoder(dev)) { 2005 cxled = to_cxl_endpoint_decoder(dev); 2006 cxl_decoder_kill_region(cxled); 2007 } 2008 2009 device_unregister(dev); 2010 } 2011 2012 int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld) 2013 { 2014 return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev); 2015 } 2016 EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL); 2017 2018 /** 2019 * __cxl_driver_register - register a driver for the cxl bus 2020 * @cxl_drv: cxl driver structure to attach 2021 * @owner: owning module/driver 2022 * @modname: KBUILD_MODNAME for parent driver 2023 */ 2024 int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner, 2025 const char *modname) 2026 { 2027 if (!cxl_drv->probe) { 2028 pr_debug("%s ->probe() must be specified\n", modname); 2029 return -EINVAL; 2030 } 2031 2032 if (!cxl_drv->name) { 2033 pr_debug("%s ->name must be specified\n", modname); 2034 return -EINVAL; 2035 } 2036 2037 if (!cxl_drv->id) { 2038 pr_debug("%s ->id must be specified\n", modname); 2039 return -EINVAL; 2040 } 2041 2042 cxl_drv->drv.bus = &cxl_bus_type; 2043 cxl_drv->drv.owner = owner; 2044 cxl_drv->drv.mod_name = modname; 2045 cxl_drv->drv.name = cxl_drv->name; 2046 2047 return driver_register(&cxl_drv->drv); 2048 } 2049 EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL); 2050 2051 void cxl_driver_unregister(struct cxl_driver *cxl_drv) 2052 { 2053 driver_unregister(&cxl_drv->drv); 2054 } 2055 EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, 
static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
        return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
                              cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, const struct device_driver *drv)
{
        return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
        int rc;

        rc = to_cxl_drv(dev->driver)->probe(dev);
        dev_dbg(dev, "probe: %d\n", rc);
        return rc;
}

static void cxl_bus_remove(struct device *dev)
{
        struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

        if (cxl_drv->remove)
                cxl_drv->remove(dev);
}

static struct workqueue_struct *cxl_bus_wq;

static void cxl_bus_rescan_queue(struct work_struct *w)
{
        int rc = bus_rescan_devices(&cxl_bus_type);

        pr_debug("CXL bus rescan result: %d\n", rc);
}

void cxl_bus_rescan(void)
{
        static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue);

        queue_work(cxl_bus_wq, &rescan_work);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);

void cxl_bus_drain(void)
{
        drain_workqueue(cxl_bus_wq);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL);

bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
        return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);

static void add_latency(struct access_coordinate *c, long latency)
{
        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
                c[i].write_latency += latency;
                c[i].read_latency += latency;
        }
}

static bool coordinates_valid(struct access_coordinate *c)
{
        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
                if (c[i].read_bandwidth && c[i].write_bandwidth &&
                    c[i].read_latency && c[i].write_latency)
                        continue;
                return false;
        }

        return true;
}

static void set_min_bandwidth(struct access_coordinate *c, unsigned int bw)
{
        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
                c[i].write_bandwidth = min(c[i].write_bandwidth, bw);
                c[i].read_bandwidth = min(c[i].read_bandwidth, bw);
        }
}

static void set_access_coordinates(struct access_coordinate *out,
                                   struct access_coordinate *in)
{
        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
                out[i] = in[i];
}

static bool parent_port_is_cxl_root(struct cxl_port *port)
{
        return is_cxl_root(to_cxl_port(port->dev.parent));
}
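/*
 * Note on the accumulation model used by the helpers above (descriptive, not
 * normative): each hop's link latency is added to both the read and write
 * latency (add_latency()), the path bandwidth is capped by the slowest
 * contributor (set_min_bandwidth()), and a dport's coordinates are only
 * usable when every bandwidth and latency field is non-zero
 * (coordinates_valid()). For example, two hops with link latencies of 100ns
 * and 200ns add 300ns to the path latency, while links of 32GB/s and 16GB/s
 * leave the path capped at 16GB/s.
 */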
/**
 * cxl_endpoint_get_perf_coordinates - Retrieve performance numbers stored in dports
 *                                     of CXL path
 * @port: endpoint cxl_port
 * @coord: output performance data
 *
 * Return: errno on failure, 0 on success.
 */
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
                                      struct access_coordinate *coord)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
        struct access_coordinate c[] = {
                {
                        .read_bandwidth = UINT_MAX,
                        .write_bandwidth = UINT_MAX,
                },
                {
                        .read_bandwidth = UINT_MAX,
                        .write_bandwidth = UINT_MAX,
                },
        };
        struct cxl_port *iter = port;
        struct cxl_dport *dport;
        struct pci_dev *pdev;
        struct device *dev;
        unsigned int bw;
        bool is_cxl_root;

        if (!is_cxl_endpoint(port))
                return -EINVAL;

        /*
         * Skip calculation for RCD. Expectation is HMAT already covers RCD case
         * since RCH does not support hotplug.
         */
        if (cxlmd->cxlds->rcd)
                return 0;

        /*
         * Exit the loop when the parent port of the current iter port is cxl
         * root. The iterative loop starts at the endpoint and gathers the
         * latency of the CXL link from the current device/port to the connected
         * downstream port each iteration.
         */
        do {
                dport = iter->parent_dport;
                iter = to_cxl_port(iter->dev.parent);
                is_cxl_root = parent_port_is_cxl_root(iter);

                /*
                 * There's no valid access_coordinate for a root port since RPs
                 * do not have CDAT and therefore need to be skipped.
                 */
                if (!is_cxl_root) {
                        if (!coordinates_valid(dport->coord))
                                return -EINVAL;
                        cxl_coordinates_combine(c, c, dport->coord);
                }
                add_latency(c, dport->link_latency);
        } while (!is_cxl_root);

        dport = iter->parent_dport;
        /* Retrieve HB coords */
        if (!coordinates_valid(dport->coord))
                return -EINVAL;
        cxl_coordinates_combine(c, c, dport->coord);

        dev = port->uport_dev->parent;
        if (!dev_is_pci(dev))
                return -ENODEV;

        /* Get the calculated PCI paths bandwidth */
        pdev = to_pci_dev(dev);
        bw = pcie_bandwidth_available(pdev, NULL, NULL, NULL);
        if (bw == 0)
                return -ENXIO;
        bw /= BITS_PER_BYTE;

        set_min_bandwidth(c, bw);
        set_access_coordinates(coord, c);

        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, CXL);

int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
                                        struct access_coordinate *c)
{
        struct cxl_dport *dport = port->parent_dport;

        /* Check this port is connected to a switch DSP and not an RP */
        if (parent_port_is_cxl_root(to_cxl_port(port->dev.parent)))
                return -ENODEV;

        if (!coordinates_valid(dport->coord))
                return -EINVAL;

        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
                c[i].read_bandwidth = dport->coord[i].read_bandwidth;
                c[i].write_bandwidth = dport->coord[i].write_bandwidth;
        }

        return 0;
}

/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{
        if (sysfs_streq(buf, "1")) {
                flush_workqueue(cxl_bus_wq);
                return count;
        }

        return -EINVAL;
}

static BUS_ATTR_WO(flush);

static struct attribute *cxl_bus_attributes[] = {
        &bus_attr_flush.attr,
        NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
        .attrs = cxl_bus_attributes,
};

static const struct attribute_group *cxl_bus_attribute_groups[] = {
        &cxl_bus_attribute_group,
        NULL,
};
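/*
 * The write-only "flush" attribute above is exposed at /sys/bus/cxl/flush;
 * user tooling can wait for queued port teardown work to complete with, for
 * example:
 *
 *      echo 1 > /sys/bus/cxl/flush
 *
 * Any value other than "1" is rejected with -EINVAL.
 */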
struct bus_type cxl_bus_type = {
        .name = "cxl",
        .uevent = cxl_bus_uevent,
        .match = cxl_bus_match,
        .probe = cxl_bus_probe,
        .remove = cxl_bus_remove,
        .bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);

static struct dentry *cxl_debugfs;

struct dentry *cxl_debugfs_create_dir(const char *dir)
{
        return debugfs_create_dir(dir, cxl_debugfs);
}
EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);

static __init int cxl_core_init(void)
{
        int rc;

        cxl_debugfs = debugfs_create_dir("cxl", NULL);

        if (einj_cxl_is_initialized())
                debugfs_create_file("einj_types", 0400, cxl_debugfs, NULL,
                                    &einj_cxl_available_error_type_fops);

        cxl_mbox_init();

        rc = cxl_memdev_init();
        if (rc)
                return rc;

        cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
        if (!cxl_bus_wq) {
                rc = -ENOMEM;
                goto err_wq;
        }

        rc = bus_register(&cxl_bus_type);
        if (rc)
                goto err_bus;

        rc = cxl_region_init();
        if (rc)
                goto err_region;

        return 0;

err_region:
        bus_unregister(&cxl_bus_type);
err_bus:
        destroy_workqueue(cxl_bus_wq);
err_wq:
        cxl_memdev_exit();
        return rc;
}

static void cxl_core_exit(void)
{
        cxl_region_exit();
        bus_unregister(&cxl_bus_type);
        destroy_workqueue(cxl_bus_wq);
        cxl_memdev_exit();
        debugfs_remove_recursive(cxl_debugfs);
}

subsys_initcall(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_DESCRIPTION("CXL: Core Compute Express Link support");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);