// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL-aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL-aware drivers must register
 * with the CXL core via these interfaces in order to be able to participate in
 * cross-device interleave coordination. The CXL core also establishes and
 * maintains the bridge to the nvdimm subsystem.
 *
 * The CXL core introduces a sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */

/*
 * All changes to the interleave configuration occur with this lock held
 * for write.
 */
DECLARE_RWSEM(cxl_region_rwsem);

static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);

int cxl_num_decoders_committed(struct cxl_port *port)
{
	lockdep_assert_held(&cxl_region_rwsem);

	return port->commit_end + 1;
}

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static int cxl_device_id(const struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (dev->type == CXL_PMEM_REGION_TYPE())
		return CXL_DEVICE_PMEM_REGION;
	if (dev->type == CXL_DAX_REGION_TYPE())
		return CXL_DEVICE_DAX_REGION;
	if (is_cxl_port(dev)) {
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	if (dev->type == CXL_REGION_TYPE())
		return CXL_DEVICE_REGION;
	if (dev->type == &cxl_pmu_type)
		return CXL_DEVICE_PMU;
	return 0;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)				\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct cxl_decoder *cxld = to_cxl_decoder(dev);			\
									\
	return sysfs_emit(buf,
"%s\n", \ 120 (cxld->flags & (flag)) ? "1" : "0"); \ 121 } \ 122 static DEVICE_ATTR_RO(name) 123 124 CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM); 125 CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM); 126 CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2); 127 CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3); 128 CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK); 129 130 static ssize_t target_type_show(struct device *dev, 131 struct device_attribute *attr, char *buf) 132 { 133 struct cxl_decoder *cxld = to_cxl_decoder(dev); 134 135 switch (cxld->target_type) { 136 case CXL_DECODER_DEVMEM: 137 return sysfs_emit(buf, "accelerator\n"); 138 case CXL_DECODER_HOSTONLYMEM: 139 return sysfs_emit(buf, "expander\n"); 140 } 141 return -ENXIO; 142 } 143 static DEVICE_ATTR_RO(target_type); 144 145 static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf) 146 { 147 struct cxl_decoder *cxld = &cxlsd->cxld; 148 ssize_t offset = 0; 149 int i, rc = 0; 150 151 for (i = 0; i < cxld->interleave_ways; i++) { 152 struct cxl_dport *dport = cxlsd->target[i]; 153 struct cxl_dport *next = NULL; 154 155 if (!dport) 156 break; 157 158 if (i + 1 < cxld->interleave_ways) 159 next = cxlsd->target[i + 1]; 160 rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id, 161 next ? "," : ""); 162 if (rc < 0) 163 return rc; 164 offset += rc; 165 } 166 167 return offset; 168 } 169 170 static ssize_t target_list_show(struct device *dev, 171 struct device_attribute *attr, char *buf) 172 { 173 struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); 174 ssize_t offset; 175 unsigned int seq; 176 int rc; 177 178 do { 179 seq = read_seqbegin(&cxlsd->target_lock); 180 rc = emit_target_list(cxlsd, buf); 181 } while (read_seqretry(&cxlsd->target_lock, seq)); 182 183 if (rc < 0) 184 return rc; 185 offset = rc; 186 187 rc = sysfs_emit_at(buf, offset, "\n"); 188 if (rc < 0) 189 return rc; 190 191 return offset + rc; 192 } 193 static DEVICE_ATTR_RO(target_list); 194 195 static ssize_t mode_show(struct device *dev, struct device_attribute *attr, 196 char *buf) 197 { 198 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 199 200 return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode)); 201 } 202 203 static ssize_t mode_store(struct device *dev, struct device_attribute *attr, 204 const char *buf, size_t len) 205 { 206 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 207 enum cxl_decoder_mode mode; 208 ssize_t rc; 209 210 if (sysfs_streq(buf, "pmem")) 211 mode = CXL_DECODER_PMEM; 212 else if (sysfs_streq(buf, "ram")) 213 mode = CXL_DECODER_RAM; 214 else 215 return -EINVAL; 216 217 rc = cxl_dpa_set_mode(cxled, mode); 218 if (rc) 219 return rc; 220 221 return len; 222 } 223 static DEVICE_ATTR_RW(mode); 224 225 static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr, 226 char *buf) 227 { 228 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 229 230 guard(rwsem_read)(&cxl_dpa_rwsem); 231 return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled)); 232 } 233 static DEVICE_ATTR_RO(dpa_resource); 234 235 static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr, 236 char *buf) 237 { 238 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 239 resource_size_t size = cxl_dpa_size(cxled); 240 241 return sysfs_emit(buf, "%pa\n", &size); 242 } 243 244 static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr, 245 const char *buf, size_t len) 246 { 247 struct 
cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 248 unsigned long long size; 249 ssize_t rc; 250 251 rc = kstrtoull(buf, 0, &size); 252 if (rc) 253 return rc; 254 255 if (!IS_ALIGNED(size, SZ_256M)) 256 return -EINVAL; 257 258 rc = cxl_dpa_free(cxled); 259 if (rc) 260 return rc; 261 262 if (size == 0) 263 return len; 264 265 rc = cxl_dpa_alloc(cxled, size); 266 if (rc) 267 return rc; 268 269 return len; 270 } 271 static DEVICE_ATTR_RW(dpa_size); 272 273 static ssize_t interleave_granularity_show(struct device *dev, 274 struct device_attribute *attr, 275 char *buf) 276 { 277 struct cxl_decoder *cxld = to_cxl_decoder(dev); 278 279 return sysfs_emit(buf, "%d\n", cxld->interleave_granularity); 280 } 281 282 static DEVICE_ATTR_RO(interleave_granularity); 283 284 static ssize_t interleave_ways_show(struct device *dev, 285 struct device_attribute *attr, char *buf) 286 { 287 struct cxl_decoder *cxld = to_cxl_decoder(dev); 288 289 return sysfs_emit(buf, "%d\n", cxld->interleave_ways); 290 } 291 292 static DEVICE_ATTR_RO(interleave_ways); 293 294 static ssize_t qos_class_show(struct device *dev, 295 struct device_attribute *attr, char *buf) 296 { 297 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 298 299 return sysfs_emit(buf, "%d\n", cxlrd->qos_class); 300 } 301 static DEVICE_ATTR_RO(qos_class); 302 303 static struct attribute *cxl_decoder_base_attrs[] = { 304 &dev_attr_start.attr, 305 &dev_attr_size.attr, 306 &dev_attr_locked.attr, 307 &dev_attr_interleave_granularity.attr, 308 &dev_attr_interleave_ways.attr, 309 NULL, 310 }; 311 312 static struct attribute_group cxl_decoder_base_attribute_group = { 313 .attrs = cxl_decoder_base_attrs, 314 }; 315 316 static struct attribute *cxl_decoder_root_attrs[] = { 317 &dev_attr_cap_pmem.attr, 318 &dev_attr_cap_ram.attr, 319 &dev_attr_cap_type2.attr, 320 &dev_attr_cap_type3.attr, 321 &dev_attr_target_list.attr, 322 &dev_attr_qos_class.attr, 323 SET_CXL_REGION_ATTR(create_pmem_region) 324 SET_CXL_REGION_ATTR(create_ram_region) 325 SET_CXL_REGION_ATTR(delete_region) 326 NULL, 327 }; 328 329 static bool can_create_pmem(struct cxl_root_decoder *cxlrd) 330 { 331 unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM; 332 333 return (cxlrd->cxlsd.cxld.flags & flags) == flags; 334 } 335 336 static bool can_create_ram(struct cxl_root_decoder *cxlrd) 337 { 338 unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_RAM; 339 340 return (cxlrd->cxlsd.cxld.flags & flags) == flags; 341 } 342 343 static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n) 344 { 345 struct device *dev = kobj_to_dev(kobj); 346 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 347 348 if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd)) 349 return 0; 350 351 if (a == CXL_REGION_ATTR(create_ram_region) && !can_create_ram(cxlrd)) 352 return 0; 353 354 if (a == CXL_REGION_ATTR(delete_region) && 355 !(can_create_pmem(cxlrd) || can_create_ram(cxlrd))) 356 return 0; 357 358 return a->mode; 359 } 360 361 static struct attribute_group cxl_decoder_root_attribute_group = { 362 .attrs = cxl_decoder_root_attrs, 363 .is_visible = cxl_root_decoder_visible, 364 }; 365 366 static const struct attribute_group *cxl_decoder_root_attribute_groups[] = { 367 &cxl_decoder_root_attribute_group, 368 &cxl_decoder_base_attribute_group, 369 &cxl_base_attribute_group, 370 NULL, 371 }; 372 373 static struct attribute *cxl_decoder_switch_attrs[] = { 374 &dev_attr_target_type.attr, 375 &dev_attr_target_list.attr, 376 
SET_CXL_REGION_ATTR(region) 377 NULL, 378 }; 379 380 static struct attribute_group cxl_decoder_switch_attribute_group = { 381 .attrs = cxl_decoder_switch_attrs, 382 }; 383 384 static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = { 385 &cxl_decoder_switch_attribute_group, 386 &cxl_decoder_base_attribute_group, 387 &cxl_base_attribute_group, 388 NULL, 389 }; 390 391 static struct attribute *cxl_decoder_endpoint_attrs[] = { 392 &dev_attr_target_type.attr, 393 &dev_attr_mode.attr, 394 &dev_attr_dpa_size.attr, 395 &dev_attr_dpa_resource.attr, 396 SET_CXL_REGION_ATTR(region) 397 NULL, 398 }; 399 400 static struct attribute_group cxl_decoder_endpoint_attribute_group = { 401 .attrs = cxl_decoder_endpoint_attrs, 402 }; 403 404 static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = { 405 &cxl_decoder_base_attribute_group, 406 &cxl_decoder_endpoint_attribute_group, 407 &cxl_base_attribute_group, 408 NULL, 409 }; 410 411 static void __cxl_decoder_release(struct cxl_decoder *cxld) 412 { 413 struct cxl_port *port = to_cxl_port(cxld->dev.parent); 414 415 ida_free(&port->decoder_ida, cxld->id); 416 put_device(&port->dev); 417 } 418 419 static void cxl_endpoint_decoder_release(struct device *dev) 420 { 421 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 422 423 __cxl_decoder_release(&cxled->cxld); 424 kfree(cxled); 425 } 426 427 static void cxl_switch_decoder_release(struct device *dev) 428 { 429 struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); 430 431 __cxl_decoder_release(&cxlsd->cxld); 432 kfree(cxlsd); 433 } 434 435 struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev) 436 { 437 if (dev_WARN_ONCE(dev, !is_root_decoder(dev), 438 "not a cxl_root_decoder device\n")) 439 return NULL; 440 return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev); 441 } 442 EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL); 443 444 static void cxl_root_decoder_release(struct device *dev) 445 { 446 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 447 448 if (atomic_read(&cxlrd->region_id) >= 0) 449 memregion_free(atomic_read(&cxlrd->region_id)); 450 __cxl_decoder_release(&cxlrd->cxlsd.cxld); 451 kfree(cxlrd); 452 } 453 454 static const struct device_type cxl_decoder_endpoint_type = { 455 .name = "cxl_decoder_endpoint", 456 .release = cxl_endpoint_decoder_release, 457 .groups = cxl_decoder_endpoint_attribute_groups, 458 }; 459 460 static const struct device_type cxl_decoder_switch_type = { 461 .name = "cxl_decoder_switch", 462 .release = cxl_switch_decoder_release, 463 .groups = cxl_decoder_switch_attribute_groups, 464 }; 465 466 static const struct device_type cxl_decoder_root_type = { 467 .name = "cxl_decoder_root", 468 .release = cxl_root_decoder_release, 469 .groups = cxl_decoder_root_attribute_groups, 470 }; 471 472 bool is_endpoint_decoder(struct device *dev) 473 { 474 return dev->type == &cxl_decoder_endpoint_type; 475 } 476 EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL); 477 478 bool is_root_decoder(struct device *dev) 479 { 480 return dev->type == &cxl_decoder_root_type; 481 } 482 EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL); 483 484 bool is_switch_decoder(struct device *dev) 485 { 486 return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type; 487 } 488 EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL); 489 490 struct cxl_decoder *to_cxl_decoder(struct device *dev) 491 { 492 if (dev_WARN_ONCE(dev, 493 !is_switch_decoder(dev) && !is_endpoint_decoder(dev), 494 "not a cxl_decoder device\n")) 495 return NULL; 496 
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);

struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
			  "not a cxl_endpoint_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);

struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
			  "not a cxl_switch_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);

static void cxl_ep_release(struct cxl_ep *ep)
{
	put_device(ep->ep);
	kfree(ep);
}

static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
{
	if (!ep)
		return;
	xa_erase(&port->endpoints, (unsigned long) ep->ep);
	cxl_ep_release(ep);
}

static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	unsigned long index;
	struct cxl_ep *ep;

	xa_for_each(&port->endpoints, index, ep)
		cxl_ep_remove(port, ep);
	xa_destroy(&port->endpoints);
	xa_destroy(&port->dports);
	xa_destroy(&port->regions);
	ida_free(&cxl_port_ida, port->id);
	kfree(port);
}

static ssize_t decoders_committed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct cxl_port *port = to_cxl_port(dev);
	int rc;

	down_read(&cxl_region_rwsem);
	rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
	up_read(&cxl_region_rwsem);

	return rc;
}

static DEVICE_ATTR_RO(decoders_committed);

static struct attribute *cxl_port_attrs[] = {
	&dev_attr_decoders_committed.attr,
	NULL,
};

static struct attribute_group cxl_port_attribute_group = {
	.attrs = cxl_port_attrs,
};

static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	&cxl_port_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};

bool is_cxl_port(const struct device *dev)
{
	return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);

struct cxl_port *to_cxl_port(const struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);

static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_port *parent;
	struct device *lock_dev;

	if (is_cxl_root(port))
		parent = NULL;
	else
		parent = to_cxl_port(port->dev.parent);

	/*
	 * CXL root ports and the first level of ports are unregistered
	 * under the platform firmware device lock; all other ports are
	 * unregistered while holding their parent port lock.
613 */ 614 if (!parent) 615 lock_dev = port->uport_dev; 616 else if (is_cxl_root(parent)) 617 lock_dev = parent->uport_dev; 618 else 619 lock_dev = &parent->dev; 620 621 device_lock_assert(lock_dev); 622 port->dead = true; 623 device_unregister(&port->dev); 624 } 625 626 static void cxl_unlink_uport(void *_port) 627 { 628 struct cxl_port *port = _port; 629 630 sysfs_remove_link(&port->dev.kobj, "uport"); 631 } 632 633 static int devm_cxl_link_uport(struct device *host, struct cxl_port *port) 634 { 635 int rc; 636 637 rc = sysfs_create_link(&port->dev.kobj, &port->uport_dev->kobj, 638 "uport"); 639 if (rc) 640 return rc; 641 return devm_add_action_or_reset(host, cxl_unlink_uport, port); 642 } 643 644 static void cxl_unlink_parent_dport(void *_port) 645 { 646 struct cxl_port *port = _port; 647 648 sysfs_remove_link(&port->dev.kobj, "parent_dport"); 649 } 650 651 static int devm_cxl_link_parent_dport(struct device *host, 652 struct cxl_port *port, 653 struct cxl_dport *parent_dport) 654 { 655 int rc; 656 657 if (!parent_dport) 658 return 0; 659 660 rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport_dev->kobj, 661 "parent_dport"); 662 if (rc) 663 return rc; 664 return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port); 665 } 666 667 static struct lock_class_key cxl_port_key; 668 669 static struct cxl_port *cxl_port_alloc(struct device *uport_dev, 670 struct cxl_dport *parent_dport) 671 { 672 struct cxl_port *port; 673 struct device *dev; 674 int rc; 675 676 port = kzalloc(sizeof(*port), GFP_KERNEL); 677 if (!port) 678 return ERR_PTR(-ENOMEM); 679 680 rc = ida_alloc(&cxl_port_ida, GFP_KERNEL); 681 if (rc < 0) 682 goto err; 683 port->id = rc; 684 port->uport_dev = uport_dev; 685 686 /* 687 * The top-level cxl_port "cxl_root" does not have a cxl_port as 688 * its parent and it does not have any corresponding component 689 * registers as its decode is described by a fixed platform 690 * description. 
691 */ 692 dev = &port->dev; 693 if (parent_dport) { 694 struct cxl_port *parent_port = parent_dport->port; 695 struct cxl_port *iter; 696 697 dev->parent = &parent_port->dev; 698 port->depth = parent_port->depth + 1; 699 port->parent_dport = parent_dport; 700 701 /* 702 * walk to the host bridge, or the first ancestor that knows 703 * the host bridge 704 */ 705 iter = port; 706 while (!iter->host_bridge && 707 !is_cxl_root(to_cxl_port(iter->dev.parent))) 708 iter = to_cxl_port(iter->dev.parent); 709 if (iter->host_bridge) 710 port->host_bridge = iter->host_bridge; 711 else if (parent_dport->rch) 712 port->host_bridge = parent_dport->dport_dev; 713 else 714 port->host_bridge = iter->uport_dev; 715 dev_dbg(uport_dev, "host-bridge: %s\n", 716 dev_name(port->host_bridge)); 717 } else 718 dev->parent = uport_dev; 719 720 ida_init(&port->decoder_ida); 721 port->hdm_end = -1; 722 port->commit_end = -1; 723 xa_init(&port->dports); 724 xa_init(&port->endpoints); 725 xa_init(&port->regions); 726 727 device_initialize(dev); 728 lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth); 729 device_set_pm_not_required(dev); 730 dev->bus = &cxl_bus_type; 731 dev->type = &cxl_port_type; 732 733 return port; 734 735 err: 736 kfree(port); 737 return ERR_PTR(rc); 738 } 739 740 static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map, 741 resource_size_t component_reg_phys) 742 { 743 *map = (struct cxl_register_map) { 744 .host = host, 745 .reg_type = CXL_REGLOC_RBI_EMPTY, 746 .resource = component_reg_phys, 747 }; 748 749 if (component_reg_phys == CXL_RESOURCE_NONE) 750 return 0; 751 752 map->reg_type = CXL_REGLOC_RBI_COMPONENT; 753 map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE; 754 755 return cxl_setup_regs(map); 756 } 757 758 static int cxl_port_setup_regs(struct cxl_port *port, 759 resource_size_t component_reg_phys) 760 { 761 if (dev_is_platform(port->uport_dev)) 762 return 0; 763 return cxl_setup_comp_regs(&port->dev, &port->reg_map, 764 component_reg_phys); 765 } 766 767 static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport, 768 resource_size_t component_reg_phys) 769 { 770 int rc; 771 772 if (dev_is_platform(dport->dport_dev)) 773 return 0; 774 775 /* 776 * use @dport->dport_dev for the context for error messages during 777 * register probing, and fixup @host after the fact, since @host may be 778 * NULL. 779 */ 780 rc = cxl_setup_comp_regs(dport->dport_dev, &dport->reg_map, 781 component_reg_phys); 782 dport->reg_map.host = host; 783 return rc; 784 } 785 786 static struct cxl_port *__devm_cxl_add_port(struct device *host, 787 struct device *uport_dev, 788 resource_size_t component_reg_phys, 789 struct cxl_dport *parent_dport) 790 { 791 struct cxl_port *port; 792 struct device *dev; 793 int rc; 794 795 port = cxl_port_alloc(uport_dev, parent_dport); 796 if (IS_ERR(port)) 797 return port; 798 799 dev = &port->dev; 800 if (is_cxl_memdev(uport_dev)) { 801 struct cxl_memdev *cxlmd = to_cxl_memdev(uport_dev); 802 struct cxl_dev_state *cxlds = cxlmd->cxlds; 803 804 rc = dev_set_name(dev, "endpoint%d", port->id); 805 if (rc) 806 goto err; 807 808 /* 809 * The endpoint driver already enumerated the component and RAS 810 * registers. Reuse that enumeration while prepping them to be 811 * mapped by the cxl_port driver. 
812 */ 813 port->reg_map = cxlds->reg_map; 814 port->reg_map.host = &port->dev; 815 } else if (parent_dport) { 816 rc = dev_set_name(dev, "port%d", port->id); 817 if (rc) 818 goto err; 819 820 rc = cxl_port_setup_regs(port, component_reg_phys); 821 if (rc) 822 goto err; 823 } else 824 rc = dev_set_name(dev, "root%d", port->id); 825 if (rc) 826 goto err; 827 828 rc = device_add(dev); 829 if (rc) 830 goto err; 831 832 rc = devm_add_action_or_reset(host, unregister_port, port); 833 if (rc) 834 return ERR_PTR(rc); 835 836 rc = devm_cxl_link_uport(host, port); 837 if (rc) 838 return ERR_PTR(rc); 839 840 rc = devm_cxl_link_parent_dport(host, port, parent_dport); 841 if (rc) 842 return ERR_PTR(rc); 843 844 return port; 845 846 err: 847 put_device(dev); 848 return ERR_PTR(rc); 849 } 850 851 /** 852 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy 853 * @host: host device for devm operations 854 * @uport_dev: "physical" device implementing this upstream port 855 * @component_reg_phys: (optional) for configurable cxl_port instances 856 * @parent_dport: next hop up in the CXL memory decode hierarchy 857 */ 858 struct cxl_port *devm_cxl_add_port(struct device *host, 859 struct device *uport_dev, 860 resource_size_t component_reg_phys, 861 struct cxl_dport *parent_dport) 862 { 863 struct cxl_port *port, *parent_port; 864 865 port = __devm_cxl_add_port(host, uport_dev, component_reg_phys, 866 parent_dport); 867 868 parent_port = parent_dport ? parent_dport->port : NULL; 869 if (IS_ERR(port)) { 870 dev_dbg(uport_dev, "Failed to add%s%s%s: %ld\n", 871 parent_port ? " port to " : "", 872 parent_port ? dev_name(&parent_port->dev) : "", 873 parent_port ? "" : " root port", 874 PTR_ERR(port)); 875 } else { 876 dev_dbg(uport_dev, "%s added%s%s%s\n", 877 dev_name(&port->dev), 878 parent_port ? " to " : "", 879 parent_port ? dev_name(&parent_port->dev) : "", 880 parent_port ? 
"" : " (root port)"); 881 } 882 883 return port; 884 } 885 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL); 886 887 struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port) 888 { 889 /* There is no pci_bus associated with a CXL platform-root port */ 890 if (is_cxl_root(port)) 891 return NULL; 892 893 if (dev_is_pci(port->uport_dev)) { 894 struct pci_dev *pdev = to_pci_dev(port->uport_dev); 895 896 return pdev->subordinate; 897 } 898 899 return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev); 900 } 901 EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL); 902 903 static void unregister_pci_bus(void *uport_dev) 904 { 905 xa_erase(&cxl_root_buses, (unsigned long)uport_dev); 906 } 907 908 int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev, 909 struct pci_bus *bus) 910 { 911 int rc; 912 913 if (dev_is_pci(uport_dev)) 914 return -EINVAL; 915 916 rc = xa_insert(&cxl_root_buses, (unsigned long)uport_dev, bus, 917 GFP_KERNEL); 918 if (rc) 919 return rc; 920 return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev); 921 } 922 EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL); 923 924 static bool dev_is_cxl_root_child(struct device *dev) 925 { 926 struct cxl_port *port, *parent; 927 928 if (!is_cxl_port(dev)) 929 return false; 930 931 port = to_cxl_port(dev); 932 if (is_cxl_root(port)) 933 return false; 934 935 parent = to_cxl_port(port->dev.parent); 936 if (is_cxl_root(parent)) 937 return true; 938 939 return false; 940 } 941 942 struct cxl_port *find_cxl_root(struct cxl_port *port) 943 { 944 struct cxl_port *iter = port; 945 946 while (iter && !is_cxl_root(iter)) 947 iter = to_cxl_port(iter->dev.parent); 948 949 if (!iter) 950 return NULL; 951 get_device(&iter->dev); 952 return iter; 953 } 954 EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL); 955 956 static struct cxl_dport *find_dport(struct cxl_port *port, int id) 957 { 958 struct cxl_dport *dport; 959 unsigned long index; 960 961 device_lock_assert(&port->dev); 962 xa_for_each(&port->dports, index, dport) 963 if (dport->port_id == id) 964 return dport; 965 return NULL; 966 } 967 968 static int add_dport(struct cxl_port *port, struct cxl_dport *dport) 969 { 970 struct cxl_dport *dup; 971 int rc; 972 973 device_lock_assert(&port->dev); 974 dup = find_dport(port, dport->port_id); 975 if (dup) { 976 dev_err(&port->dev, 977 "unable to add dport%d-%s non-unique port id (%s)\n", 978 dport->port_id, dev_name(dport->dport_dev), 979 dev_name(dup->dport_dev)); 980 return -EBUSY; 981 } 982 983 rc = xa_insert(&port->dports, (unsigned long)dport->dport_dev, dport, 984 GFP_KERNEL); 985 if (rc) 986 return rc; 987 988 port->nr_dports++; 989 return 0; 990 } 991 992 /* 993 * Since root-level CXL dports cannot be enumerated by PCI they are not 994 * enumerated by the common port driver that acquires the port lock over 995 * dport add/remove. Instead, root dports are manually added by a 996 * platform driver and cond_cxl_root_lock() is used to take the missing 997 * port lock in that case. 
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_lock(&port->dev);
}

static void cond_cxl_root_unlock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_unlock(&port->dev);
}

static void cxl_dport_remove(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;

	xa_erase(&port->dports, (unsigned long) dport->dport_dev);
	put_device(dport->dport_dev);
}

static void cxl_dport_unlink(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;
	char link_name[CXL_TARGET_STRLEN];

	sprintf(link_name, "dport%d", dport->port_id);
	sysfs_remove_link(&port->dev.kobj, link_name);
}

static struct cxl_dport *
__devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
		     int port_id, resource_size_t component_reg_phys,
		     resource_size_t rcrb)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	if (is_cxl_root(port))
		host = port->uport_dev;
	else
		host = &port->dev;

	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	dport->dport_dev = dport_dev;
	dport->port_id = port_id;
	dport->port = port;

	if (rcrb == CXL_RESOURCE_NONE) {
		rc = cxl_dport_setup_regs(&port->dev, dport,
					  component_reg_phys);
		if (rc)
			return ERR_PTR(rc);
	} else {
		dport->rcrb.base = rcrb;
		component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
							 CXL_RCRB_DOWNSTREAM);
		if (component_reg_phys == CXL_RESOURCE_NONE) {
			dev_warn(dport_dev, "Invalid Component Registers in RCRB");
			return ERR_PTR(-ENXIO);
		}

		/*
		 * RCH @dport is not ready to map until associated with its
		 * memdev
		 */
		rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
		if (rc)
			return ERR_PTR(rc);

		dport->rch = true;
	}

	if (component_reg_phys != CXL_RESOURCE_NONE)
		dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
			&component_reg_phys);

	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	return dport;
}
/**
 * devm_cxl_add_dport - append VH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release actions of either
 * the port's host (for root ports), or the port itself (for switch ports).
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{
	struct cxl_dport *dport;

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     component_reg_phys, CXL_RESOURCE_NONE);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);

/**
 * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @rcrb: mandatory location of a Root Complex Register Block
 *
 * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH
 */
struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
					 struct device *dport_dev, int port_id,
					 resource_size_t rcrb)
{
	struct cxl_dport *dport;

	if (rcrb == CXL_RESOURCE_NONE) {
		dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n");
		return ERR_PTR(-EINVAL);
	}

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     CXL_RESOURCE_NONE, rcrb);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "RCH dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);
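/*
 * Illustrative sketch (not part of the original file): a platform driver
 * publishing one VH downstream port on a previously registered @root_port.
 * The variables root_port, hb_dev, uid, and chbcr are hypothetical
 * stand-ins for a host bridge device, its port id, and its component
 * register base.
 *
 *	struct cxl_dport *dport;
 *
 *	dport = devm_cxl_add_dport(root_port, hb_dev, uid, chbcr);
 *	if (IS_ERR(dport))
 *		return PTR_ERR(dport);
 */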
static int add_ep(struct cxl_ep *new)
{
	struct cxl_port *port = new->dport->port;
	int rc;

	device_lock(&port->dev);
	if (port->dead) {
		device_unlock(&port->dev);
		return -ENXIO;
	}
	rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
		       GFP_KERNEL);
	device_unlock(&port->dev);

	return rc;
}

/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @dport: the dport that routes to @ep_dev
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart, the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
	struct cxl_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->ep = get_device(ep_dev);
	ep->dport = dport;

	rc = add_ep(ep);
	if (rc)
		cxl_ep_release(ep);
	return rc;
}

struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
	struct cxl_dport **dport;
};

static int match_port_by_dport(struct device *dev, const void *data)
{
	const struct cxl_find_port_ctx *ctx = data;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;
	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
		return 0;

	port = to_cxl_port(dev);
	dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
	if (ctx->dport)
		*ctx->dport = dport;
	return dport != NULL;
}

static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
	struct device *dev;

	if (!ctx->dport_dev)
		return NULL;

	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
	if (dev)
		return to_cxl_port(dev);
	return NULL;
}

static struct cxl_port *find_cxl_port(struct device *dport_dev,
				      struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev,
					 struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.parent_port = parent_port,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

/*
 * All users of grandparent() are using it to walk a PCIe-like switch port
 * hierarchy. A PCIe switch is composed of a bridge device representing the
 * upstream switch port and N bridges representing downstream switch ports. When
 * bridges stack, the grandparent of a downstream switch port is another
 * downstream switch port in the immediate ancestor switch.
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}

static struct device *endpoint_host(struct cxl_port *endpoint)
{
	struct cxl_port *port = to_cxl_port(endpoint->dev.parent);

	if (is_cxl_root(port))
		return port->uport_dev;
	return &port->dev;
}

static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = cxlmd->endpoint;
	struct device *host = endpoint_host(endpoint);

	device_lock(host);
	if (host->driver && !endpoint->dead) {
		devm_release_action(host, cxl_unlink_parent_dport, endpoint);
		devm_release_action(host, cxl_unlink_uport, endpoint);
		devm_release_action(host, unregister_port, endpoint);
	}
	cxlmd->endpoint = NULL;
	device_unlock(host);
	put_device(&endpoint->dev);
	put_device(host);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *host = endpoint_host(endpoint);
	struct device *dev = &cxlmd->dev;

	get_device(host);
	get_device(&endpoint->dev);
	cxlmd->endpoint = endpoint;
	cxlmd->depth = endpoint->depth;
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() not to change its
 * devm action registration order, and for dports to have already been
 * destroyed by reap_dports().
1350 */ 1351 static void delete_switch_port(struct cxl_port *port) 1352 { 1353 devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port); 1354 devm_release_action(port->dev.parent, cxl_unlink_uport, port); 1355 devm_release_action(port->dev.parent, unregister_port, port); 1356 } 1357 1358 static void reap_dports(struct cxl_port *port) 1359 { 1360 struct cxl_dport *dport; 1361 unsigned long index; 1362 1363 device_lock_assert(&port->dev); 1364 1365 xa_for_each(&port->dports, index, dport) { 1366 devm_release_action(&port->dev, cxl_dport_unlink, dport); 1367 devm_release_action(&port->dev, cxl_dport_remove, dport); 1368 devm_kfree(&port->dev, dport); 1369 } 1370 } 1371 1372 struct detach_ctx { 1373 struct cxl_memdev *cxlmd; 1374 int depth; 1375 }; 1376 1377 static int port_has_memdev(struct device *dev, const void *data) 1378 { 1379 const struct detach_ctx *ctx = data; 1380 struct cxl_port *port; 1381 1382 if (!is_cxl_port(dev)) 1383 return 0; 1384 1385 port = to_cxl_port(dev); 1386 if (port->depth != ctx->depth) 1387 return 0; 1388 1389 return !!cxl_ep_load(port, ctx->cxlmd); 1390 } 1391 1392 static void cxl_detach_ep(void *data) 1393 { 1394 struct cxl_memdev *cxlmd = data; 1395 1396 for (int i = cxlmd->depth - 1; i >= 1; i--) { 1397 struct cxl_port *port, *parent_port; 1398 struct detach_ctx ctx = { 1399 .cxlmd = cxlmd, 1400 .depth = i, 1401 }; 1402 struct device *dev; 1403 struct cxl_ep *ep; 1404 bool died = false; 1405 1406 dev = bus_find_device(&cxl_bus_type, NULL, &ctx, 1407 port_has_memdev); 1408 if (!dev) 1409 continue; 1410 port = to_cxl_port(dev); 1411 1412 parent_port = to_cxl_port(port->dev.parent); 1413 device_lock(&parent_port->dev); 1414 device_lock(&port->dev); 1415 ep = cxl_ep_load(port, cxlmd); 1416 dev_dbg(&cxlmd->dev, "disconnect %s from %s\n", 1417 ep ? dev_name(ep->ep) : "", dev_name(&port->dev)); 1418 cxl_ep_remove(port, ep); 1419 if (ep && !port->dead && xa_empty(&port->endpoints) && 1420 !is_cxl_root(parent_port) && parent_port->dev.driver) { 1421 /* 1422 * This was the last ep attached to a dynamically 1423 * enumerated port. Block new cxl_add_ep() and garbage 1424 * collect the port. 1425 */ 1426 died = true; 1427 port->dead = true; 1428 reap_dports(port); 1429 } 1430 device_unlock(&port->dev); 1431 1432 if (died) { 1433 dev_dbg(&cxlmd->dev, "delete %s\n", 1434 dev_name(&port->dev)); 1435 delete_switch_port(port); 1436 } 1437 put_device(&port->dev); 1438 device_unlock(&parent_port->dev); 1439 } 1440 } 1441 1442 static resource_size_t find_component_registers(struct device *dev) 1443 { 1444 struct cxl_register_map map; 1445 struct pci_dev *pdev; 1446 1447 /* 1448 * Theoretically, CXL component registers can be hosted on a 1449 * non-PCI device, in practice, only cxl_test hits this case. 1450 */ 1451 if (!dev_is_pci(dev)) 1452 return CXL_RESOURCE_NONE; 1453 1454 pdev = to_pci_dev(dev); 1455 1456 cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map); 1457 return map.resource; 1458 } 1459 1460 static int add_port_attach_ep(struct cxl_memdev *cxlmd, 1461 struct device *uport_dev, 1462 struct device *dport_dev) 1463 { 1464 struct device *dparent = grandparent(dport_dev); 1465 struct cxl_port *port, *parent_port = NULL; 1466 struct cxl_dport *dport, *parent_dport; 1467 resource_size_t component_reg_phys; 1468 int rc; 1469 1470 if (!dparent) { 1471 /* 1472 * The iteration reached the topology root without finding the 1473 * CXL-root 'cxl_port' on a previous iteration, fail for now to 1474 * be re-probed after platform driver attaches. 
1475 */ 1476 dev_dbg(&cxlmd->dev, "%s is a root dport\n", 1477 dev_name(dport_dev)); 1478 return -ENXIO; 1479 } 1480 1481 parent_port = find_cxl_port(dparent, &parent_dport); 1482 if (!parent_port) { 1483 /* iterate to create this parent_port */ 1484 return -EAGAIN; 1485 } 1486 1487 device_lock(&parent_port->dev); 1488 if (!parent_port->dev.driver) { 1489 dev_warn(&cxlmd->dev, 1490 "port %s:%s disabled, failed to enumerate CXL.mem\n", 1491 dev_name(&parent_port->dev), dev_name(uport_dev)); 1492 port = ERR_PTR(-ENXIO); 1493 goto out; 1494 } 1495 1496 port = find_cxl_port_at(parent_port, dport_dev, &dport); 1497 if (!port) { 1498 component_reg_phys = find_component_registers(uport_dev); 1499 port = devm_cxl_add_port(&parent_port->dev, uport_dev, 1500 component_reg_phys, parent_dport); 1501 /* retry find to pick up the new dport information */ 1502 if (!IS_ERR(port)) 1503 port = find_cxl_port_at(parent_port, dport_dev, &dport); 1504 } 1505 out: 1506 device_unlock(&parent_port->dev); 1507 1508 if (IS_ERR(port)) 1509 rc = PTR_ERR(port); 1510 else { 1511 dev_dbg(&cxlmd->dev, "add to new port %s:%s\n", 1512 dev_name(&port->dev), dev_name(port->uport_dev)); 1513 rc = cxl_add_ep(dport, &cxlmd->dev); 1514 if (rc == -EBUSY) { 1515 /* 1516 * "can't" happen, but this error code means 1517 * something to the caller, so translate it. 1518 */ 1519 rc = -ENXIO; 1520 } 1521 put_device(&port->dev); 1522 } 1523 1524 put_device(&parent_port->dev); 1525 return rc; 1526 } 1527 1528 int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd) 1529 { 1530 struct device *dev = &cxlmd->dev; 1531 struct device *iter; 1532 int rc; 1533 1534 /* 1535 * Skip intermediate port enumeration in the RCH case, there 1536 * are no ports in between a host bridge and an endpoint. 1537 */ 1538 if (cxlmd->cxlds->rcd) 1539 return 0; 1540 1541 rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd); 1542 if (rc) 1543 return rc; 1544 1545 /* 1546 * Scan for and add all cxl_ports in this device's ancestry. 1547 * Repeat until no more ports are added. Abort if a port add 1548 * attempt fails. 1549 */ 1550 retry: 1551 for (iter = dev; iter; iter = grandparent(iter)) { 1552 struct device *dport_dev = grandparent(iter); 1553 struct device *uport_dev; 1554 struct cxl_dport *dport; 1555 struct cxl_port *port; 1556 1557 /* 1558 * The terminal "grandparent" in PCI is NULL and @platform_bus 1559 * for platform devices 1560 */ 1561 if (!dport_dev || dport_dev == &platform_bus) 1562 return 0; 1563 1564 uport_dev = dport_dev->parent; 1565 if (!uport_dev) { 1566 dev_warn(dev, "at %s no parent for dport: %s\n", 1567 dev_name(iter), dev_name(dport_dev)); 1568 return -ENXIO; 1569 } 1570 1571 dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n", 1572 dev_name(iter), dev_name(dport_dev), 1573 dev_name(uport_dev)); 1574 port = find_cxl_port(dport_dev, &dport); 1575 if (port) { 1576 dev_dbg(&cxlmd->dev, 1577 "found already registered port %s:%s\n", 1578 dev_name(&port->dev), 1579 dev_name(port->uport_dev)); 1580 rc = cxl_add_ep(dport, &cxlmd->dev); 1581 1582 /* 1583 * If the endpoint already exists in the port's list, 1584 * that's ok, it was added on a previous pass. 1585 * Otherwise, retry in add_port_attach_ep() after taking 1586 * the parent_port lock as the current port may be being 1587 * reaped. 1588 */ 1589 if (rc && rc != -EBUSY) { 1590 put_device(&port->dev); 1591 return rc; 1592 } 1593 1594 /* Any more ports to add between this one and the root? 
*/ 1595 if (!dev_is_cxl_root_child(&port->dev)) { 1596 put_device(&port->dev); 1597 continue; 1598 } 1599 1600 put_device(&port->dev); 1601 return 0; 1602 } 1603 1604 rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev); 1605 /* port missing, try to add parent */ 1606 if (rc == -EAGAIN) 1607 continue; 1608 /* failed to add ep or port */ 1609 if (rc) 1610 return rc; 1611 /* port added, new descendants possible, start over */ 1612 goto retry; 1613 } 1614 1615 return 0; 1616 } 1617 EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL); 1618 1619 struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev, 1620 struct cxl_dport **dport) 1621 { 1622 return find_cxl_port(pdev->dev.parent, dport); 1623 } 1624 EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL); 1625 1626 struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd, 1627 struct cxl_dport **dport) 1628 { 1629 return find_cxl_port(grandparent(&cxlmd->dev), dport); 1630 } 1631 EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL); 1632 1633 static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd, 1634 struct cxl_port *port, int *target_map) 1635 { 1636 int i, rc = 0; 1637 1638 if (!target_map) 1639 return 0; 1640 1641 device_lock_assert(&port->dev); 1642 1643 if (xa_empty(&port->dports)) 1644 return -EINVAL; 1645 1646 write_seqlock(&cxlsd->target_lock); 1647 for (i = 0; i < cxlsd->nr_targets; i++) { 1648 struct cxl_dport *dport = find_dport(port, target_map[i]); 1649 1650 if (!dport) { 1651 rc = -ENXIO; 1652 break; 1653 } 1654 cxlsd->target[i] = dport; 1655 } 1656 write_sequnlock(&cxlsd->target_lock); 1657 1658 return rc; 1659 } 1660 1661 struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos) 1662 { 1663 struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd; 1664 struct cxl_decoder *cxld = &cxlsd->cxld; 1665 int iw; 1666 1667 iw = cxld->interleave_ways; 1668 if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets, 1669 "misconfigured root decoder\n")) 1670 return NULL; 1671 1672 return cxlrd->cxlsd.target[pos % iw]; 1673 } 1674 EXPORT_SYMBOL_NS_GPL(cxl_hb_modulo, CXL); 1675 1676 static struct lock_class_key cxl_decoder_key; 1677 1678 /** 1679 * cxl_decoder_init - Common decoder setup / initialization 1680 * @port: owning port of this decoder 1681 * @cxld: common decoder properties to initialize 1682 * 1683 * A port may contain one or more decoders. Each of those decoders 1684 * enable some address space for CXL.mem utilization. 
static struct lock_class_key cxl_decoder_key;

/**
 * cxl_decoder_init - Common decoder setup / initialization
 * @port: owning port of this decoder
 * @cxld: common decoder properties to initialize
 *
 * A port may contain one or more decoders. Each of those decoders
 * enables some address space for CXL.mem utilization. A decoder is
 * expected to be configured by the caller before registering via
 * cxl_decoder_add().
 */
static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
	struct device *dev;
	int rc;

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		return rc;

	/* need parent to stick around to release the id */
	get_device(&port->dev);
	cxld->id = rc;

	dev = &cxld->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_decoder_key);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;

	/* Pre-initialize an "empty" decoder */
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	return 0;
}

static int cxl_switch_decoder_init(struct cxl_port *port,
				   struct cxl_switch_decoder *cxlsd,
				   int nr_targets)
{
	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
		return -EINVAL;

	cxlsd->nr_targets = nr_targets;
	seqlock_init(&cxlsd->target_lock);
	return cxl_decoder_init(port, &cxlsd->cxld);
}

/**
 * cxl_root_decoder_alloc - Allocate a root level decoder
 * @port: owning CXL root of this decoder
 * @nr_targets: static number of downstream targets
 * @calc_hb: which host bridge covers the n'th position by granularity
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'CXL root' decoder is one that decodes from a top-level / static platform
 * firmware description of CXL resources into a CXL standard decode
 * topology.
 */
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
						unsigned int nr_targets,
						cxl_calc_hb_fn calc_hb)
{
	struct cxl_root_decoder *cxlrd;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_root(port))
		return ERR_PTR(-EINVAL);

	cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
			GFP_KERNEL);
	if (!cxlrd)
		return ERR_PTR(-ENOMEM);

	cxlsd = &cxlrd->cxlsd;
	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlrd);
		return ERR_PTR(rc);
	}

	cxlrd->calc_hb = calc_hb;
	mutex_init(&cxlrd->range_lock);

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_root_type;
	/*
	 * cxl_root_decoder_release() special cases negative ids to
	 * detect memregion_alloc() failures.
	 */
	atomic_set(&cxlrd->region_id, -1);
	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0) {
		put_device(&cxld->dev);
		return ERR_PTR(rc);
	}

	atomic_set(&cxlrd->region_id, rc);
	cxlrd->qos_class = CXL_QOS_CLASS_INVALID;
	return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);

/**
 * cxl_switch_decoder_alloc - Allocate a switch level decoder
 * @port: owning CXL switch port of this decoder
 * @nr_targets: max number of dynamically addressable downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'switch' decoder is any decoder that can be enumerated by PCIe
 * topology and the HDM Decoder Capability.
This includes the decoders 1797 * that sit between Switch Upstream Ports / Switch Downstream Ports and 1798 * Host Bridges / Root Ports. 1799 */ 1800 struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, 1801 unsigned int nr_targets) 1802 { 1803 struct cxl_switch_decoder *cxlsd; 1804 struct cxl_decoder *cxld; 1805 int rc; 1806 1807 if (is_cxl_root(port) || is_cxl_endpoint(port)) 1808 return ERR_PTR(-EINVAL); 1809 1810 cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL); 1811 if (!cxlsd) 1812 return ERR_PTR(-ENOMEM); 1813 1814 rc = cxl_switch_decoder_init(port, cxlsd, nr_targets); 1815 if (rc) { 1816 kfree(cxlsd); 1817 return ERR_PTR(rc); 1818 } 1819 1820 cxld = &cxlsd->cxld; 1821 cxld->dev.type = &cxl_decoder_switch_type; 1822 return cxlsd; 1823 } 1824 EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL); 1825 1826 /** 1827 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder 1828 * @port: owning port of this decoder 1829 * 1830 * Return: A new cxl decoder to be registered by cxl_decoder_add() 1831 */ 1832 struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port) 1833 { 1834 struct cxl_endpoint_decoder *cxled; 1835 struct cxl_decoder *cxld; 1836 int rc; 1837 1838 if (!is_cxl_endpoint(port)) 1839 return ERR_PTR(-EINVAL); 1840 1841 cxled = kzalloc(sizeof(*cxled), GFP_KERNEL); 1842 if (!cxled) 1843 return ERR_PTR(-ENOMEM); 1844 1845 cxled->pos = -1; 1846 cxld = &cxled->cxld; 1847 rc = cxl_decoder_init(port, cxld); 1848 if (rc) { 1849 kfree(cxled); 1850 return ERR_PTR(rc); 1851 } 1852 1853 cxld->dev.type = &cxl_decoder_endpoint_type; 1854 return cxled; 1855 } 1856 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL); 1857 1858 /** 1859 * cxl_decoder_add_locked - Add a decoder with targets 1860 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc() 1861 * @target_map: A list of downstream ports that this decoder can direct memory 1862 * traffic to. These numbers should correspond with the port number 1863 * in the PCIe Link Capabilities structure. 1864 * 1865 * Certain types of decoders may not have any targets. The main example of this 1866 * is an endpoint device. A more awkward example is a hostbridge whose root 1867 * ports get hot added (technically possible, though unlikely). 1868 * 1869 * This is the locked variant of cxl_decoder_add(). 1870 * 1871 * Context: Process context. Expects the device lock of the port that owns the 1872 * @cxld to be held. 1873 * 1874 * Return: Negative error code if the decoder wasn't properly configured; else 1875 * returns 0. 
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	if (cxld->interleave_ways < 1)
		return -EINVAL;

	dev = &cxld->dev;

	port = to_cxl_port(cxld->dev.parent);
	if (!is_endpoint_decoder(dev)) {
		struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

		rc = decoder_populate_targets(cxlsd, port, target_map);
		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
			dev_err(&port->dev,
				"Failed to populate active decoder targets\n");
			return rc;
		}
	}

	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		return rc;

	return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);

/**
 * cxl_decoder_add - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * This is the unlocked variant of cxl_decoder_add_locked().
 * See cxl_decoder_add_locked().
 *
 * Context: Process context. Takes and releases the device lock of the port that
 *	    owns the @cxld.
 */
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	port = to_cxl_port(cxld->dev.parent);

	device_lock(&port->dev);
	rc = cxl_decoder_add_locked(cxld, target_map);
	device_unlock(&port->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);

static void cxld_unregister(void *dev)
{
	struct cxl_endpoint_decoder *cxled;

	if (is_endpoint_decoder(dev)) {
		cxled = to_cxl_endpoint_decoder(dev);
		cxl_decoder_kill_region(cxled);
	}

	device_unregister(dev);
}

int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);

/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname)
{
	if (!cxl_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->name) {
		pr_debug("%s ->name must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->id) {
		pr_debug("%s ->id must be specified\n", modname);
		return -EINVAL;
	}

	cxl_drv->drv.bus = &cxl_bus_type;
	cxl_drv->drv.owner = owner;
	cxl_drv->drv.mod_name = modname;
	cxl_drv->drv.name = cxl_drv->name;

	return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
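/*
 * Illustrative sketch (not part of the original file): a minimal CXL-aware
 * driver registering with the core via the interfaces above. The names
 * my_port_probe and my_cxl_driver are hypothetical; struct cxl_driver and
 * module_cxl_driver() are declared in cxl.h.
 *
 *	static int my_port_probe(struct device *dev)
 *	{
 *		struct cxl_port *port = to_cxl_port(dev);
 *
 *		dev_dbg(dev, "bound to uport %s\n", dev_name(port->uport_dev));
 *		return 0;
 *	}
 *
 *	static struct cxl_driver my_cxl_driver = {
 *		.name = "my_cxl_port_driver",
 *		.probe = my_port_probe,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *	module_cxl_driver(my_cxl_driver);
 *	MODULE_IMPORT_NS(CXL);
 */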
static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
	int rc;

	rc = to_cxl_drv(dev->driver)->probe(dev);
	dev_dbg(dev, "probe: %d\n", rc);
	return rc;
}

static void cxl_bus_remove(struct device *dev)
{
	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

	if (cxl_drv->remove)
		cxl_drv->remove(dev);
}

static struct workqueue_struct *cxl_bus_wq;

static void cxl_bus_rescan_queue(struct work_struct *w)
{
	int rc = bus_rescan_devices(&cxl_bus_type);

	pr_debug("CXL bus rescan result: %d\n", rc);
}

void cxl_bus_rescan(void)
{
	static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue);

	queue_work(cxl_bus_wq, &rescan_work);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);

void cxl_bus_drain(void)
{
	drain_workqueue(cxl_bus_wq);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL);

bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);

/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{
	if (sysfs_streq(buf, "1")) {
		flush_workqueue(cxl_bus_wq);
		return count;
	}

	return -EINVAL;
}

static BUS_ATTR_WO(flush);

static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};

static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};

struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);

static struct dentry *cxl_debugfs;

struct dentry *cxl_debugfs_create_dir(const char *dir)
{
	return debugfs_create_dir(dir, cxl_debugfs);
}
EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);

static __init int cxl_core_init(void)
{
	int rc;

	cxl_debugfs = debugfs_create_dir("cxl", NULL);

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
	if (!cxl_bus_wq) {
		rc = -ENOMEM;
		goto err_wq;
	}

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err_bus;

	rc = cxl_region_init();
	if (rc)
		goto err_region;

	return 0;

err_region:
	bus_unregister(&cxl_bus_type);
err_bus:
	destroy_workqueue(cxl_bus_wq);
err_wq:
	cxl_memdev_exit();
	return rc;
}

static void cxl_core_exit(void)
{
	cxl_region_exit();
bus_unregister(&cxl_bus_type); 2148 destroy_workqueue(cxl_bus_wq); 2149 cxl_memdev_exit(); 2150 debugfs_remove_recursive(cxl_debugfs); 2151 } 2152 2153 subsys_initcall(cxl_core_init); 2154 module_exit(cxl_core_exit); 2155 MODULE_LICENSE("GPL v2"); 2156 MODULE_IMPORT_NS(CXL); 2157