// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL-aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL-aware drivers must register
 * with the CXL core via these interfaces in order to be able to participate in
 * cross-device interleave coordination. The CXL core also establishes and
 * maintains the bridge to the nvdimm subsystem.
 *
 * The CXL core introduces a sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */

static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static int cxl_device_id(struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (dev->type == CXL_PMEM_REGION_TYPE())
		return CXL_DEVICE_PMEM_REGION;
	if (is_cxl_port(dev)) {
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	if (dev->type == CXL_REGION_TYPE())
		return CXL_DEVICE_REGION;
	return 0;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
	return sysfs_emit(buf, "%s\n",                               \
			  (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);

static ssize_t target_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	switch (cxld->target_type) {
	case CXL_DECODER_ACCELERATOR:
		return sysfs_emit(buf, "accelerator\n");
	case CXL_DECODER_EXPANDER:
		return sysfs_emit(buf, "expander\n");
	}
	return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);

static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
	struct cxl_decoder *cxld = &cxlsd->cxld;
	ssize_t offset = 0;
	int i, rc = 0;

	for (i = 0; i < cxld->interleave_ways; i++) {
		struct cxl_dport *dport = cxlsd->target[i];
		struct cxl_dport *next = NULL;

		if (!dport)
			break;

		if (i + 1 < cxld->interleave_ways)
			next = cxlsd->target[i + 1];
		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
				   next ? "," : "");
		if (rc < 0)
			return rc;
		offset += rc;
	}

	return offset;
}

static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
	ssize_t offset;
	unsigned int seq;
	int rc;

	do {
		seq = read_seqbegin(&cxlsd->target_lock);
		rc = emit_target_list(cxlsd, buf);
	} while (read_seqretry(&cxlsd->target_lock, seq));

	if (rc < 0)
		return rc;
	offset = rc;

	rc = sysfs_emit_at(buf, offset, "\n");
	if (rc < 0)
		return rc;

	return offset + rc;
}
static DEVICE_ATTR_RO(target_list);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	switch (cxled->mode) {
	case CXL_DECODER_RAM:
		return sysfs_emit(buf, "ram\n");
	case CXL_DECODER_PMEM:
		return sysfs_emit(buf, "pmem\n");
	case CXL_DECODER_NONE:
		return sysfs_emit(buf, "none\n");
	case CXL_DECODER_MIXED:
	default:
		return sysfs_emit(buf, "mixed\n");
	}
}

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	enum cxl_decoder_mode mode;
	ssize_t rc;

	if (sysfs_streq(buf, "pmem"))
		mode = CXL_DECODER_PMEM;
	else if (sysfs_streq(buf, "ram"))
		mode = CXL_DECODER_RAM;
	else
		return -EINVAL;

	rc = cxl_dpa_set_mode(cxled, mode);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(mode);

static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	u64 base = cxl_dpa_resource_start(cxled);

	return sysfs_emit(buf, "%#llx\n", base);
}
static DEVICE_ATTR_RO(dpa_resource);

static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	resource_size_t size = cxl_dpa_size(cxled);

	return sysfs_emit(buf, "%pa\n", &size);
}

static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	unsigned long long size;
	ssize_t rc;

	rc = kstrtoull(buf, 0, &size);
	if (rc)
		return rc;

	if (!IS_ALIGNED(size, SZ_256M))
		return -EINVAL;

	rc = cxl_dpa_free(cxled);
	if (rc)
		return rc;

	if (size == 0)
		return len;

	rc = cxl_dpa_alloc(cxled, size);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(dpa_size);

static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
}

static DEVICE_ATTR_RO(interleave_granularity);

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
}

static DEVICE_ATTR_RO(interleave_ways);

static struct attribute *cxl_decoder_base_attrs[] = {
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_locked.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_interleave_ways.attr,
	NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
	.attrs = cxl_decoder_base_attrs,
};

static struct attribute *cxl_decoder_root_attrs[] = {
	&dev_attr_cap_pmem.attr,
	&dev_attr_cap_ram.attr,
	&dev_attr_cap_type2.attr,
	&dev_attr_cap_type3.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(create_pmem_region)
	SET_CXL_REGION_ATTR(delete_region)
	NULL,
};

static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
{
	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;

	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}

static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
		return 0;

	if (a == CXL_REGION_ATTR(delete_region) && !can_create_pmem(cxlrd))
		return 0;

	return a->mode;
}

static struct attribute_group cxl_decoder_root_attribute_group = {
	.attrs = cxl_decoder_root_attrs,
	.is_visible = cxl_root_decoder_visible,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
	&cxl_decoder_root_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_switch_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
	.attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
	&cxl_decoder_switch_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_endpoint_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_mode.attr,
	&dev_attr_dpa_size.attr,
	&dev_attr_dpa_resource.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_endpoint_attribute_group = {
	.attrs = cxl_decoder_endpoint_attrs,
};

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
	&cxl_decoder_base_attribute_group,
	&cxl_decoder_endpoint_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	ida_free(&port->decoder_ida, cxld->id);
	put_device(&port->dev);
}

static void cxl_endpoint_decoder_release(struct device *dev)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	__cxl_decoder_release(&cxled->cxld);
	kfree(cxled);
}

static void cxl_switch_decoder_release(struct device *dev)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

	__cxl_decoder_release(&cxlsd->cxld);
	kfree(cxlsd);
}

struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
			  "not a cxl_root_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);

static void cxl_root_decoder_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (atomic_read(&cxlrd->region_id) >= 0)
		memregion_free(atomic_read(&cxlrd->region_id));
	__cxl_decoder_release(&cxlrd->cxlsd.cxld);
	kfree(cxlrd);
}

static const struct device_type cxl_decoder_endpoint_type = {
	.name = "cxl_decoder_endpoint",
	.release = cxl_endpoint_decoder_release,
	.groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_switch_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_root_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};

bool is_endpoint_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_endpoint_type;
}

bool is_root_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);

bool is_switch_decoder(struct device *dev)
{
	return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev,
			  !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
			  "not a cxl_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);

struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
			  "not a cxl_endpoint_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);

struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
			  "not a cxl_switch_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}

static void cxl_ep_release(struct cxl_ep *ep)
{
	put_device(ep->ep);
	kfree(ep);
}

static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
{
	if (!ep)
		return;
	xa_erase(&port->endpoints, (unsigned long) ep->ep);
	cxl_ep_release(ep);
}

static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	unsigned long index;
	struct cxl_ep *ep;

	xa_for_each(&port->endpoints, index, ep)
		cxl_ep_remove(port, ep);
	xa_destroy(&port->endpoints);
	xa_destroy(&port->dports);
	xa_destroy(&port->regions);
	ida_free(&cxl_port_ida, port->id);
	kfree(port);
}

static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};

bool is_cxl_port(struct device *dev)
{
	return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);

struct cxl_port *to_cxl_port(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);

static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_port *parent;
	struct device *lock_dev;

	if (is_cxl_root(port))
		parent = NULL;
	else
		parent = to_cxl_port(port->dev.parent);

	/*
	 * CXL root ports and the first level of ports are unregistered
	 * under the platform firmware device lock; all other ports are
	 * unregistered while holding their parent port lock.
	 */
	if (!parent)
		lock_dev = port->uport;
	else if (is_cxl_root(parent))
		lock_dev = parent->uport;
	else
		lock_dev = &parent->dev;

	device_lock_assert(lock_dev);
	port->dead = true;
	device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
	int rc;

	rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}

static struct lock_class_key cxl_port_key;

static struct cxl_port *cxl_port_alloc(struct device *uport,
				       resource_size_t component_reg_phys,
				       struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	port->id = rc;
	port->uport = uport;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_dport) {
		struct cxl_port *parent_port = parent_dport->port;
		struct cxl_port *iter;

		dev->parent = &parent_port->dev;
		port->depth = parent_port->depth + 1;
		port->parent_dport = parent_dport;

		/*
		 * walk to the host bridge, or the first ancestor that knows
		 * the host bridge
		 */
		iter = port;
		while (!iter->host_bridge &&
		       !is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);
		if (iter->host_bridge)
			port->host_bridge = iter->host_bridge;
		else
			port->host_bridge = iter->uport;
		dev_dbg(uport, "host-bridge: %s\n", dev_name(port->host_bridge));
	} else
		dev->parent = uport;

	port->component_reg_phys = component_reg_phys;
	ida_init(&port->decoder_ida);
	port->hdm_end = -1;
	port->commit_end = -1;
	xa_init(&port->dports);
	xa_init(&port->endpoints);
	xa_init(&port->regions);

	device_initialize(dev);
	lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;

err:
	kfree(port);
	return ERR_PTR(rc);
}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_dport: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
				   resource_size_t component_reg_phys,
				   struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = cxl_port_alloc(uport, component_reg_phys, parent_dport);
	if (IS_ERR(port))
		return port;

	dev = &port->dev;
	if (is_cxl_memdev(uport))
		rc = dev_set_name(dev, "endpoint%d", port->id);
	else if (parent_dport)
		rc = dev_set_name(dev, "port%d", port->id);
	else
		rc = dev_set_name(dev, "root%d", port->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	return port;

err:
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);

struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
	/* There is no pci_bus associated with a CXL platform-root port */
	if (is_cxl_root(port))
		return NULL;

	if (dev_is_pci(port->uport)) {
		struct pci_dev *pdev = to_pci_dev(port->uport);

		return pdev->subordinate;
	}

	return xa_load(&cxl_root_buses, (unsigned long)port->uport);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);

static void unregister_pci_bus(void *uport)
{
	xa_erase(&cxl_root_buses, (unsigned long)uport);
}

int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
			      struct pci_bus *bus)
{
	int rc;

	if (dev_is_pci(uport))
		return -EINVAL;

	rc = xa_insert(&cxl_root_buses, (unsigned long)uport, bus, GFP_KERNEL);
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, unregister_pci_bus, uport);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);

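/*
 * Example (illustrative sketch only, not taken verbatim from an in-tree
 * driver): a platform host driver typically uses the helpers above by
 * creating the CXL root port against its own device and then registering
 * the PCI root bus of each host bridge it enumerates. The "host",
 * "bridge_dev", and "pci_root" names below are placeholders for
 * driver-specific objects:
 *
 *	struct cxl_port *root_port;
 *	int rc;
 *
 *	root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
 *	if (IS_ERR(root_port))
 *		return PTR_ERR(root_port);
 *
 *	rc = devm_cxl_register_pci_bus(host, bridge_dev, pci_root->bus);
 *	if (rc)
 *		return rc;
 */
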
static bool dev_is_cxl_root_child(struct device *dev)
{
	struct cxl_port *port, *parent;

	if (!is_cxl_port(dev))
		return false;

	port = to_cxl_port(dev);
	if (is_cxl_root(port))
		return false;

	parent = to_cxl_port(port->dev.parent);
	if (is_cxl_root(parent))
		return true;

	return false;
}

/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */
static int match_root_child(struct device *dev, const void *match)
{
	const struct device *iter = NULL;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!dev_is_cxl_root_child(dev))
		return 0;

	port = to_cxl_port(dev);
	iter = match;
	while (iter) {
		dport = cxl_find_dport_by_dev(port, iter);
		if (dport)
			break;
		iter = iter->parent;
	}

	return !!iter;
}

struct cxl_port *find_cxl_root(struct device *dev)
{
	struct device *port_dev;
	struct cxl_port *root;

	port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
	if (!port_dev)
		return NULL;

	root = to_cxl_port(port_dev->parent);
	get_device(&root->dev);
	put_device(port_dev);
	return root;
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);

static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);
	xa_for_each(&port->dports, index, dport)
		if (dport->port_id == id)
			return dport;
	return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
	struct cxl_dport *dup;

	device_lock_assert(&port->dev);
	dup = find_dport(port, new->port_id);
	if (dup) {
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			new->port_id, dev_name(new->dport),
			dev_name(dup->dport));
		return -EBUSY;
	}
	return xa_insert(&port->dports, (unsigned long)new->dport, new,
			 GFP_KERNEL);
}

/*
 * Since root-level CXL dports cannot be enumerated by PCI, they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_lock(&port->dev);
}

static void cond_cxl_root_unlock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_unlock(&port->dev);
}

static void cxl_dport_remove(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;

	xa_erase(&port->dports, (unsigned long) dport->dport);
	put_device(dport->dport);
}

static void cxl_dport_unlink(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;
	char link_name[CXL_TARGET_STRLEN];

	sprintf(link_name, "dport%d", dport->port_id);
	sysfs_remove_link(&port->dev.kobj, link_name);
}

/**
 * devm_cxl_add_dport - append downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release actions of either
 * the port's host (for root ports), or the port itself (for switch ports).
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	if (is_cxl_root(port))
		host = port->uport;
	else
		host = &port->dev;

	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	dport->dport = dport_dev;
	dport->port_id = port_id;
	dport->component_reg_phys = component_reg_phys;
	dport->port = port;

	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);

static int add_ep(struct cxl_ep *new)
{
	struct cxl_port *port = new->dport->port;
	int rc;

	device_lock(&port->dev);
	if (port->dead) {
		device_unlock(&port->dev);
		return -ENXIO;
	}
	rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
		       GFP_KERNEL);
	device_unlock(&port->dev);

	return rc;
}

/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @dport: the dport that routes to @ep_dev
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart, the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
	struct cxl_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->ep = get_device(ep_dev);
	ep->dport = dport;

	rc = add_ep(ep);
	if (rc)
		cxl_ep_release(ep);
	return rc;
}

struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
	struct cxl_dport **dport;
};

static int match_port_by_dport(struct device *dev, const void *data)
{
	const struct cxl_find_port_ctx *ctx = data;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;
	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
		return 0;

	port = to_cxl_port(dev);
	dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
	if (ctx->dport)
		*ctx->dport = dport;
	return dport != NULL;
}

static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
	struct device *dev;

	if (!ctx->dport_dev)
		return NULL;

	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
	if (dev)
		return to_cxl_port(dev);
	return NULL;
}

static struct cxl_port *find_cxl_port(struct device *dport_dev,
				      struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev,
					 struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.parent_port = parent_port,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

/*
 * All users of grandparent() are using it to walk a PCIe-like switch port
 * hierarchy. A PCIe switch consists of a bridge device representing the
 * upstream switch port and N bridges representing downstream switch ports.
 * When bridges stack, the grandparent of a downstream switch port is another
 * downstream switch port in the immediate ancestor switch.
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}

static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = dev_get_drvdata(&cxlmd->dev);
	struct cxl_port *parent_port;
	struct device *parent;

	parent_port = cxl_mem_find_port(cxlmd, NULL);
	if (!parent_port)
		goto out;
	parent = &parent_port->dev;

	device_lock(parent);
	if (parent->driver && !endpoint->dead) {
		devm_release_action(parent, cxl_unlink_uport, endpoint);
		devm_release_action(parent, unregister_port, endpoint);
	}
	device_unlock(parent);
	put_device(parent);
out:
	put_device(&endpoint->dev);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *dev = &cxlmd->dev;

	get_device(&endpoint->dev);
	dev_set_drvdata(dev, endpoint);
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. It depends on devm_cxl_add_port() not changing its
 * devm action registration order, and on the dports having already been
 * destroyed by reap_dports().
 */
static void delete_switch_port(struct cxl_port *port)
{
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}

static void reap_dports(struct cxl_port *port)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport) {
		devm_release_action(&port->dev, cxl_dport_unlink, dport);
		devm_release_action(&port->dev, cxl_dport_remove, dport);
		devm_kfree(&port->dev, dport);
	}
}

int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
			  struct cxl_dport *parent_dport)
{
	struct cxl_port *parent_port = parent_dport->port;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_port *endpoint, *iter, *down;
	int rc;

	/*
	 * Now that the path to the root is established record all the
	 * intervening ports in the chain.
	 */
	for (iter = parent_port, down = NULL; !is_cxl_root(iter);
	     down = iter, iter = to_cxl_port(iter->dev.parent)) {
		struct cxl_ep *ep;

		ep = cxl_ep_load(iter, cxlmd);
		ep->next = down;
	}

	endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
				     cxlds->component_reg_phys, parent_dport);
	if (IS_ERR(endpoint))
		return PTR_ERR(endpoint);

	dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));

	rc = cxl_endpoint_autoremove(cxlmd, endpoint);
	if (rc)
		return rc;

	if (!endpoint->dev.driver) {
		dev_err(&cxlmd->dev, "%s failed probe\n",
			dev_name(&endpoint->dev));
		return -ENXIO;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_endpoint, CXL);

static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct device *iter;

	for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct cxl_port *port, *parent_port;
		struct cxl_ep *ep;
		bool died = false;

		if (!dport_dev)
			break;

		port = find_cxl_port(dport_dev, NULL);
		if (!port)
			continue;

		if (is_cxl_root(port)) {
			put_device(&port->dev);
			continue;
		}

		parent_port = to_cxl_port(port->dev.parent);
		device_lock(&parent_port->dev);
		if (!parent_port->dev.driver) {
			/*
			 * The bottom-up race to delete the port lost to a
			 * top-down port disable, give up here, because the
			 * parent_port ->remove() will have cleaned up all
			 * descendants.
			 */
			device_unlock(&parent_port->dev);
			put_device(&port->dev);
			continue;
		}

		device_lock(&port->dev);
		ep = cxl_ep_load(port, cxlmd);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_remove(port, ep);
		if (ep && !port->dead && xa_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port)) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
			 */
			died = true;
			port->dead = true;
			reap_dports(port);
		}
		device_unlock(&port->dev);

		if (died) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port);
		}
		put_device(&port->dev);
		device_unlock(&parent_port->dev);
	}
}

static resource_size_t find_component_registers(struct device *dev)
{
	struct cxl_register_map map;
	struct pci_dev *pdev;

	/*
	 * Theoretically, CXL component registers can be hosted on a
	 * non-PCI device; in practice, only cxl_test hits this case.
	 */
	if (!dev_is_pci(dev))
		return CXL_RESOURCE_NONE;

	pdev = to_pci_dev(dev);

	cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	return cxl_regmap_to_base(pdev, &map);
}

static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_port *port, *parent_port = NULL;
	struct cxl_dport *dport, *parent_dport;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration; fail for now to
		 * be re-probed after the platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	parent_port = find_cxl_port(dparent, &parent_dport);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	device_lock(&parent_port->dev);
	if (!parent_port->dev.driver) {
		dev_warn(&cxlmd->dev,
			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
			 dev_name(&parent_port->dev), dev_name(uport_dev));
		port = ERR_PTR(-ENXIO);
		goto out;
	}

	port = find_cxl_port_at(parent_port, dport_dev, &dport);
	if (!port) {
		component_reg_phys = find_component_registers(uport_dev);
		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
					 component_reg_phys, parent_dport);
		/* retry find to pick up the new dport information */
		if (!IS_ERR(port))
			port = find_cxl_port_at(parent_port, dport_dev, &dport);
	}
out:
	device_unlock(&parent_port->dev);

	if (IS_ERR(port))
		rc = PTR_ERR(port);
	else {
		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
			dev_name(&port->dev), dev_name(port->uport));
		rc = cxl_add_ep(dport, &cxlmd->dev);
		if (rc == -EBUSY) {
			/*
			 * "can't" happen, but this error code means
			 * something to the caller, so translate it.
			 */
			rc = -ENXIO;
		}
		put_device(&port->dev);
	}

	put_device(&parent_port->dev);
	return rc;
}

int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_dport *dport;
		struct cxl_port *port;

		if (!dport_dev)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));
		port = find_cxl_port(dport_dev, &dport);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev), dev_name(port->uport));
			rc = cxl_add_ep(dport, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after taking
			 * the parent_port lock as the current port may be being
			 * reaped.
			 */
			if (rc && rc != -EBUSY) {
				put_device(&port->dev);
				return rc;
			}

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev)) {
				put_device(&port->dev);
				continue;
			}

			put_device(&port->dev);
			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);

struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
				   struct cxl_dport **dport)
{
	return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);

static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
				    struct cxl_port *port, int *target_map)
{
	int i, rc = 0;

	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	if (xa_empty(&port->dports))
		return -EINVAL;

	write_seqlock(&cxlsd->target_lock);
	for (i = 0; i < cxlsd->nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport) {
			rc = -ENXIO;
			break;
		}
		cxlsd->target[i] = dport;
	}
	write_sequnlock(&cxlsd->target_lock);

	return rc;
}

static struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
{
	struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
	struct cxl_decoder *cxld = &cxlsd->cxld;
	int iw;

	iw = cxld->interleave_ways;
	if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets,
			  "misconfigured root decoder\n"))
		return NULL;

	return cxlrd->cxlsd.target[pos % iw];
}

static struct lock_class_key cxl_decoder_key;

/**
 * cxl_decoder_init - Common decoder setup / initialization
 * @port: owning port of this decoder
 * @cxld: common decoder properties to initialize
 *
 * A port may contain one or more decoders. Each of those decoders
 * enables some address space for CXL.mem utilization. A decoder is
 * expected to be configured by the caller before registering it via
 * cxl_decoder_add().
 */
static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
	struct device *dev;
	int rc;

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		return rc;

	/* need parent to stick around to release the id */
	get_device(&port->dev);
	cxld->id = rc;

	dev = &cxld->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_decoder_key);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;

	/* Pre initialize an "empty" decoder */
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	return 0;
}

static int cxl_switch_decoder_init(struct cxl_port *port,
				   struct cxl_switch_decoder *cxlsd,
				   int nr_targets)
{
	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
		return -EINVAL;

	cxlsd->nr_targets = nr_targets;
	seqlock_init(&cxlsd->target_lock);
	return cxl_decoder_init(port, &cxlsd->cxld);
}

/**
 * cxl_root_decoder_alloc - Allocate a root level decoder
 * @port: owning CXL root of this decoder
 * @nr_targets: static number of downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'CXL root' decoder is one that decodes from a top-level / static platform
 * firmware description of CXL resources into a CXL standard decode
 * topology.
 */
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
						unsigned int nr_targets)
{
	struct cxl_root_decoder *cxlrd;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_root(port))
		return ERR_PTR(-EINVAL);

	cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
			GFP_KERNEL);
	if (!cxlrd)
		return ERR_PTR(-ENOMEM);

	cxlsd = &cxlrd->cxlsd;
	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlrd);
		return ERR_PTR(rc);
	}

	cxlrd->calc_hb = cxl_hb_modulo;

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_root_type;
	/*
	 * cxl_root_decoder_release() special cases negative ids to
	 * detect memregion_alloc() failures.
	 */
	atomic_set(&cxlrd->region_id, -1);
	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0) {
		put_device(&cxld->dev);
		return ERR_PTR(rc);
	}

	atomic_set(&cxlrd->region_id, rc);
	return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);

/**
 * cxl_switch_decoder_alloc - Allocate a switch level decoder
 * @port: owning CXL switch port of this decoder
 * @nr_targets: max number of dynamically addressable downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'switch' decoder is any decoder that can be enumerated by PCIe
 * topology and the HDM Decoder Capability. This includes the decoders
 * that sit between Switch Upstream Ports / Switch Downstream Ports and
 * Host Bridges / Root Ports.
 */
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
						    unsigned int nr_targets)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
	if (!cxlsd)
		return ERR_PTR(-ENOMEM);

	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlsd);
		return ERR_PTR(rc);
	}

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_switch_type;
	return cxlsd;
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);

/**
 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
 * @port: owning port of this decoder
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add()
 */
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
	struct cxl_endpoint_decoder *cxled;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
	if (!cxled)
		return ERR_PTR(-ENOMEM);

	cxled->pos = -1;
	cxld = &cxled->cxld;
	rc = cxl_decoder_init(port, cxld);
	if (rc) {
		kfree(cxled);
		return ERR_PTR(rc);
	}

	cxld->dev.type = &cxl_decoder_endpoint_type;
	return cxled;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);

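/*
 * Example (illustrative sketch, not lifted from an in-tree caller): a port
 * driver enumerating an endpoint HDM decoder allocates it, fills in the
 * decode properties read back from hardware, then registers it. "port",
 * "host", and the configuration step are placeholders for caller-specific
 * details:
 *
 *	struct cxl_endpoint_decoder *cxled;
 *	struct cxl_decoder *cxld;
 *	int rc;
 *
 *	cxled = cxl_endpoint_decoder_alloc(port);
 *	if (IS_ERR(cxled))
 *		return PTR_ERR(cxled);
 *	cxld = &cxled->cxld;
 *
 *	... populate cxld->hpa_range, interleave settings, flags ...
 *
 *	rc = cxl_decoder_add(cxld, NULL);
 *	if (rc)
 *		put_device(&cxld->dev);
 *	else
 *		rc = cxl_decoder_autoremove(host, cxld);
 *
 * Endpoint decoders carry no target list, hence the NULL @target_map.
 */
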
/**
 * cxl_decoder_add_locked - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * Certain types of decoders may not have any targets. The main example of this
 * is an endpoint device. A more awkward example is a hostbridge whose root
 * ports get hot added (technically possible, though unlikely).
 *
 * This is the locked variant of cxl_decoder_add().
 *
 * Context: Process context. Expects the device lock of the port that owns the
 *	    @cxld to be held.
 *
 * Return: Negative error code if the decoder wasn't properly configured; else
 *	   returns 0.
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	if (cxld->interleave_ways < 1)
		return -EINVAL;

	dev = &cxld->dev;

	port = to_cxl_port(cxld->dev.parent);
	if (!is_endpoint_decoder(dev)) {
		struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

		rc = decoder_populate_targets(cxlsd, port, target_map);
		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
			dev_err(&port->dev,
				"Failed to populate active decoder targets\n");
			return rc;
		}
	}

	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		return rc;

	return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);

/**
 * cxl_decoder_add - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * This is the unlocked variant of cxl_decoder_add_locked().
 * See cxl_decoder_add_locked().
 *
 * Context: Process context. Takes and releases the device lock of the port that
 *	    owns the @cxld.
 */
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	port = to_cxl_port(cxld->dev.parent);

	device_lock(&port->dev);
	rc = cxl_decoder_add_locked(cxld, target_map);
	device_unlock(&port->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);

static void cxld_unregister(void *dev)
{
	struct cxl_endpoint_decoder *cxled;

	if (is_endpoint_decoder(dev)) {
		cxled = to_cxl_endpoint_decoder(dev);
		cxl_decoder_kill_region(cxled);
	}

	device_unregister(dev);
}

int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);

/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname)
{
	if (!cxl_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->name) {
		pr_debug("%s ->name must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->id) {
		pr_debug("%s ->id must be specified\n", modname);
		return -EINVAL;
	}

	cxl_drv->drv.bus = &cxl_bus_type;
	cxl_drv->drv.owner = owner;
	cxl_drv->drv.mod_name = modname;
	cxl_drv->drv.name = cxl_drv->name;

	return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);

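/*
 * Example (illustrative sketch): a minimal consumer of the registration
 * helpers above. The "example" names are placeholders:
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct cxl_driver example_driver = {
 *		.name = "example",
 *		.probe = example_probe,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *
 * Registration is then cxl_driver_register(&example_driver) at module init
 * and cxl_driver_unregister(&example_driver) at module exit, where the
 * cxl_driver_register() convenience macro wraps __cxl_driver_register()
 * with THIS_MODULE and KBUILD_MODNAME.
 */
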
static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
	int rc;

	rc = to_cxl_drv(dev->driver)->probe(dev);
	dev_dbg(dev, "probe: %d\n", rc);
	return rc;
}

static void cxl_bus_remove(struct device *dev)
{
	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

	if (cxl_drv->remove)
		cxl_drv->remove(dev);
}

static struct workqueue_struct *cxl_bus_wq;

int cxl_bus_rescan(void)
{
	return bus_rescan_devices(&cxl_bus_type);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);

bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);

/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (sysfs_streq(buf, "1")) {
		flush_workqueue(cxl_bus_wq);
		return count;
	}

	return -EINVAL;
}

static BUS_ATTR_WO(flush);

static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};

static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};

struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);

static struct dentry *cxl_debugfs;

struct dentry *cxl_debugfs_create_dir(const char *dir)
{
	return debugfs_create_dir(dir, cxl_debugfs);
}
EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);

static __init int cxl_core_init(void)
{
	int rc;

	cxl_debugfs = debugfs_create_dir("cxl", NULL);

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
	if (!cxl_bus_wq) {
		rc = -ENOMEM;
		goto err_wq;
	}

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err_bus;

	rc = cxl_region_init();
	if (rc)
		goto err_region;

	return 0;

err_region:
	bus_unregister(&cxl_bus_type);
err_bus:
	destroy_workqueue(cxl_bus_wq);
err_wq:
	cxl_memdev_exit();
	return rc;
}

static void cxl_core_exit(void)
{
	cxl_region_exit();
	bus_unregister(&cxl_bus_type);
	destroy_workqueue(cxl_bus_wq);
	cxl_memdev_exit();
	debugfs_remove_recursive(cxl_debugfs);
}

module_init(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");