// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/node.h>
#include <asm/div64.h>
#include "cxlpci.h"
#include "cxl.h"

struct cxl_cxims_data {
        int nr_maps;
        u64 xormaps[] __counted_by(nr_maps);
};

static const guid_t acpi_cxl_qtg_id_guid =
        GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
                  0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);

static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
{
        struct cxl_cxims_data *cximsd = cxlrd->platform_data;
        int hbiw = cxlrd->cxlsd.nr_targets;
        u64 val;
        int pos;

        /* No xormaps for host bridge interleave ways of 1 or 3 */
        if (hbiw == 1 || hbiw == 3)
                return hpa;

        /*
         * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) restore
         * the position bit to its value before the xormap was applied at
         * HPA->DPA translation.
         *
         * pos is the lowest set bit in an XORMAP
         * val is the XORALLBITS(HPA & XORMAP)
         *
         * XORALLBITS: The CXL spec (3.1 Table 9-22) defines XORALLBITS
         * as an operation that outputs a single bit by XORing all the
         * bits in the input (hpa & xormap). Implement XORALLBITS using
         * hweight64(). If the hamming weight is even the XOR of those
         * bits results in val==0, if odd the XOR result is val==1.
         */

        for (int i = 0; i < cximsd->nr_maps; i++) {
                if (!cximsd->xormaps[i])
                        continue;
                pos = __ffs(cximsd->xormaps[i]);
                val = (hweight64(hpa & cximsd->xormaps[i]) & 1);
                hpa = (hpa & ~(1ULL << pos)) | (val << pos);
        }

        return hpa;
}

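/*
 * Worked example of the restore above (hypothetical values, not from any
 * platform): with a single xormap of 0x1100 (bits 8 and 12 set), pos = 8.
 * For hpa = 0x1234, hpa & xormap = 0x1000, hweight64() = 1, so val = 1 and
 * bit 8 is restored: spa = (0x1234 & ~0x100) | (1 << 8) = 0x1334.
 */
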
struct cxl_cxims_context {
        struct device *dev;
        struct cxl_root_decoder *cxlrd;
};

static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg,
                           const unsigned long end)
{
        struct acpi_cedt_cxims *cxims = (struct acpi_cedt_cxims *)header;
        struct cxl_cxims_context *ctx = arg;
        struct cxl_root_decoder *cxlrd = ctx->cxlrd;
        struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
        struct device *dev = ctx->dev;
        struct cxl_cxims_data *cximsd;
        unsigned int hbig, nr_maps;
        int rc;

        rc = eig_to_granularity(cxims->hbig, &hbig);
        if (rc)
                return rc;

        /* Does this CXIMS entry apply to the given CXL Window? */
        if (hbig != cxld->interleave_granularity)
                return 0;

        /* IW 1,3 do not use xormaps and skip this parsing entirely */
        if (is_power_of_2(cxld->interleave_ways))
                /* 2, 4, 8, 16 way */
                nr_maps = ilog2(cxld->interleave_ways);
        else
                /* 6, 12 way */
                nr_maps = ilog2(cxld->interleave_ways / 3);

        if (cxims->nr_xormaps < nr_maps) {
                dev_dbg(dev, "CXIMS nr_xormaps[%d] expected[%d]\n",
                        cxims->nr_xormaps, nr_maps);
                return -ENXIO;
        }

        cximsd = devm_kzalloc(dev, struct_size(cximsd, xormaps, nr_maps),
                              GFP_KERNEL);
        if (!cximsd)
                return -ENOMEM;
        cximsd->nr_maps = nr_maps;
        memcpy(cximsd->xormaps, cxims->xormap_list,
               nr_maps * sizeof(*cximsd->xormaps));
        cxlrd->platform_data = cximsd;

        return 0;
}

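/*
 * Worked example of the nr_maps calculation above (hypothetical windows):
 * an 8-way window needs ilog2(8) = 3 xormaps, while a 12-way window
 * (a power-of-2 XOR factor combined with a modulo-3 factor) needs
 * ilog2(12 / 3) = 2.
 */
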
static unsigned long cfmws_to_decoder_flags(int restrictions)
{
        unsigned long flags = CXL_DECODER_F_ENABLE;

        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
                flags |= CXL_DECODER_F_TYPE2;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
                flags |= CXL_DECODER_F_TYPE3;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
                flags |= CXL_DECODER_F_RAM;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
                flags |= CXL_DECODER_F_PMEM;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
                flags |= CXL_DECODER_F_LOCK;

        return flags;
}

static int cxl_acpi_cfmws_verify(struct device *dev,
                                 struct acpi_cedt_cfmws *cfmws)
{
        int rc, expected_len;
        unsigned int ways;

        if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO &&
            cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
                dev_err(dev, "CFMWS Unknown Interleave Arithmetic: %d\n",
                        cfmws->interleave_arithmetic);
                return -EINVAL;
        }

        if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
                dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
                return -EINVAL;
        }

        if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
                dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
                return -EINVAL;
        }

        rc = eiw_to_ways(cfmws->interleave_ways, &ways);
        if (rc) {
                dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
                        cfmws->interleave_ways);
                return -EINVAL;
        }

        expected_len = struct_size(cfmws, interleave_targets, ways);

        if (cfmws->header.length < expected_len) {
                dev_err(dev, "CFMWS length %d less than expected %d\n",
                        cfmws->header.length, expected_len);
                return -EINVAL;
        }

        if (cfmws->header.length > expected_len)
                dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
                        cfmws->header.length, expected_len);

        return 0;
}

/*
 * Note, @dev must be the first member, see 'struct cxl_chbs_context'
 * and mock_acpi_table_parse_cedt()
 */
struct cxl_cfmws_context {
        struct device *dev;
        struct cxl_port *root_port;
        struct resource *cxl_res;
        int id;
};

/**
 * cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
 * @handle: ACPI handle
 * @coord: performance access coordinates
 * @entries: number of QTG IDs to return
 * @qos_class: int array provided by caller to return QTG IDs
 *
 * Return: number of QTG IDs returned, or -errno for errors
 *
 * Issue QTG _DSM with accompanied bandwidth and latency data in order to get
 * the QTG IDs that are suitable for the performance point in order of most
 * suitable to least suitable. Write back array of QTG IDs and return the
 * actual number of QTG IDs written back.
 */
static int
cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
                          int entries, int *qos_class)
{
        union acpi_object *out_obj, *out_buf, *obj;
        union acpi_object in_array[4] = {
                [0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
                [1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
                [2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
                [3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
        };
        union acpi_object in_obj = {
                .package = {
                        .type = ACPI_TYPE_PACKAGE,
                        .count = 4,
                        .elements = in_array,
                },
        };
        int count, pkg_entries, i;
        u16 max_qtg;
        int rc;

        if (!entries)
                return -EINVAL;

        out_obj = acpi_evaluate_dsm(handle, &acpi_cxl_qtg_id_guid, 1, 1, &in_obj);
        if (!out_obj)
                return -ENXIO;

        if (out_obj->type != ACPI_TYPE_PACKAGE) {
                rc = -ENXIO;
                goto out;
        }

        /* Check Max QTG ID */
        obj = &out_obj->package.elements[0];
        if (obj->type != ACPI_TYPE_INTEGER) {
                rc = -ENXIO;
                goto out;
        }

        max_qtg = obj->integer.value;

        /* It's legal to have 0 QTG entries */
        pkg_entries = out_obj->package.count;
        if (pkg_entries <= 1) {
                rc = 0;
                goto out;
        }

        /* Retrieve QTG IDs package */
        obj = &out_obj->package.elements[1];
        if (obj->type != ACPI_TYPE_PACKAGE) {
                rc = -ENXIO;
                goto out;
        }

        pkg_entries = obj->package.count;
        count = min(entries, pkg_entries);
        for (i = 0; i < count; i++) {
                u16 qtg_id;

                out_buf = &obj->package.elements[i];
                if (out_buf->type != ACPI_TYPE_INTEGER) {
                        rc = -ENXIO;
                        goto out;
                }

                qtg_id = out_buf->integer.value;
                if (qtg_id > max_qtg)
                        pr_warn("QTG ID %u greater than MAX %u\n",
                                qtg_id, max_qtg);

                qos_class[i] = qtg_id;
        }
        rc = count;

out:
        ACPI_FREE(out_obj);
        return rc;
}

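/*
 * For reference, the _DSM output parsed above is expected to look like
 * (hypothetical values): Package { 7, Package { 3, 1, 7 } }, where element
 * 0 is the Max QTG ID and element 1 is the list of recommended QTG IDs in
 * order from most to least suitable.
 */
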
static int cxl_acpi_qos_class(struct cxl_root *cxl_root,
                              struct access_coordinate *coord, int entries,
                              int *qos_class)
{
        struct device *dev = cxl_root->port.uport_dev;
        acpi_handle handle;

        if (!dev_is_platform(dev))
                return -ENODEV;

        handle = ACPI_HANDLE(dev);
        if (!handle)
                return -ENODEV;

        return cxl_acpi_evaluate_qtg_dsm(handle, coord, entries, qos_class);
}

static const struct cxl_root_ops acpi_root_ops = {
        .qos_class = cxl_acpi_qos_class,
};

static void del_cxl_resource(struct resource *res)
{
        if (!res)
                return;
        kfree(res->name);
        kfree(res);
}

static struct resource *alloc_cxl_resource(resource_size_t base,
                                           resource_size_t n, int id)
{
        struct resource *res __free(kfree) = kzalloc(sizeof(*res), GFP_KERNEL);

        if (!res)
                return NULL;

        res->start = base;
        res->end = base + n - 1;
        res->flags = IORESOURCE_MEM;
        res->name = kasprintf(GFP_KERNEL, "CXL Window %d", id);
        if (!res->name)
                return NULL;

        return no_free_ptr(res);
}

static int add_or_reset_cxl_resource(struct resource *parent, struct resource *res)
{
        int rc = insert_resource(parent, res);

        if (rc)
                del_cxl_resource(res);
        return rc;
}

static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd)
{
        struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
        struct range *hpa = &cxld->hpa_range;
        resource_size_t size = range_len(hpa);
        resource_size_t start = hpa->start;
        resource_size_t cache_size;
        struct resource res;
        int nid, rc;

        res = DEFINE_RES(start, size, 0);
        nid = phys_to_target_node(start);

        rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
        if (rc)
                return rc;

        /*
         * The cache range is expected to be within the CFMWS.
         * Currently only cache_size == cxl_size is supported. CXL
         * size is then half of the total CFMWS window size.
         */
        size = size >> 1;
        if (cache_size && size != cache_size) {
                dev_warn(&cxld->dev,
                         "Extended Linear Cache size %pa != CXL size %pa. No Support!",
                         &cache_size, &size);
                return -ENXIO;
        }

        cxlrd->cache_size = cache_size;

        return 0;
}

static void cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd)
{
        int rc;

        rc = cxl_acpi_set_cache_size(cxlrd);
        if (!rc)
                return;

        if (rc != -EOPNOTSUPP) {
                /*
                 * Failing to support extended linear cache region resize does
                 * not prevent the region from functioning; it only causes
                 * 'cxl list' to show an incorrect region size.
                 */
                dev_warn(cxlrd->cxlsd.cxld.dev.parent,
                         "Extended linear cache calculation failed rc:%d\n", rc);
        }

        /* Ignoring return code */
        cxlrd->cache_size = 0;
}

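/*
 * Example of the sizing rule above (hypothetical window): for a 512GB CFMWS
 * fronting an extended linear cache, HMAT is expected to report a 256GB
 * cache, matching the 256GB CXL-backed half of the window. Any other
 * non-zero cache size is rejected with -ENXIO.
 */
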
DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
            if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
                             struct cxl_cfmws_context *ctx)
{
        int target_map[CXL_DECODER_MAX_INTERLEAVE];
        struct cxl_port *root_port = ctx->root_port;
        struct cxl_cxims_context cxims_ctx;
        struct device *dev = ctx->dev;
        struct cxl_decoder *cxld;
        unsigned int ways, i, ig;
        int rc;

        rc = cxl_acpi_cfmws_verify(dev, cfmws);
        if (rc)
                return rc;

        rc = eiw_to_ways(cfmws->interleave_ways, &ways);
        if (rc)
                return rc;
        rc = eig_to_granularity(cfmws->granularity, &ig);
        if (rc)
                return rc;
        for (i = 0; i < ways; i++)
                target_map[i] = cfmws->interleave_targets[i];

        struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
                cfmws->base_hpa, cfmws->window_size, ctx->id++);
        if (!res)
                return -ENOMEM;

        /* add to the local resource tracking to establish a sort order */
        rc = add_or_reset_cxl_resource(ctx->cxl_res, no_free_ptr(res));
        if (rc)
                return rc;

        struct cxl_root_decoder *cxlrd __free(put_cxlrd) =
                cxl_root_decoder_alloc(root_port, ways);

        if (IS_ERR(cxlrd))
                return PTR_ERR(cxlrd);

        cxld = &cxlrd->cxlsd.cxld;
        cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
        cxld->hpa_range = (struct range) {
                .start = cfmws->base_hpa,
                .end = cfmws->base_hpa + cfmws->window_size - 1,
        };
        cxld->interleave_ways = ways;
        /*
         * Minimize the x1 granularity to advertise support for any
         * valid region granularity
         */
        if (ways == 1)
                ig = CXL_DECODER_MIN_GRANULARITY;
        cxld->interleave_granularity = ig;

        cxl_setup_extended_linear_cache(cxlrd);

        if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
                if (ways != 1 && ways != 3) {
                        cxims_ctx = (struct cxl_cxims_context) {
                                .dev = dev,
                                .cxlrd = cxlrd,
                        };
                        rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS,
                                                   cxl_parse_cxims, &cxims_ctx);
                        if (rc < 0)
                                return rc;
                        if (!cxlrd->platform_data) {
                                dev_err(dev, "No CXIMS for HBIG %u\n", ig);
                                return -EINVAL;
                        }
                }
        }

        cxlrd->qos_class = cfmws->qtg_id;

        if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR)
                cxlrd->hpa_to_spa = cxl_xor_hpa_to_spa;

        rc = cxl_decoder_add(cxld, target_map);
        if (rc)
                return rc;

        rc = cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
        if (rc)
                return rc;

        dev_dbg(root_port->dev.parent, "%s added to %s\n",
                dev_name(&cxld->dev), dev_name(&root_port->dev));

        return 0;
}

static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
                           const unsigned long end)
{
        struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
        struct cxl_cfmws_context *ctx = arg;
        struct device *dev = ctx->dev;
        int rc;

        rc = __cxl_parse_cfmws(cfmws, ctx);
        if (rc)
                dev_err(dev,
                        "Failed to add decode range: [%#llx - %#llx] (%d)\n",
                        cfmws->base_hpa,
                        cfmws->base_hpa + cfmws->window_size - 1, rc);
        else
                dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
                        phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
                        cfmws->base_hpa + cfmws->window_size - 1);

        /* never fail cxl_acpi load for a single window failure */
        return 0;
}

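/*
 * Putting the CFMWS parsing above together (hypothetical entry): a CFMWS
 * with base_hpa = 0x1000000000, window_size = 256GB and a 2-way interleave
 * across host bridge UIDs { 7, 3 } produces a root decoder with
 * hpa_range = [0x1000000000, 0x4fffffffff] and target_map = { 7, 3 }.
 */
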
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
                                              struct device *dev)
{
        struct acpi_device *adev = to_acpi_device(dev);

        if (!acpi_pci_find_root(adev->handle))
                return NULL;

        if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
                return adev;
        return NULL;
}

/* Note, @dev is used by mock_acpi_table_parse_cedt() */
struct cxl_chbs_context {
        struct device *dev;
        unsigned long long uid;
        resource_size_t base;
        u32 cxl_version;
        int nr_versions;
        u32 saved_version;
};

static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
                             const unsigned long end)
{
        struct cxl_chbs_context *ctx = arg;
        struct acpi_cedt_chbs *chbs;

        chbs = (struct acpi_cedt_chbs *) header;

        if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
            chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL11)
                return 0;

        if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20 &&
            chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL20)
                return 0;

        if (!chbs->base)
                return 0;

        if (ctx->saved_version != chbs->cxl_version) {
                /*
                 * cxl_version cannot be overwritten before the next two
                 * checks, so track the versions seen with saved_version.
                 */
                ctx->saved_version = chbs->cxl_version;
                ctx->nr_versions++;
        }

        if (ctx->base != CXL_RESOURCE_NONE)
                return 0;

        if (ctx->uid != chbs->uid)
                return 0;

        ctx->cxl_version = chbs->cxl_version;
        ctx->base = chbs->base;

        return 0;
}

static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
                        struct cxl_chbs_context *ctx)
{
        unsigned long long uid;
        int rc;

        rc = acpi_evaluate_integer(hb->handle, METHOD_NAME__UID, NULL, &uid);
        if (rc != AE_OK) {
                dev_err(dev, "unable to retrieve _UID\n");
                return -ENOENT;
        }

        dev_dbg(dev, "UID found: %lld\n", uid);
        *ctx = (struct cxl_chbs_context) {
                .dev = dev,
                .uid = uid,
                .base = CXL_RESOURCE_NONE,
                .cxl_version = UINT_MAX,
                .saved_version = UINT_MAX,
        };

        acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbs_iter, ctx);

        if (ctx->nr_versions > 1) {
                /*
                 * Disclaim eRCD support given some component register may
                 * only be found via CHBCR
                 */
                dev_info(dev, "Unsupported platform config, mixed Virtual Host and Restricted CXL Host hierarchy.");
        }

        return 0;
}

static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
{
        struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
        u32 uid;

        if (kstrtou32(acpi_device_uid(hb), 0, &uid))
                return -EINVAL;

        return acpi_get_genport_coordinates(uid, dport->coord);
}

static int add_host_bridge_dport(struct device *match, void *arg)
{
        int ret;
        acpi_status rc;
        struct device *bridge;
        struct cxl_dport *dport;
        struct cxl_chbs_context ctx;
        struct acpi_pci_root *pci_root;
        struct cxl_port *root_port = arg;
        struct device *host = root_port->dev.parent;
        struct acpi_device *hb = to_cxl_host_bridge(host, match);

        if (!hb)
                return 0;

        rc = cxl_get_chbs(match, hb, &ctx);
        if (rc)
                return rc;

        if (ctx.cxl_version == UINT_MAX) {
                dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n",
                         ctx.uid);
                return 0;
        }

        if (ctx.base == CXL_RESOURCE_NONE) {
                dev_warn(match, "CHBS invalid for Host Bridge (UID %lld)\n",
                         ctx.uid);
                return 0;
        }

        pci_root = acpi_pci_find_root(hb->handle);
        bridge = pci_root->bus->bridge;

        /*
         * In RCH mode, bind the component regs base to the dport. In
         * VH mode it will be bound to the CXL host bridge's port
         * object later in add_host_bridge_uport().
         */
        if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
                dev_dbg(match, "RCRB found for UID %lld: %pa\n", ctx.uid,
                        &ctx.base);
                dport = devm_cxl_add_rch_dport(root_port, bridge, ctx.uid,
                                               ctx.base);
        } else {
                dport = devm_cxl_add_dport(root_port, bridge, ctx.uid,
                                           CXL_RESOURCE_NONE);
        }

        if (IS_ERR(dport))
                return PTR_ERR(dport);

        ret = get_genport_coordinates(match, dport);
        if (ret)
                dev_dbg(match, "Failed to get generic port perf coordinates.\n");

        return 0;
}

/*
 * A host bridge is a dport to a CFMWS decoder and it is a uport to the
 * dport (PCIe Root Ports) in the host bridge.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
        struct cxl_port *root_port = arg;
        struct device *host = root_port->dev.parent;
        struct acpi_device *hb = to_cxl_host_bridge(host, match);
        struct acpi_pci_root *pci_root;
        struct cxl_dport *dport;
        struct cxl_port *port;
        struct device *bridge;
        struct cxl_chbs_context ctx;
        resource_size_t component_reg_phys;
        int rc;

        if (!hb)
                return 0;

        pci_root = acpi_pci_find_root(hb->handle);
        bridge = pci_root->bus->bridge;
        dport = cxl_find_dport_by_dev(root_port, bridge);
        if (!dport) {
                dev_dbg(host, "host bridge expected and not found\n");
                return 0;
        }

        if (dport->rch) {
                dev_info(bridge, "host supports CXL (restricted)\n");
                return 0;
        }

        rc = cxl_get_chbs(match, hb, &ctx);
        if (rc)
                return rc;

        if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
                dev_warn(bridge,
                         "CXL CHBS version mismatch, skip port registration\n");
                return 0;
        }

        component_reg_phys = ctx.base;
        if (component_reg_phys != CXL_RESOURCE_NONE)
                dev_dbg(match, "CHBCR found for UID %lld: %pa\n",
                        ctx.uid, &component_reg_phys);

        rc = devm_cxl_register_pci_bus(host, bridge, pci_root->bus);
        if (rc)
                return rc;

        port = devm_cxl_add_port(host, bridge, component_reg_phys, dport);
        if (IS_ERR(port))
                return PTR_ERR(port);

        dev_info(bridge, "host supports CXL\n");

        return 0;
}

static int add_root_nvdimm_bridge(struct device *match, void *data)
{
        struct cxl_decoder *cxld;
        struct cxl_port *root_port = data;
        struct cxl_nvdimm_bridge *cxl_nvb;
        struct device *host = root_port->dev.parent;

        if (!is_root_decoder(match))
                return 0;

        cxld = to_cxl_decoder(match);
        if (!(cxld->flags & CXL_DECODER_F_PMEM))
                return 0;

        cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
        if (IS_ERR(cxl_nvb)) {
                dev_dbg(host, "failed to register pmem\n");
                return PTR_ERR(cxl_nvb);
        }
        dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
                dev_name(&cxl_nvb->dev));
        return 1;
}

static struct lock_class_key cxl_root_key;

static void cxl_acpi_lock_reset_class(void *dev)
{
        device_lock_reset_class(dev);
}

static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
{
        priv->desc = (unsigned long) pub;
}

static struct resource *cxl_get_public_resource(struct resource *priv)
{
        return (struct resource *) priv->desc;
}

static void remove_cxl_resources(void *data)
{
        struct resource *res, *next, *cxl = data;

        for (res = cxl->child; res; res = next) {
                struct resource *victim = cxl_get_public_resource(res);

                next = res->sibling;
                remove_resource(res);

                if (victim) {
                        remove_resource(victim);
                        kfree(victim);
                }

                del_cxl_resource(res);
        }
}

/**
 * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
 * @cxl_res: A standalone resource tree where each CXL window is a sibling
 *
 * Walk each CXL window in @cxl_res and add it to iomem_resource potentially
 * expanding its boundaries to ensure that any conflicting resources become
 * children. If a window is expanded it may then conflict with another window
 * entry and require the window to be truncated or trimmed. Consider this
 * situation::
 *
 *      |-- "CXL Window 0" --||----- "CXL Window 1" -----|
 *      |--------------- "System RAM" -------------|
 *
 * ...where platform firmware has established a "System RAM" resource across 2
 * windows, but has left some portion of window 1 for dynamic CXL region
 * provisioning. In this case "CXL Window 0" will span the entirety of the
 * "System RAM" span, and "CXL Window 1" is truncated to the remaining tail
 * past the end of that "System RAM" resource.
 */
static int add_cxl_resources(struct resource *cxl_res)
{
        struct resource *res, *new, *next;

        for (res = cxl_res->child; res; res = next) {
                new = kzalloc(sizeof(*new), GFP_KERNEL);
                if (!new)
                        return -ENOMEM;
                new->name = res->name;
                new->start = res->start;
                new->end = res->end;
                new->flags = IORESOURCE_MEM;
                new->desc = IORES_DESC_CXL;

                /*
                 * Record the public resource in the private cxl_res tree for
                 * later removal.
                 */
                cxl_set_public_resource(res, new);

                insert_resource_expand_to_fit(&iomem_resource, new);

                next = res->sibling;
                while (next && resource_overlaps(new, next)) {
                        if (resource_contains(new, next)) {
                                struct resource *_next = next->sibling;

                                remove_resource(next);
                                del_cxl_resource(next);
                                next = _next;
                        } else
                                next->start = new->end + 1;
                }
        }
        return 0;
}

static int pair_cxl_resource(struct device *dev, void *data)
{
        struct resource *cxl_res = data;
        struct resource *p;

        if (!is_root_decoder(dev))
                return 0;

        for (p = cxl_res->child; p; p = p->sibling) {
                struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
                struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
                struct resource res = {
                        .start = cxld->hpa_range.start,
                        .end = cxld->hpa_range.end,
                        .flags = IORESOURCE_MEM,
                };

                if (resource_contains(p, &res)) {
                        cxlrd->res = cxl_get_public_resource(p);
                        break;
                }
        }

        return 0;
}

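/*
 * Probe order below: pin the device lock class, register the CXL root port,
 * enumerate host bridge dports from the ACPI bus, parse CEDT CFMWS entries
 * into root decoders and iomem resources, then register host bridge uports
 * and an nvdimm bridge before rescanning the CXL bus.
 */
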
static int cxl_acpi_probe(struct platform_device *pdev)
{
        int rc;
        struct resource *cxl_res;
        struct cxl_root *cxl_root;
        struct cxl_port *root_port;
        struct device *host = &pdev->dev;
        struct acpi_device *adev = ACPI_COMPANION(host);
        struct cxl_cfmws_context ctx;

        device_lock_set_class(&pdev->dev, &cxl_root_key);
        rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class,
                                      &pdev->dev);
        if (rc)
                return rc;

        cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
        if (!cxl_res)
                return -ENOMEM;
        cxl_res->name = "CXL mem";
        cxl_res->start = 0;
        cxl_res->end = -1;
        cxl_res->flags = IORESOURCE_MEM;

        cxl_root = devm_cxl_add_root(host, &acpi_root_ops);
        if (IS_ERR(cxl_root))
                return PTR_ERR(cxl_root);
        root_port = &cxl_root->port;

        rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
                              add_host_bridge_dport);
        if (rc < 0)
                return rc;

        rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
        if (rc)
                return rc;

        ctx = (struct cxl_cfmws_context) {
                .dev = host,
                .root_port = root_port,
                .cxl_res = cxl_res,
        };
        rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
        if (rc < 0)
                return -ENXIO;

        rc = add_cxl_resources(cxl_res);
        if (rc)
                return rc;

        /*
         * Populate the root decoders with their related iomem resource,
         * if present
         */
        device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);

        /*
         * Root level scanned with host-bridge as dports, now scan host-bridges
         * for their role as CXL uports to their CXL-capable PCIe Root Ports.
         */
        rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
                              add_host_bridge_uport);
        if (rc < 0)
                return rc;

        if (IS_ENABLED(CONFIG_CXL_PMEM))
                rc = device_for_each_child(&root_port->dev, root_port,
                                           add_root_nvdimm_bridge);
        if (rc < 0)
                return rc;

        /* In case PCI is scanned before ACPI re-trigger memdev attach */
        cxl_bus_rescan();
        return 0;
}

static const struct acpi_device_id cxl_acpi_ids[] = {
        { "ACPI0017" },
        { },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

static const struct platform_device_id cxl_test_ids[] = {
        { "cxl_acpi" },
        { },
};
MODULE_DEVICE_TABLE(platform, cxl_test_ids);

static struct platform_driver cxl_acpi_driver = {
        .probe = cxl_acpi_probe,
        .driver = {
                .name = KBUILD_MODNAME,
                .acpi_match_table = cxl_acpi_ids,
        },
        .id_table = cxl_test_ids,
};

static int __init cxl_acpi_init(void)
{
        return platform_driver_register(&cxl_acpi_driver);
}

static void __exit cxl_acpi_exit(void)
{
        platform_driver_unregister(&cxl_acpi_driver);
        cxl_bus_drain();
}

/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);

/*
 * Arrange for host-bridge ports to be active synchronous with
 * cxl_acpi_probe() exit.
 */
MODULE_SOFTDEP("pre: cxl_port");

module_exit(cxl_acpi_exit);
MODULE_DESCRIPTION("CXL ACPI: Platform Support");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("CXL");
MODULE_IMPORT_NS("ACPI");