// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/node.h>
#include <asm/div64.h>
#include "cxlpci.h"
#include "cxl.h"

struct cxl_cxims_data {
	int nr_maps;
	u64 xormaps[] __counted_by(nr_maps);
};

static const guid_t acpi_cxl_qtg_id_guid =
	GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
		  0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);

static u64 cxl_apply_xor_maps(struct cxl_root_decoder *cxlrd, u64 addr)
{
	struct cxl_cxims_data *cximsd = cxlrd->platform_data;
	int hbiw = cxlrd->cxlsd.nr_targets;
	u64 val;
	int pos;

	/* No xormaps for host bridge interleave ways of 1 or 3 */
	if (hbiw == 1 || hbiw == 3)
		return addr;

	/*
	 * In regions using XOR interleave arithmetic the CXL HPA may not
	 * be the same as the SPA. This helper performs the SPA->CXL HPA
	 * or the CXL HPA->SPA translation. XOR is self-inverting, so the
	 * same function serves both directions.
	 *
	 * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) applying
	 * the xormaps will toggle a position bit.
	 *
	 * pos is the lowest set bit in an XORMAP
	 * val is the XORALLBITS(addr & XORMAP)
	 *
	 * XORALLBITS: The CXL spec (3.1 Table 9-22) defines XORALLBITS
	 * as an operation that outputs a single bit by XORing all the
	 * bits in the input (addr & xormap). Implement XORALLBITS using
	 * hweight64(). If the hamming weight is even the XOR of those
	 * bits results in val==0, if odd the XOR result is val==1.
	 */
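	/*
	 * Worked example with illustrative values: for a single xormap of
	 * 0x10100, pos = __ffs(0x10100) = 8 and val is the parity of addr
	 * bits 8 and 16. For addr = 0x10000 that parity is odd, so bit 8
	 * is set and the result is 0x10100; feeding 0x10100 back through
	 * clears bit 8 again, confirming the self-inverting property.
	 */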
	for (int i = 0; i < cximsd->nr_maps; i++) {
		if (!cximsd->xormaps[i])
			continue;
		pos = __ffs(cximsd->xormaps[i]);
		val = (hweight64(addr & cximsd->xormaps[i]) & 1);
		addr = (addr & ~(1ULL << pos)) | (val << pos);
	}

	return addr;
}

struct cxl_cxims_context {
	struct device *dev;
	struct cxl_root_decoder *cxlrd;
};

static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	struct acpi_cedt_cxims *cxims = (struct acpi_cedt_cxims *)header;
	struct cxl_cxims_context *ctx = arg;
	struct cxl_root_decoder *cxlrd = ctx->cxlrd;
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct device *dev = ctx->dev;
	struct cxl_cxims_data *cximsd;
	unsigned int hbig, nr_maps;
	int rc;

	rc = eig_to_granularity(cxims->hbig, &hbig);
	if (rc)
		return rc;

	/* Does this CXIMS entry apply to the given CXL Window? */
	if (hbig != cxld->interleave_granularity)
		return 0;

	/* IW 1,3 do not use xormaps; the caller skips this parsing entirely */
	if (is_power_of_2(cxld->interleave_ways))
		/* 2, 4, 8, 16 way */
		nr_maps = ilog2(cxld->interleave_ways);
	else
		/* 6, 12 way */
		nr_maps = ilog2(cxld->interleave_ways / 3);

	if (cxims->nr_xormaps < nr_maps) {
		dev_dbg(dev, "CXIMS nr_xormaps[%d] expected[%d]\n",
			cxims->nr_xormaps, nr_maps);
		return -ENXIO;
	}

	cximsd = devm_kzalloc(dev, struct_size(cximsd, xormaps, nr_maps),
			      GFP_KERNEL);
	if (!cximsd)
		return -ENOMEM;
	cximsd->nr_maps = nr_maps;
	memcpy(cximsd->xormaps, cxims->xormap_list,
	       nr_maps * sizeof(*cximsd->xormaps));
	cxlrd->platform_data = cximsd;

	return 0;
}

static unsigned long cfmws_to_decoder_flags(int restrictions)
{
	unsigned long flags = CXL_DECODER_F_ENABLE;

	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
		flags |= CXL_DECODER_F_TYPE2;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
		flags |= CXL_DECODER_F_TYPE3;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
		flags |= CXL_DECODER_F_RAM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
		flags |= CXL_DECODER_F_PMEM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
		flags |= CXL_DECODER_F_LOCK;

	return flags;
}

static int cxl_acpi_cfmws_verify(struct device *dev,
				 struct acpi_cedt_cfmws *cfmws)
{
	int rc, expected_len;
	unsigned int ways;

	if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO &&
	    cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
		dev_err(dev, "CFMWS Unknown Interleave Arithmetic: %d\n",
			cfmws->interleave_arithmetic);
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
		dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
		dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
		return -EINVAL;
	}

	rc = eiw_to_ways(cfmws->interleave_ways, &ways);
	if (rc) {
		dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
			cfmws->interleave_ways);
		return -EINVAL;
	}

	expected_len = struct_size(cfmws, interleave_targets, ways);
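	/*
	 * For example (illustrative): a 4-way window must carry 4 entries
	 * in interleave_targets[], so expected_len works out to
	 * sizeof(*cfmws) + 4 * sizeof(cfmws->interleave_targets[0]).
	 */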
	if (cfmws->header.length < expected_len) {
		dev_err(dev, "CFMWS length %d less than expected %d\n",
			cfmws->header.length, expected_len);
		return -EINVAL;
	}

	if (cfmws->header.length > expected_len)
		dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
			cfmws->header.length, expected_len);

	return 0;
}

/*
 * Note, @dev must be the first member, see 'struct cxl_chbs_context'
 * and mock_acpi_table_parse_cedt()
 */
struct cxl_cfmws_context {
	struct device *dev;
	struct cxl_port *root_port;
	struct resource *cxl_res;
	int id;
};

/**
 * cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
 * @handle: ACPI handle
 * @coord: performance access coordinates
 * @entries: number of QTG IDs to return
 * @qos_class: int array provided by caller to return QTG IDs
 *
 * Return: number of QTG IDs returned, or -errno for errors
 *
 * Issue the QTG _DSM with the accompanying bandwidth and latency data to
 * retrieve the QTG IDs suitable for the performance point, ordered from
 * most to least suitable. Write back an array of QTG IDs and return the
 * actual number of QTG IDs written back.
 */
static int
cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
			  int entries, int *qos_class)
{
	union acpi_object *out_obj, *out_buf, *obj;
	union acpi_object in_array[4] = {
		[0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
		[1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
		[2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
		[3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
	};
	union acpi_object in_obj = {
		.package = {
			.type = ACPI_TYPE_PACKAGE,
			.count = 4,
			.elements = in_array,
		},
	};
	int count, pkg_entries, i;
	u16 max_qtg;
	int rc;

	if (!entries)
		return -EINVAL;

	out_obj = acpi_evaluate_dsm(handle, &acpi_cxl_qtg_id_guid, 1, 1, &in_obj);
	if (!out_obj)
		return -ENXIO;

	if (out_obj->type != ACPI_TYPE_PACKAGE) {
		rc = -ENXIO;
		goto out;
	}
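	/*
	 * Expected output layout (sketch, per the parsing below): the
	 * package holds an integer Max QTG ID as element[0] and,
	 * optionally, a sub-package of integer QTG IDs as element[1],
	 * ordered from most to least suitable.
	 */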
	/* Check Max QTG ID */
	obj = &out_obj->package.elements[0];
	if (obj->type != ACPI_TYPE_INTEGER) {
		rc = -ENXIO;
		goto out;
	}

	max_qtg = obj->integer.value;

	/* It's legal to have 0 QTG entries */
	pkg_entries = out_obj->package.count;
	if (pkg_entries <= 1) {
		rc = 0;
		goto out;
	}

	/* Retrieve QTG IDs package */
	obj = &out_obj->package.elements[1];
	if (obj->type != ACPI_TYPE_PACKAGE) {
		rc = -ENXIO;
		goto out;
	}

	pkg_entries = obj->package.count;
	count = min(entries, pkg_entries);
	for (i = 0; i < count; i++) {
		u16 qtg_id;

		out_buf = &obj->package.elements[i];
		if (out_buf->type != ACPI_TYPE_INTEGER) {
			rc = -ENXIO;
			goto out;
		}

		qtg_id = out_buf->integer.value;
		if (qtg_id > max_qtg)
			pr_warn("QTG ID %u greater than MAX %u\n",
				qtg_id, max_qtg);

		qos_class[i] = qtg_id;
	}
	rc = count;

out:
	ACPI_FREE(out_obj);
	return rc;
}

static int cxl_acpi_qos_class(struct cxl_root *cxl_root,
			      struct access_coordinate *coord, int entries,
			      int *qos_class)
{
	struct device *dev = cxl_root->port.uport_dev;
	acpi_handle handle;

	if (!dev_is_platform(dev))
		return -ENODEV;

	handle = ACPI_HANDLE(dev);
	if (!handle)
		return -ENODEV;

	return cxl_acpi_evaluate_qtg_dsm(handle, coord, entries, qos_class);
}

static const struct cxl_root_ops acpi_root_ops = {
	.qos_class = cxl_acpi_qos_class,
};

static void del_cxl_resource(struct resource *res)
{
	if (!res)
		return;
	kfree(res->name);
	kfree(res);
}

static struct resource *alloc_cxl_resource(resource_size_t base,
					   resource_size_t n, int id)
{
	struct resource *res __free(kfree) = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->start = base;
	res->end = base + n - 1;
	res->flags = IORESOURCE_MEM;
	res->name = kasprintf(GFP_KERNEL, "CXL Window %d", id);
	if (!res->name)
		return NULL;

	return no_free_ptr(res);
}

static int add_or_reset_cxl_resource(struct resource *parent, struct resource *res)
{
	int rc = insert_resource(parent, res);

	if (rc)
		del_cxl_resource(res);
	return rc;
}

static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd)
{
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct range *hpa = &cxld->hpa_range;
	resource_size_t size = range_len(hpa);
	resource_size_t start = hpa->start;
	resource_size_t cache_size;
	struct resource res;
	int nid, rc;

	res = DEFINE_RES(start, size, 0);
	nid = phys_to_target_node(start);

	rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
	if (rc)
		return rc;

	/*
	 * The cache range is expected to be within the CFMWS. Currently
	 * only cache_size == cxl_size is supported, in which case the
	 * CXL size is half of the total CFMWS window size.
	 */
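	/*
	 * E.g. (illustrative): a 512GB CFMWS window would split into a
	 * 256GB extended linear cache and 256GB of CXL-backed capacity.
	 */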
	size = size >> 1;
	if (cache_size && size != cache_size) {
		dev_warn(&cxld->dev,
			 "Extended Linear Cache size %pa != CXL size %pa. No Support!",
			 &cache_size, &size);
		return -ENXIO;
	}

	cxlrd->cache_size = cache_size;

	return 0;
}

static void cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd)
{
	int rc;

	rc = cxl_acpi_set_cache_size(cxlrd);
	if (!rc)
		return;

	if (rc != -EOPNOTSUPP) {
		/*
		 * Failure to set up the extended linear cache does not
		 * prevent the region from functioning; it only causes
		 * 'cxl list' to report an incorrect region size.
		 */
		dev_warn(cxlrd->cxlsd.cxld.dev.parent,
			 "Extended linear cache calculation failed rc:%d\n", rc);
	}

	/* Ignore the error; continue without an extended linear cache */
	cxlrd->cache_size = 0;
}

DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
	    if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
			     struct cxl_cfmws_context *ctx)
{
	int target_map[CXL_DECODER_MAX_INTERLEAVE];
	struct cxl_port *root_port = ctx->root_port;
	struct cxl_cxims_context cxims_ctx;
	struct device *dev = ctx->dev;
	struct cxl_decoder *cxld;
	unsigned int ways, i, ig;
	int rc;

	rc = cxl_acpi_cfmws_verify(dev, cfmws);
	if (rc)
		return rc;

	rc = eiw_to_ways(cfmws->interleave_ways, &ways);
	if (rc)
		return rc;
	rc = eig_to_granularity(cfmws->granularity, &ig);
	if (rc)
		return rc;
	for (i = 0; i < ways; i++)
		target_map[i] = cfmws->interleave_targets[i];

	struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
		cfmws->base_hpa, cfmws->window_size, ctx->id++);
	if (!res)
		return -ENOMEM;

	/* add to the local resource tracking to establish a sort order */
	rc = add_or_reset_cxl_resource(ctx->cxl_res, no_free_ptr(res));
	if (rc)
		return rc;

	struct cxl_root_decoder *cxlrd __free(put_cxlrd) =
		cxl_root_decoder_alloc(root_port, ways);

	if (IS_ERR(cxlrd))
		return PTR_ERR(cxlrd);

	cxld = &cxlrd->cxlsd.cxld;
	cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->hpa_range = (struct range) {
		.start = cfmws->base_hpa,
		.end = cfmws->base_hpa + cfmws->window_size - 1,
	};
	cxld->interleave_ways = ways;
	/*
	 * Minimize the x1 granularity to advertise support for any
	 * valid region granularity
	 */
	if (ways == 1)
		ig = CXL_DECODER_MIN_GRANULARITY;
	cxld->interleave_granularity = ig;

	cxl_setup_extended_linear_cache(cxlrd);

	if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
		if (ways != 1 && ways != 3) {
			cxims_ctx = (struct cxl_cxims_context) {
				.dev = dev,
				.cxlrd = cxlrd,
			};
			rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS,
						   cxl_parse_cxims, &cxims_ctx);
			if (rc < 0)
				return rc;
			if (!cxlrd->platform_data) {
				dev_err(dev, "No CXIMS for HBIG %u\n", ig);
				return -EINVAL;
			}
		}
	}

	cxlrd->qos_class = cfmws->qtg_id;

	if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
		cxlrd->ops = kzalloc(sizeof(*cxlrd->ops), GFP_KERNEL);
		if (!cxlrd->ops)
			return -ENOMEM;

		cxlrd->ops->hpa_to_spa = cxl_apply_xor_maps;
		cxlrd->ops->spa_to_hpa = cxl_apply_xor_maps;
	}

	rc = cxl_decoder_add(cxld, target_map);
	if (rc)
		return rc;

	rc = cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
	if (rc)
		return rc;

	dev_dbg(root_port->dev.parent, "%s added to %s\n",
		dev_name(&cxld->dev), dev_name(&root_port->dev));

	return 0;
}

static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
	struct cxl_cfmws_context *ctx = arg;
	struct device *dev = ctx->dev;
	int rc;

	rc = __cxl_parse_cfmws(cfmws, ctx);
	if (rc)
		dev_err(dev,
			"Failed to add decode range: [%#llx - %#llx] (%d)\n",
			cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1, rc);
	else
		dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
			phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1);

	/* never fail cxl_acpi load for a single window failure */
	return 0;
}

__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
					      struct device *dev)
{
	struct acpi_device *adev = to_acpi_device(dev);

	if (!acpi_pci_find_root(adev->handle))
		return NULL;

	if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
		return adev;
	return NULL;
}

/* Note, @dev is used by mock_acpi_table_parse_cedt() */
struct cxl_chbs_context {
	struct device *dev;
	unsigned long long uid;
	resource_size_t base;
	u32 cxl_version;
	int nr_versions;
	u32 saved_version;
};

static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
			     const unsigned long end)
{
	struct cxl_chbs_context *ctx = arg;
	struct acpi_cedt_chbs *chbs;

	chbs = (struct acpi_cedt_chbs *) header;

	if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
	    chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL11)
		return 0;

	if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20 &&
	    chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL20)
		return 0;
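	/*
	 * Note (per the CEDT definitions): a CXL 1.1 CHBS describes an
	 * 8KB RCRB while a CXL 2.0 CHBS describes a 64KB component
	 * register block, so entries with mismatched lengths are skipped
	 * above.
	 */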
	if (!chbs->base)
		return 0;

	if (ctx->saved_version != chbs->cxl_version) {
		/*
		 * ctx->cxl_version must only be set by the matching CHBS
		 * entry found below, so track version changes across all
		 * entries in saved_version.
		 */
		ctx->saved_version = chbs->cxl_version;
		ctx->nr_versions++;
	}

	if (ctx->base != CXL_RESOURCE_NONE)
		return 0;

	if (ctx->uid != chbs->uid)
		return 0;

	ctx->cxl_version = chbs->cxl_version;
	ctx->base = chbs->base;

	return 0;
}

static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
			struct cxl_chbs_context *ctx)
{
	unsigned long long uid;
	int rc;

	rc = acpi_evaluate_integer(hb->handle, METHOD_NAME__UID, NULL, &uid);
	if (rc != AE_OK) {
		dev_err(dev, "unable to retrieve _UID\n");
		return -ENOENT;
	}

	dev_dbg(dev, "UID found: %lld\n", uid);
	*ctx = (struct cxl_chbs_context) {
		.dev = dev,
		.uid = uid,
		.base = CXL_RESOURCE_NONE,
		.cxl_version = UINT_MAX,
		.saved_version = UINT_MAX,
	};

	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbs_iter, ctx);

	if (ctx->nr_versions > 1) {
		/*
		 * Disclaim eRCD support given some component registers may
		 * only be found via CHBCR
		 */
		dev_info(dev, "Unsupported platform config, mixed Virtual Host and Restricted CXL Host hierarchy.");
	}

	return 0;
}

static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
{
	struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
	u32 uid;

	if (kstrtou32(acpi_device_uid(hb), 0, &uid))
		return -EINVAL;

	return acpi_get_genport_coordinates(uid, dport->coord);
}
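/*
 * Note: the host bridge's ACPI _UID ties it to a Generic Port entry in
 * SRAT/HMAT, which is where acpi_get_genport_coordinates() sources the
 * latency/bandwidth data used above.
 */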
static int add_host_bridge_dport(struct device *match, void *arg)
{
	int ret;
	acpi_status rc;
	struct device *bridge;
	struct cxl_dport *dport;
	struct cxl_chbs_context ctx;
	struct acpi_pci_root *pci_root;
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *hb = to_cxl_host_bridge(host, match);

	if (!hb)
		return 0;

	rc = cxl_get_chbs(match, hb, &ctx);
	if (rc)
		return rc;

	if (ctx.cxl_version == UINT_MAX) {
		dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n",
			 ctx.uid);
		return 0;
	}

	if (ctx.base == CXL_RESOURCE_NONE) {
		dev_warn(match, "CHBS invalid for Host Bridge (UID %lld)\n",
			 ctx.uid);
		return 0;
	}

	pci_root = acpi_pci_find_root(hb->handle);
	bridge = pci_root->bus->bridge;

	/*
	 * In RCH mode, bind the component regs base to the dport. In
	 * VH mode it will be bound to the CXL host bridge's port
	 * object later in add_host_bridge_uport().
	 */
	if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
		dev_dbg(match, "RCRB found for UID %lld: %pa\n", ctx.uid,
			&ctx.base);
		dport = devm_cxl_add_rch_dport(root_port, bridge, ctx.uid,
					       ctx.base);
	} else {
		dport = devm_cxl_add_dport(root_port, bridge, ctx.uid,
					   CXL_RESOURCE_NONE);
	}

	if (IS_ERR(dport))
		return PTR_ERR(dport);

	ret = get_genport_coordinates(match, dport);
	if (ret)
		dev_dbg(match, "Failed to get generic port perf coordinates.\n");

	return 0;
}

/*
 * A host bridge is a dport to a CFMWS decode and a uport to the
 * dports (PCIe Root Ports) within the host bridge.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *hb = to_cxl_host_bridge(host, match);
	struct acpi_pci_root *pci_root;
	struct cxl_dport *dport;
	struct cxl_port *port;
	struct device *bridge;
	struct cxl_chbs_context ctx;
	resource_size_t component_reg_phys;
	int rc;

	if (!hb)
		return 0;

	pci_root = acpi_pci_find_root(hb->handle);
	bridge = pci_root->bus->bridge;
	dport = cxl_find_dport_by_dev(root_port, bridge);
	if (!dport) {
		dev_dbg(host, "host bridge expected and not found\n");
		return 0;
	}

	if (dport->rch) {
		dev_info(bridge, "host supports CXL (restricted)\n");
		return 0;
	}

	rc = cxl_get_chbs(match, hb, &ctx);
	if (rc)
		return rc;

	if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
		dev_warn(bridge,
			 "CXL CHBS version mismatch, skip port registration\n");
		return 0;
	}

	component_reg_phys = ctx.base;
	if (component_reg_phys != CXL_RESOURCE_NONE)
		dev_dbg(match, "CHBCR found for UID %lld: %pa\n",
			ctx.uid, &component_reg_phys);

	rc = devm_cxl_register_pci_bus(host, bridge, pci_root->bus);
	if (rc)
		return rc;

	port = devm_cxl_add_port(host, bridge, component_reg_phys, dport);
	if (IS_ERR(port))
		return PTR_ERR(port);

	dev_info(bridge, "host supports CXL\n");

	return 0;
}

static int add_root_nvdimm_bridge(struct device *match, void *data)
{
	struct cxl_decoder *cxld;
	struct cxl_port *root_port = data;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *host = root_port->dev.parent;

	if (!is_root_decoder(match))
		return 0;

	cxld = to_cxl_decoder(match);
	if (!(cxld->flags & CXL_DECODER_F_PMEM))
		return 0;

	cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
	if (IS_ERR(cxl_nvb)) {
		dev_dbg(host, "failed to register pmem\n");
		return PTR_ERR(cxl_nvb);
	}
	dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
		dev_name(&cxl_nvb->dev));
	return 1;
}

static struct lock_class_key cxl_root_key;

static void cxl_acpi_lock_reset_class(void *dev)
{
	device_lock_reset_class(dev);
}

static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
{
	priv->desc = (unsigned long) pub;
}

static struct resource *cxl_get_public_resource(struct resource *priv)
{
	return (struct resource *) priv->desc;
}
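/*
 * The private tree's res->desc field is repurposed above to stash a
 * pointer to the public iomem_resource entry; that is acceptable here
 * because the private cxl_res tree is never consulted for IORES_DESC_*
 * semantics.
 */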
static void remove_cxl_resources(void *data)
{
	struct resource *res, *next, *cxl = data;

	for (res = cxl->child; res; res = next) {
		struct resource *victim = cxl_get_public_resource(res);

		next = res->sibling;
		remove_resource(res);

		if (victim) {
			remove_resource(victim);
			kfree(victim);
		}

		del_cxl_resource(res);
	}
}

/**
 * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
 * @cxl_res: A standalone resource tree where each CXL window is a sibling
 *
 * Walk each CXL window in @cxl_res and add it to iomem_resource potentially
 * expanding its boundaries to ensure that any conflicting resources become
 * children. If a window is expanded it may then conflict with another window
 * entry and require that window to be truncated. Consider this situation::
 *
 *	|-- "CXL Window 0" --||----- "CXL Window 1" -----|
 *	|--------------- "System RAM" -------------|
 *
 * ...where platform firmware has established a System RAM resource across 2
 * windows, but has left some portion of window 1 for dynamic CXL region
 * provisioning. In this case "CXL Window 0" will span the entirety of the
 * "System RAM" span, and "CXL Window 1" is truncated to the remaining tail
 * past the end of that "System RAM" resource.
 */
static int add_cxl_resources(struct resource *cxl_res)
{
	struct resource *res, *new, *next;

	for (res = cxl_res->child; res; res = next) {
		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->name = res->name;
		new->start = res->start;
		new->end = res->end;
		new->flags = IORESOURCE_MEM;
		new->desc = IORES_DESC_CXL;

		/*
		 * Record the public resource in the private cxl_res tree for
		 * later removal.
		 */
		cxl_set_public_resource(res, new);

		insert_resource_expand_to_fit(&iomem_resource, new);

		next = res->sibling;
		while (next && resource_overlaps(new, next)) {
			if (resource_contains(new, next)) {
				struct resource *_next = next->sibling;

				remove_resource(next);
				del_cxl_resource(next);
				next = _next;
			} else
				next->start = new->end + 1;
		}
	}
	return 0;
}

static int pair_cxl_resource(struct device *dev, void *data)
{
	struct resource *cxl_res = data;
	struct resource *p;

	if (!is_root_decoder(dev))
		return 0;

	for (p = cxl_res->child; p; p = p->sibling) {
		struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
		struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
		struct resource res = {
			.start = cxld->hpa_range.start,
			.end = cxld->hpa_range.end,
			.flags = IORESOURCE_MEM,
		};

		if (resource_contains(p, &res)) {
			cxlrd->res = cxl_get_public_resource(p);
			break;
		}
	}

	return 0;
}
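/*
 * A root decoder's HPA range is expected to be fully contained by
 * exactly one "CXL Window" resource, so the first containment match
 * above suffices to pair them.
 */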
static int cxl_acpi_probe(struct platform_device *pdev)
{
	int rc;
	struct resource *cxl_res;
	struct cxl_root *cxl_root;
	struct cxl_port *root_port;
	struct device *host = &pdev->dev;
	struct acpi_device *adev = ACPI_COMPANION(host);
	struct cxl_cfmws_context ctx;

	device_lock_set_class(&pdev->dev, &cxl_root_key);
	rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class,
				      &pdev->dev);
	if (rc)
		return rc;

	cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
	if (!cxl_res)
		return -ENOMEM;
	cxl_res->name = "CXL mem";
	cxl_res->start = 0;
	cxl_res->end = -1;
	cxl_res->flags = IORESOURCE_MEM;

	cxl_root = devm_cxl_add_root(host, &acpi_root_ops);
	if (IS_ERR(cxl_root))
		return PTR_ERR(cxl_root);
	root_port = &cxl_root->port;

	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_dport);
	if (rc < 0)
		return rc;

	rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
	if (rc)
		return rc;

	ctx = (struct cxl_cfmws_context) {
		.dev = host,
		.root_port = root_port,
		.cxl_res = cxl_res,
	};
	rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
	if (rc < 0)
		return -ENXIO;

	rc = add_cxl_resources(cxl_res);
	if (rc)
		return rc;

	/*
	 * Populate the root decoders with their related iomem resource,
	 * if present
	 */
	device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);

	/*
	 * Root level scanned with host-bridges as dports, now scan
	 * host-bridges for their role as CXL uports to their CXL-capable
	 * PCIe Root Ports.
	 */
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_uport);
	if (rc < 0)
		return rc;

	if (IS_ENABLED(CONFIG_CXL_PMEM))
		rc = device_for_each_child(&root_port->dev, root_port,
					   add_root_nvdimm_bridge);
	if (rc < 0)
		return rc;

	/* In case PCI is scanned before ACPI, re-trigger memdev attach */
	cxl_bus_rescan();
	return 0;
}

static const struct acpi_device_id cxl_acpi_ids[] = {
	{ "ACPI0017" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

static const struct platform_device_id cxl_test_ids[] = {
	{ "cxl_acpi" },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_test_ids);

static struct platform_driver cxl_acpi_driver = {
	.probe = cxl_acpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = cxl_acpi_ids,
	},
	.id_table = cxl_test_ids,
};

static int __init cxl_acpi_init(void)
{
	return platform_driver_register(&cxl_acpi_driver);
}

static void __exit cxl_acpi_exit(void)
{
	platform_driver_unregister(&cxl_acpi_driver);
	cxl_bus_drain();
}

/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);

/*
 * Arrange for host-bridge ports to be active synchronous with
 * cxl_acpi_probe() exit.
 */
MODULE_SOFTDEP("pre: cxl_port");

module_exit(cxl_acpi_exit);
MODULE_DESCRIPTION("CXL ACPI: Platform Support");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("CXL");
MODULE_IMPORT_NS("ACPI");