// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/node.h>
#include <asm/div64.h>
#include "cxlpci.h"
#include "cxl.h"

static const guid_t acpi_cxl_qtg_id_guid =
	GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
		  0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);

#define HBIW_TO_NR_MAPS_SIZE	(CXL_DECODER_MAX_INTERLEAVE + 1)
static const int hbiw_to_nr_maps[HBIW_TO_NR_MAPS_SIZE] = {
	[1] = 0, [2] = 1, [3] = 0, [4] = 2, [6] = 1, [8] = 3, [12] = 2, [16] = 4
};

static const int valid_hbiw[] = { 1, 2, 3, 4, 6, 8, 12, 16 };

u64 cxl_do_xormap_calc(struct cxl_cxims_data *cximsd, u64 addr, int hbiw)
{
	int nr_maps_to_apply = -1;
	u64 val;
	int pos;

	/*
	 * Strictly validate hbiw since this function is used for testing and
	 * that nullifies any expectation of trusted parameters from the CXL
	 * Region Driver.
	 */
	for (int i = 0; i < ARRAY_SIZE(valid_hbiw); i++) {
		if (valid_hbiw[i] == hbiw) {
			nr_maps_to_apply = hbiw_to_nr_maps[hbiw];
			break;
		}
	}
	if (nr_maps_to_apply == -1 || nr_maps_to_apply > cximsd->nr_maps)
		return ULLONG_MAX;

	/*
	 * In regions using XOR interleave arithmetic the CXL HPA may not
	 * be the same as the SPA. This helper performs the SPA->CXL HPA
	 * or the CXL HPA->SPA translation. Since XOR is self-inverting,
	 * so is this function.
	 *
	 * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) applying
	 * the xormaps will toggle a position bit.
	 *
	 * pos is the lowest set bit in an XORMAP
	 * val is the XORALLBITS(addr & XORMAP)
	 *
	 * XORALLBITS: The CXL spec (3.1 Table 9-22) defines XORALLBITS
	 * as an operation that outputs a single bit by XORing all the
	 * bits in the input (addr & xormap). Implement XORALLBITS using
	 * hweight64(). If the hamming weight is even the XOR of those
	 * bits results in val==0, if odd the XOR result is val==1.
	 */

	for (int i = 0; i < cximsd->nr_maps; i++) {
		if (!cximsd->xormaps[i])
			continue;
		pos = __ffs(cximsd->xormaps[i]);
		val = (hweight64(addr & cximsd->xormaps[i]) & 1);
		addr = (addr & ~(1ULL << pos)) | (val << pos);
	}

	return addr;
}
EXPORT_SYMBOL_FOR_MODULES(cxl_do_xormap_calc, "cxl_translate");

static u64 cxl_apply_xor_maps(struct cxl_root_decoder *cxlrd, u64 addr)
{
	struct cxl_cxims_data *cximsd = cxlrd->platform_data;

	return cxl_do_xormap_calc(cximsd, addr, cxlrd->cxlsd.nr_targets);
}
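
/*
 * Worked example of the translation above, using hypothetical map values
 * (not taken from any real CEDT): assume a single xormap of 0x10100 (bits
 * 8 and 16 set), so pos = __ffs(0x10100) = 8.
 *
 *   addr = 0x10000: hweight64(0x10000 & 0x10100) = 1 -> val = 1,
 *                   bit 8 is set     -> result 0x10100
 *   addr = 0x10100: hweight64(0x10100 & 0x10100) = 2 -> val = 0,
 *                   bit 8 is cleared -> back to 0x10000
 *
 * Running the function twice returns the original address, which is the
 * self-inverting property the comment relies on.
 */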

struct cxl_cxims_context {
	struct device *dev;
	struct cxl_root_decoder *cxlrd;
};

static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	struct acpi_cedt_cxims *cxims = (struct acpi_cedt_cxims *)header;
	struct cxl_cxims_context *ctx = arg;
	struct cxl_root_decoder *cxlrd = ctx->cxlrd;
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct device *dev = ctx->dev;
	struct cxl_cxims_data *cximsd;
	unsigned int hbig, nr_maps;
	int rc;

	rc = eig_to_granularity(cxims->hbig, &hbig);
	if (rc)
		return rc;

	/* Does this CXIMS entry apply to the given CXL Window? */
	if (hbig != cxld->interleave_granularity)
		return 0;

	/* IW 1 and 3 do not use xormaps and never reach this parsing */
	if (is_power_of_2(cxld->interleave_ways))
		/* 2, 4, 8, 16 way */
		nr_maps = ilog2(cxld->interleave_ways);
	else
		/* 6, 12 way */
		nr_maps = ilog2(cxld->interleave_ways / 3);

	if (cxims->nr_xormaps < nr_maps) {
		dev_dbg(dev, "CXIMS nr_xormaps[%d] expected[%d]\n",
			cxims->nr_xormaps, nr_maps);
		return -ENXIO;
	}

	cximsd = devm_kzalloc(dev, struct_size(cximsd, xormaps, nr_maps),
			      GFP_KERNEL);
	if (!cximsd)
		return -ENOMEM;
	cximsd->nr_maps = nr_maps;
	memcpy(cximsd->xormaps, cxims->xormap_list,
	       nr_maps * sizeof(*cximsd->xormaps));
	cxlrd->platform_data = cximsd;

	return 0;
}
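
/*
 * The nr_maps derivation above, tabulated for reference (these are just
 * the evaluations of the two expressions, not additional spec data):
 *
 *   ways:    2  4  8  16  |  6  12
 *   nr_maps: 1  2  3   4  |  1   2    (ilog2(ways) or ilog2(ways / 3))
 *
 * e.g. a 12-way window decodes as 12 = 3 * 2^2, so two xormaps cover the
 * power-of-2 component while modulo arithmetic handles the factor of 3.
 */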

static unsigned long cfmws_to_decoder_flags(int restrictions)
{
	unsigned long flags = CXL_DECODER_F_ENABLE;

	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_DEVMEM)
		flags |= CXL_DECODER_F_TYPE2;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM)
		flags |= CXL_DECODER_F_TYPE3;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
		flags |= CXL_DECODER_F_RAM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
		flags |= CXL_DECODER_F_PMEM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
		flags |= CXL_DECODER_F_LOCK;

	return flags;
}

static int cxl_acpi_cfmws_verify(struct device *dev,
				 struct acpi_cedt_cfmws *cfmws)
{
	int rc, expected_len;
	unsigned int ways;

	if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO &&
	    cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
		dev_err(dev, "CFMWS Unknown Interleave Arithmetic: %d\n",
			cfmws->interleave_arithmetic);
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
		dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
		dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
		return -EINVAL;
	}

	rc = eiw_to_ways(cfmws->interleave_ways, &ways);
	if (rc) {
		dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
			cfmws->interleave_ways);
		return -EINVAL;
	}

	expected_len = struct_size(cfmws, interleave_targets, ways);

	if (cfmws->header.length < expected_len) {
		dev_err(dev, "CFMWS length %d less than expected %d\n",
			cfmws->header.length, expected_len);
		return -EINVAL;
	}

	if (cfmws->header.length > expected_len)
		dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
			cfmws->header.length, expected_len);

	return 0;
}

/*
 * Note, @dev must be the first member, see 'struct cxl_chbs_context'
 * and mock_acpi_table_parse_cedt()
 */
struct cxl_cfmws_context {
	struct device *dev;
	struct cxl_port *root_port;
	struct resource *cxl_res;
	int id;
};

/**
 * cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
 * @handle: ACPI handle
 * @coord: performance access coordinates
 * @entries: number of QTG IDs to return
 * @qos_class: int array provided by caller to return QTG IDs
 *
 * Return: number of QTG IDs returned, or -errno for errors
 *
 * Issue the QTG _DSM with the accompanying bandwidth and latency data to
 * retrieve the QTG IDs that suit the performance point, ordered from most
 * to least suitable. Write back an array of QTG IDs and return the actual
 * number of QTG IDs written back.
 */
static int
cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
			  int entries, int *qos_class)
{
	union acpi_object *out_obj, *out_buf, *obj;
	union acpi_object in_array[4] = {
		[0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
		[1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
		[2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
		[3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
	};
	union acpi_object in_obj = {
		.package = {
			.type = ACPI_TYPE_PACKAGE,
			.count = 4,
			.elements = in_array,
		},
	};
	int count, pkg_entries, i;
	u16 max_qtg;
	int rc;

	if (!entries)
		return -EINVAL;

	out_obj = acpi_evaluate_dsm(handle, &acpi_cxl_qtg_id_guid, 1, 1, &in_obj);
	if (!out_obj)
		return -ENXIO;

	if (out_obj->type != ACPI_TYPE_PACKAGE) {
		rc = -ENXIO;
		goto out;
	}

	/* Check Max QTG ID */
	obj = &out_obj->package.elements[0];
	if (obj->type != ACPI_TYPE_INTEGER) {
		rc = -ENXIO;
		goto out;
	}

	max_qtg = obj->integer.value;

	/* It's legal to have 0 QTG entries */
	pkg_entries = out_obj->package.count;
	if (pkg_entries <= 1) {
		rc = 0;
		goto out;
	}

	/* Retrieve QTG IDs package */
	obj = &out_obj->package.elements[1];
	if (obj->type != ACPI_TYPE_PACKAGE) {
		rc = -ENXIO;
		goto out;
	}

	pkg_entries = obj->package.count;
	count = min(entries, pkg_entries);
	for (i = 0; i < count; i++) {
		u16 qtg_id;

		out_buf = &obj->package.elements[i];
		if (out_buf->type != ACPI_TYPE_INTEGER) {
			rc = -ENXIO;
			goto out;
		}

		qtg_id = out_buf->integer.value;
		if (qtg_id > max_qtg)
			pr_warn("QTG ID %u greater than MAX %u\n",
				qtg_id, max_qtg);

		qos_class[i] = qtg_id;
	}
	rc = count;

out:
	ACPI_FREE(out_obj);
	return rc;
}
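
/*
 * Shape of the _DSM exchange above, sketched with made-up numbers: the
 * input package is { read_latency, write_latency, read_bandwidth,
 * write_bandwidth } and the output package is { max_qtg, { id0, id1, ... } }.
 * With entries = 2 and an output of { 7, { 3, 5, 6 } }, qos_class becomes
 * { 3, 5 } and the function returns 2; an output of just { 7 } returns 0.
 */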

static int cxl_acpi_qos_class(struct cxl_root *cxl_root,
			      struct access_coordinate *coord, int entries,
			      int *qos_class)
{
	struct device *dev = cxl_root->port.uport_dev;
	acpi_handle handle;

	if (!dev_is_platform(dev))
		return -ENODEV;

	handle = ACPI_HANDLE(dev);
	if (!handle)
		return -ENODEV;

	return cxl_acpi_evaluate_qtg_dsm(handle, coord, entries, qos_class);
}

static const struct cxl_root_ops acpi_root_ops = {
	.qos_class = cxl_acpi_qos_class,
};

static void del_cxl_resource(struct resource *res)
{
	if (!res)
		return;
	kfree(res->name);
	kfree(res);
}

static struct resource *alloc_cxl_resource(resource_size_t base,
					   resource_size_t n, int id)
{
	struct resource *res __free(kfree) = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->start = base;
	res->end = base + n - 1;
	res->flags = IORESOURCE_MEM;
	res->name = kasprintf(GFP_KERNEL, "CXL Window %d", id);
	if (!res->name)
		return NULL;

	return no_free_ptr(res);
}

static int add_or_reset_cxl_resource(struct resource *parent, struct resource *res)
{
	int rc = insert_resource(parent, res);

	if (rc)
		del_cxl_resource(res);
	return rc;
}

static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd)
{
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct range *hpa = &cxld->hpa_range;
	resource_size_t size = range_len(hpa);
	resource_size_t start = hpa->start;
	resource_size_t cache_size;
	struct resource res;
	int nid, rc;

	res = DEFINE_RES_MEM(start, size);
	nid = phys_to_target_node(start);

	rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
	if (rc)
		return 0;

	/*
	 * The cache range is expected to be within the CFMWS. Only
	 * cache_size == cxl_size is currently supported, where the CXL
	 * size is half of the total CFMWS window size.
	 */
	size = size >> 1;
	if (cache_size && size != cache_size) {
		dev_warn(&cxld->dev,
			 "Extended Linear Cache size %pa != CXL size %pa. No Support!",
			 &cache_size, &size);
		return -ENXIO;
	}

	cxlrd->cache_size = cache_size;

	return 0;
}

static void cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd)
{
	int rc;

	rc = cxl_acpi_set_cache_size(cxlrd);
	if (rc) {
		/*
		 * Failing to retrieve the extended linear cache size does
		 * not prevent the region from functioning, it only causes
		 * 'cxl list' to show an incorrect region size.
		 */
		dev_warn(cxlrd->cxlsd.cxld.dev.parent,
			 "Extended linear cache retrieval failed rc:%d\n", rc);

		/* Ignoring return code */
		cxlrd->cache_size = 0;
	}
}
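
/*
 * Illustration of the sizing rule above with hypothetical numbers: for a
 * 512GB CFMWS window fronted by an extended linear cache, HMAT must report
 * a 256GB cache so that cache_size == size >> 1; the remaining 256GB is
 * the CXL-backed half of the window. Any other split is rejected with
 * -ENXIO by cxl_acpi_set_cache_size().
 */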

DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
	    if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
			     struct cxl_cfmws_context *ctx)
{
	struct cxl_port *root_port = ctx->root_port;
	struct cxl_cxims_context cxims_ctx;
	struct device *dev = ctx->dev;
	struct cxl_decoder *cxld;
	unsigned int ways, i, ig;
	int rc;

	rc = cxl_acpi_cfmws_verify(dev, cfmws);
	if (rc)
		return rc;

	rc = eiw_to_ways(cfmws->interleave_ways, &ways);
	if (rc)
		return rc;
	rc = eig_to_granularity(cfmws->granularity, &ig);
	if (rc)
		return rc;

	struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
		cfmws->base_hpa, cfmws->window_size, ctx->id++);
	if (!res)
		return -ENOMEM;

	/* add to the local resource tracking to establish a sort order */
	rc = add_or_reset_cxl_resource(ctx->cxl_res, no_free_ptr(res));
	if (rc)
		return rc;

	struct cxl_root_decoder *cxlrd __free(put_cxlrd) =
		cxl_root_decoder_alloc(root_port, ways);

	if (IS_ERR(cxlrd))
		return PTR_ERR(cxlrd);

	cxld = &cxlrd->cxlsd.cxld;
	cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->hpa_range = (struct range) {
		.start = cfmws->base_hpa,
		.end = cfmws->base_hpa + cfmws->window_size - 1,
	};
	cxld->interleave_ways = ways;
	for (i = 0; i < ways; i++)
		cxld->target_map[i] = cfmws->interleave_targets[i];
	/*
	 * Minimize the x1 granularity to advertise support for any
	 * valid region granularity
	 */
	if (ways == 1)
		ig = CXL_DECODER_MIN_GRANULARITY;
	cxld->interleave_granularity = ig;

	if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
		if (ways != 1 && ways != 3) {
			cxims_ctx = (struct cxl_cxims_context) {
				.dev = dev,
				.cxlrd = cxlrd,
			};
			rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS,
						   cxl_parse_cxims, &cxims_ctx);
			if (rc < 0)
				return rc;
			if (!cxlrd->platform_data) {
				dev_err(dev, "No CXIMS for HBIG %u\n", ig);
				return -EINVAL;
			}
		}
		cxlrd->ops.hpa_to_spa = cxl_apply_xor_maps;
		cxlrd->ops.spa_to_hpa = cxl_apply_xor_maps;
	}

	cxl_setup_extended_linear_cache(cxlrd);

	cxlrd->qos_class = cfmws->qtg_id;

	rc = cxl_decoder_add(cxld);
	if (rc)
		return rc;

	rc = cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
	if (rc)
		return rc;

	dev_dbg(root_port->dev.parent, "%s added to %s\n",
		dev_name(&cxld->dev), dev_name(&root_port->dev));

	return 0;
}
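
/*
 * Example walk of __cxl_parse_cfmws() with a hypothetical CEDT entry:
 * base_hpa = 0x1000000000, window_size = SZ_1G, 2-way interleave at 256
 * byte granularity, XOR arithmetic. The result is a root decoder with
 * hpa_range 0x1000000000-0x103fffffff, interleave_ways = 2, xormaps taken
 * from the matching CXIMS entry, and hpa_to_spa/spa_to_hpa wired to
 * cxl_apply_xor_maps().
 */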

static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
	struct cxl_cfmws_context *ctx = arg;
	struct device *dev = ctx->dev;
	int rc;

	rc = __cxl_parse_cfmws(cfmws, ctx);
	if (rc)
		dev_err(dev,
			"Failed to add decode range: [%#llx - %#llx] (%d)\n",
			cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1, rc);
	else
		dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
			phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1);

	/* never fail cxl_acpi load for a single window failure */
	return 0;
}

__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
					      struct device *dev)
{
	struct acpi_device *adev = to_acpi_device(dev);

	if (!acpi_pci_find_root(adev->handle))
		return NULL;

	if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
		return adev;
	return NULL;
}

/* Note, @dev is used by mock_acpi_table_parse_cedt() */
struct cxl_chbs_context {
	struct device *dev;
	unsigned long long uid;
	resource_size_t base;
	u32 cxl_version;
	int nr_versions;
	u32 saved_version;
};

static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
			     const unsigned long end)
{
	struct cxl_chbs_context *ctx = arg;
	struct acpi_cedt_chbs *chbs;

	chbs = (struct acpi_cedt_chbs *) header;

	if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
	    chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL11)
		return 0;

	if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20 &&
	    chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL20)
		return 0;

	if (!chbs->base)
		return 0;

	if (ctx->saved_version != chbs->cxl_version) {
		/*
		 * Track version changes in saved_version since
		 * ctx->cxl_version must not be overwritten before the
		 * next two checks.
		 */
		ctx->saved_version = chbs->cxl_version;
		ctx->nr_versions++;
	}

	if (ctx->base != CXL_RESOURCE_NONE)
		return 0;

	if (ctx->uid != chbs->uid)
		return 0;

	ctx->cxl_version = chbs->cxl_version;
	ctx->base = chbs->base;

	return 0;
}

static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
			struct cxl_chbs_context *ctx)
{
	unsigned long long uid;
	int rc;

	rc = acpi_evaluate_integer(hb->handle, METHOD_NAME__UID, NULL, &uid);
	if (rc != AE_OK) {
		dev_err(dev, "unable to retrieve _UID\n");
		return -ENOENT;
	}

	dev_dbg(dev, "UID found: %lld\n", uid);
	*ctx = (struct cxl_chbs_context) {
		.dev = dev,
		.uid = uid,
		.base = CXL_RESOURCE_NONE,
		.cxl_version = UINT_MAX,
		.saved_version = UINT_MAX,
	};

	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbs_iter, ctx);

	if (ctx->nr_versions > 1) {
		/*
		 * Disclaim eRCD support given some component registers may
		 * only be found via CHBCR
		 */
		dev_info(dev, "Unsupported platform config, mixed Virtual Host and Restricted CXL Host hierarchy.");
	}

	return 0;
}

static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
{
	struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
	u32 uid;

	if (kstrtou32(acpi_device_uid(hb), 0, &uid))
		return -EINVAL;

	return acpi_get_genport_coordinates(uid, dport->coord);
}

static int add_host_bridge_dport(struct device *match, void *arg)
{
	int ret;
	acpi_status rc;
	struct device *bridge;
	struct cxl_dport *dport;
	struct cxl_chbs_context ctx;
	struct acpi_pci_root *pci_root;
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *hb = to_cxl_host_bridge(host, match);

	if (!hb)
		return 0;

	rc = cxl_get_chbs(match, hb, &ctx);
	if (rc)
		return rc;

	if (ctx.cxl_version == UINT_MAX) {
		dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n",
			 ctx.uid);
		return 0;
	}

	if (ctx.base == CXL_RESOURCE_NONE) {
		dev_warn(match, "CHBS invalid for Host Bridge (UID %lld)\n",
			 ctx.uid);
		return 0;
	}

	pci_root = acpi_pci_find_root(hb->handle);
	bridge = pci_root->bus->bridge;

	/*
	 * In RCH mode, bind the component regs base to the dport. In
	 * VH mode it will be bound to the CXL host bridge's port
	 * object later in add_host_bridge_uport().
	 */
	if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
		dev_dbg(match, "RCRB found for UID %lld: %pa\n", ctx.uid,
			&ctx.base);
		dport = devm_cxl_add_rch_dport(root_port, bridge, ctx.uid,
					       ctx.base);
	} else {
		dport = devm_cxl_add_dport(root_port, bridge, ctx.uid,
					   CXL_RESOURCE_NONE);
	}

	if (IS_ERR(dport))
		return PTR_ERR(dport);

	ret = get_genport_coordinates(match, dport);
	if (ret)
		dev_dbg(match, "Failed to get generic port perf coordinates.\n");

	return 0;
}

/*
 * A host bridge is a dport to a CFMWS decode and a uport to the
 * dports (PCIe Root Ports) within the host bridge.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *hb = to_cxl_host_bridge(host, match);
	struct acpi_pci_root *pci_root;
	struct cxl_dport *dport;
	struct cxl_port *port;
	struct device *bridge;
	struct cxl_chbs_context ctx;
	resource_size_t component_reg_phys;
	int rc;

	if (!hb)
		return 0;

	pci_root = acpi_pci_find_root(hb->handle);
	bridge = pci_root->bus->bridge;
	dport = cxl_find_dport_by_dev(root_port, bridge);
	if (!dport) {
		dev_dbg(host, "host bridge expected and not found\n");
		return 0;
	}

	if (dport->rch) {
		dev_info(bridge, "host supports CXL (restricted)\n");
		return 0;
	}

	rc = cxl_get_chbs(match, hb, &ctx);
	if (rc)
		return rc;

	if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
		dev_warn(bridge,
			 "CXL CHBS version mismatch, skip port registration\n");
		return 0;
	}

	component_reg_phys = ctx.base;
	if (component_reg_phys != CXL_RESOURCE_NONE)
		dev_dbg(match, "CHBCR found for UID %lld: %pa\n",
			ctx.uid, &component_reg_phys);

	rc = devm_cxl_register_pci_bus(host, bridge, pci_root->bus);
	if (rc)
		return rc;

	port = devm_cxl_add_port(host, bridge, component_reg_phys, dport);
	if (IS_ERR(port))
		return PTR_ERR(port);

	dev_info(bridge, "host supports CXL\n");

	return 0;
}
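
/*
 * Taken together, the two bus scans build this topology for each CXL
 * host bridge (illustrative sketch, not tied to any specific platform):
 *
 *   root cxl_port (ACPI0017 device)
 *     `-- dport: host bridge (ACPI0016), added by add_host_bridge_dport()
 *           `-- cxl_port at the CHBCR, added by add_host_bridge_uport()
 *                 `-- dports: CXL-capable PCIe Root Ports
 */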

static int add_root_nvdimm_bridge(struct device *match, void *data)
{
	struct cxl_decoder *cxld;
	struct cxl_port *root_port = data;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *host = root_port->dev.parent;

	if (!is_root_decoder(match))
		return 0;

	cxld = to_cxl_decoder(match);
	if (!(cxld->flags & CXL_DECODER_F_PMEM))
		return 0;

	cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
	if (IS_ERR(cxl_nvb)) {
		dev_dbg(host, "failed to register pmem\n");
		return PTR_ERR(cxl_nvb);
	}
	dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
		dev_name(&cxl_nvb->dev));
	return 1;
}

static struct lock_class_key cxl_root_key;

static void cxl_acpi_lock_reset_class(void *dev)
{
	device_lock_reset_class(dev);
}

static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
{
	priv->desc = (unsigned long) pub;
}

static struct resource *cxl_get_public_resource(struct resource *priv)
{
	return (struct resource *) priv->desc;
}

static void remove_cxl_resources(void *data)
{
	struct resource *res, *next, *cxl = data;

	for (res = cxl->child; res; res = next) {
		struct resource *victim = cxl_get_public_resource(res);

		next = res->sibling;
		remove_resource(res);

		if (victim) {
			remove_resource(victim);
			kfree(victim);
		}

		del_cxl_resource(res);
	}
}

/**
 * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
 * @cxl_res: A standalone resource tree where each CXL window is a sibling
 *
 * Walk each CXL window in @cxl_res and add it to iomem_resource potentially
 * expanding its boundaries to ensure that any conflicting resources become
 * children. If a window is expanded it may then conflict with another window
 * entry and require that window to be truncated or trimmed. Consider this
 * situation::
 *
 *	|-- "CXL Window 0" --||----- "CXL Window 1" -----|
 *	|--------------- "System RAM" -------------|
 *
 * ...where platform firmware has established a System RAM resource across 2
 * windows, but has left some portion of window 1 for dynamic CXL region
 * provisioning. In this case "Window 0" will span the entirety of the
 * "System RAM" span, and "CXL Window 1" is truncated to the remaining tail
 * past the end of that "System RAM" resource.
 */
static int add_cxl_resources(struct resource *cxl_res)
{
	struct resource *res, *new, *next;

	for (res = cxl_res->child; res; res = next) {
		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->name = res->name;
		new->start = res->start;
		new->end = res->end;
		new->flags = IORESOURCE_MEM;
		new->desc = IORES_DESC_CXL;

		/*
		 * Record the public resource in the private cxl_res tree for
		 * later removal.
		 */
		cxl_set_public_resource(res, new);

		insert_resource_expand_to_fit(&iomem_resource, new);

		next = res->sibling;
		while (next && resource_overlaps(new, next)) {
			if (resource_contains(new, next)) {
				struct resource *_next = next->sibling;

				remove_resource(next);
				del_cxl_resource(next);
				next = _next;
			} else
				next->start = new->end + 1;
		}
	}
	return 0;
}
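
/*
 * The same trim in numbers, using hypothetical addresses: with "CXL
 * Window 0" at 0x0-0xfffffff, "CXL Window 1" at 0x10000000-0x3fffffff
 * and "System RAM" at 0x0-0x1fffffff, inserting window 0 expands it to
 * cover all of System RAM (0x0-0x1fffffff), so the overlap loop above
 * bumps window 1's start to 0x20000000.
 */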

static int pair_cxl_resource(struct device *dev, void *data)
{
	struct resource *cxl_res = data;
	struct resource *p;

	if (!is_root_decoder(dev))
		return 0;

	for (p = cxl_res->child; p; p = p->sibling) {
		struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
		struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
		struct resource res = {
			.start = cxld->hpa_range.start,
			.end = cxld->hpa_range.end,
			.flags = IORESOURCE_MEM,
		};

		if (resource_contains(p, &res)) {
			cxlrd->res = cxl_get_public_resource(p);
			break;
		}
	}

	return 0;
}

static int cxl_acpi_probe(struct platform_device *pdev)
{
	int rc;
	struct resource *cxl_res;
	struct cxl_root *cxl_root;
	struct cxl_port *root_port;
	struct device *host = &pdev->dev;
	struct acpi_device *adev = ACPI_COMPANION(host);
	struct cxl_cfmws_context ctx;

	device_lock_set_class(&pdev->dev, &cxl_root_key);
	rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class,
				      &pdev->dev);
	if (rc)
		return rc;

	cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
	if (!cxl_res)
		return -ENOMEM;
	cxl_res->name = "CXL mem";
	cxl_res->start = 0;
	cxl_res->end = -1;
	cxl_res->flags = IORESOURCE_MEM;

	cxl_root = devm_cxl_add_root(host, &acpi_root_ops);
	if (IS_ERR(cxl_root))
		return PTR_ERR(cxl_root);
	root_port = &cxl_root->port;

	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_dport);
	if (rc < 0)
		return rc;

	rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
	if (rc)
		return rc;

	ctx = (struct cxl_cfmws_context) {
		.dev = host,
		.root_port = root_port,
		.cxl_res = cxl_res,
	};
	rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
	if (rc < 0)
		return -ENXIO;

	rc = add_cxl_resources(cxl_res);
	if (rc)
		return rc;

	/*
	 * Populate the root decoders with their related iomem resource,
	 * if present
	 */
	device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);

	/*
	 * Root level scanned with host-bridge as dports, now scan host-bridges
	 * for their role as CXL uports to their CXL-capable PCIe Root Ports.
	 */
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_uport);
	if (rc < 0)
		return rc;

	if (IS_ENABLED(CONFIG_CXL_PMEM))
		rc = device_for_each_child(&root_port->dev, root_port,
					   add_root_nvdimm_bridge);
	if (rc < 0)
		return rc;

	/* In case PCI is scanned before ACPI re-trigger memdev attach */
	cxl_bus_rescan();
	return 0;
}

static const struct acpi_device_id cxl_acpi_ids[] = {
	{ "ACPI0017" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

static const struct platform_device_id cxl_test_ids[] = {
	{ "cxl_acpi" },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_test_ids);

static struct platform_driver cxl_acpi_driver = {
	.probe = cxl_acpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = cxl_acpi_ids,
	},
	.id_table = cxl_test_ids,
};

static int __init cxl_acpi_init(void)
{
	return platform_driver_register(&cxl_acpi_driver);
}

static void __exit cxl_acpi_exit(void)
{
	platform_driver_unregister(&cxl_acpi_driver);
	cxl_bus_drain();
}

/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);

/*
 * Arrange for host-bridge ports to be active synchronous with
 * cxl_acpi_probe() exit.
 */
MODULE_SOFTDEP("pre: cxl_port");

module_exit(cxl_acpi_exit);
MODULE_DESCRIPTION("CXL ACPI: Platform Support");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("CXL");
MODULE_IMPORT_NS("ACPI");