// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.

#include <linux/platform_device.h>
#include <linux/memory_hotplug.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <cxlmem.h>

#include "../watermark.h"
#include "mock.h"

/* Module parameter-style switch: selects modulo vs XOR CFMWS entries */
static int interleave_arithmetic;

/* Arbitrary QoS throttling group id stamped into every mock CFMWS */
#define FAKE_QTG_ID 42

/*
 * Mock topology dimensions: two multi-root-port host bridges, one
 * single-root-port host bridge, and one restricted CXL host (RCH).
 */
#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_SINGLE_HOST 1
#define NR_CXL_RCH 1
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
#define NR_CXL_PORT_DECODERS 8
#define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH)

static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
#define NR_MEM_MULTI \
	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];

/* Single-root-port topology: one bridge -> one root port -> one switch */
static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];

/* Non-static: shared with the rest of the cxl_test harness */
struct platform_device *cxl_mem[NR_MEM_MULTI];
struct platform_device *cxl_mem_single[NR_MEM_SINGLE];

static struct platform_device *cxl_rch[NR_CXL_RCH];
static struct platform_device *cxl_rcd[NR_CXL_RCH];

/* Is @dev one of the multi-root-port mock host bridges? */
static inline bool is_multi_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (&cxl_host_bridge[i]->dev == dev)
			return true;
	return false;
}

/* Is @dev the single-root-port mock host bridge? */
static inline bool is_single_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
		if (&cxl_hb_single[i]->dev == dev)
			return true;
	return false;
}

static struct acpi_device acpi0017_mock;
/*
 * Mock ACPI companions for the host bridges. Each entry's handle points
 * at itself so find_host_bridge() can match by handle, and the unique_id
 * mirrors the array index (see host_bridge_index()).
 */
static struct acpi_device host_bridge[NR_BRIDGES] = {
	[0] = {
		.handle = &host_bridge[0],
		.pnp.unique_id = "0",
	},
	[1] = {
		.handle = &host_bridge[1],
		.pnp.unique_id = "1",
	},
	[2] = {
		.handle = &host_bridge[2],
		.pnp.unique_id = "2",
	},
	[3] = {
		.handle = &host_bridge[3],
		.pnp.unique_id = "3",
	},
};

/*
 * True for any endpoint-ish platform device created by this module
 * (cxl_mem, cxl_mem_single, cxl_rcd) and for the mock cxl_acpi device.
 */
static bool is_mock_dev(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
		if (dev == &cxl_mem[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
		if (dev == &cxl_mem_single[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++)
		if (dev == &cxl_rcd[i]->dev)
			return true;
	if (dev == &cxl_acpi->dev)
		return true;
	return false;
}

/* True for the mock ACPI0017 device or any mock host-bridge companion */
static bool is_mock_adev(struct acpi_device *adev)
{
	int i;

	if (adev == &acpi0017_mock)
		return true;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (adev == &host_bridge[i])
			return true;

	return false;
}

/*
 * Hand-built CEDT image: one CHBS per mock bridge plus a set of CXL
 * fixed memory window structures (CFMWS). Entries 0-5 use modulo
 * interleave arithmetic, entries 6-8 use XOR arithmetic backed by
 * the cxims0 subtable. __packed so the in-memory layout matches what
 * the real ACPI table parser expects.
 */
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_BRIDGES];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws4;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws5;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws6;
	struct {
		struct
 acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws7;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[3];
	} cfmws8;
	struct {
		struct acpi_cedt_cxims cxims;
		u64 xormap_list[2];
	} cxims0;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[2] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 2,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	/* chbs[3] models the restricted CXL host (CXL 1.1 era) bridge */
	.chbs[3] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 3,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL11,
	},
	/* 1-way volatile window on host-bridge0 */
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	/* 2-way volatile window across host-bridge0 and host-bridge1 */
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	/* 1-way pmem window on host-bridge0 */
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	/* 2-way pmem window across host-bridge0 and host-bridge1 */
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	/* 1-way pmem window on the single-port host bridge (uid 2) */
	.cfmws4 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws4),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 2 },
	},
	/* 1-way volatile window on the RCH bridge (uid 3) */
	.cfmws5 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws5),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M,
		},
		.target = { 3 },
	},
	/* .cfmws6,7,8 use ACPI_CEDT_CFMWS_ARITHMETIC_XOR */
	.cfmws6 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws6),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, },
	},
	.cfmws7 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws7),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 1,
			.granularity = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	/* 3-way XOR interleave (encoded ways = 8) across all three bridges */
	.cfmws8 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws8),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 8,
			.granularity = 1,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_512M * 6UL,
		},
		.target = { 0, 1, 2, },
	},
	/* XOR interleave math structure consumed by cfmws6-8 */
	.cxims0 = {
		.cxims = {
			.header = {
				.type = ACPI_CEDT_TYPE_CXIMS,
				.length = sizeof(mock_cedt.cxims0),
			},
			.hbig = 0,
			.nr_xormaps = 2,
		},
		.xormap_list = { 0x404100, 0x808200, },
	},
};

/* Pointers into mock_cedt, consumed over [cfmws_start, cfmws_end] */
struct acpi_cedt_cfmws *mock_cfmws[] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
	[4] = &mock_cedt.cfmws4.cfmws,
	[5] = &mock_cedt.cfmws5.cfmws,
	/* Modulo Math above, XOR Math below */
	[6] = &mock_cedt.cfmws6.cfmws,
	[7] = &mock_cedt.cfmws7.cfmws,
	[8] = &mock_cedt.cfmws8.cfmws,
};

/* Inclusive index range of mock_cfmws[] entries active for this run */
static int cfmws_start;
static int cfmws_end;
#define CFMWS_MOD_ARRAY_START 0
#define CFMWS_MOD_ARRAY_END 5
#define CFMWS_XOR_ARRAY_START 6
#define CFMWS_XOR_ARRAY_END 8

struct acpi_cedt_cxims *mock_cxims[1] = {
	[0] = &mock_cedt.cxims0.cxims,
};

/* Tracks one fake physical address range carved from cxl_mock_pool */
struct cxl_mock_res {
	struct list_head list;
	struct range range;
};

static LIST_HEAD(mock_res);
static DEFINE_MUTEX(mock_res_lock);
static struct gen_pool *cxl_mock_pool;

/* Return every allocation to the pool and free the bookkeeping entries */
static void depopulate_all_mock_resources(void)
{
	struct cxl_mock_res *res, *_res;

	mutex_lock(&mock_res_lock);
	list_for_each_entry_safe(res, _res, &mock_res, list) {
		gen_pool_free(cxl_mock_pool,
 res->range.start,
			      range_len(&res->range));
		list_del(&res->list);
		kfree(res);
	}
	mutex_unlock(&mock_res_lock);
}

/*
 * Carve @size bytes at @align alignment out of the mock address pool and
 * record the allocation on @mock_res. Returns NULL when the pool is
 * exhausted.
 *
 * NOTE(review): @res is dereferenced without a NULL check after kzalloc(),
 * and is leaked when gen_pool_alloc_algo() fails. Tolerable for test-only
 * code, but worth confirming/fixing upstream.
 */
static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align)
{
	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
	struct genpool_data_align data = {
		.align = align,
	};
	unsigned long phys;

	INIT_LIST_HEAD(&res->list);
	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
				   gen_pool_first_fit_align, &data);
	if (!phys)
		return NULL;

	res->range = (struct range) {
		.start = phys,
		.end = phys + size - 1,
	};
	mutex_lock(&mock_res_lock);
	list_add(&res->list, &mock_res);
	mutex_unlock(&mock_res_lock);

	return res;
}

/*
 * Assign fake base addresses to every CHBS register block and to each
 * active CFMWS window. Returns 0 on success or -ENOMEM when the mock
 * pool runs dry.
 */
static int populate_cedt(void)
{
	struct cxl_mock_res *res;
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
		resource_size_t size;

		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
		else
			size = ACPI_CEDT_CHBS_LENGTH_CXL11;

		/* naturally align the register block to its own size */
		res = alloc_mock_res(size, size);
		if (!res)
			return -ENOMEM;
		chbs->base = res->range.start;
		chbs->length = size;
	}

	for (i = cfmws_start; i <= cfmws_end; i++) {
		struct acpi_cedt_cfmws *window = mock_cfmws[i];

		res = alloc_mock_res(window->window_size, SZ_256M);
		if (!res)
			return -ENOMEM;
		window->base_hpa = res->range.start;
	}

	return 0;
}

static bool is_mock_port(struct device *dev);

/*
 * WARNING, this hack assumes the format of 'struct cxl_cfmws_context'
 * and 'struct cxl_chbs_context' share the property that the first
 * struct member is a cxl_test device being probed by the cxl_acpi
 * driver.
 */
struct cxl_cedt_context {
	struct device *dev;
};

/*
 * Interposed CEDT walker: for mock devices, replay subtables straight out
 * of mock_cedt; for everything else defer to the real ACPI parser.
 */
static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
				      acpi_tbl_entry_handler_arg handler_arg,
				      void *arg)
{
	struct cxl_cedt_context *ctx = arg;
	struct device *dev = ctx->dev;
	union acpi_subtable_headers *h;
	unsigned long end;
	int i;

	if (!is_mock_port(dev) && !is_mock_dev(dev))
		return acpi_table_parse_cedt(id, handler_arg, arg);

	if (id == ACPI_CEDT_TYPE_CHBS)
		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
			end = (unsigned long)&mock_cedt.chbs[i + 1];
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CFMWS)
		for (i = cfmws_start; i <= cfmws_end; i++) {
			h = (union acpi_subtable_headers *) mock_cfmws[i];
			end = (unsigned long) h + mock_cfmws[i]->header.length;
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CXIMS)
		for (i = 0; i < ARRAY_SIZE(mock_cxims); i++) {
			h = (union acpi_subtable_headers *)mock_cxims[i];
			end = (unsigned long)h + mock_cxims[i]->header.length;
			handler_arg(h, arg, end);
		}

	return 0;
}

/* True for any mock host-bridge platform device (multi, single, or RCH) */
static bool is_mock_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (dev == &cxl_host_bridge[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
		if (dev == &cxl_hb_single[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++)
		if (dev == &cxl_rch[i]->dev)
			return true;

	return false;
}

/* True for any device that plays a CXL port role in the mock topology */
static bool is_mock_port(struct device *dev)
{
	int i;

	if (is_mock_bridge(dev))
		return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
		if (dev == &cxl_root_port[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
		if (dev == &cxl_switch_uport[i]->dev)
			return true;

	for (i = 0; i <
 ARRAY_SIZE(cxl_switch_dport); i++)
		if (dev == &cxl_switch_dport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
		if (dev == &cxl_root_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
		if (dev == &cxl_swu_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
		if (dev == &cxl_swd_single[i]->dev)
			return true;

	/* a memdev counts as a port when its parent is one of ours */
	if (is_cxl_memdev(dev))
		return is_mock_dev(dev->parent);

	return false;
}

/* Index of @adev within host_bridge[]; doubles as its _UID value */
static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}

/* Look up the mock bridge companion by ACPI handle; NULL if not ours */
static struct acpi_device *find_host_bridge(acpi_handle handle)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (handle == host_bridge[i].handle)
			return &host_bridge[i];
	return NULL;
}

/*
 * Interpose _UID evaluation for mock bridges; all other handles and
 * method names fall through to the real ACPI interpreter.
 */
static acpi_status
mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
			   struct acpi_object_list *arguments,
			   unsigned long long *data)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
		return acpi_evaluate_integer(handle, pathname, arguments, data);

	*data = host_bridge_index(adev);
	return AE_OK;
}

/* One fake PCI bus/root pair per mock bridge, indexed like host_bridge[] */
static struct pci_bus mock_pci_bus[NR_BRIDGES];
static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
	[2] = {
		.bus = &mock_pci_bus[2],
	},
	[3] = {
		.bus = &mock_pci_bus[3],
	},

};

static bool is_mock_bus(struct pci_bus *bus)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
		if (bus == &mock_pci_bus[i])
			return true;
	return false;
}

/* Map a mock bridge handle to its fake PCI root; pass through otherwise */
static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev)
		return acpi_pci_find_root(handle);
	return &mock_pci_root[host_bridge_index(adev)];
}

/*
 * Fabricate an HDM context for @port. No MMIO is mapped; the wide-open
 * interleave masks advertise support for every interleave configuration.
 */
static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
					  struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
	struct device *dev = &port->dev;

	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	cxlhdm->interleave_mask = ~0U;
	cxlhdm->iw_cap_mask = ~0UL;
	dev_set_drvdata(dev, cxlhdm);
	return cxlhdm;
}

/* State for the map_targets() child-walk below */
struct target_map_ctx {
	u32 *target_map;
	int index;
	int target_count;
};

/*
 * device_for_each_child() callback: record each child platform device's
 * id as the next decoder target.
 *
 * NOTE(review): the bounds check runs after the store, so a single
 * out-of-bounds write precedes detection — confirm target_map has slack
 * or move the check before the assignment.
 */
static int map_targets(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct target_map_ctx *ctx = data;

	ctx->target_map[ctx->index++] = pdev->id;

	if (ctx->index > ctx->target_count) {
		dev_WARN_ONCE(dev, 1, "too many targets found?\n");
		return -ENXIO;
	}

	return 0;
}

/* Enforce in-order decoder commit, mirroring real HDM decoder semantics */
static int mock_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
	if (cxl_num_decoders_committed(port) != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id,
			cxl_num_decoders_committed(port));
		return -EBUSY;
	}

	port->commit_end++;
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

/* Reverse of mock_decoder_commit(); resets must also happen in order */
static void mock_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return;

	dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
	if (port->commit_end == id)
		cxl_port_commit_reap(cxld);
	else
 dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
	cxld->flags &= ~CXL_DECODER_F_ENABLE;
}

/* Disabled decoder spanning the whole address space, 1-way interleave */
static void default_mock_decoder(struct cxl_decoder *cxld)
{
	cxld->hpa_range = (struct range){
		.start = 0,
		.end = -1,
	};

	cxld->interleave_ways = 1;
	cxld->interleave_granularity = 256;
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;
}

/* device_find_child() match callback: the switch decoder with id 0 */
static int first_decoder(struct device *dev, const void *data)
{
	struct cxl_decoder *cxld;

	if (!is_switch_decoder(dev))
		return 0;
	cxld = to_cxl_decoder(dev);
	if (cxld->id == 0)
		return 1;
	return 0;
}

/*
 * Pre-enable a fake "BIOS established" RAM region on select endpoint
 * decoders below host-bridge0, then program the matching switch and
 * root-port decoders on the way up. All other decoders come up default
 * disabled.
 */
static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
{
	struct acpi_cedt_cfmws *window = mock_cfmws[0];
	struct platform_device *pdev = NULL;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_port *port, *iter;
	const int size = SZ_512M;
	struct cxl_memdev *cxlmd;
	struct cxl_dport *dport;
	struct device *dev;
	bool hb0 = false;
	u64 base;
	int i;

	if (is_endpoint_decoder(&cxld->dev)) {
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxlmd = cxled_to_memdev(cxled);
		WARN_ON(!dev_is_platform(cxlmd->dev.parent));
		pdev = to_platform_device(cxlmd->dev.parent);

		/* check whether the endpoint is attached to host-bridge0 */
		port = cxled_to_port(cxled);
		do {
			if (port->uport_dev == &cxl_host_bridge[0]->dev) {
				hb0 = true;
				break;
			}
			if (is_cxl_port(port->dev.parent))
				port = to_cxl_port(port->dev.parent);
			else
				port = NULL;
		} while (port);
		port = cxled_to_port(cxled);
	}

	/*
	 * The first decoder on the first 2 devices on the first switch
	 * attached to host-bridge0 mock a fake / static RAM region. All
	 * other decoders are default disabled. Given the round robin
	 * assignment those devices are named cxl_mem.0, and cxl_mem.4.
	 *
	 * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
	 */
	if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
		default_mock_decoder(cxld);
		return;
	}

	base = window->base_hpa;
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	cxld->interleave_ways = 2;
	eig_to_granularity(window->granularity, &cxld->interleave_granularity);
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->flags = CXL_DECODER_F_ENABLE;
	cxled->state = CXL_DECODER_STATE_AUTO;
	port->commit_end = cxld->id;
	devm_cxl_dpa_reserve(cxled, 0, size / cxld->interleave_ways, 0);
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;

	/*
	 * Now that endpoint decoder is set up, walk up the hierarchy
	 * and setup the switch and root port decoders targeting @cxlmd.
	 */
	iter = port;
	for (i = 0; i < 2; i++) {
		dport = iter->parent_dport;
		iter = dport->port;
		dev = device_find_child(&iter->dev, NULL, first_decoder);
		/*
		 * Ancestor ports are guaranteed to be enumerated before
		 * @port, and all ports have at least one decoder.
		 */
		if (WARN_ON(!dev))
			continue;
		cxlsd = to_cxl_switch_decoder(dev);
		if (i == 0) {
			/* put cxl_mem.4 second in the decode order */
			if (pdev->id == 4)
				cxlsd->target[1] = dport;
			else
				cxlsd->target[0] = dport;
		} else
			cxlsd->target[0] = dport;
		cxld = &cxlsd->cxld;
		cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		cxld->flags = CXL_DECODER_F_ENABLE;
		iter->commit_end = 0;
		/*
		 * Switch targets 2 endpoints, while host bridge targets
		 * one root port
		 */
		if (i == 0)
			cxld->interleave_ways = 2;
		else
			cxld->interleave_ways = 1;
		cxld->interleave_granularity = 4096;
		cxld->hpa_range = (struct range) {
			.start = base,
			.end = base + size - 1,
		};
		put_device(dev);
	}
}

/*
 * Allocate and register NR_CXL_PORT_DECODERS decoders for @cxlhdm's port:
 * switch decoders for ports with downstream targets, endpoint decoders
 * otherwise.
 */
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				       struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		struct target_map_ctx ctx = {
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		if (target_count) {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		} else {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);

			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		}

		ctx.target_map = cxld->target_map;

		mock_init_hdm_decoder(cxld);

		if (target_count) {
			rc = device_for_each_child(port->uport_dev, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}

/* Common helper: fake the HDM capability then enumerate mock decoders */
static int __mock_cxl_decoders_setup(struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm;

	cxlhdm = mock_cxl_setup_hdm(port, NULL);
	if (IS_ERR(cxlhdm)) {
		if (PTR_ERR(cxlhdm) != -ENODEV)
			dev_err(&port->dev, "Failed to map HDM decoder capability\n");
		return PTR_ERR(cxlhdm);
	}

	return mock_cxl_enumerate_decoders(cxlhdm, NULL);
}

/* Decoder setup hook restricted to switch ports (not root, not endpoint) */
static int mock_cxl_switch_port_decoders_setup(struct cxl_port *port)
{
	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	return __mock_cxl_decoders_setup(port);
}

/* Decoder setup hook restricted to endpoint ports */
static int mock_cxl_endpoint_decoders_setup(struct cxl_port *port)
{
	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	return __mock_cxl_decoders_setup(port);
}

/*
 * Select the platform-device array backing @port's dports based on its
 * depth (1 = root ports below a bridge, 2 = switch dports) and on which
 * bridge flavor (multi vs single) the hierarchy hangs from.
 */
static int get_port_array(struct cxl_port *port,
			  struct platform_device ***port_array,
			  int *port_array_size)
{
	struct platform_device **array;
	int array_size;

	if (port->depth == 1) {
		if (is_multi_bridge(port->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_root_port);
			array = cxl_root_port;
		} else if (is_single_bridge(port->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_root_single);
			array = cxl_root_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport_dev));
			return -ENXIO;
		}
	} else if (port->depth == 2) {
		struct cxl_port *parent = to_cxl_port(port->dev.parent);

		if (is_multi_bridge(parent->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_switch_dport);
			array = cxl_switch_dport;
		} else if (is_single_bridge(parent->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_swd_single);
			array = cxl_swd_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport_dev));
			return -ENXIO;
		}
	} else {
		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
			      port->depth);
		return -ENXIO;
	}

	*port_array = array;
	*port_array_size = array_size;

	return 0;
}

/* Register a dport for every mock child device parented to @port */
static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct platform_device **array;
	int i, array_size;
	int rc;

	rc = get_port_array(port, &array, &array_size);
	if (rc)
		return rc;

	for (i = 0; i < array_size; i++) {
		struct platform_device *pdev = array[i];
		struct cxl_dport *dport;

		/* skip siblings that belong to the other bridge/switch */
		if (pdev->dev.parent != port->uport_dev) {
			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
				dev_name(port->uport_dev),
				dev_name(pdev->dev.parent));
			continue;
		}

		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
					   CXL_RESOURCE_NONE);

		if (IS_ERR(dport))
			return PTR_ERR(dport);
	}

	return 0;
}

/* Register only the dport matching @dport_dev; ERR_PTR(-ENODEV) if absent */
static struct cxl_dport *mock_cxl_add_dport_by_dev(struct cxl_port *port,
						   struct device *dport_dev)
{
	struct platform_device **array;
	int rc, i, array_size;

	rc = get_port_array(port, &array, &array_size);
	if (rc)
		return ERR_PTR(rc);

	for (i = 0; i < array_size; i++) {
		struct platform_device *pdev = array[i];

		if (pdev->dev.parent != port->uport_dev) {
			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
				dev_name(port->uport_dev),
				dev_name(pdev->dev.parent));
			continue;
		}

		if (&pdev->dev != dport_dev)
			continue;

		return devm_cxl_add_dport(port, &pdev->dev, pdev->id,
					  CXL_RESOURCE_NONE);
	}

	return ERR_PTR(-ENODEV);
}

/*
 * Faking the cxl_dpa_perf for the memdev when appropriate.
 */
static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
			   struct cxl_dpa_perf *dpa_perf)
{
	dpa_perf->qos_class = FAKE_QTG_ID;
	dpa_perf->dpa_range = *range;
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		dpa_perf->coord[i].read_latency = 500;
		dpa_perf->coord[i].write_latency = 500;
		dpa_perf->coord[i].read_bandwidth = 1000;
		dpa_perf->coord[i].write_bandwidth = 1000;
	}
}

/* Populate fake CDAT-derived performance data for each DPA partition */
static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
{
	struct cxl_root *cxl_root __free(put_cxl_root) =
		find_cxl_root(port);
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];

	if (!cxl_root)
		return;

	for (int i = 0; i < cxlds->nr_partitions; i++) {
		struct resource *res = &cxlds->part[i].res;
		struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
		struct range range = {
			.start = res->start,
			.end = res->end,
		};

		dpa_perf_setup(port, &range, perf);
	}

	cxl_memdev_update_perf(cxlmd);

	/*
	 * This function is here to only test the topology iterator. It serves
	 * no other purpose.
	 */
	cxl_endpoint_get_perf_coordinates(port, ep_c);
}

/* Ops table installed into the cxl_test harness to interpose core calls */
static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_switch_port_decoders_setup = mock_cxl_switch_port_decoders_setup,
	.devm_cxl_endpoint_decoders_setup = mock_cxl_endpoint_decoders_setup,
	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
	.cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
	.devm_cxl_add_dport_by_dev = mock_cxl_add_dport_by_dev,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};

/* Wire @adev up as the ACPI firmware-node companion of @dev */
static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}

#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif

/*
 * Create the restricted-CXL-host (RCH) bridge platform device(s).
 *
 * NOTE(review): if the very first platform_device_alloc() fails, the
 * "goto err_bridge" path returns @rc uninitialized — consider setting
 * rc = -ENOMEM before that goto (same pattern in cxl_single_topo_init()).
 */
static __init int cxl_rch_topo_init(void)
{
	int rc, i;

	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++) {
		int idx = NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + i;
		struct acpi_device *adev = &host_bridge[idx];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge", idx);
		if (!pdev)
			goto err_bridge;

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_rch[i] = pdev;
		mock_pci_bus[idx].bridge = &pdev->dev;
		/* self-link standing in for the real firmware_node link */
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "firmware_node");
		if (rc)
			goto err_bridge;
	}

	return 0;

err_bridge:
	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_rch[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
		platform_device_unregister(cxl_rch[i]);
	}

	return rc;
}

/* Tear down the RCH bridge(s) created by cxl_rch_topo_init() */
static void cxl_rch_topo_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_rch[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
		platform_device_unregister(cxl_rch[i]);
	}
}

/*
 * Build the single-root-port topology:
 * host bridge -> root port -> switch uport -> NR_MEM_SINGLE switch dports.
 * Unwinds via cascading error labels in reverse creation order.
 */
static __init int cxl_single_topo_init(void)
{
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
		struct acpi_device *adev =
			&host_bridge[NR_CXL_HOST_BRIDGES + i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge",
					     NR_CXL_HOST_BRIDGES + i);
		if (!pdev)
			goto err_bridge;

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_hb_single[i] = pdev;
		mock_pci_bus[i + NR_CXL_HOST_BRIDGES].bridge = &pdev->dev;
		/* self-link standing in for the real physical_node link */
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "physical_node");
		if (rc)
			goto err_bridge;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
		struct platform_device *bridge =
			cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
		struct platform_device *pdev;

		/* ids continue after the multi-bridge root ports */
		pdev = platform_device_alloc("cxl_root_port",
					     NR_MULTI_ROOT + i);
		if (!pdev)
			goto err_port;
		pdev->dev.parent = &bridge->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_port;
		}
		cxl_root_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
		struct platform_device *root_port = cxl_root_single[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_uport",
					     NR_MULTI_ROOT + i);
		if (!pdev)
			goto err_uport;
		pdev->dev.parent = &root_port->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_uport;
		}
		cxl_swu_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
		struct platform_device *uport =
			cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
		struct platform_device *pdev;

		/* ids continue after the multi-bridge switch dports */
		pdev = platform_device_alloc("cxl_switch_dport",
					     i + NR_MEM_MULTI);
		if (!pdev)
			goto err_dport;
		pdev->dev.parent = &uport->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_dport;
		}
		cxl_swd_single[i] = pdev;
	}

	return 0;

err_dport:
	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
err_uport:
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
err_port:
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}

	return rc;
}

/* Tear down the single-port topology in reverse creation order */
static void cxl_single_topo_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}
}

/* Unregister all mock memdev (cxl_mem, cxl_mem_single, cxl_rcd) devices */
static void cxl_mem_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
		platform_device_unregister(cxl_rcd[i]);
	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem_single[i]);
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
}

/*
 * Create a mock memdev below every switch dport, alternating NUMA nodes,
 * then RCD devices below the RCH bridge(s).
 */
static int cxl_mem_init(void)
{
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
		struct platform_device *dport = cxl_switch_dport[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_mem", i);
		if (!pdev)
			goto err_mem;
		pdev->dev.parent = &dport->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_mem;
		}
		cxl_mem[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
		struct platform_device *dport = cxl_swd_single[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
		if (!pdev)
			goto err_single;
		pdev->dev.parent = &dport->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_single;
		}
		cxl_mem_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
		int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
		struct platform_device *rch = cxl_rch[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_rcd", idx);
		if (!pdev)
			goto err_rcd;
		pdev->dev.parent = &rch->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
platform_device_put(pdev); 1381 goto err_rcd; 1382 } 1383 cxl_rcd[i] = pdev; 1384 } 1385 1386 return 0; 1387 1388 err_rcd: 1389 for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--) 1390 platform_device_unregister(cxl_rcd[i]); 1391 err_single: 1392 for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--) 1393 platform_device_unregister(cxl_mem_single[i]); 1394 err_mem: 1395 for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--) 1396 platform_device_unregister(cxl_mem[i]); 1397 return rc; 1398 } 1399 1400 static __init int cxl_test_init(void) 1401 { 1402 int rc, i; 1403 struct range mappable; 1404 1405 cxl_acpi_test(); 1406 cxl_core_test(); 1407 cxl_mem_test(); 1408 cxl_pmem_test(); 1409 cxl_port_test(); 1410 1411 register_cxl_mock_ops(&cxl_mock_ops); 1412 1413 cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE); 1414 if (!cxl_mock_pool) { 1415 rc = -ENOMEM; 1416 goto err_gen_pool_create; 1417 } 1418 mappable = mhp_get_pluggable_range(true); 1419 1420 rc = gen_pool_add(cxl_mock_pool, 1421 min(iomem_resource.end + 1 - SZ_64G, 1422 mappable.end + 1 - SZ_64G), 1423 SZ_64G, NUMA_NO_NODE); 1424 if (rc) 1425 goto err_gen_pool_add; 1426 1427 if (interleave_arithmetic == 1) { 1428 cfmws_start = CFMWS_XOR_ARRAY_START; 1429 cfmws_end = CFMWS_XOR_ARRAY_END; 1430 } else { 1431 cfmws_start = CFMWS_MOD_ARRAY_START; 1432 cfmws_end = CFMWS_MOD_ARRAY_END; 1433 } 1434 1435 rc = populate_cedt(); 1436 if (rc) 1437 goto err_populate; 1438 1439 for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) { 1440 struct acpi_device *adev = &host_bridge[i]; 1441 struct platform_device *pdev; 1442 1443 pdev = platform_device_alloc("cxl_host_bridge", i); 1444 if (!pdev) 1445 goto err_bridge; 1446 1447 mock_companion(adev, &pdev->dev); 1448 rc = platform_device_add(pdev); 1449 if (rc) { 1450 platform_device_put(pdev); 1451 goto err_bridge; 1452 } 1453 1454 cxl_host_bridge[i] = pdev; 1455 mock_pci_bus[i].bridge = &pdev->dev; 1456 rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj, 1457 "physical_node"); 1458 
if (rc) 1459 goto err_bridge; 1460 } 1461 1462 for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) { 1463 struct platform_device *bridge = 1464 cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)]; 1465 struct platform_device *pdev; 1466 1467 pdev = platform_device_alloc("cxl_root_port", i); 1468 if (!pdev) 1469 goto err_port; 1470 pdev->dev.parent = &bridge->dev; 1471 1472 rc = platform_device_add(pdev); 1473 if (rc) { 1474 platform_device_put(pdev); 1475 goto err_port; 1476 } 1477 cxl_root_port[i] = pdev; 1478 } 1479 1480 BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port)); 1481 for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) { 1482 struct platform_device *root_port = cxl_root_port[i]; 1483 struct platform_device *pdev; 1484 1485 pdev = platform_device_alloc("cxl_switch_uport", i); 1486 if (!pdev) 1487 goto err_uport; 1488 pdev->dev.parent = &root_port->dev; 1489 1490 rc = platform_device_add(pdev); 1491 if (rc) { 1492 platform_device_put(pdev); 1493 goto err_uport; 1494 } 1495 cxl_switch_uport[i] = pdev; 1496 } 1497 1498 for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) { 1499 struct platform_device *uport = 1500 cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)]; 1501 struct platform_device *pdev; 1502 1503 pdev = platform_device_alloc("cxl_switch_dport", i); 1504 if (!pdev) 1505 goto err_dport; 1506 pdev->dev.parent = &uport->dev; 1507 1508 rc = platform_device_add(pdev); 1509 if (rc) { 1510 platform_device_put(pdev); 1511 goto err_dport; 1512 } 1513 cxl_switch_dport[i] = pdev; 1514 } 1515 1516 rc = cxl_single_topo_init(); 1517 if (rc) 1518 goto err_dport; 1519 1520 rc = cxl_rch_topo_init(); 1521 if (rc) 1522 goto err_single; 1523 1524 cxl_acpi = platform_device_alloc("cxl_acpi", 0); 1525 if (!cxl_acpi) 1526 goto err_rch; 1527 1528 mock_companion(&acpi0017_mock, &cxl_acpi->dev); 1529 acpi0017_mock.dev.bus = &platform_bus_type; 1530 1531 rc = platform_device_add(cxl_acpi); 1532 if (rc) 1533 goto err_root; 1534 1535 rc = cxl_mem_init(); 1536 if 
(rc) 1537 goto err_root; 1538 1539 return 0; 1540 1541 err_root: 1542 platform_device_put(cxl_acpi); 1543 err_rch: 1544 cxl_rch_topo_exit(); 1545 err_single: 1546 cxl_single_topo_exit(); 1547 err_dport: 1548 for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--) 1549 platform_device_unregister(cxl_switch_dport[i]); 1550 err_uport: 1551 for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--) 1552 platform_device_unregister(cxl_switch_uport[i]); 1553 err_port: 1554 for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--) 1555 platform_device_unregister(cxl_root_port[i]); 1556 err_bridge: 1557 for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) { 1558 struct platform_device *pdev = cxl_host_bridge[i]; 1559 1560 if (!pdev) 1561 continue; 1562 sysfs_remove_link(&pdev->dev.kobj, "physical_node"); 1563 platform_device_unregister(cxl_host_bridge[i]); 1564 } 1565 err_populate: 1566 depopulate_all_mock_resources(); 1567 err_gen_pool_add: 1568 gen_pool_destroy(cxl_mock_pool); 1569 err_gen_pool_create: 1570 unregister_cxl_mock_ops(&cxl_mock_ops); 1571 return rc; 1572 } 1573 1574 static __exit void cxl_test_exit(void) 1575 { 1576 int i; 1577 1578 cxl_mem_exit(); 1579 platform_device_unregister(cxl_acpi); 1580 cxl_rch_topo_exit(); 1581 cxl_single_topo_exit(); 1582 for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--) 1583 platform_device_unregister(cxl_switch_dport[i]); 1584 for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--) 1585 platform_device_unregister(cxl_switch_uport[i]); 1586 for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--) 1587 platform_device_unregister(cxl_root_port[i]); 1588 for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) { 1589 struct platform_device *pdev = cxl_host_bridge[i]; 1590 1591 if (!pdev) 1592 continue; 1593 sysfs_remove_link(&pdev->dev.kobj, "physical_node"); 1594 platform_device_unregister(cxl_host_bridge[i]); 1595 } 1596 depopulate_all_mock_resources(); 1597 gen_pool_destroy(cxl_mock_pool); 1598 unregister_cxl_mock_ops(&cxl_mock_ops); 
1599 } 1600 1601 module_param(interleave_arithmetic, int, 0444); 1602 MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1"); 1603 module_init(cxl_test_init); 1604 module_exit(cxl_test_exit); 1605 MODULE_LICENSE("GPL v2"); 1606 MODULE_DESCRIPTION("cxl_test: setup module"); 1607 MODULE_IMPORT_NS("ACPI"); 1608 MODULE_IMPORT_NS("CXL"); 1609