// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.

#include <linux/platform_device.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <cxlmem.h>

#include "../watermark.h"
#include "mock.h"

static int interleave_arithmetic;

#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_SINGLE_HOST 1
#define NR_CXL_RCH 1
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
#define NR_CXL_PORT_DECODERS 8
#define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH)

static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
#define NR_MEM_MULTI \
	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];

static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];

struct platform_device *cxl_mem[NR_MEM_MULTI];
struct platform_device *cxl_mem_single[NR_MEM_SINGLE];

static struct platform_device *cxl_rch[NR_CXL_RCH];
static struct platform_device *cxl_rcd[NR_CXL_RCH];

static inline bool is_multi_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (&cxl_host_bridge[i]->dev == dev)
			return true;
	return false;
}

static inline bool is_single_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
		if (&cxl_hb_single[i]->dev == dev)
			return true;
	return false;
}

static struct acpi_device acpi0017_mock;
static struct acpi_device host_bridge[NR_BRIDGES] = {
	[0] = {
		.handle = &host_bridge[0],
	},
	[1] = {
		.handle = &host_bridge[1],
	},
	[2] = {
		.handle = &host_bridge[2],
	},
	[3] = {
		.handle = &host_bridge[3],
	},
};

static bool is_mock_dev(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
		if (dev == &cxl_mem[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
		if (dev == &cxl_mem_single[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++)
		if (dev == &cxl_rcd[i]->dev)
			return true;
	if (dev == &cxl_acpi->dev)
		return true;
	return false;
}

static bool is_mock_adev(struct acpi_device *adev)
{
	int i;

	if (adev == &acpi0017_mock)
		return true;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (adev == &host_bridge[i])
			return true;

	return false;
}

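/*
 * Mock CEDT: one CHBS per mock host bridge (the last advertising a CXL 1.1
 * RCH), plus two groups of CFMWS windows. cfmws0-5 describe modulo
 * interleave arithmetic and cfmws6-8 describe XOR arithmetic; only one
 * group is published at a time, selected by the interleave_arithmetic
 * module parameter in cxl_test_init().
 */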
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_BRIDGES];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws4;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws5;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws6;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws7;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[4];
	} cfmws8;
	struct {
		struct acpi_cedt_cxims cxims;
		u64 xormap_list[2];
	} cxims0;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[2] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 2,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[3] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 3,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL11,
	},
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 0,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 1,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 2,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 3,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws4 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws4),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 4,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 2 },
	},
	.cfmws5 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws5),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 5,
			.window_size = SZ_256M,
		},
		.target = { 3 },
	},
	/* .cfmws6,7,8 use ACPI_CEDT_CFMWS_ARITHMETIC_XOR */
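	/*
	 * As in the entries above, .interleave_ways and .granularity hold
	 * the encoded CFMWS field values, not decoded counts (e.g.
	 * interleave_ways 2 is a 4-way window, matching cfmws8's four
	 * targets; granularity is the encoded value later passed to
	 * eig_to_granularity()).
	 */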
	.cfmws6 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws6),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 0,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, },
	},
	.cfmws7 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws7),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 1,
			.granularity = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 1,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws8 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws8),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 2,
			.granularity = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 0,
			.window_size = SZ_256M * 16UL,
		},
		.target = { 0, 1, 0, 1, },
	},
	.cxims0 = {
		.cxims = {
			.header = {
				.type = ACPI_CEDT_TYPE_CXIMS,
				.length = sizeof(mock_cedt.cxims0),
			},
			.hbig = 0,
			.nr_xormaps = 2,
		},
		.xormap_list = { 0x404100, 0x808200, },
	},
};

struct acpi_cedt_cfmws *mock_cfmws[] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
	[4] = &mock_cedt.cfmws4.cfmws,
	[5] = &mock_cedt.cfmws5.cfmws,
	/* Modulo Math above, XOR Math below */
	[6] = &mock_cedt.cfmws6.cfmws,
	[7] = &mock_cedt.cfmws7.cfmws,
	[8] = &mock_cedt.cfmws8.cfmws,
};

static int cfmws_start;
static int cfmws_end;
#define CFMWS_MOD_ARRAY_START 0
#define CFMWS_MOD_ARRAY_END 5
#define CFMWS_XOR_ARRAY_START 6
#define CFMWS_XOR_ARRAY_END 8

struct acpi_cedt_cxims *mock_cxims[1] = {
	[0] = &mock_cedt.cxims0.cxims,
};

struct cxl_mock_res {
	struct list_head list;
	struct range range;
};

static LIST_HEAD(mock_res);
static DEFINE_MUTEX(mock_res_lock);
static struct gen_pool *cxl_mock_pool;

static void depopulate_all_mock_resources(void)
{
	struct cxl_mock_res *res, *_res;

	mutex_lock(&mock_res_lock);
	list_for_each_entry_safe(res, _res, &mock_res, list) {
		gen_pool_free(cxl_mock_pool, res->range.start,
			      range_len(&res->range));
		list_del(&res->list);
		kfree(res);
	}
	mutex_unlock(&mock_res_lock);
}

static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align)
{
	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
	struct genpool_data_align data = {
		.align = align,
	};
	unsigned long phys;

	if (!res)
		return NULL;

	INIT_LIST_HEAD(&res->list);
	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
				   gen_pool_first_fit_align, &data);
	if (!phys) {
		kfree(res);
		return NULL;
	}

	res->range = (struct range) {
		.start = phys,
		.end = phys + size - 1,
	};
	mutex_lock(&mock_res_lock);
	list_add(&res->list, &mock_res);
	mutex_unlock(&mock_res_lock);

	return res;
}

static int populate_cedt(void)
{
	struct cxl_mock_res *res;
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
		resource_size_t size;

		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
		else
			size = ACPI_CEDT_CHBS_LENGTH_CXL11;

		res = alloc_mock_res(size, size);
		if (!res)
			return -ENOMEM;
		chbs->base = res->range.start;
		chbs->length = size;
	}

	for (i = cfmws_start; i <= cfmws_end; i++) {
		struct acpi_cedt_cfmws *window = mock_cfmws[i];

		res = alloc_mock_res(window->window_size, SZ_256M);
		if (!res)
			return -ENOMEM;
		window->base_hpa = res->range.start;
	}

	return 0;
}

static bool is_mock_port(struct device *dev);

/*
 * WARNING, this hack assumes that 'struct cxl_cfmws_context' and
 * 'struct cxl_chbs_context' share the property that their first
 * struct member is a cxl_test device being probed by the cxl_acpi
 * driver.
 */
struct cxl_cedt_context {
	struct device *dev;
};

static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
				      acpi_tbl_entry_handler_arg handler_arg,
				      void *arg)
{
	struct cxl_cedt_context *ctx = arg;
	struct device *dev = ctx->dev;
	union acpi_subtable_headers *h;
	unsigned long end;
	int i;

	if (!is_mock_port(dev) && !is_mock_dev(dev))
		return acpi_table_parse_cedt(id, handler_arg, arg);

	if (id == ACPI_CEDT_TYPE_CHBS)
		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
			end = (unsigned long)&mock_cedt.chbs[i + 1];
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CFMWS)
		for (i = cfmws_start; i <= cfmws_end; i++) {
			h = (union acpi_subtable_headers *) mock_cfmws[i];
			end = (unsigned long) h + mock_cfmws[i]->header.length;
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CXIMS)
		for (i = 0; i < ARRAY_SIZE(mock_cxims); i++) {
			h = (union acpi_subtable_headers *)mock_cxims[i];
			end = (unsigned long)h + mock_cxims[i]->header.length;
			handler_arg(h, arg, end);
		}

	return 0;
}

static bool is_mock_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (dev == &cxl_host_bridge[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
		if (dev == &cxl_hb_single[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++)
		if (dev == &cxl_rch[i]->dev)
			return true;

	return false;
}

static bool is_mock_port(struct device *dev)
{
	int i;

	if (is_mock_bridge(dev))
		return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
		if (dev == &cxl_root_port[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
		if (dev == &cxl_switch_uport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
		if (dev == &cxl_switch_dport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
		if (dev == &cxl_root_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
		if (dev == &cxl_swu_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
		if (dev == &cxl_swd_single[i]->dev)
			return true;

	if (is_cxl_memdev(dev))
		return is_mock_dev(dev->parent);

	return false;
}

static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}

static struct acpi_device *find_host_bridge(acpi_handle handle)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (handle == host_bridge[i].handle)
			return &host_bridge[i];
	return NULL;
}

static acpi_status
mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
			   struct acpi_object_list *arguments,
			   unsigned long long *data)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
		return acpi_evaluate_integer(handle, pathname, arguments, data);

	*data = host_bridge_index(adev);
	return AE_OK;
}

static struct pci_bus mock_pci_bus[NR_BRIDGES];
static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
	[2] = {
		.bus = &mock_pci_bus[2],
	},
	[3] = {
		.bus = &mock_pci_bus[3],
	},
};

static bool is_mock_bus(struct pci_bus *bus)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
		if (bus == &mock_pci_bus[i])
			return true;
	return false;
}

static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev)
		return acpi_pci_find_root(handle);
	return &mock_pci_root[host_bridge_index(adev)];
}

static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);

	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	return cxlhdm;
}

static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
	return -EOPNOTSUPP;
}

struct target_map_ctx {
	int *target_map;
	int index;
	int target_count;
};

static int map_targets(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct target_map_ctx *ctx = data;

	ctx->target_map[ctx->index++] = pdev->id;

	if (ctx->index > ctx->target_count) {
		dev_WARN_ONCE(dev, 1, "too many targets found?\n");
		return -ENXIO;
	}

	return 0;
}

static int mock_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
	if (port->commit_end + 1 != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end + 1);
		return -EBUSY;
	}

	port->commit_end++;
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

static int mock_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	return 0;
}

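/*
 * mock_decoder_commit() and mock_decoder_reset() above only model the
 * ordering rules: decoders are committed in ascending id order and reset
 * in descending order, tracked via port->commit_end, so out-of-order
 * operations fail with -EBUSY, mirroring what the CXL core expects of
 * real HDM decoders.
 */
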
static void default_mock_decoder(struct cxl_decoder *cxld)
{
	cxld->hpa_range = (struct range){
		.start = 0,
		.end = -1,
	};

	cxld->interleave_ways = 1;
	cxld->interleave_granularity = 256;
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;
}

static int first_decoder(struct device *dev, void *data)
{
	struct cxl_decoder *cxld;

	if (!is_switch_decoder(dev))
		return 0;
	cxld = to_cxl_decoder(dev);
	if (cxld->id == 0)
		return 1;
	return 0;
}

static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
{
	struct acpi_cedt_cfmws *window = mock_cfmws[0];
	struct platform_device *pdev = NULL;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_port *port, *iter;
	const int size = SZ_512M;
	struct cxl_memdev *cxlmd;
	struct cxl_dport *dport;
	struct device *dev;
	bool hb0 = false;
	u64 base;
	int i;

	if (is_endpoint_decoder(&cxld->dev)) {
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxlmd = cxled_to_memdev(cxled);
		WARN_ON(!dev_is_platform(cxlmd->dev.parent));
		pdev = to_platform_device(cxlmd->dev.parent);

		/* check if the endpoint is attached to host-bridge0 */
		port = cxled_to_port(cxled);
		do {
			if (port->uport == &cxl_host_bridge[0]->dev) {
				hb0 = true;
				break;
			}
			if (is_cxl_port(port->dev.parent))
				port = to_cxl_port(port->dev.parent);
			else
				port = NULL;
		} while (port);
		port = cxled_to_port(cxled);
	}

	/*
	 * The first decoder on each of the first 2 devices on the first
	 * switch attached to host-bridge0 mocks a fake / static RAM
	 * region. All other decoders are default disabled. Given the
	 * round robin assignment those devices are named cxl_mem.0 and
	 * cxl_mem.4.
	 *
	 * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
	 */
	if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
		default_mock_decoder(cxld);
		return;
	}

	base = window->base_hpa;
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	cxld->interleave_ways = 2;
	eig_to_granularity(window->granularity, &cxld->interleave_granularity);
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->flags = CXL_DECODER_F_ENABLE;
	cxled->state = CXL_DECODER_STATE_AUTO;
	port->commit_end = cxld->id;
	devm_cxl_dpa_reserve(cxled, 0, size / cxld->interleave_ways, 0);
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;

	/*
	 * Now that the endpoint decoder is set up, walk up the hierarchy
	 * and setup the switch and root port decoders targeting @cxlmd.
	 */
	iter = port;
	for (i = 0; i < 2; i++) {
		dport = iter->parent_dport;
		iter = dport->port;
		dev = device_find_child(&iter->dev, NULL, first_decoder);
		/*
		 * Ancestor ports are guaranteed to be enumerated before
		 * @port, and all ports have at least one decoder.
		 */
		if (WARN_ON(!dev))
			continue;
		cxlsd = to_cxl_switch_decoder(dev);
		if (i == 0) {
			/* put cxl_mem.4 second in the decode order */
			if (pdev->id == 4)
				cxlsd->target[1] = dport;
			else
				cxlsd->target[0] = dport;
		} else
			cxlsd->target[0] = dport;
		cxld = &cxlsd->cxld;
		cxld->target_type = CXL_DECODER_EXPANDER;
		cxld->flags = CXL_DECODER_F_ENABLE;
		iter->commit_end = 0;
		/*
		 * The switch targets 2 endpoints, while the host bridge
		 * targets one root port
		 */
		if (i == 0)
			cxld->interleave_ways = 2;
		else
			cxld->interleave_ways = 1;
		cxld->interleave_granularity = 256;
		cxld->hpa_range = (struct range) {
			.start = base,
			.end = base + size - 1,
		};
		put_device(dev);
	}
}

static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		struct target_map_ctx ctx = {
			.target_map = target_map,
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		if (target_count) {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		} else {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);

			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		}

		mock_init_hdm_decoder(cxld);

		if (target_count) {
			rc = device_for_each_child(port->uport, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld, target_map);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}

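/*
 * Dport enumeration: each platform_device child of a port's uport device
 * becomes a CXL dport, with the platform device id reused as the dport
 * port number (and no register resource, hence CXL_RESOURCE_NONE).
 */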
static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct platform_device **array;
	int i, array_size;

	if (port->depth == 1) {
		if (is_multi_bridge(port->uport)) {
			array_size = ARRAY_SIZE(cxl_root_port);
			array = cxl_root_port;
		} else if (is_single_bridge(port->uport)) {
			array_size = ARRAY_SIZE(cxl_root_single);
			array = cxl_root_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport));
			return -ENXIO;
		}
	} else if (port->depth == 2) {
		struct cxl_port *parent = to_cxl_port(port->dev.parent);

		if (is_multi_bridge(parent->uport)) {
			array_size = ARRAY_SIZE(cxl_switch_dport);
			array = cxl_switch_dport;
		} else if (is_single_bridge(parent->uport)) {
			array_size = ARRAY_SIZE(cxl_swd_single);
			array = cxl_swd_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport));
			return -ENXIO;
		}
	} else {
		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
			      port->depth);
		return -ENXIO;
	}

	for (i = 0; i < array_size; i++) {
		struct platform_device *pdev = array[i];
		struct cxl_dport *dport;

		if (pdev->dev.parent != port->uport) {
			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
				dev_name(port->uport),
				dev_name(pdev->dev.parent));
			continue;
		}

		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
					   CXL_RESOURCE_NONE);

		if (IS_ERR(dport))
			return PTR_ERR(dport);
	}

	return 0;
}

resource_size_t mock_cxl_rcrb_to_component(struct device *dev,
					   resource_size_t rcrb,
					   enum cxl_rcrb which)
{
	dev_dbg(dev, "rcrb: %pa which: %d\n", &rcrb, which);

	return (resource_size_t) which + 1;
}

static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.cxl_rcrb_to_component = mock_cxl_rcrb_to_component,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
	.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
	.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
	.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};

static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}

#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif

#ifndef SZ_512G
#define SZ_512G (SZ_64G * 8)
#endif

static __init int cxl_rch_init(void)
{
	int rc, i;

	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++) {
		int idx = NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + i;
		struct acpi_device *adev = &host_bridge[idx];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge", idx);
		if (!pdev)
			goto err_bridge;

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_rch[i] = pdev;
		mock_pci_bus[idx].bridge = &pdev->dev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "firmware_node");
		if (rc)
			goto err_bridge;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
		int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
		struct platform_device *rch = cxl_rch[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_rcd", idx);
		if (!pdev)
			goto err_mem;
		pdev->dev.parent = &rch->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_mem;
		}
		cxl_rcd[i] = pdev;
	}

	return 0;

err_mem:
	for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
		platform_device_unregister(cxl_rcd[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_rch[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
		platform_device_unregister(cxl_rch[i]);
	}

	return rc;
}

static void cxl_rch_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
		platform_device_unregister(cxl_rcd[i]);
	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_rch[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
		platform_device_unregister(cxl_rch[i]);
	}
}

static __init int cxl_single_init(void)
{
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
		struct acpi_device *adev =
			&host_bridge[NR_CXL_HOST_BRIDGES + i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge",
					     NR_CXL_HOST_BRIDGES + i);
		if (!pdev)
			goto err_bridge;

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_hb_single[i] = pdev;
		mock_pci_bus[i + NR_CXL_HOST_BRIDGES].bridge = &pdev->dev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "physical_node");
		if (rc)
			goto err_bridge;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
		struct platform_device *bridge =
			cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_root_port",
					     NR_MULTI_ROOT + i);
		if (!pdev)
			goto err_port;
		pdev->dev.parent = &bridge->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_port;
		}
		cxl_root_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
		struct platform_device *root_port = cxl_root_single[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_uport",
					     NR_MULTI_ROOT + i);
		if (!pdev)
			goto err_uport;
		pdev->dev.parent = &root_port->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_uport;
		}
		cxl_swu_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
		struct platform_device *uport =
			cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_dport",
					     i + NR_MEM_MULTI);
		if (!pdev)
			goto err_dport;
		pdev->dev.parent = &uport->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_dport;
		}
		cxl_swd_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
		struct platform_device *dport = cxl_swd_single[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
		if (!pdev)
			goto err_mem;
		pdev->dev.parent = &dport->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_mem;
		}
		cxl_mem_single[i] = pdev;
	}

	return 0;

err_mem:
	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem_single[i]);
err_dport:
	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
err_uport:
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
err_port:
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}

	return rc;
}

static void cxl_single_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem_single[i]);
	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}
}

static __init int cxl_test_init(void)
{
	int rc, i;

	cxl_acpi_test();
	cxl_core_test();
	cxl_mem_test();
	cxl_pmem_test();
	cxl_port_test();

	register_cxl_mock_ops(&cxl_mock_ops);

	cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
	if (!cxl_mock_pool) {
		rc = -ENOMEM;
		goto err_gen_pool_create;
	}

	rc = gen_pool_add(cxl_mock_pool, iomem_resource.end + 1 - SZ_64G,
			  SZ_64G, NUMA_NO_NODE);
	if (rc)
		goto err_gen_pool_add;

	if (interleave_arithmetic == 1) {
		cfmws_start = CFMWS_XOR_ARRAY_START;
		cfmws_end = CFMWS_XOR_ARRAY_END;
		dev_dbg(NULL, "cxl_test loading xor math option\n");
	} else {
		cfmws_start = CFMWS_MOD_ARRAY_START;
		cfmws_end = CFMWS_MOD_ARRAY_END;
		dev_dbg(NULL, "cxl_test loading modulo math option\n");
	}

	rc = populate_cedt();
	if (rc)
		goto err_populate;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
		struct acpi_device *adev = &host_bridge[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge", i);
		if (!pdev)
			goto err_bridge;

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_host_bridge[i] = pdev;
		mock_pci_bus[i].bridge = &pdev->dev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "physical_node");
		if (rc)
			goto err_bridge;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
		struct platform_device *bridge =
			cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_root_port", i);
		if (!pdev)
			goto err_port;
		pdev->dev.parent = &bridge->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_port;
		}
		cxl_root_port[i] = pdev;
	}

	BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
		struct platform_device *root_port = cxl_root_port[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_uport", i);
		if (!pdev)
			goto err_uport;
		pdev->dev.parent = &root_port->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_uport;
		}
		cxl_switch_uport[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
		struct platform_device *uport =
			cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_dport", i);
		if (!pdev)
			goto err_dport;
		pdev->dev.parent = &uport->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_dport;
		}
		cxl_switch_dport[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
		struct platform_device *dport = cxl_switch_dport[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_mem", i);
		if (!pdev)
			goto err_mem;
		pdev->dev.parent = &dport->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_mem;
		}
		cxl_mem[i] = pdev;
	}

	rc = cxl_single_init();
	if (rc)
		goto err_mem;

	rc = cxl_rch_init();
	if (rc)
		goto err_single;

	cxl_acpi = platform_device_alloc("cxl_acpi", 0);
	if (!cxl_acpi)
		goto err_rch;

	mock_companion(&acpi0017_mock, &cxl_acpi->dev);
	acpi0017_mock.dev.bus = &platform_bus_type;

	rc = platform_device_add(cxl_acpi);
	if (rc)
		goto err_add;

	return 0;

err_add:
	platform_device_put(cxl_acpi);
err_rch:
	cxl_rch_exit();
err_single:
	cxl_single_exit();
err_mem:
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
err_dport:
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
err_uport:
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
err_port:
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
err_populate:
	depopulate_all_mock_resources();
err_gen_pool_add:
	gen_pool_destroy(cxl_mock_pool);
err_gen_pool_create:
	unregister_cxl_mock_ops(&cxl_mock_ops);
	return rc;
}

static __exit void cxl_test_exit(void)
{
	int i;

	platform_device_unregister(cxl_acpi);
	cxl_rch_exit();
	cxl_single_exit();
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
	depopulate_all_mock_resources();
	gen_pool_destroy(cxl_mock_pool);
	unregister_cxl_mock_ops(&cxl_mock_ops);
}

module_param(interleave_arithmetic, int, 0000);
MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
MODULE_IMPORT_NS(CXL);
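
/*
 * Usage sketch (assuming this file builds as the cxl_test.ko module, as in
 * the upstream tools/testing/cxl tree):
 *
 *   modprobe cxl_test                          # modulo interleave math
 *   modprobe cxl_test interleave_arithmetic=1  # XOR interleave math
 */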