// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"

/**
 * DOC: cxl pci
 *
 * This implements the PCI exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even if it isn't CXL enabled. While this driver is
 * focused around the PCI specific aspects of a CXL device, it binds to the
 * specific CXL memory device class code, and therefore the implementation of
 * cxl_pci is focused around CXL memory devices.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register on the CXL bus.
 *  - Enumerate the device's register interface and map it.
 *  - Register an nvdimm bridge device with cxl_core.
 *  - Register a CXL mailbox with cxl_core.
 */

#define cxl_doorbell_busy(cxlds)                                          \
	(readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &           \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)

/*
 * CXL 2.0 ECN "Add Mailbox Ready Time" defines a capability field to
 * dictate how long to wait for the mailbox to become ready. The new
 * field allows the device to tell software the amount of time to wait
 * before mailbox ready. This field per the spec theoretically allows
 * for up to 255 seconds. 255 seconds is unreasonably long, it's longer
 * than the maximum SATA port link recovery wait. Default to 60 seconds
 * until someone builds a CXL device that needs more time in practice.
 */
static unsigned short mbox_ready_timeout = 60;
module_param(mbox_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(mbox_ready_timeout,
		 "seconds to wait for mailbox ready / memory active status");

static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
{
	const unsigned long start = jiffies;
	unsigned long end = start;

	while (cxl_doorbell_busy(cxlds)) {
		end = jiffies;

		if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
			/* Check again in case preempted before timeout test */
			if (!cxl_doorbell_busy(cxlds))
				break;
			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	dev_dbg(cxlds->dev, "Doorbell wait took %dms",
		jiffies_to_msecs(end) - jiffies_to_msecs(start));
	return 0;
}

#define cxl_err(dev, status, msg)                                         \
	dev_err_ratelimited(dev, msg ", device state %s%s\n",            \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "",   \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

#define cxl_cmd_err(dev, cmd, status, msg)                                \
	dev_err_ratelimited(dev, msg " (opcode: %#x), device state %s%s\n", \
			    (cmd)->opcode,                                \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "",   \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

/**
 * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
 * @cxlds: The device state to communicate with.
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command, thus only using the
 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
 * devices, and perhaps other types of CXL devices, may have further information
 * available upon error conditions. Driver facilities wishing to send mailbox
 * commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the primary
 * mailbox to be OS controlled and the secondary mailbox to be used by system
 * firmware, so that the OS and firmware can communicate with the device without
 * needing to coordinate with each other. The driver only uses the primary
 * mailbox.
 */
static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
				   struct cxl_mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	struct device *dev = cxlds->dev;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlds->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
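	 *
	 * For illustration, a condensed sketch of steps 2-4 as implemented
	 * below (field positions come from the CXLDEV_MBOX_CMD_* masks in
	 * cxlmem.h; "mbox" stands for cxlds->regs.mbox), roughly:
	 *
	 *   cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK, opcode) |
	 *             FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, size_in);
	 *   writeq(cmd_reg, mbox + CXLDEV_MBOX_CMD_OFFSET);
	 *   writel(CXLDEV_MBOX_CTRL_DOORBELL, mbox + CXLDEV_MBOX_CTRL_OFFSET);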
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlds)) {
		u64 md_status =
			readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status,
			    "mailbox queue busy");
		return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(dev, "Sending command\n");
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
	if (rc == -ETIMEDOUT) {
		u64 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status, "mailbox timeout");
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	if (mbox_cmd->return_code != 0) {
		dev_dbg(dev, "Mailbox operation had an error\n");
		return 0;
	}

	/* #7 */
	cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}

static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	int rc;

	mutex_lock_io(&cxlds->mbox_mutex);
	rc = __cxl_pci_mbox_send_cmd(cxlds, cmd);
	mutex_unlock(&cxlds->mbox_mutex);

	return rc;
}

static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
{
	const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
	unsigned long timeout;
	u64 md_status;

	timeout = jiffies + mbox_ready_timeout * HZ;
	do {
		md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
		if (md_status & CXLMDEV_MBOX_IF_READY)
			break;
		if (msleep_interruptible(100))
			break;
	} while (!time_after(jiffies, timeout));

	if (!(md_status & CXLMDEV_MBOX_IF_READY)) {
		cxl_err(cxlds->dev, md_status,
			"timeout awaiting mailbox ready");
		return -ETIMEDOUT;
	}

	/*
	 * A command may be in flight from a previous driver instance (think
	 * kexec); do one doorbell wait so that __cxl_pci_mbox_send_cmd() can
	 * assume that it is the only source for future doorbell busy events.
	 */
	if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) {
		cxl_err(cxlds->dev, md_status, "timeout awaiting mailbox idle");
		return -ETIMEDOUT;
	}

	cxlds->mbox_send = cxl_pci_mbox_send;
	cxlds->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);

	/*
	 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
	 *
	 * If the size is too small, mandatory commands will not work and so
	 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft limiting it.
	 */
	cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M);
	if (cxlds->payload_size < 256) {
		dev_err(cxlds->dev, "Mailbox is too small (%zub)",
			cxlds->payload_size);
		return -ENXIO;
	}

	dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
		cxlds->payload_size);

	return 0;
}

static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map)
{
	void __iomem *addr;
	int bar = map->barno;
	struct device *dev = &pdev->dev;
	resource_size_t offset = map->block_offset;

	/* Basic sanity check that BAR is big enough */
	if (pci_resource_len(pdev, bar) < offset) {
		dev_err(dev, "BAR%d: %pr: too small (offset: %pa)\n", bar,
			&pdev->resource[bar], &offset);
		return -ENXIO;
	}

	addr = pci_iomap(pdev, bar, 0);
	if (!addr) {
		dev_err(dev, "failed to map registers\n");
		return -ENOMEM;
	}

	dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %pa\n",
		bar, &offset);

	map->base = addr + map->block_offset;
	return 0;
}

static void cxl_unmap_regblock(struct pci_dev *pdev,
			       struct cxl_register_map *map)
{
	pci_iounmap(pdev, map->base - map->block_offset);
	map->base = NULL;
}

static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
{
	struct cxl_component_reg_map *comp_map;
	struct cxl_device_reg_map *dev_map;
	struct device *dev = &pdev->dev;
	void __iomem *base = map->base;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		comp_map = &map->component_map;
		cxl_probe_component_regs(dev, base, comp_map);
		if (!comp_map->hdm_decoder.valid) {
			dev_err(dev, "HDM decoder registers not found\n");
			return -ENXIO;
		}

		dev_dbg(dev, "Set up component registers\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		dev_map = &map->device_map;
		cxl_probe_device_regs(dev, base, dev_map);
		if (!dev_map->status.valid || !dev_map->mbox.valid ||
		    !dev_map->memdev.valid) {
			dev_err(dev, "registers not found: %s%s%s\n",
				!dev_map->status.valid ? "status " : "",
				!dev_map->mbox.valid ? "mbox " : "",
				!dev_map->memdev.valid ? "memdev " : "");
			return -ENXIO;
		}

		dev_dbg(dev, "Set up device registers\n");
		break;
	default:
		break;
	}

	return 0;
}

static int cxl_map_regs(struct cxl_dev_state *cxlds, struct cxl_register_map *map)
{
	struct device *dev = cxlds->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		cxl_map_component_regs(pdev, &cxlds->regs.component, map);
		dev_dbg(dev, "Mapping component registers...\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		cxl_map_device_regs(pdev, &cxlds->regs.device_regs, map);
		dev_dbg(dev, "Mapping device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
			  struct cxl_register_map *map)
{
	int rc;

	rc = cxl_find_regblock(pdev, type, map);
	if (rc)
		return rc;

	rc = cxl_map_regblock(pdev, map);
	if (rc)
		return rc;

	rc = cxl_probe_regs(pdev, map);
	cxl_unmap_regblock(pdev, map);

	return rc;
}

static int wait_for_valid(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec, rc;
	u32 val;

	/*
	 * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size High
	 * and Size Low registers are valid. Must be set within 1 second of
	 * deassertion of reset to the CXL device. Likely it is already set by
	 * the time this runs, but otherwise give a 1.5 second timeout in case
	 * of clock skew.
	 */
	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	msleep(1500);

	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	return -ETIMEDOUT;
}

/*
 * Wait up to @mbox_ready_timeout for the device to report memory active.
 * Note that the same module parameter bounds both the mailbox-ready wait in
 * cxl_pci_setup_mailbox() and the memory-active wait here.
 */
static int wait_for_media_ready(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	bool active = false;
	u64 md_status;
	int rc, i;

	rc = wait_for_valid(cxlds);
	if (rc)
		return rc;

	for (i = mbox_ready_timeout; i; i--) {
		u32 temp;
		int rc;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
		if (rc)
			return rc;

		active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp);
		if (active)
			break;
		msleep(1000);
	}

	if (!active) {
		dev_err(&pdev->dev,
			"timeout awaiting memory active after %d seconds\n",
			mbox_ready_timeout);
		return -ETIMEDOUT;
	}

	md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!CXLMDEV_READY(md_status))
		return -EIO;

	return 0;
}

static int cxl_dvsec_ranges(struct cxl_dev_state *cxlds)
{
	struct cxl_endpoint_dvsec_info *info = &cxlds->info;
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	int hdm_count, rc, i;
	u16 cap, ctrl;

	if (!d)
		return -ENXIO;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
	if (rc)
		return rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc)
		return rc;

	if (!(cap & CXL_DVSEC_MEM_CAPABLE))
		return -ENXIO;

	/*
	 * It is not allowed by spec for MEM.capable to be set and have 0 legacy
	 * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
	 * driver is for a spec defined class code which must be CXL.mem
	 * capable, there is no point in continuing to enable CXL.mem.
	 */
	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
	if (!hdm_count || hdm_count > 2)
		return -EINVAL;

	rc = wait_for_valid(cxlds);
	if (rc)
		return rc;

	info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);

	for (i = 0; i < hdm_count; i++) {
		u64 base, size;
		u32 temp;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
		if (rc)
			return rc;

		size = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
		if (rc)
			return rc;

		size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
		if (rc)
			return rc;

		base = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
		if (rc)
			return rc;

		base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;

		info->dvsec_range[i] = (struct range) {
			.start = base,
			.end = base + size - 1
		};

		if (size)
			info->ranges++;
	}

	return 0;
}

static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_register_map map;
	struct cxl_memdev *cxlmd;
	struct cxl_dev_state *cxlds;
	int rc;

	/*
	 * Double check the anonymous union trickery in struct cxl_regs
	 * FIXME switch to struct_group()
	 */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	cxlds = cxl_dev_state_create(&pdev->dev);
	if (IS_ERR(cxlds))
		return PTR_ERR(cxlds);

	cxlds->serial = pci_get_dsn(pdev);
	cxlds->cxl_dvsec = pci_find_dvsec_capability(
		pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
	if (!cxlds->cxl_dvsec)
		dev_warn(&pdev->dev,
			 "Device DVSEC not present, skip CXL.mem init\n");

	cxlds->wait_media_ready = wait_for_media_ready;

	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
	if (rc)
		return rc;

	rc = cxl_map_regs(cxlds, &map);
	if (rc)
		return rc;

	/*
	 * If the component registers can't be found, the cxl_pci driver may
	 * still be useful for management functions so don't return an error.
	 */
	cxlds->component_reg_phys = CXL_RESOURCE_NONE;
	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_warn(&pdev->dev, "No component registers (%d)\n", rc);

	cxlds->component_reg_phys = cxl_regmap_to_base(pdev, &map);

	rc = cxl_pci_setup_mailbox(cxlds);
	if (rc)
		return rc;

	rc = cxl_enumerate_cmds(cxlds);
	if (rc)
		return rc;

	rc = cxl_dev_state_identify(cxlds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlds);
	if (rc)
		return rc;

	rc = cxl_dvsec_ranges(cxlds);
	if (rc)
		dev_warn(&pdev->dev,
			 "Failed to get DVSEC range information (%d)\n", rc);

	cxlmd = devm_cxl_add_memdev(cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	if (range_len(&cxlds->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
		return devm_cxl_add_nvdimm(&pdev->dev, cxlmd);

	/* Don't leak a stale error from the non-fatal DVSEC ranges warning */
	return 0;
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

static struct pci_driver cxl_pci_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_pci_probe,
	.driver	= {
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};

MODULE_LICENSE("GPL v2");
module_pci_driver(cxl_pci_driver);
MODULE_IMPORT_NS(CXL);
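
/*
 * Example usage (illustrative, not part of the driver; assumes this file
 * builds as the cxl_pci module): the mailbox-ready / memory-active timeout
 * declared above can be raised at load time or at runtime, e.g.:
 *
 *   modprobe cxl_pci mbox_ready_timeout=120
 *   echo 120 > /sys/module/cxl_pci/parameters/mbox_ready_timeout
 */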