// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/nd.h>
#include "cxlmem.h"
#include "cxl.h"

extern const struct nvdimm_security_ops *cxl_security_ops;

/*
 * Ordered workqueue for cxl nvdimm device arrival and departure, to
 * coordinate bus rescans when a bridge arrives and to trigger remove
 * operations when the bridge is removed.
 */
static struct workqueue_struct *cxl_pmem_wq;

static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);

static void clear_exclusive(void *cxlds)
{
	clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
}

static void unregister_nvdimm(void *nvdimm)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge;
	struct cxl_pmem_region *cxlr_pmem;
	unsigned long index;

	device_lock(&cxl_nvb->dev);
	dev_set_drvdata(&cxl_nvd->dev, NULL);
	xa_for_each(&cxl_nvd->pmem_regions, index, cxlr_pmem) {
		get_device(&cxlr_pmem->dev);
		device_unlock(&cxl_nvb->dev);

		device_release_driver(&cxlr_pmem->dev);
		put_device(&cxlr_pmem->dev);

		device_lock(&cxl_nvb->dev);
	}
	device_unlock(&cxl_nvb->dev);

	nvdimm_delete(nvdimm);
	cxl_nvd->bridge = NULL;
}

static ssize_t provider_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);

	return sysfs_emit(buf, "%s\n", dev_name(&cxl_nvd->dev));
}
static DEVICE_ATTR_RO(provider);

static ssize_t id_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_dev_state *cxlds = cxl_nvd->cxlmd->cxlds;

	return sysfs_emit(buf, "%lld\n", cxlds->serial);
}
static DEVICE_ATTR_RO(id);

static struct attribute *cxl_dimm_attributes[] = {
	&dev_attr_id.attr,
	&dev_attr_provider.attr,
	NULL
};

static const struct attribute_group cxl_dimm_attribute_group = {
	.name = "cxl",
	.attrs = cxl_dimm_attributes,
};

static const struct attribute_group *cxl_dimm_attribute_groups[] = {
	&cxl_dimm_attribute_group,
	NULL
};

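/*
 * Register an nvdimm on the bridge's nvdimm_bus for this memdev, with the
 * label (LSA) commands routed through cxl_pmem_ctl(), and reserve the
 * commands in @exclusive_cmds for kernel-only use while the nvdimm is
 * registered.
 */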
static int cxl_nvdimm_probe(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	unsigned long flags = 0, cmd_mask = 0;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct nvdimm *nvdimm;
	int rc;

	cxl_nvb = cxl_find_nvdimm_bridge(dev);
	if (!cxl_nvb)
		return -ENXIO;

	device_lock(&cxl_nvb->dev);
	if (!cxl_nvb->nvdimm_bus) {
		rc = -ENXIO;
		goto out;
	}

	set_exclusive_cxl_commands(cxlds, exclusive_cmds);
	rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
	if (rc)
		goto out;

	set_bit(NDD_LABELING, &flags);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
	nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd,
				 cxl_dimm_attribute_groups, flags,
				 cmd_mask, 0, NULL, NULL, cxl_security_ops, NULL);
	if (!nvdimm) {
		rc = -ENOMEM;
		goto out;
	}

	dev_set_drvdata(dev, nvdimm);
	cxl_nvd->bridge = cxl_nvb;
	rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
	device_unlock(&cxl_nvb->dev);
	put_device(&cxl_nvb->dev);

	return rc;
}

static struct cxl_driver cxl_nvdimm_driver = {
	.name = "cxl_nvdimm",
	.probe = cxl_nvdimm_probe,
	.id = CXL_DEVICE_NVDIMM,
};

static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
				    struct nd_cmd_get_config_size *cmd,
				    unsigned int buf_len)
{
	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	*cmd = (struct nd_cmd_get_config_size) {
		.config_size = cxlds->lsa_size,
		.max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa),
	};

	return 0;
}

static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
				    struct nd_cmd_get_config_data_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mbox_get_lsa get_lsa;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;
	if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
		return -EINVAL;

	get_lsa = (struct cxl_mbox_get_lsa) {
		.offset = cpu_to_le32(cmd->in_offset),
		.length = cpu_to_le32(cmd->in_length),
	};

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa,
			       sizeof(get_lsa), cmd->out_buf, cmd->in_length);
	cmd->status = 0;

	return rc;
}

static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
				    struct nd_cmd_set_config_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mbox_set_lsa *set_lsa;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	/* 4-byte status follows the input data in the payload */
	if (size_add(struct_size(cmd, in_buf, cmd->in_length), 4) > buf_len)
		return -EINVAL;

	set_lsa =
		kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
	if (!set_lsa)
		return -ENOMEM;

	*set_lsa = (struct cxl_mbox_set_lsa) {
		.offset = cpu_to_le32(cmd->in_offset),
	};
	memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_LSA, set_lsa,
			       struct_size(set_lsa, data, cmd->in_length),
			       NULL, 0);

	/*
	 * Set "firmware" status (4 packed bytes at the end of the input
	 * payload).
	 */
	put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]);
	kvfree(set_lsa);

	return rc;
}

static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
			       void *buf, unsigned int buf_len)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		return cxl_pmem_get_config_size(cxlds, buf, buf_len);
	case ND_CMD_GET_CONFIG_DATA:
		return cxl_pmem_get_config_data(cxlds, buf, buf_len);
	case ND_CMD_SET_CONFIG_DATA:
		return cxl_pmem_set_config_data(cxlds, buf, buf_len);
	default:
		return -ENOTTY;
	}
}

static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len, int *cmd_rc)
{
	/*
	 * No firmware response to translate, let the transport error
	 * code take precedence.
	 */
	*cmd_rc = 0;

	if (!nvdimm)
		return -ENOTTY;
	return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}

static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
{
	if (cxl_nvb->nvdimm_bus)
		return true;
	cxl_nvb->nvdimm_bus =
		nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);
	return cxl_nvb->nvdimm_bus != NULL;
}

static int cxl_nvdimm_release_driver(struct device *dev, void *cxl_nvb)
{
	struct cxl_nvdimm *cxl_nvd;

	if (!is_cxl_nvdimm(dev))
		return 0;

	cxl_nvd = to_cxl_nvdimm(dev);
	if (cxl_nvd->bridge != cxl_nvb)
		return 0;

	device_release_driver(dev);
	return 0;
}

static int cxl_pmem_region_release_driver(struct device *dev, void *cxl_nvb)
{
	struct cxl_pmem_region *cxlr_pmem;

	if (!is_cxl_pmem_region(dev))
		return 0;

	cxlr_pmem = to_cxl_pmem_region(dev);
	if (cxlr_pmem->bridge != cxl_nvb)
		return 0;

	device_release_driver(dev);
	return 0;
}

static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb,
			       struct nvdimm_bus *nvdimm_bus)
{
	if (!nvdimm_bus)
		return;

	/*
	 * Set the state of cxl_nvdimm devices to unbound / idle before
	 * nvdimm_bus_unregister() rips the nvdimm objects out from
	 * underneath them.
	 */
	bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
			 cxl_pmem_region_release_driver);
	bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
			 cxl_nvdimm_release_driver);
	nvdimm_bus_unregister(nvdimm_bus);
}

static void cxl_nvb_update_state(struct work_struct *work)
{
	struct cxl_nvdimm_bridge *cxl_nvb =
		container_of(work, typeof(*cxl_nvb), state_work);
	struct nvdimm_bus *victim_bus = NULL;
	bool release = false, rescan = false;

	device_lock(&cxl_nvb->dev);
	switch (cxl_nvb->state) {
	case CXL_NVB_ONLINE:
		if (!online_nvdimm_bus(cxl_nvb)) {
			dev_err(&cxl_nvb->dev,
				"failed to establish nvdimm bus\n");
			release = true;
		} else
			rescan = true;
		break;
	case CXL_NVB_OFFLINE:
	case CXL_NVB_DEAD:
		victim_bus = cxl_nvb->nvdimm_bus;
		cxl_nvb->nvdimm_bus = NULL;
		break;
	default:
		break;
	}
	device_unlock(&cxl_nvb->dev);

	if (release)
		device_release_driver(&cxl_nvb->dev);
	if (rescan) {
		int rc = bus_rescan_devices(&cxl_bus_type);

		dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
	}
	offline_nvdimm_bus(cxl_nvb, victim_bus);

	put_device(&cxl_nvb->dev);
}

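/*
 * Kick the ordered cxl_pmem_wq so that cxl_nvb_update_state() runs in
 * process context, where the nvdimm bus is registered or torn down
 * according to the bridge's current state.
 */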
static void cxl_nvdimm_bridge_state_work(struct cxl_nvdimm_bridge *cxl_nvb)
{
	/*
	 * Take a reference that the workqueue will drop if new work
	 * gets queued.
	 */
	get_device(&cxl_nvb->dev);
	if (!queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
		put_device(&cxl_nvb->dev);
}

static void cxl_nvdimm_bridge_remove(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_ONLINE)
		cxl_nvb->state = CXL_NVB_OFFLINE;
	cxl_nvdimm_bridge_state_work(cxl_nvb);
}

static int cxl_nvdimm_bridge_probe(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_DEAD)
		return -ENXIO;

	if (cxl_nvb->state == CXL_NVB_NEW) {
		cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
			.provider_name = "CXL",
			.module = THIS_MODULE,
			.ndctl = cxl_pmem_ctl,
		};

		INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state);
	}

	cxl_nvb->state = CXL_NVB_ONLINE;
	cxl_nvdimm_bridge_state_work(cxl_nvb);

	return 0;
}

static struct cxl_driver cxl_nvdimm_bridge_driver = {
	.name = "cxl_nvdimm_bridge",
	.probe = cxl_nvdimm_bridge_probe,
	.remove = cxl_nvdimm_bridge_remove,
	.id = CXL_DEVICE_NVDIMM_BRIDGE,
};

static int match_cxl_nvdimm(struct device *dev, void *data)
{
	return is_cxl_nvdimm(dev);
}

static void unregister_nvdimm_region(void *nd_region)
{
	nvdimm_region_delete(nd_region);
}

static int cxl_nvdimm_add_region(struct cxl_nvdimm *cxl_nvd,
				 struct cxl_pmem_region *cxlr_pmem)
{
	int rc;

	rc = xa_insert(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem,
		       cxlr_pmem, GFP_KERNEL);
	if (rc)
		return rc;

	get_device(&cxlr_pmem->dev);
	return 0;
}

static void cxl_nvdimm_del_region(struct cxl_nvdimm *cxl_nvd,
				  struct cxl_pmem_region *cxlr_pmem)
{
	/*
	 * It is possible this is called without a corresponding
	 * cxl_nvdimm_add_region for @cxlr_pmem
	 */
	cxlr_pmem = xa_erase(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem);
	if (cxlr_pmem)
		put_device(&cxlr_pmem->dev);
}

static void release_mappings(void *data)
{
	int i;
	struct cxl_pmem_region *cxlr_pmem = data;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr_pmem->bridge;

	device_lock(&cxl_nvb->dev);
	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
		struct cxl_nvdimm *cxl_nvd = m->cxl_nvd;

		cxl_nvdimm_del_region(cxl_nvd, cxlr_pmem);
	}
	device_unlock(&cxl_nvb->dev);
}

static void cxlr_pmem_remove_resource(void *res)
{
	remove_resource(res);
}

struct cxl_pmem_region_info {
	u64 offset;
	u64 serial;
};

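/*
 * Translate a cxl_pmem_region into an nd_region: build one mapping per
 * constituent memdev's nvdimm, derive an interleave-set cookie from the
 * device serial numbers and mapping offsets, and register the result on
 * the bridge's nvdimm_bus.
 */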
static int cxl_pmem_region_probe(struct device *dev)
{
	struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];
	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
	struct cxl_region *cxlr = cxlr_pmem->cxlr;
	struct cxl_pmem_region_info *info = NULL;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct nd_interleave_set *nd_set;
	struct nd_region_desc ndr_desc;
	struct cxl_nvdimm *cxl_nvd;
	struct nvdimm *nvdimm;
	struct resource *res;
	int rc, i = 0;

	cxl_nvb = cxl_find_nvdimm_bridge(&cxlr_pmem->mapping[0].cxlmd->dev);
	if (!cxl_nvb) {
		dev_dbg(dev, "bridge not found\n");
		return -ENXIO;
	}
	cxlr_pmem->bridge = cxl_nvb;

	device_lock(&cxl_nvb->dev);
	if (!cxl_nvb->nvdimm_bus) {
		dev_dbg(dev, "nvdimm bus not found\n");
		rc = -ENXIO;
		goto out_nvb;
	}

	memset(&mappings, 0, sizeof(mappings));
	memset(&ndr_desc, 0, sizeof(ndr_desc));

	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res) {
		rc = -ENOMEM;
		goto out_nvb;
	}

	res->name = "Persistent Memory";
	res->start = cxlr_pmem->hpa_range.start;
	res->end = cxlr_pmem->hpa_range.end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	rc = insert_resource(&iomem_resource, res);
	if (rc)
		goto out_nvb;

	rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
	if (rc)
		goto out_nvb;

	ndr_desc.res = res;
	ndr_desc.provider_data = cxlr_pmem;

	ndr_desc.numa_node = memory_add_physaddr_to_nid(res->start);
	ndr_desc.target_node = phys_to_target_node(res->start);
	if (ndr_desc.target_node == NUMA_NO_NODE) {
		ndr_desc.target_node = ndr_desc.numa_node;
		dev_dbg(&cxlr->dev, "changing target node from %d to %d",
			NUMA_NO_NODE, ndr_desc.target_node);
	}

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set) {
		rc = -ENOMEM;
		goto out_nvb;
	}

	ndr_desc.memregion = cxlr->id;
	set_bit(ND_REGION_CXL, &ndr_desc.flags);
	set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);

	info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto out_nvb;
	}

	rc = devm_add_action_or_reset(dev, release_mappings, cxlr_pmem);
	if (rc)
		goto out_nvd;

	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
		struct cxl_memdev *cxlmd = m->cxlmd;
		struct cxl_dev_state *cxlds = cxlmd->cxlds;
		struct device *d;

		d = device_find_child(&cxlmd->dev, NULL, match_cxl_nvdimm);
		if (!d) {
			dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i,
				dev_name(&cxlmd->dev));
			rc = -ENODEV;
			goto out_nvd;
		}

		/* safe to drop ref now with bridge lock held */
		put_device(d);

		cxl_nvd = to_cxl_nvdimm(d);
		nvdimm = dev_get_drvdata(&cxl_nvd->dev);
		if (!nvdimm) {
			dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
				dev_name(&cxlmd->dev));
			rc = -ENODEV;
			goto out_nvd;
		}

		/*
		 * Pin the region per nvdimm device as those may be released
		 * out-of-order with respect to the region, and a single nvdimm
		 * may be associated with multiple regions.
		 */
		rc = cxl_nvdimm_add_region(cxl_nvd, cxlr_pmem);
		if (rc)
			goto out_nvd;
		m->cxl_nvd = cxl_nvd;
		mappings[i] = (struct nd_mapping_desc) {
			.nvdimm = nvdimm,
			.start = m->start,
			.size = m->size,
			.position = i,
		};
		info[i].offset = m->start;
		info[i].serial = cxlds->serial;
	}
	ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
	ndr_desc.mapping = mappings;

	/*
	 * TODO: enable CXL labels which skip the need for 'interleave-set cookie'
	 */
	nd_set->cookie1 =
		nd_fletcher64(info, sizeof(*info) * cxlr_pmem->nr_mappings, 0);
	nd_set->cookie2 = nd_set->cookie1;
	ndr_desc.nd_set = nd_set;

	cxlr_pmem->nd_region =
		nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
	if (!cxlr_pmem->nd_region) {
		rc = -ENOMEM;
		goto out_nvd;
	}

	rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
				      cxlr_pmem->nd_region);
out_nvd:
	kfree(info);
out_nvb:
	device_unlock(&cxl_nvb->dev);
	put_device(&cxl_nvb->dev);

	return rc;
}

static struct cxl_driver cxl_pmem_region_driver = {
	.name = "cxl_pmem_region",
	.probe = cxl_pmem_region_probe,
	.id = CXL_DEVICE_PMEM_REGION,
};

/*
 * Return all bridges to the CXL_NVB_NEW state to invalidate any
 * ->state_work referring to the now destroyed cxl_pmem_wq.
 */
static int cxl_nvdimm_bridge_reset(struct device *dev, void *data)
{
	struct cxl_nvdimm_bridge *cxl_nvb;

	if (!is_cxl_nvdimm_bridge(dev))
		return 0;

	cxl_nvb = to_cxl_nvdimm_bridge(dev);
	device_lock(dev);
	cxl_nvb->state = CXL_NVB_NEW;
	device_unlock(dev);

	return 0;
}

static void destroy_cxl_pmem_wq(void)
{
	destroy_workqueue(cxl_pmem_wq);
	bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_bridge_reset);
}

static __init int cxl_pmem_init(void)
{
	int rc;

	set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);

	cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
	if (!cxl_pmem_wq)
		return -ENXIO;

	rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
	if (rc)
		goto err_bridge;

	rc = cxl_driver_register(&cxl_nvdimm_driver);
	if (rc)
		goto err_nvdimm;

	rc = cxl_driver_register(&cxl_pmem_region_driver);
	if (rc)
		goto err_region;

	return 0;

err_region:
	cxl_driver_unregister(&cxl_nvdimm_driver);
err_nvdimm:
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
	destroy_cxl_pmem_wq();
	return rc;
}

static __exit void cxl_pmem_exit(void)
{
	cxl_driver_unregister(&cxl_pmem_region_driver);
	cxl_driver_unregister(&cxl_nvdimm_driver);
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
	destroy_cxl_pmem_wq();
}

MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);