/* xref: /linux/drivers/cxl/pmem.c (revision bd429e5355cd58aeb7e38b905fbecee357a6379b) */
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/nd.h>
#include "cxlmem.h"
#include "cxl.h"

extern const struct nvdimm_security_ops *cxl_security_ops;

/*
 * Ordered workqueue for cxl nvdimm device arrival and departure
 * to coordinate bus rescans when a bridge arrives and trigger remove
 * operations when the bridge is removed.
 */
static struct workqueue_struct *cxl_pmem_wq;
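/*
 * Note: an ordered workqueue executes at most one work item at a time,
 * in queueing order, so the bridge state transitions queued on
 * ->state_work are serialized without additional locking.
 */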

static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);

static void clear_exclusive(void *cxlds)
{
	clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
}

static void unregister_nvdimm(void *nvdimm)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge;
	struct cxl_pmem_region *cxlr_pmem;
	unsigned long index;

	device_lock(&cxl_nvb->dev);
	dev_set_drvdata(&cxl_nvd->dev, NULL);
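	/*
	 * Release each region pinned by this nvdimm. The bridge lock is
	 * dropped around device_release_driver() because the region
	 * remove path (release_mappings()) takes the same lock; the
	 * reference taken below keeps the region device alive across
	 * the unlocked window.
	 */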
	xa_for_each(&cxl_nvd->pmem_regions, index, cxlr_pmem) {
		get_device(&cxlr_pmem->dev);
		device_unlock(&cxl_nvb->dev);

		device_release_driver(&cxlr_pmem->dev);
		put_device(&cxlr_pmem->dev);

		device_lock(&cxl_nvb->dev);
	}
	device_unlock(&cxl_nvb->dev);

	nvdimm_delete(nvdimm);
	cxl_nvd->bridge = NULL;
}

static ssize_t id_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_dev_state *cxlds = cxl_nvd->cxlmd->cxlds;

	return sysfs_emit(buf, "%lld\n", cxlds->serial);
}
static DEVICE_ATTR_RO(id);

static struct attribute *cxl_dimm_attributes[] = {
	&dev_attr_id.attr,
	NULL
};

static const struct attribute_group cxl_dimm_attribute_group = {
	.name = "cxl",
	.attrs = cxl_dimm_attributes,
};

static const struct attribute_group *cxl_dimm_attribute_groups[] = {
	&cxl_dimm_attribute_group,
	NULL
};

static int cxl_nvdimm_probe(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	unsigned long flags = 0, cmd_mask = 0;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct nvdimm *nvdimm;
	int rc;

	cxl_nvb = cxl_find_nvdimm_bridge(dev);
	if (!cxl_nvb)
		return -ENXIO;

	device_lock(&cxl_nvb->dev);
	if (!cxl_nvb->nvdimm_bus) {
		rc = -ENXIO;
		goto out;
	}

	set_exclusive_cxl_commands(cxlds, exclusive_cmds);
	rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
	if (rc)
		goto out;

	set_bit(NDD_LABELING, &flags);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
	nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd,
				 cxl_dimm_attribute_groups, flags,
				 cmd_mask, 0, NULL, NULL, cxl_security_ops, NULL);
	if (!nvdimm) {
		rc = -ENOMEM;
		goto out;
	}

	dev_set_drvdata(dev, nvdimm);
	cxl_nvd->bridge = cxl_nvb;
	rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
	device_unlock(&cxl_nvb->dev);
	put_device(&cxl_nvb->dev);

	return rc;
}

static struct cxl_driver cxl_nvdimm_driver = {
	.name = "cxl_nvdimm",
	.probe = cxl_nvdimm_probe,
	.id = CXL_DEVICE_NVDIMM,
};

static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
				    struct nd_cmd_get_config_size *cmd,
				    unsigned int buf_len)
{
	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	*cmd = (struct nd_cmd_get_config_size) {
		 .config_size = cxlds->lsa_size,
		 .max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa),
	};

	return 0;
}
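
/*
 * Illustrative userspace sketch (not part of this driver): the command
 * above is reached through the standard LIBNVDIMM ioctl path. Assuming
 * a dimm device node such as /dev/nmem0 exists, and with error
 * handling trimmed:
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ndctl.h>
 *
 *	int fd = open("/dev/nmem0", O_RDWR);
 *	struct nd_cmd_get_config_size cmd = { 0 };
 *
 *	if (ioctl(fd, ND_IOCTL_GET_CONFIG_SIZE, &cmd) == 0)
 *		printf("LSA size: %u, max transfer: %u\n",
 *		       cmd.config_size, cmd.max_xfer);
 */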

static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
				    struct nd_cmd_get_config_data_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mbox_get_lsa get_lsa;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;
	if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
		return -EINVAL;

	get_lsa = (struct cxl_mbox_get_lsa) {
		.offset = cpu_to_le32(cmd->in_offset),
		.length = cpu_to_le32(cmd->in_length),
	};

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa,
			       sizeof(get_lsa), cmd->out_buf, cmd->in_length);
	cmd->status = 0;

	return rc;
}
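
/*
 * For reference, the header above (include/uapi/linux/ndctl.h) carries
 * __u32 in_offset and in_length selecting the LSA span to read, a
 * __u32 status, and a trailing out_buf[] that receives in_length bytes;
 * the struct_size() check validates exactly that trailing buffer.
 */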

static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
				    struct nd_cmd_set_config_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mbox_set_lsa *set_lsa;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	/* 4-byte status follows the input data in the payload */
	if (size_add(struct_size(cmd, in_buf, cmd->in_length), 4) > buf_len)
		return -EINVAL;

	set_lsa =
		kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
	if (!set_lsa)
		return -ENOMEM;

	*set_lsa = (struct cxl_mbox_set_lsa) {
		.offset = cpu_to_le32(cmd->in_offset),
	};
	memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_LSA, set_lsa,
			       struct_size(set_lsa, data, cmd->in_length),
			       NULL, 0);

	/*
	 * Set "firmware" status (4 packed bytes at the end of the input
	 * payload).
	 */
	put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]);
	kvfree(set_lsa);

	return rc;
}
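
/*
 * For reference, the ND_CMD_SET_CONFIG_DATA payload is laid out as:
 *
 *	+-----------+-----------+-------------------+---------------+
 *	| in_offset | in_length | in_buf[in_length] | 4-byte status |
 *	+-----------+-----------+-------------------+---------------+
 *
 * which is why the size check above adds 4 bytes beyond struct_size(),
 * and why the status write targets &cmd->in_buf[cmd->in_length].
 */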

static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
			       void *buf, unsigned int buf_len)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		return cxl_pmem_get_config_size(cxlds, buf, buf_len);
	case ND_CMD_GET_CONFIG_DATA:
		return cxl_pmem_get_config_data(cxlds, buf, buf_len);
	case ND_CMD_SET_CONFIG_DATA:
		return cxl_pmem_set_config_data(cxlds, buf, buf_len);
	default:
		return -ENOTTY;
	}
}
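
/*
 * These three commands are the only passthroughs this driver accepts;
 * GET_CONFIG_SIZE is answered from cached identify data (lsa_size),
 * while GET/SET_CONFIG_DATA translate to the CXL Get/Set LSA mailbox
 * opcodes (CXL_MBOX_OP_GET_LSA / CXL_MBOX_OP_SET_LSA).
 */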

static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len, int *cmd_rc)
{
	/*
	 * No firmware response to translate, let the transport error
	 * code take precedence.
	 */
	*cmd_rc = 0;

	if (!nvdimm)
		return -ENOTTY;
	return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}

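/*
 * Called with the bridge device_lock() held (see cxl_nvb_update_state());
 * a no-op once ->nvdimm_bus is already established.
 */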
static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
{
	if (cxl_nvb->nvdimm_bus)
		return true;
	cxl_nvb->nvdimm_bus =
		nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);
	return cxl_nvb->nvdimm_bus != NULL;
}

static int cxl_nvdimm_release_driver(struct device *dev, void *cxl_nvb)
{
	struct cxl_nvdimm *cxl_nvd;

	if (!is_cxl_nvdimm(dev))
		return 0;

	cxl_nvd = to_cxl_nvdimm(dev);
	if (cxl_nvd->bridge != cxl_nvb)
		return 0;

	device_release_driver(dev);
	return 0;
}

static int cxl_pmem_region_release_driver(struct device *dev, void *cxl_nvb)
{
	struct cxl_pmem_region *cxlr_pmem;

	if (!is_cxl_pmem_region(dev))
		return 0;

	cxlr_pmem = to_cxl_pmem_region(dev);
	if (cxlr_pmem->bridge != cxl_nvb)
		return 0;

	device_release_driver(dev);
	return 0;
}

static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb,
			       struct nvdimm_bus *nvdimm_bus)
{
	if (!nvdimm_bus)
		return;

	/*
	 * Set the state of cxl_nvdimm devices to unbound / idle before
	 * nvdimm_bus_unregister() rips the nvdimm objects out from
	 * underneath them.
	 */
	bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
			 cxl_pmem_region_release_driver);
	bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
			 cxl_nvdimm_release_driver);
	nvdimm_bus_unregister(nvdimm_bus);
}

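/*
 * Summary of the bridge state machine realized below (CXL_NVB_DEAD is
 * set elsewhere in the CXL core when the bridge device itself is being
 * unregistered):
 *
 *	NEW/OFFLINE --probe--> ONLINE: register the nvdimm bus, rescan
 *	ONLINE -----remove--> OFFLINE: detach dependent drivers and
 *				       unregister the nvdimm bus
 */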
static void cxl_nvb_update_state(struct work_struct *work)
{
	struct cxl_nvdimm_bridge *cxl_nvb =
		container_of(work, typeof(*cxl_nvb), state_work);
	struct nvdimm_bus *victim_bus = NULL;
	bool release = false, rescan = false;

	device_lock(&cxl_nvb->dev);
	switch (cxl_nvb->state) {
	case CXL_NVB_ONLINE:
		if (!online_nvdimm_bus(cxl_nvb)) {
			dev_err(&cxl_nvb->dev,
				"failed to establish nvdimm bus\n");
			release = true;
		} else {
			rescan = true;
		}
		break;
	case CXL_NVB_OFFLINE:
	case CXL_NVB_DEAD:
		victim_bus = cxl_nvb->nvdimm_bus;
		cxl_nvb->nvdimm_bus = NULL;
		break;
	default:
		break;
	}
	device_unlock(&cxl_nvb->dev);

	if (release)
		device_release_driver(&cxl_nvb->dev);
	if (rescan) {
		int rc = bus_rescan_devices(&cxl_bus_type);

		dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
	}
	offline_nvdimm_bus(cxl_nvb, victim_bus);

	put_device(&cxl_nvb->dev);
}

static void cxl_nvdimm_bridge_state_work(struct cxl_nvdimm_bridge *cxl_nvb)
{
	/*
	 * Take a reference that the workqueue will drop if new work
	 * gets queued.
	 */
	get_device(&cxl_nvb->dev);
	if (!queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
		put_device(&cxl_nvb->dev);
}

static void cxl_nvdimm_bridge_remove(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_ONLINE)
		cxl_nvb->state = CXL_NVB_OFFLINE;
	cxl_nvdimm_bridge_state_work(cxl_nvb);
}

static int cxl_nvdimm_bridge_probe(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_DEAD)
		return -ENXIO;

	if (cxl_nvb->state == CXL_NVB_NEW) {
		cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
			.provider_name = "CXL",
			.module = THIS_MODULE,
			.ndctl = cxl_pmem_ctl,
		};

		INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state);
	}

	cxl_nvb->state = CXL_NVB_ONLINE;
	cxl_nvdimm_bridge_state_work(cxl_nvb);

	return 0;
}

static struct cxl_driver cxl_nvdimm_bridge_driver = {
	.name = "cxl_nvdimm_bridge",
	.probe = cxl_nvdimm_bridge_probe,
	.remove = cxl_nvdimm_bridge_remove,
	.id = CXL_DEVICE_NVDIMM_BRIDGE,
};

static int match_cxl_nvdimm(struct device *dev, void *data)
{
	return is_cxl_nvdimm(dev);
}

static void unregister_nvdimm_region(void *nd_region)
{
	nvdimm_region_delete(nd_region);
}

static int cxl_nvdimm_add_region(struct cxl_nvdimm *cxl_nvd,
				 struct cxl_pmem_region *cxlr_pmem)
{
	int rc;

	rc = xa_insert(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem,
		       cxlr_pmem, GFP_KERNEL);
	if (rc)
		return rc;

	get_device(&cxlr_pmem->dev);
	return 0;
}

static void cxl_nvdimm_del_region(struct cxl_nvdimm *cxl_nvd,
				  struct cxl_pmem_region *cxlr_pmem)
{
	/*
	 * It is possible this is called without a corresponding
	 * cxl_nvdimm_add_region() for @cxlr_pmem.
	 */
	cxlr_pmem = xa_erase(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem);
	if (cxlr_pmem)
		put_device(&cxlr_pmem->dev);
}

static void release_mappings(void *data)
{
	int i;
	struct cxl_pmem_region *cxlr_pmem = data;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr_pmem->bridge;

	device_lock(&cxl_nvb->dev);
	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
		struct cxl_nvdimm *cxl_nvd = m->cxl_nvd;

		cxl_nvdimm_del_region(cxl_nvd, cxlr_pmem);
	}
	device_unlock(&cxl_nvb->dev);
}

static void cxlr_pmem_remove_resource(void *res)
{
	remove_resource(res);
}

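/*
 * Per-mapping identity data (device offset and serial number), fed to
 * nd_fletcher64() in cxl_pmem_region_probe() to derive the
 * interleave-set cookie that LIBNVDIMM uses to validate namespace
 * labels against the region's dimm population.
 */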
struct cxl_pmem_region_info {
	u64 offset;
	u64 serial;
};

static int cxl_pmem_region_probe(struct device *dev)
{
	struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];
	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
	struct cxl_region *cxlr = cxlr_pmem->cxlr;
	struct cxl_pmem_region_info *info = NULL;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct nd_interleave_set *nd_set;
	struct nd_region_desc ndr_desc;
	struct cxl_nvdimm *cxl_nvd;
	struct nvdimm *nvdimm;
	struct resource *res;
	int rc, i = 0;

	cxl_nvb = cxl_find_nvdimm_bridge(&cxlr_pmem->mapping[0].cxlmd->dev);
	if (!cxl_nvb) {
		dev_dbg(dev, "bridge not found\n");
		return -ENXIO;
	}
	cxlr_pmem->bridge = cxl_nvb;

	device_lock(&cxl_nvb->dev);
	if (!cxl_nvb->nvdimm_bus) {
		dev_dbg(dev, "nvdimm bus not found\n");
		rc = -ENXIO;
		goto out_nvb;
	}

	memset(&mappings, 0, sizeof(mappings));
	memset(&ndr_desc, 0, sizeof(ndr_desc));

	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res) {
		rc = -ENOMEM;
		goto out_nvb;
	}

	res->name = "Persistent Memory";
	res->start = cxlr_pmem->hpa_range.start;
	res->end = cxlr_pmem->hpa_range.end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	rc = insert_resource(&iomem_resource, res);
	if (rc)
		goto out_nvb;

	rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
	if (rc)
		goto out_nvb;

	ndr_desc.res = res;
	ndr_desc.provider_data = cxlr_pmem;

	ndr_desc.numa_node = memory_add_physaddr_to_nid(res->start);
	ndr_desc.target_node = phys_to_target_node(res->start);
	if (ndr_desc.target_node == NUMA_NO_NODE) {
		ndr_desc.target_node = ndr_desc.numa_node;
		dev_dbg(&cxlr->dev, "changing target node from %d to %d\n",
			NUMA_NO_NODE, ndr_desc.target_node);
	}

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set) {
		rc = -ENOMEM;
		goto out_nvb;
	}

	ndr_desc.memregion = cxlr->id;
	set_bit(ND_REGION_CXL, &ndr_desc.flags);
	set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);

	info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto out_nvb;
	}

	rc = devm_add_action_or_reset(dev, release_mappings, cxlr_pmem);
	if (rc)
		goto out_nvd;

	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
		struct cxl_memdev *cxlmd = m->cxlmd;
		struct cxl_dev_state *cxlds = cxlmd->cxlds;
		struct device *d;

		d = device_find_child(&cxlmd->dev, NULL, match_cxl_nvdimm);
		if (!d) {
			dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i,
				dev_name(&cxlmd->dev));
			rc = -ENODEV;
			goto out_nvd;
		}

		/* safe to drop ref now with bridge lock held */
		put_device(d);

		cxl_nvd = to_cxl_nvdimm(d);
		nvdimm = dev_get_drvdata(&cxl_nvd->dev);
		if (!nvdimm) {
			dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
				dev_name(&cxlmd->dev));
			rc = -ENODEV;
			goto out_nvd;
		}

		/*
		 * Pin the region per nvdimm device as those may be released
		 * out-of-order with respect to the region, and a single nvdimm
		 * may be associated with multiple regions.
		 */
		rc = cxl_nvdimm_add_region(cxl_nvd, cxlr_pmem);
		if (rc)
			goto out_nvd;
		m->cxl_nvd = cxl_nvd;
		mappings[i] = (struct nd_mapping_desc) {
			.nvdimm = nvdimm,
			.start = m->start,
			.size = m->size,
			.position = i,
		};
		info[i].offset = m->start;
		info[i].serial = cxlds->serial;
	}
	ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
	ndr_desc.mapping = mappings;

	/*
	 * TODO: enable CXL labels, which remove the need for an
	 * 'interleave-set cookie'.
	 */
	nd_set->cookie1 =
		nd_fletcher64(info, sizeof(*info) * cxlr_pmem->nr_mappings, 0);
	nd_set->cookie2 = nd_set->cookie1;
	ndr_desc.nd_set = nd_set;

	cxlr_pmem->nd_region =
		nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
	if (!cxlr_pmem->nd_region) {
		rc = -ENOMEM;
		goto out_nvd;
	}

	rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
				      cxlr_pmem->nd_region);
out_nvd:
	kfree(info);
out_nvb:
	device_unlock(&cxl_nvb->dev);
	put_device(&cxl_nvb->dev);

	return rc;
}
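
/*
 * Illustrative note: once nvdimm_pmem_region_create() succeeds,
 * LIBNVDIMM surfaces the region as /sys/bus/nd/devices/regionN and
 * namespaces can then be provisioned from userspace, e.g.:
 *
 *	ndctl create-namespace --region=regionN --mode=fsdax
 */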

static struct cxl_driver cxl_pmem_region_driver = {
	.name = "cxl_pmem_region",
	.probe = cxl_pmem_region_probe,
	.id = CXL_DEVICE_PMEM_REGION,
};

/*
 * Return all bridges to the CXL_NVB_NEW state to invalidate any
 * ->state_work referring to the now destroyed cxl_pmem_wq.
 */
static int cxl_nvdimm_bridge_reset(struct device *dev, void *data)
{
	struct cxl_nvdimm_bridge *cxl_nvb;

	if (!is_cxl_nvdimm_bridge(dev))
		return 0;

	cxl_nvb = to_cxl_nvdimm_bridge(dev);
	device_lock(dev);
	cxl_nvb->state = CXL_NVB_NEW;
	device_unlock(dev);

	return 0;
}

static void destroy_cxl_pmem_wq(void)
{
	destroy_workqueue(cxl_pmem_wq);
	bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_bridge_reset);
}

static __init int cxl_pmem_init(void)
{
	int rc;

	set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);

	cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
	if (!cxl_pmem_wq)
		return -ENXIO;

	rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
	if (rc)
		goto err_bridge;

	rc = cxl_driver_register(&cxl_nvdimm_driver);
	if (rc)
		goto err_nvdimm;

	rc = cxl_driver_register(&cxl_pmem_region_driver);
	if (rc)
		goto err_region;

	return 0;

err_region:
	cxl_driver_unregister(&cxl_nvdimm_driver);
err_nvdimm:
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
	destroy_cxl_pmem_wq();
	return rc;
}

static __exit void cxl_pmem_exit(void)
{
	cxl_driver_unregister(&cxl_pmem_region_driver);
	cxl_driver_unregister(&cxl_nvdimm_driver);
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
	destroy_cxl_pmem_wq();
}

MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);