xref: /linux/drivers/cxl/core/pmem.c (revision e80a48bade619ec5a92230b3d4ae84bfc2746822)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. */
3 #include <linux/device.h>
4 #include <linux/slab.h>
5 #include <linux/idr.h>
6 #include <cxlmem.h>
7 #include <cxl.h>
8 #include "core.h"
9 
10 /**
11  * DOC: cxl pmem
12  *
13  * The core CXL PMEM infrastructure supports persistent memory
14  * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
15  * 'bridge' device is added at the root of a CXL device topology if
16  * platform firmware advertises at least one persistent memory capable
17  * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
18  * device. Then for each cxl_memdev in the CXL device topology a bridge
19  * device is added to host a LIBNVDIMM dimm object. When these bridges
20  * are registered native LIBNVDIMM uapis are translated to CXL
21  * operations, for example, namespace label access commands.
22  */
23 
/* Allocates the unique ids used in "nvdimm-bridge%d" device names */
static DEFINE_IDA(cxl_nvdimm_bridge_ida);
25 
26 static void cxl_nvdimm_bridge_release(struct device *dev)
27 {
28 	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
29 
30 	ida_free(&cxl_nvdimm_bridge_ida, cxl_nvb->id);
31 	kfree(cxl_nvb);
32 }
33 
/* Expose the common CXL 'base' sysfs attribute group on bridge devices */
static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

/* device_type for the root-level LIBNVDIMM bridge device */
const struct device_type cxl_nvdimm_bridge_type = {
	.name = "cxl_nvdimm_bridge",
	.release = cxl_nvdimm_bridge_release,
	.groups = cxl_nvdimm_bridge_attribute_groups,
};
44 
45 struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
46 {
47 	if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
48 			  "not a cxl_nvdimm_bridge device\n"))
49 		return NULL;
50 	return container_of(dev, struct cxl_nvdimm_bridge, dev);
51 }
52 EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, CXL);
53 
/* Check whether @dev is a root-level CXL nvdimm-bridge device */
bool is_cxl_nvdimm_bridge(struct device *dev)
{
	return dev->type == &cxl_nvdimm_bridge_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, CXL);
59 
/* device_find_child() match callback: select the nvdimm-bridge child */
static int match_nvdimm_bridge(struct device *dev, void *data)
{
	return is_cxl_nvdimm_bridge(dev);
}
64 
65 struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *start)
66 {
67 	struct cxl_port *port = find_cxl_root(start);
68 	struct device *dev;
69 
70 	if (!port)
71 		return NULL;
72 
73 	dev = device_find_child(&port->dev, NULL, match_nvdimm_bridge);
74 	put_device(&port->dev);
75 
76 	if (!dev)
77 		return NULL;
78 
79 	return to_cxl_nvdimm_bridge(dev);
80 }
81 EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL);
82 
83 static struct lock_class_key cxl_nvdimm_bridge_key;
84 
85 static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port)
86 {
87 	struct cxl_nvdimm_bridge *cxl_nvb;
88 	struct device *dev;
89 	int rc;
90 
91 	cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
92 	if (!cxl_nvb)
93 		return ERR_PTR(-ENOMEM);
94 
95 	rc = ida_alloc(&cxl_nvdimm_bridge_ida, GFP_KERNEL);
96 	if (rc < 0)
97 		goto err;
98 	cxl_nvb->id = rc;
99 
100 	dev = &cxl_nvb->dev;
101 	cxl_nvb->port = port;
102 	device_initialize(dev);
103 	lockdep_set_class(&dev->mutex, &cxl_nvdimm_bridge_key);
104 	device_set_pm_not_required(dev);
105 	dev->parent = &port->dev;
106 	dev->bus = &cxl_bus_type;
107 	dev->type = &cxl_nvdimm_bridge_type;
108 
109 	return cxl_nvb;
110 
111 err:
112 	kfree(cxl_nvb);
113 	return ERR_PTR(rc);
114 }
115 
/* devm action: tear down the bridge registered by devm_cxl_add_nvdimm_bridge() */
static void unregister_nvb(void *_cxl_nvb)
{
	struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;

	device_unregister(&cxl_nvb->dev);
}
122 
123 /**
124  * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
125  * @host: platform firmware root device
126  * @port: CXL port at the root of a CXL topology
127  *
128  * Return: bridge device that can host cxl_nvdimm objects
129  */
130 struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
131 						     struct cxl_port *port)
132 {
133 	struct cxl_nvdimm_bridge *cxl_nvb;
134 	struct device *dev;
135 	int rc;
136 
137 	if (!IS_ENABLED(CONFIG_CXL_PMEM))
138 		return ERR_PTR(-ENXIO);
139 
140 	cxl_nvb = cxl_nvdimm_bridge_alloc(port);
141 	if (IS_ERR(cxl_nvb))
142 		return cxl_nvb;
143 
144 	dev = &cxl_nvb->dev;
145 	rc = dev_set_name(dev, "nvdimm-bridge%d", cxl_nvb->id);
146 	if (rc)
147 		goto err;
148 
149 	rc = device_add(dev);
150 	if (rc)
151 		goto err;
152 
153 	rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
154 	if (rc)
155 		return ERR_PTR(rc);
156 
157 	return cxl_nvb;
158 
159 err:
160 	put_device(dev);
161 	return ERR_PTR(rc);
162 }
163 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, CXL);
164 
/* dev->release for cxl_nvdimm devices: free the cxl_nvdimm_alloc() allocation */
static void cxl_nvdimm_release(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);

	kfree(cxl_nvd);
}

/* Expose the common CXL 'base' sysfs attribute group on cxl_nvdimm devices */
static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

/* device_type for the per-memdev LIBNVDIMM bridge (cxl_nvdimm) device */
const struct device_type cxl_nvdimm_type = {
	.name = "cxl_nvdimm",
	.release = cxl_nvdimm_release,
	.groups = cxl_nvdimm_attribute_groups,
};
182 
/* Check whether @dev is a per-memdev cxl_nvdimm device */
bool is_cxl_nvdimm(struct device *dev)
{
	return dev->type == &cxl_nvdimm_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, CXL);
188 
/**
 * to_cxl_nvdimm() - cast a generic device to a cxl_nvdimm
 * @dev: device expected to be of cxl_nvdimm_type
 *
 * Return: the containing cxl_nvdimm, or NULL (with a one-time WARN)
 * when @dev is not a cxl_nvdimm device.
 */
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
			  "not a cxl_nvdimm device\n"))
		return NULL;
	return container_of(dev, struct cxl_nvdimm, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);
197 
/* distinct lockdep class for cxl_nvdimm device_lock() */
static struct lock_class_key cxl_nvdimm_key;

/*
 * Allocate and initialize (but do not add) a cxl_nvdimm device parented
 * to @cxlmd, and link cxlmd->cxl_nvd back to it so teardown paths can
 * find the object. @cxl_nvb is not referenced in this body; presumably
 * it is kept so callers convey the bridge association -- the caller
 * records the bridge in cxlmd->cxl_nvb itself (see devm_cxl_add_nvdimm).
 * On error the caller drops the embedded device reference with
 * put_device(), freeing via cxl_nvdimm_release().
 */
static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_nvdimm_bridge *cxl_nvb,
					   struct cxl_memdev *cxlmd)
{
	struct cxl_nvdimm *cxl_nvd;
	struct device *dev;

	cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
	if (!cxl_nvd)
		return ERR_PTR(-ENOMEM);

	dev = &cxl_nvd->dev;
	/* back-pointers set before device_add() so they are never observed unset */
	cxl_nvd->cxlmd = cxlmd;
	cxlmd->cxl_nvd = cxl_nvd;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_nvdimm_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlmd->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_nvdimm_type;
	/*
	 * A "%llx" string is 17-bytes vs dimm_id that is max
	 * NVDIMM_KEY_DESC_LEN
	 */
	BUILD_BUG_ON(sizeof(cxl_nvd->dev_id) < 17 ||
		     sizeof(cxl_nvd->dev_id) > NVDIMM_KEY_DESC_LEN);
	/* stable identifier derived from the device serial number */
	sprintf(cxl_nvd->dev_id, "%llx", cxlmd->cxlds->serial);

	return cxl_nvd;
}
229 
/*
 * devm action (registered on the bridge) that severs the cxlmd <->
 * cxl_nvd back-pointers and unregisters the cxl_nvdimm device.
 */
static void cxl_nvd_unregister(void *_cxl_nvd)
{
	struct cxl_nvdimm *cxl_nvd = _cxl_nvd;
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;

	/*
	 * Either the bridge is in ->remove() context under the device_lock(),
	 * or cxlmd_release_nvdimm() is cancelling the bridge's release action
	 * for @cxl_nvd and doing it itself (while manually holding the bridge
	 * lock).
	 */
	device_lock_assert(&cxl_nvb->dev);
	cxl_nvd->cxlmd = NULL;
	cxlmd->cxl_nvd = NULL;
	device_unregister(&cxl_nvd->dev);
}
247 
/*
 * devm action (registered on the memdev) run at endpoint remove: if the
 * bridge has not already torn down this memdev's cxl_nvdimm, cancel the
 * bridge's release action and run it now, then drop the bridge
 * reference taken in devm_cxl_add_nvdimm().
 */
static void cxlmd_release_nvdimm(void *_cxlmd)
{
	struct cxl_memdev *cxlmd = _cxlmd;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;

	/* serialize against the bridge's own teardown of cxl_nvd */
	device_lock(&cxl_nvb->dev);
	if (cxlmd->cxl_nvd)
		devm_release_action(&cxl_nvb->dev, cxl_nvd_unregister,
				    cxlmd->cxl_nvd);
	device_unlock(&cxl_nvb->dev);
	/* pairs with the get in cxl_find_nvdimm_bridge() */
	put_device(&cxl_nvb->dev);
}
260 
261 /**
262  * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
263  * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
264  *
265  * Return: 0 on success negative error code on failure.
266  */
267 int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
268 {
269 	struct cxl_nvdimm_bridge *cxl_nvb;
270 	struct cxl_nvdimm *cxl_nvd;
271 	struct device *dev;
272 	int rc;
273 
274 	cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
275 	if (!cxl_nvb)
276 		return -ENODEV;
277 
278 	cxl_nvd = cxl_nvdimm_alloc(cxl_nvb, cxlmd);
279 	if (IS_ERR(cxl_nvd)) {
280 		rc = PTR_ERR(cxl_nvd);
281 		goto err_alloc;
282 	}
283 	cxlmd->cxl_nvb = cxl_nvb;
284 
285 	dev = &cxl_nvd->dev;
286 	rc = dev_set_name(dev, "pmem%d", cxlmd->id);
287 	if (rc)
288 		goto err;
289 
290 	rc = device_add(dev);
291 	if (rc)
292 		goto err;
293 
294 	dev_dbg(&cxlmd->dev, "register %s\n", dev_name(dev));
295 
296 	/*
297 	 * The two actions below arrange for @cxl_nvd to be deleted when either
298 	 * the top-level PMEM bridge goes down, or the endpoint device goes
299 	 * through ->remove().
300 	 */
301 	device_lock(&cxl_nvb->dev);
302 	if (cxl_nvb->dev.driver)
303 		rc = devm_add_action_or_reset(&cxl_nvb->dev, cxl_nvd_unregister,
304 					      cxl_nvd);
305 	else
306 		rc = -ENXIO;
307 	device_unlock(&cxl_nvb->dev);
308 
309 	if (rc)
310 		goto err_alloc;
311 
312 	/* @cxlmd carries a reference on @cxl_nvb until cxlmd_release_nvdimm */
313 	return devm_add_action_or_reset(&cxlmd->dev, cxlmd_release_nvdimm, cxlmd);
314 
315 err:
316 	put_device(dev);
317 err_alloc:
318 	cxlmd->cxl_nvb = NULL;
319 	cxlmd->cxl_nvd = NULL;
320 	put_device(&cxl_nvb->dev);
321 
322 	return rc;
323 }
324 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL);
325