1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. */
3 #include <linux/device.h>
4 #include <linux/slab.h>
5 #include <linux/idr.h>
6 #include <cxlmem.h>
7 #include <cxl.h>
8 #include "core.h"
9
/**
 * DOC: cxl pmem
 *
 * The core CXL PMEM infrastructure supports persistent memory
 * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
 * 'bridge' device is added at the root of a CXL device topology if
 * platform firmware advertises at least one persistent memory capable
 * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
 * device. Then for each cxl_memdev in the CXL device topology a bridge
 * device is added to host a LIBNVDIMM dimm object. When these bridges
 * are registered native LIBNVDIMM uapis are translated to CXL
 * operations, for example, namespace label access commands.
 */
23
/* Allocator for the unique ids used in "nvdimm-bridge%d" device names */
static DEFINE_IDA(cxl_nvdimm_bridge_ida);
25
/*
 * device_type release callback: runs when the last reference to the
 * bridge device is dropped; returns the id and frees the allocation.
 */
static void cxl_nvdimm_bridge_release(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	ida_free(&cxl_nvdimm_bridge_ida, cxl_nvb->id);
	kfree(cxl_nvb);
}
33
/* Base CXL sysfs attributes shared with other cxl_bus_type devices */
static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

/* Device type that identifies nvdimm-bridge devices on the CXL bus */
const struct device_type cxl_nvdimm_bridge_type = {
	.name = "cxl_nvdimm_bridge",
	.release = cxl_nvdimm_bridge_release,
	.groups = cxl_nvdimm_bridge_attribute_groups,
};
44
/**
 * to_cxl_nvdimm_bridge() - cast a device to its cxl_nvdimm_bridge container
 * @dev: device expected to be of &cxl_nvdimm_bridge_type
 *
 * Returns: the containing bridge, or NULL (with a one-time warning) if
 * @dev is not an nvdimm-bridge device.
 */
struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
			  "not a cxl_nvdimm_bridge device\n"))
		return NULL;
	return container_of(dev, struct cxl_nvdimm_bridge, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, "CXL");
53
/**
 * cxl_find_nvdimm_bridge() - find a bridge device relative to a port
 * @port: any descendant port of an nvdimm-bridge associated
 *        root-cxl-port
 *
 * Walks up to the CXL root for @port and searches the root port's
 * children for a device of &cxl_nvdimm_bridge_type. On success the
 * bridge is returned with a device reference held (taken by
 * device_find_child()); the caller is responsible for put_device().
 */
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port)
{
	/* __free(put_cxl_root) drops the root reference on scope exit */
	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
	struct device *dev;

	if (!cxl_root)
		return NULL;

	dev = device_find_child(&cxl_root->port.dev,
				&cxl_nvdimm_bridge_type,
				device_match_type);

	if (!dev)
		return NULL;

	return to_cxl_nvdimm_bridge(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, "CXL");
77
/* Distinct lockdep class for nvdimm-bridge device locks */
static struct lock_class_key cxl_nvdimm_bridge_key;

/*
 * Allocate and initialize (but not add) a bridge device parented to
 * @port. On success the device is in the device_initialize()d state;
 * if device_add() is never attempted the caller must drop it with
 * put_device() so cxl_nvdimm_bridge_release() runs.
 */
static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port)
{
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	cxl_nvb = kzalloc_obj(*cxl_nvb);
	if (!cxl_nvb)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_nvdimm_bridge_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	cxl_nvb->id = rc;

	dev = &cxl_nvb->dev;
	cxl_nvb->port = port;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_nvdimm_bridge_key);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_nvdimm_bridge_type;

	return cxl_nvb;

err:
	/* id allocation failed; device never initialized, plain kfree is safe */
	kfree(cxl_nvb);
	return ERR_PTR(rc);
}
110
unregister_nvb(void * _cxl_nvb)111 static void unregister_nvb(void *_cxl_nvb)
112 {
113 struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
114
115 device_unregister(&cxl_nvb->dev);
116 }
117
cxl_nvdimm_bridge_failed_attach(struct cxl_nvdimm_bridge * cxl_nvb)118 static bool cxl_nvdimm_bridge_failed_attach(struct cxl_nvdimm_bridge *cxl_nvb)
119 {
120 struct device *dev = &cxl_nvb->dev;
121
122 guard(device)(dev);
123 /* If the device has no driver, then it failed to attach. */
124 return dev->driver == NULL;
125 }
126
/**
 * __devm_cxl_add_nvdimm_bridge() - add and register an nvdimm-bridge device
 * @host: devm context whose teardown unregisters the bridge
 * @port: CXL port to parent the new bridge device
 *
 * Returns: the registered bridge on success, otherwise an ERR_PTR():
 * -ENXIO when CONFIG_CXL_PMEM is disabled, -ENODEV when the bridge
 * driver fails to attach, or the underlying allocation / registration
 * error code.
 */
struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host,
						       struct cxl_port *port)
{
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	if (!IS_ENABLED(CONFIG_CXL_PMEM))
		return ERR_PTR(-ENXIO);

	cxl_nvb = cxl_nvdimm_bridge_alloc(port);
	if (IS_ERR(cxl_nvb))
		return cxl_nvb;

	dev = &cxl_nvb->dev;
	rc = dev_set_name(dev, "nvdimm-bridge%d", cxl_nvb->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	/* A live bridge requires a bound driver; otherwise back out */
	if (cxl_nvdimm_bridge_failed_attach(cxl_nvb)) {
		unregister_nvb(cxl_nvb);
		return ERR_PTR(-ENODEV);
	}

	rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
	if (rc)
		return ERR_PTR(rc);

	return cxl_nvb;

err:
	/* device_add() never succeeded; the release callback frees cxl_nvb */
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_FOR_MODULES(__devm_cxl_add_nvdimm_bridge, "cxl_pmem");
166
/* device_type release callback: frees the cxl_nvdimm allocation */
static void cxl_nvdimm_release(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);

	kfree(cxl_nvd);
}
173
/* Base CXL sysfs attributes shared with other cxl_bus_type devices */
static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

/* Device type that identifies cxl_nvdimm devices on the CXL bus */
const struct device_type cxl_nvdimm_type = {
	.name = "cxl_nvdimm",
	.release = cxl_nvdimm_release,
	.groups = cxl_nvdimm_attribute_groups,
};
184
/* Return true if @dev is a cxl_nvdimm device (by device_type identity) */
bool is_cxl_nvdimm(struct device *dev)
{
	return dev->type == &cxl_nvdimm_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, "CXL");
190
/**
 * to_cxl_nvdimm() - cast a device to its cxl_nvdimm container
 * @dev: device expected to be of &cxl_nvdimm_type
 *
 * Returns: the containing cxl_nvdimm, or NULL (with a one-time warning)
 * if @dev is not a cxl_nvdimm device.
 */
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
			  "not a cxl_nvdimm device\n"))
		return NULL;
	return container_of(dev, struct cxl_nvdimm, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, "CXL");
199
/* Distinct lockdep class for cxl_nvdimm device locks */
static struct lock_class_key cxl_nvdimm_key;

/*
 * Allocate and initialize (but not add) a cxl_nvdimm device parented to
 * @cxlmd. Also wires up the cxlmd->cxl_nvd back-pointer, which is
 * cleared again by cxlmd_release_nvdimm(). The @cxl_nvb parameter is
 * currently unused here; the bridge association is recorded by the
 * caller (devm_cxl_add_nvdimm()).
 */
static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_nvdimm_bridge *cxl_nvb,
					   struct cxl_memdev *cxlmd)
{
	struct cxl_nvdimm *cxl_nvd;
	struct device *dev;

	cxl_nvd = kzalloc_obj(*cxl_nvd);
	if (!cxl_nvd)
		return ERR_PTR(-ENOMEM);

	dev = &cxl_nvd->dev;
	cxl_nvd->cxlmd = cxlmd;
	cxlmd->cxl_nvd = cxl_nvd;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_nvdimm_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlmd->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_nvdimm_type;
	/*
	 * A "%llx" string is 17-bytes vs dimm_id that is max
	 * NVDIMM_KEY_DESC_LEN
	 */
	BUILD_BUG_ON(sizeof(cxl_nvd->dev_id) < 17 ||
		     sizeof(cxl_nvd->dev_id) > NVDIMM_KEY_DESC_LEN);
	sprintf(cxl_nvd->dev_id, "%llx", cxlmd->cxlds->serial);

	return cxl_nvd;
}
231
/*
 * devm action registered by devm_cxl_add_nvdimm(): severs the
 * memdev <-> nvdimm linkage, unregisters the cxl_nvdimm device, and
 * drops the bridge reference taken at registration time.
 */
static void cxlmd_release_nvdimm(void *_cxlmd)
{
	struct cxl_memdev *cxlmd = _cxlmd;
	struct cxl_nvdimm *cxl_nvd = cxlmd->cxl_nvd;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;

	/* Clear cross-pointers before the device goes away */
	cxl_nvd->cxlmd = NULL;
	cxlmd->cxl_nvd = NULL;
	cxlmd->cxl_nvb = NULL;
	device_unregister(&cxl_nvd->dev);
	put_device(&cxl_nvb->dev);
}
244
/**
 * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
 * @host: host device for devm operations
 * @port: any port in the CXL topology to find the nvdimm-bridge device
 * @cxlmd: parent of the to be created cxl_nvdimm device
 *
 * Return: 0 on success negative error code on failure.
 */
int devm_cxl_add_nvdimm(struct device *host, struct cxl_port *port,
			struct cxl_memdev *cxlmd)
{
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct cxl_nvdimm *cxl_nvd;
	struct device *dev;
	int rc;

	/* Bridge reference is held until cxlmd_release_nvdimm() or error */
	cxl_nvb = cxl_find_nvdimm_bridge(port);
	if (!cxl_nvb)
		return -ENODEV;

	/*
	 * Take the uport_dev lock to guard against race of nvdimm_bus object.
	 * cxl_acpi_probe() registers the nvdimm_bus and is done under the
	 * root port uport_dev lock.
	 *
	 * Take the cxl_nvb device lock to ensure that cxl_nvb driver is in a
	 * consistent state. And the driver registers nvdimm_bus.
	 */
	guard(device)(cxl_nvb->port->uport_dev);
	guard(device)(&cxl_nvb->dev);
	if (!cxl_nvb->nvdimm_bus) {
		rc = -ENODEV;
		goto err_alloc;
	}

	cxl_nvd = cxl_nvdimm_alloc(cxl_nvb, cxlmd);
	if (IS_ERR(cxl_nvd)) {
		rc = PTR_ERR(cxl_nvd);
		goto err_alloc;
	}
	cxlmd->cxl_nvb = cxl_nvb;

	dev = &cxl_nvd->dev;
	rc = dev_set_name(dev, "pmem%d", cxlmd->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(host, "register %s\n", dev_name(dev));

	/* @cxlmd carries a reference on @cxl_nvb until cxlmd_release_nvdimm */
	return devm_add_action_or_reset(host, cxlmd_release_nvdimm, cxlmd);

err:
	/* device_add() never succeeded; release callback frees cxl_nvd */
	put_device(dev);
err_alloc:
	/* Undo the back-pointers and drop the bridge reference */
	cxlmd->cxl_nvb = NULL;
	cxlmd->cxl_nvd = NULL;
	put_device(&cxl_nvb->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, "CXL");
311