xref: /linux/drivers/cxl/core/memdev.c (revision e2683c8868d03382da7e1ce8453b543a043066d1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. */
3 
4 #include <linux/io-64-nonatomic-lo-hi.h>
5 #include <linux/firmware.h>
6 #include <linux/device.h>
7 #include <linux/slab.h>
8 #include <linux/idr.h>
9 #include <linux/pci.h>
10 #include <cxlmem.h>
11 #include "trace.h"
12 #include "core.h"
13 
14 static DECLARE_RWSEM(cxl_memdev_rwsem);
15 
16 /*
17  * An entire PCI topology full of devices should be enough for any
18  * config
19  */
20 #define CXL_MEM_MAX_DEVS 65536
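/*
 * This constant sizes both the chrdev minor range reserved by
 * alloc_chrdev_region() in cxl_memdev_init() and the upper bound handed
 * to ida_alloc_max() in cxl_memdev_alloc() below.
 */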
21 
22 static int cxl_mem_major;
23 static DEFINE_IDA(cxl_memdev_ida);
24 
25 static void cxl_memdev_release(struct device *dev)
26 {
27 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
28 
29 	ida_free(&cxl_memdev_ida, cxlmd->id);
30 	kfree(cxlmd);
31 }
32 
33 static char *cxl_memdev_devnode(const struct device *dev, umode_t *mode, kuid_t *uid,
34 				kgid_t *gid)
35 {
36 	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
37 }
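/*
 * The devnode callback places the character device under a "cxl/"
 * subdirectory of devtmpfs, e.g. /dev/cxl/mem0 for a device named via
 * dev_set_name(dev, "mem%d", ...) in __devm_cxl_add_memdev() below.
 */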
38 
39 static ssize_t firmware_version_show(struct device *dev,
40 				     struct device_attribute *attr, char *buf)
41 {
42 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
43 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
44 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
45 
46 	if (!mds)
47 		return sysfs_emit(buf, "\n");
48 	return sysfs_emit(buf, "%.16s\n", mds->firmware_version);
49 }
50 static DEVICE_ATTR_RO(firmware_version);
51 
52 static ssize_t payload_max_show(struct device *dev,
53 				struct device_attribute *attr, char *buf)
54 {
55 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
56 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
57 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
58 
59 	if (!mds)
60 		return sysfs_emit(buf, "\n");
61 	return sysfs_emit(buf, "%zu\n", cxlds->cxl_mbox.payload_size);
62 }
63 static DEVICE_ATTR_RO(payload_max);
64 
65 static ssize_t label_storage_size_show(struct device *dev,
66 				       struct device_attribute *attr, char *buf)
67 {
68 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
69 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
70 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
71 
72 	if (!mds)
73 		return sysfs_emit(buf, "\n");
74 	return sysfs_emit(buf, "%zu\n", mds->lsa_size);
75 }
76 static DEVICE_ATTR_RO(label_storage_size);
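/*
 * The identity attributes above surface through sysfs on the memdev, e.g.
 * (assuming a device enumerated as mem0):
 *
 *	/sys/bus/cxl/devices/mem0/firmware_version
 *	/sys/bus/cxl/devices/mem0/payload_max
 *	/sys/bus/cxl/devices/mem0/label_storage_size
 *
 * Each emits a bare newline when the device has no cxl_memdev_state.
 */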
77 
78 static resource_size_t cxl_ram_size(struct cxl_dev_state *cxlds)
79 {
80 	/* Static RAM is only expected at partition 0. */
81 	if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
82 		return 0;
83 	return resource_size(&cxlds->part[0].res);
84 }
85 
86 static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
87 			     char *buf)
88 {
89 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
90 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
91 	unsigned long long len = cxl_ram_size(cxlds);
92 
93 	return sysfs_emit(buf, "%#llx\n", len);
94 }
95 
96 static struct device_attribute dev_attr_ram_size =
97 	__ATTR(size, 0444, ram_size_show, NULL);
98 
99 static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
100 			      char *buf)
101 {
102 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
103 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
104 	unsigned long long len = cxl_pmem_size(cxlds);
105 
106 	return sysfs_emit(buf, "%#llx\n", len);
107 }
108 
109 static struct device_attribute dev_attr_pmem_size =
110 	__ATTR(size, 0444, pmem_size_show, NULL);
111 
112 static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
113 			   char *buf)
114 {
115 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
116 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
117 
118 	return sysfs_emit(buf, "%#llx\n", cxlds->serial);
119 }
120 static DEVICE_ATTR_RO(serial);
121 
122 static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
123 			      char *buf)
124 {
125 	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
126 }
127 static DEVICE_ATTR_RO(numa_node);
128 
129 static ssize_t security_state_show(struct device *dev,
130 				   struct device_attribute *attr,
131 				   char *buf)
132 {
133 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
134 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
135 	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
136 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
137 	unsigned long state = mds->security.state;
138 	int rc = 0;
139 
140 	/* sync with latest submission state */
141 	mutex_lock(&cxl_mbox->mbox_mutex);
142 	if (mds->security.sanitize_active)
143 		rc = sysfs_emit(buf, "sanitize\n");
144 	mutex_unlock(&cxl_mbox->mbox_mutex);
145 	if (rc)
146 		return rc;
147 
148 	if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
149 		return sysfs_emit(buf, "disabled\n");
150 	if (state & CXL_PMEM_SEC_STATE_FROZEN ||
151 	    state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
152 	    state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
153 		return sysfs_emit(buf, "frozen\n");
154 	if (state & CXL_PMEM_SEC_STATE_LOCKED)
155 		return sysfs_emit(buf, "locked\n");
156 
157 	return sysfs_emit(buf, "unlocked\n");
158 }
159 static struct device_attribute dev_attr_security_state =
160 	__ATTR(state, 0444, security_state_show, NULL);
161 
162 static ssize_t security_sanitize_store(struct device *dev,
163 				       struct device_attribute *attr,
164 				       const char *buf, size_t len)
165 {
166 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
167 	bool sanitize;
168 	ssize_t rc;
169 
170 	if (kstrtobool(buf, &sanitize) || !sanitize)
171 		return -EINVAL;
172 
173 	rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE);
174 	if (rc)
175 		return rc;
176 
177 	return len;
178 }
179 static struct device_attribute dev_attr_security_sanitize =
180 	__ATTR(sanitize, 0200, NULL, security_sanitize_store);
181 
182 static ssize_t security_erase_store(struct device *dev,
183 				    struct device_attribute *attr,
184 				    const char *buf, size_t len)
185 {
186 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
187 	ssize_t rc;
188 	bool erase;
189 
190 	if (kstrtobool(buf, &erase) || !erase)
191 		return -EINVAL;
192 
193 	rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SECURE_ERASE);
194 	if (rc)
195 		return rc;
196 
197 	return len;
198 }
199 static struct device_attribute dev_attr_security_erase =
200 	__ATTR(erase, 0200, NULL, security_erase_store);
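/*
 * Sketch of the sanitize/erase flow from userspace (paths assume a device
 * named mem0): writing a truthy value, e.g. "1", to
 * /sys/bus/cxl/devices/mem0/security/sanitize or .../security/erase kicks
 * off the corresponding mailbox operation, while "0" is rejected with
 * -EINVAL since kstrtobool() must yield true.
 */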
201 
202 bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
203 			       enum poison_cmd_enabled_bits cmd)
204 {
205 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
206 
207 	if (!mds)
208 		return false;
209 
210 	return test_bit(cmd, mds->poison.enabled_cmds);
211 }
212 
213 static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
214 {
215 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
216 	u64 offset, length;
217 	int rc = 0;
218 
219 	/* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */
220 	for (int i = 0; i < cxlds->nr_partitions; i++) {
221 		const struct resource *res = &cxlds->part[i].res;
222 
223 		offset = res->start;
224 		length = resource_size(res);
225 		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
226 		/*
227 		 * Invalid Physical Address is not an error for
228 		 * volatile addresses. Device support is optional.
229 		 */
230 		if (rc == -EFAULT && cxlds->part[i].mode == CXL_PARTMODE_RAM)
231 			rc = 0;
232 	}
233 	return rc;
234 }
235 
236 int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
237 {
238 	struct cxl_port *port;
239 	int rc;
240 
241 	port = cxlmd->endpoint;
242 	if (!port || !is_cxl_endpoint(port))
243 		return -EINVAL;
244 
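	/*
	 * Scope-based conditional locking (linux/cleanup.h): ACQUIRE() takes
	 * the rwsem interruptibly and releases it when the guard goes out of
	 * scope, while ACQUIRE_ERR() reports whether the acquire failed
	 * (e.g. was interrupted), in which case the error is returned.
	 */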
245 	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
246 	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
247 		return rc;
248 
249 	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
250 	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
251 		return rc;
252 
253 	if (cxl_num_decoders_committed(port) == 0) {
254 		/* No regions mapped to this memdev */
255 		rc = cxl_get_poison_by_memdev(cxlmd);
256 	} else {
257 		/* Regions mapped, collect poison by endpoint */
258 		rc = cxl_get_poison_by_endpoint(port);
259 	}
260 
261 	return rc;
262 }
263 EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, "CXL");
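/*
 * Note: the expected consumer of this export is a debugfs attribute wired
 * up elsewhere in the driver (an assumption about the caller; the hookup is
 * not in this file), letting userspace request a poison-list refresh that
 * is reported via trace events.
 */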
264 
265 static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
266 {
267 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
268 
269 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
270 		return 0;
271 
272 	if (!resource_size(&cxlds->dpa_res)) {
273 		dev_dbg(cxlds->dev, "device has no dpa resource\n");
274 		return -EINVAL;
275 	}
276 	if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa)) {
277 		dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
278 			dpa, &cxlds->dpa_res);
279 		return -EINVAL;
280 	}
281 	if (!IS_ALIGNED(dpa, 64)) {
282 		dev_dbg(cxlds->dev, "dpa:0x%llx is not 64-byte aligned\n", dpa);
283 		return -EINVAL;
284 	}
285 
286 	return 0;
287 }
288 
289 int cxl_inject_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
290 {
291 	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
292 	struct cxl_mbox_inject_poison inject;
293 	struct cxl_poison_record record;
294 	struct cxl_mbox_cmd mbox_cmd;
295 	struct cxl_region *cxlr;
296 	int rc;
297 
298 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
299 		return 0;
300 
301 	lockdep_assert_held(&cxl_rwsem.dpa);
302 	lockdep_assert_held(&cxl_rwsem.region);
303 
304 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
305 	if (rc)
306 		return rc;
307 
308 	inject.address = cpu_to_le64(dpa);
309 	mbox_cmd = (struct cxl_mbox_cmd) {
310 		.opcode = CXL_MBOX_OP_INJECT_POISON,
311 		.size_in = sizeof(inject),
312 		.payload_in = &inject,
313 	};
314 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
315 	if (rc)
316 		return rc;
317 
318 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
319 	if (cxlr)
320 		dev_warn_once(cxl_mbox->host,
321 			      "poison inject dpa:%#llx region: %s\n", dpa,
322 			      dev_name(&cxlr->dev));
323 
324 	record = (struct cxl_poison_record) {
325 		.address = cpu_to_le64(dpa),
326 		.length = cpu_to_le32(1),
327 	};
328 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
329 
330 	return 0;
331 }
332 
333 int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
334 {
335 	int rc;
336 
337 	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
338 	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
339 		return rc;
340 
341 	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
342 	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
343 		return rc;
344 
345 	return cxl_inject_poison_locked(cxlmd, dpa);
346 }
347 EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");
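/*
 * As with cxl_trigger_poison_list(), the expected callers are debugfs
 * attributes provided elsewhere in the driver (an assumption; the hookup is
 * not in this file). The IS_ENABLED(CONFIG_DEBUG_FS) early-out in
 * cxl_inject_poison_locked() makes this a nop on kernels without debugfs.
 */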
348 
349 int cxl_clear_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
350 {
351 	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
352 	struct cxl_mbox_clear_poison clear;
353 	struct cxl_poison_record record;
354 	struct cxl_mbox_cmd mbox_cmd;
355 	struct cxl_region *cxlr;
356 	int rc;
357 
358 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
359 		return 0;
360 
361 	lockdep_assert_held(&cxl_rwsem.dpa);
362 	lockdep_assert_held(&cxl_rwsem.region);
363 
364 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
365 	if (rc)
366 		return rc;
367 
368 	/*
369 	 * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
370 	 * is defined to accept 64 bytes of write-data, along with the
371 	 * address to clear. This driver uses zeroes as write-data.
372 	 */
373 	clear = (struct cxl_mbox_clear_poison) {
374 		.address = cpu_to_le64(dpa)
375 	};
376 
377 	mbox_cmd = (struct cxl_mbox_cmd) {
378 		.opcode = CXL_MBOX_OP_CLEAR_POISON,
379 		.size_in = sizeof(clear),
380 		.payload_in = &clear,
381 	};
382 
383 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
384 	if (rc)
385 		return rc;
386 
387 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
388 	if (cxlr)
389 		dev_warn_once(cxl_mbox->host,
390 			      "poison clear dpa:%#llx region: %s\n", dpa,
391 			      dev_name(&cxlr->dev));
392 
393 	record = (struct cxl_poison_record) {
394 		.address = cpu_to_le64(dpa),
395 		.length = cpu_to_le32(1),
396 	};
397 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
398 
399 	return 0;
400 }
401 
402 int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
403 {
404 	int rc;
405 
406 	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
407 	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
408 		return rc;
409 
410 	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
411 	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
412 		return rc;
413 
414 	return cxl_clear_poison_locked(cxlmd, dpa);
415 }
416 EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");
417 
418 static struct attribute *cxl_memdev_attributes[] = {
419 	&dev_attr_serial.attr,
420 	&dev_attr_firmware_version.attr,
421 	&dev_attr_payload_max.attr,
422 	&dev_attr_label_storage_size.attr,
423 	&dev_attr_numa_node.attr,
424 	NULL,
425 };
426 
427 static struct cxl_dpa_perf *to_pmem_perf(struct cxl_dev_state *cxlds)
428 {
429 	for (int i = 0; i < cxlds->nr_partitions; i++)
430 		if (cxlds->part[i].mode == CXL_PARTMODE_PMEM)
431 			return &cxlds->part[i].perf;
432 	return NULL;
433 }
434 
435 static ssize_t pmem_qos_class_show(struct device *dev,
436 				   struct device_attribute *attr, char *buf)
437 {
438 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
439 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
440 
441 	return sysfs_emit(buf, "%d\n", to_pmem_perf(cxlds)->qos_class);
442 }
443 
444 static struct device_attribute dev_attr_pmem_qos_class =
445 	__ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
446 
447 static struct attribute *cxl_memdev_pmem_attributes[] = {
448 	&dev_attr_pmem_size.attr,
449 	&dev_attr_pmem_qos_class.attr,
450 	NULL,
451 };
452 
453 static struct cxl_dpa_perf *to_ram_perf(struct cxl_dev_state *cxlds)
454 {
455 	if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
456 		return NULL;
457 	return &cxlds->part[0].perf;
458 }
459 
460 static ssize_t ram_qos_class_show(struct device *dev,
461 				  struct device_attribute *attr, char *buf)
462 {
463 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
464 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
465 
466 	return sysfs_emit(buf, "%d\n", to_ram_perf(cxlds)->qos_class);
467 }
468 
469 static struct device_attribute dev_attr_ram_qos_class =
470 	__ATTR(qos_class, 0444, ram_qos_class_show, NULL);
471 
472 static struct attribute *cxl_memdev_ram_attributes[] = {
473 	&dev_attr_ram_size.attr,
474 	&dev_attr_ram_qos_class.attr,
475 	NULL,
476 };
477 
478 static struct attribute *cxl_memdev_security_attributes[] = {
479 	&dev_attr_security_state.attr,
480 	&dev_attr_security_sanitize.attr,
481 	&dev_attr_security_erase.attr,
482 	NULL,
483 };
484 
485 static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
486 				  int n)
487 {
488 	if (!IS_ENABLED(CONFIG_NUMA) && a == &dev_attr_numa_node.attr)
489 		return 0;
490 	return a->mode;
491 }
492 
493 static struct attribute_group cxl_memdev_attribute_group = {
494 	.attrs = cxl_memdev_attributes,
495 	.is_visible = cxl_memdev_visible,
496 };
497 
498 static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
499 {
500 	struct device *dev = kobj_to_dev(kobj);
501 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
502 	struct cxl_dpa_perf *perf = to_ram_perf(cxlmd->cxlds);
503 
504 	if (a == &dev_attr_ram_qos_class.attr &&
505 	    (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
506 		return 0;
507 
508 	return a->mode;
509 }
510 
511 static struct attribute_group cxl_memdev_ram_attribute_group = {
512 	.name = "ram",
513 	.attrs = cxl_memdev_ram_attributes,
514 	.is_visible = cxl_ram_visible,
515 };
516 
517 static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
518 {
519 	struct device *dev = kobj_to_dev(kobj);
520 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
521 	struct cxl_dpa_perf *perf = to_pmem_perf(cxlmd->cxlds);
522 
523 	if (a == &dev_attr_pmem_qos_class.attr &&
524 	    (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
525 		return 0;
526 
527 	return a->mode;
528 }
529 
530 static struct attribute_group cxl_memdev_pmem_attribute_group = {
531 	.name = "pmem",
532 	.attrs = cxl_memdev_pmem_attributes,
533 	.is_visible = cxl_pmem_visible,
534 };
535 
536 static umode_t cxl_memdev_security_visible(struct kobject *kobj,
537 					   struct attribute *a, int n)
538 {
539 	struct device *dev = kobj_to_dev(kobj);
540 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
541 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
542 
543 	if (a == &dev_attr_security_sanitize.attr &&
544 	    !test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
545 		return 0;
546 
547 	if (a == &dev_attr_security_erase.attr &&
548 	    !test_bit(CXL_SEC_ENABLED_SECURE_ERASE, mds->security.enabled_cmds))
549 		return 0;
550 
551 	return a->mode;
552 }
553 
554 static struct attribute_group cxl_memdev_security_attribute_group = {
555 	.name = "security",
556 	.attrs = cxl_memdev_security_attributes,
557 	.is_visible = cxl_memdev_security_visible,
558 };
559 
560 static const struct attribute_group *cxl_memdev_attribute_groups[] = {
561 	&cxl_memdev_attribute_group,
562 	&cxl_memdev_ram_attribute_group,
563 	&cxl_memdev_pmem_attribute_group,
564 	&cxl_memdev_security_attribute_group,
565 	NULL,
566 };
567 
568 void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
569 {
570 	sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
571 	sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
572 }
573 EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, "CXL");
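/*
 * sysfs_update_group() re-runs the group's ->is_visible() callback, so the
 * ram/pmem qos_class attributes become visible once perf data has been
 * computed after the memdev was registered.
 */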
574 
575 static const struct device_type cxl_memdev_type = {
576 	.name = "cxl_memdev",
577 	.release = cxl_memdev_release,
578 	.devnode = cxl_memdev_devnode,
579 	.groups = cxl_memdev_attribute_groups,
580 };
581 
582 bool is_cxl_memdev(const struct device *dev)
583 {
584 	return dev->type == &cxl_memdev_type;
585 }
586 EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, "CXL");
587 
588 /**
589  * set_exclusive_cxl_commands() - atomically disable user cxl commands
590  * @mds: The device state to operate on
591  * @cmds: bitmap of commands to mark exclusive
592  *
593  * Grab the cxl_memdev_rwsem in write mode to flush in-flight
594  * invocations of the ioctl path and then disable future execution of
595  * commands with the command ids set in @cmds.
596  */
597 void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
598 				unsigned long *cmds)
599 {
600 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
601 
602 	guard(rwsem_write)(&cxl_memdev_rwsem);
603 	bitmap_or(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
604 		  cmds, CXL_MEM_COMMAND_ID_MAX);
605 }
606 EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, "CXL");
607 
608 /**
609  * clear_exclusive_cxl_commands() - atomically enable user cxl commands
610  * @mds: The device state to modify
611  * @cmds: bitmap of commands to mark available for userspace
612  */
613 void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
614 				  unsigned long *cmds)
615 {
616 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
617 
618 	guard(rwsem_write)(&cxl_memdev_rwsem);
619 	bitmap_andnot(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
620 		      cmds, CXL_MEM_COMMAND_ID_MAX);
621 }
622 EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, "CXL");
623 
624 static void cxl_memdev_shutdown(struct device *dev)
625 {
626 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
627 
628 	guard(rwsem_write)(&cxl_memdev_rwsem);
629 	cxlmd->cxlds = NULL;
630 }
631 
632 static void cxl_memdev_unregister(void *_cxlmd)
633 {
634 	struct cxl_memdev *cxlmd = _cxlmd;
635 	struct device *dev = &cxlmd->dev;
636 
637 	cdev_device_del(&cxlmd->cdev, dev);
638 	cxl_memdev_shutdown(dev);
639 	put_device(dev);
640 }
641 
642 static void detach_memdev(struct work_struct *work)
643 {
644 	struct cxl_memdev *cxlmd;
645 
646 	cxlmd = container_of(work, typeof(*cxlmd), detach_work);
647 
648 	/*
649 	 * When the creator of @cxlmd sets ->attach, it indicates CXL operation
650 	 * is required. In that case, @cxlmd detach escalates to parent device
651 	 * detach.
652 	 */
653 	if (cxlmd->attach)
654 		device_release_driver(cxlmd->dev.parent);
655 	else
656 		device_release_driver(&cxlmd->dev);
657 	put_device(&cxlmd->dev);
658 }
659 
660 static struct lock_class_key cxl_memdev_key;
661 
662 struct cxl_dev_state *_devm_cxl_dev_state_create(struct device *dev,
663 						 enum cxl_devtype type,
664 						 u64 serial, u16 dvsec,
665 						 size_t size, bool has_mbox)
666 {
667 	struct cxl_dev_state *cxlds = devm_kzalloc(dev, size, GFP_KERNEL);
668 
669 	if (!cxlds)
670 		return NULL;
671 
672 	cxlds->dev = dev;
673 	cxlds->type = type;
674 	cxlds->serial = serial;
675 	cxlds->cxl_dvsec = dvsec;
676 	cxlds->reg_map.host = dev;
677 	cxlds->reg_map.resource = CXL_RESOURCE_NONE;
678 
679 	if (has_mbox)
680 		cxlds->cxl_mbox.host = dev;
681 
682 	return cxlds;
683 }
684 EXPORT_SYMBOL_NS_GPL(_devm_cxl_dev_state_create, "CXL");
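/*
 * Example allocation of a driver-private state that embeds struct
 * cxl_dev_state as its first member (a sketch: callers normally go through
 * a typed wrapper in cxlmem.h rather than calling the underscored helper
 * directly, and "serial"/"dvsec" here are placeholder variables):
 *
 *	cxlds = _devm_cxl_dev_state_create(&pdev->dev, CXL_DEVTYPE_CLASSMEM,
 *					   serial, dvsec,
 *					   sizeof(struct cxl_memdev_state),
 *					   true);
 *	if (!cxlds)
 *		return -ENOMEM;
 */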
685 
686 static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
687 					   const struct file_operations *fops,
688 					   const struct cxl_memdev_attach *attach)
689 {
690 	struct cxl_memdev *cxlmd;
691 	struct device *dev;
692 	struct cdev *cdev;
693 	int rc;
694 
695 	cxlmd = kzalloc_obj(*cxlmd);
696 	if (!cxlmd)
697 		return ERR_PTR(-ENOMEM);
698 
699 	rc = ida_alloc_max(&cxl_memdev_ida, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
700 	if (rc < 0)
701 		goto err;
702 	cxlmd->id = rc;
703 	cxlmd->depth = -1;
704 	cxlmd->attach = attach;
705 	cxlmd->endpoint = ERR_PTR(-ENXIO);
706 
707 	dev = &cxlmd->dev;
708 	device_initialize(dev);
709 	lockdep_set_class(&dev->mutex, &cxl_memdev_key);
710 	dev->parent = cxlds->dev;
711 	dev->bus = &cxl_bus_type;
712 	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
713 	dev->type = &cxl_memdev_type;
714 	device_set_pm_not_required(dev);
715 	INIT_WORK(&cxlmd->detach_work, detach_memdev);
716 
717 	cdev = &cxlmd->cdev;
718 	cdev_init(cdev, fops);
719 	return cxlmd;
720 
721 err:
722 	kfree(cxlmd);
723 	return ERR_PTR(rc);
724 }
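/*
 * Note the asymmetric cleanup above: the err label only covers the
 * ida_alloc_max() failure, which happens before device_initialize(), so a
 * plain kfree() is safe there. Once device_initialize() has run, teardown
 * must go through put_device() so that cxl_memdev_release() performs the
 * ida_free()/kfree().
 */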
725 
726 static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
727 			       unsigned long arg)
728 {
729 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
730 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
731 
732 	switch (cmd) {
733 	case CXL_MEM_QUERY_COMMANDS:
734 		return cxl_query_cmd(cxl_mbox, (void __user *)arg);
735 	case CXL_MEM_SEND_COMMAND:
736 		return cxl_send_cmd(cxl_mbox, (void __user *)arg);
737 	default:
738 		return -ENOTTY;
739 	}
740 }
741 
742 static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
743 			     unsigned long arg)
744 {
745 	struct cxl_memdev *cxlmd = file->private_data;
746 	struct cxl_dev_state *cxlds;
747 
748 	guard(rwsem_read)(&cxl_memdev_rwsem);
749 	cxlds = cxlmd->cxlds;
750 	if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM)
751 		return __cxl_memdev_ioctl(cxlmd, cmd, arg);
752 
753 	return -ENXIO;
754 }
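/*
 * The read-side guard above pairs with cxl_memdev_shutdown() taking
 * cxl_memdev_rwsem for write and clearing ->cxlds: an ioctl racing
 * unregistration either completes against a live cxlds or observes NULL
 * and fails with -ENXIO.
 */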
755 
756 static int cxl_memdev_open(struct inode *inode, struct file *file)
757 {
758 	struct cxl_memdev *cxlmd =
759 		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
760 
761 	get_device(&cxlmd->dev);
762 	file->private_data = cxlmd;
763 
764 	return 0;
765 }
766 
767 static int cxl_memdev_release_file(struct inode *inode, struct file *file)
768 {
769 	struct cxl_memdev *cxlmd =
770 		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
771 
772 	put_device(&cxlmd->dev);
773 
774 	return 0;
775 }
776 
777 /**
778  * cxl_mem_get_fw_info - Get Firmware info
779  * @mds: The device data for the operation
780  *
781  * Retrieve firmware info for the device specified.
782  *
783  * Return: 0 if no error; otherwise the result of the mailbox command.
784  *
785  * See CXL-3.0 8.2.9.3.1 Get FW Info
786  */
787 static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
788 {
789 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
790 	struct cxl_mbox_get_fw_info info;
791 	struct cxl_mbox_cmd mbox_cmd;
792 	int rc;
793 
794 	mbox_cmd = (struct cxl_mbox_cmd) {
795 		.opcode = CXL_MBOX_OP_GET_FW_INFO,
796 		.size_out = sizeof(info),
797 		.payload_out = &info,
798 	};
799 
800 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
801 	if (rc < 0)
802 		return rc;
803 
804 	mds->fw.num_slots = info.num_slots;
805 	mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK,
806 				       info.slot_info);
807 
808 	return 0;
809 }
810 
811 /**
812  * cxl_mem_activate_fw - Activate Firmware
813  * @mds: The device data for the operation
814  * @slot: slot number to activate
815  *
816  * Activate firmware in a given slot for the device specified.
817  *
818  * Return: 0 if no error; otherwise the result of the mailbox command.
819  *
820  * See CXL-3.0 8.2.9.3.3 Activate FW
821  */
822 static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
823 {
824 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
825 	struct cxl_mbox_activate_fw activate;
826 	struct cxl_mbox_cmd mbox_cmd;
827 
828 	if (slot == 0 || slot > mds->fw.num_slots)
829 		return -EINVAL;
830 
831 	mbox_cmd = (struct cxl_mbox_cmd) {
832 		.opcode = CXL_MBOX_OP_ACTIVATE_FW,
833 		.size_in = sizeof(activate),
834 		.payload_in = &activate,
835 	};
836 
837 	/* Only offline activation supported for now */
838 	activate.action = CXL_FW_ACTIVATE_OFFLINE;
839 	activate.slot = slot;
840 
841 	return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
842 }
843 
844 /**
845  * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer
846  * @mds: The device data for the operation
847  *
848  * Abort an in-progress firmware transfer for the device specified.
849  *
850  * Return: 0 if no error; otherwise the result of the mailbox command.
851  *
852  * See CXL-3.0 8.2.9.3.2 Transfer FW
853  */
854 static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
855 {
856 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
857 	struct cxl_mbox_transfer_fw *transfer;
858 	struct cxl_mbox_cmd mbox_cmd;
859 	int rc;
860 
861 	transfer = kzalloc_flex(*transfer, data, 0);
862 	if (!transfer)
863 		return -ENOMEM;
864 
865 	/* Set a 1s poll interval and a total wait time of 30s */
866 	mbox_cmd = (struct cxl_mbox_cmd) {
867 		.opcode = CXL_MBOX_OP_TRANSFER_FW,
868 		.size_in = sizeof(*transfer),
869 		.payload_in = transfer,
870 		.poll_interval_ms = 1000,
871 		.poll_count = 30,
872 	};
873 
874 	transfer->action = CXL_FW_TRANSFER_ACTION_ABORT;
875 
876 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
877 	kfree(transfer);
878 	return rc;
879 }
880 
881 static void cxl_fw_cleanup(struct fw_upload *fwl)
882 {
883 	struct cxl_memdev_state *mds = fwl->dd_handle;
884 
885 	mds->fw.next_slot = 0;
886 }
887 
888 static int cxl_fw_do_cancel(struct fw_upload *fwl)
889 {
890 	struct cxl_memdev_state *mds = fwl->dd_handle;
891 	struct cxl_dev_state *cxlds = &mds->cxlds;
892 	struct cxl_memdev *cxlmd = cxlds->cxlmd;
893 	int rc;
894 
895 	rc = cxl_mem_abort_fw_xfer(mds);
896 	if (rc < 0)
897 		dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc);
898 
899 	return FW_UPLOAD_ERR_CANCELED;
900 }
901 
902 static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
903 					 u32 size)
904 {
905 	struct cxl_memdev_state *mds = fwl->dd_handle;
906 	struct cxl_mbox_transfer_fw *transfer;
907 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
908 
909 	if (!size)
910 		return FW_UPLOAD_ERR_INVALID_SIZE;
911 
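	/*
	 * A "oneshot" transfer is one where the entire image plus the
	 * transfer header fits in a single mailbox payload, so the write
	 * path below can use CXL_FW_TRANSFER_ACTION_FULL instead of the
	 * INITIATE/CONTINUE/END slicing sequence.
	 */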
912 	mds->fw.oneshot = struct_size(transfer, data, size) <
913 			    cxl_mbox->payload_size;
914 
915 	if (cxl_mem_get_fw_info(mds))
916 		return FW_UPLOAD_ERR_HW_ERROR;
917 
918 	/*
919 	 * So far no state has been changed, hence no other cleanup is
920 	 * necessary. Simply return the cancelled status.
921 	 */
922 	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
923 		return FW_UPLOAD_ERR_CANCELED;
924 
925 	return FW_UPLOAD_ERR_NONE;
926 }
927 
928 static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
929 				       u32 offset, u32 size, u32 *written)
930 {
931 	struct cxl_memdev_state *mds = fwl->dd_handle;
932 	struct cxl_dev_state *cxlds = &mds->cxlds;
933 	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
934 	struct cxl_memdev *cxlmd = cxlds->cxlmd;
935 	struct cxl_mbox_transfer_fw *transfer;
936 	struct cxl_mbox_cmd mbox_cmd;
937 	u32 cur_size, remaining;
938 	size_t size_in;
939 	int rc;
940 
941 	*written = 0;
942 
943 	/* Offset has to be aligned to 128B (CXL-3.0 8.2.9.3.2 Table 8-57) */
944 	if (!IS_ALIGNED(offset, CXL_FW_TRANSFER_ALIGNMENT)) {
945 		dev_err(&cxlmd->dev,
946 			"misaligned offset for FW transfer slice (%u)\n",
947 			offset);
948 		return FW_UPLOAD_ERR_RW_ERROR;
949 	}
950 
951 	/*
952 	 * Pick the transfer size based on cxl_mbox->payload_size. @size must
953 	 * be 128-byte aligned, ->payload_size is a power of 2 starting at 256
954 	 * bytes, and sizeof(*transfer) is 128. These constraints imply that
955 	 * @cur_size will always be 128-byte aligned.
956 	 */
957 	cur_size = min_t(size_t, size, cxl_mbox->payload_size - sizeof(*transfer));
958 
959 	remaining = size - cur_size;
960 	size_in = struct_size(transfer, data, cur_size);
961 
962 	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
963 		return cxl_fw_do_cancel(fwl);
964 
965 	/*
966 	 * Slot numbers are 1-indexed; cur_slot is the slot the device is
967 	 * currently running from. Pick the next slot, using modulo to
968 	 * handle rollover past num_slots and adding 1 to stay 1-indexed.
969 	 */
970 	mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1;
971 
972 	/* Do the transfer via mailbox cmd */
973 	transfer = kzalloc(size_in, GFP_KERNEL);
974 	if (!transfer)
975 		return FW_UPLOAD_ERR_RW_ERROR;
976 
977 	transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT);
978 	memcpy(transfer->data, data + offset, cur_size);
979 	if (mds->fw.oneshot) {
980 		transfer->action = CXL_FW_TRANSFER_ACTION_FULL;
981 		transfer->slot = mds->fw.next_slot;
982 	} else {
983 		if (offset == 0) {
984 			transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE;
985 		} else if (remaining == 0) {
986 			transfer->action = CXL_FW_TRANSFER_ACTION_END;
987 			transfer->slot = mds->fw.next_slot;
988 		} else {
989 			transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE;
990 		}
991 	}
992 
993 	mbox_cmd = (struct cxl_mbox_cmd) {
994 		.opcode = CXL_MBOX_OP_TRANSFER_FW,
995 		.size_in = size_in,
996 		.payload_in = transfer,
997 		.poll_interval_ms = 1000,
998 		.poll_count = 30,
999 	};
1000 
1001 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1002 	if (rc < 0) {
1003 		rc = FW_UPLOAD_ERR_RW_ERROR;
1004 		goto out_free;
1005 	}
1006 
1007 	*written = cur_size;
1008 
1009 	/* Activate FW if oneshot or if the last slice was written */
1010 	if (mds->fw.oneshot || remaining == 0) {
1011 		dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n",
1012 			mds->fw.next_slot);
1013 		rc = cxl_mem_activate_fw(mds, mds->fw.next_slot);
1014 		if (rc < 0) {
1015 			dev_err(&cxlmd->dev, "Error activating firmware: %d\n",
1016 				rc);
1017 			rc = FW_UPLOAD_ERR_HW_ERROR;
1018 			goto out_free;
1019 		}
1020 	}
1021 
1022 	rc = FW_UPLOAD_ERR_NONE;
1023 
1024 out_free:
1025 	kfree(transfer);
1026 	return rc;
1027 }
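/*
 * Worked example of the slicing above (illustrative numbers, not from the
 * spec): with an assumed 1 MiB mailbox payload and sizeof(*transfer) == 128,
 * each slice carries at most 1048448 bytes of image data, which remains
 * 128-byte aligned, and transfer->offset is expressed in 128-byte units
 * (offset / CXL_FW_TRANSFER_ALIGNMENT). The first slice sends INITIATE, the
 * last sends END with the target slot, and any in between send CONTINUE.
 */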
1028 
1029 static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl)
1030 {
1031 	struct cxl_memdev_state *mds = fwl->dd_handle;
1032 
1033 	/*
1034 	 * cxl_internal_send_cmd() handles background operations synchronously.
1035 	 * No need to wait for completions here - any errors would've been
1036 	 * reported and handled during the ->write() call(s).
1037 	 * Just check if a cancel request was received, and return success.
1038 	 */
1039 	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
1040 		return cxl_fw_do_cancel(fwl);
1041 
1042 	return FW_UPLOAD_ERR_NONE;
1043 }
1044 
1045 static void cxl_fw_cancel(struct fw_upload *fwl)
1046 {
1047 	struct cxl_memdev_state *mds = fwl->dd_handle;
1048 
1049 	set_bit(CXL_FW_CANCEL, mds->fw.state);
1050 }
1051 
1052 static const struct fw_upload_ops cxl_memdev_fw_ops = {
1053 	.prepare = cxl_fw_prepare,
1054 	.write = cxl_fw_write,
1055 	.poll_complete = cxl_fw_poll_complete,
1056 	.cancel = cxl_fw_cancel,
1057 	.cleanup = cxl_fw_cleanup,
1058 };
1059 
1060 static void cxl_remove_fw_upload(void *fwl)
1061 {
1062 	firmware_upload_unregister(fwl);
1063 }
1064 
1065 int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
1066 {
1067 	struct cxl_dev_state *cxlds = &mds->cxlds;
1068 	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
1069 	struct device *dev = &cxlds->cxlmd->dev;
1070 	struct fw_upload *fwl;
1071 
1072 	if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, cxl_mbox->enabled_cmds))
1073 		return 0;
1074 
1075 	fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
1076 				       &cxl_memdev_fw_ops, mds);
1077 	if (IS_ERR(fwl))
1078 		return PTR_ERR(fwl);
1079 	return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl);
1080 }
1081 EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, "CXL");
1082 
1083 static const struct file_operations cxl_memdev_fops = {
1084 	.owner = THIS_MODULE,
1085 	.unlocked_ioctl = cxl_memdev_ioctl,
1086 	.open = cxl_memdev_open,
1087 	.release = cxl_memdev_release_file,
1088 	.compat_ioctl = compat_ptr_ioctl,
1089 	.llseek = noop_llseek,
1090 };
1091 
1092 /*
1093  * Activate ioctl operations. No cxl_memdev_rwsem manipulation is needed
1094  * since this assignment is ordered ahead of cdev_add() publishing the device.
1095  */
1096 static int cxlmd_add(struct cxl_memdev *cxlmd, struct cxl_dev_state *cxlds)
1097 {
1098 	int rc;
1099 
1100 	cxlmd->cxlds = cxlds;
1101 	cxlds->cxlmd = cxlmd;
1102 
1103 	rc = cdev_device_add(&cxlmd->cdev, &cxlmd->dev);
1104 	if (rc) {
1105 		/*
1106 		 * The cdev was briefly live, shut down any ioctl
1107 		 * operations that saw that state.
1108 		 */
1109 		cxl_memdev_shutdown(&cxlmd->dev);
1110 		return rc;
1111 	}
1112 
1113 	return 0;
1114 }
1115 
1116 DEFINE_FREE(put_cxlmd, struct cxl_memdev *,
1117 	    if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
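/*
 * Scope-based cleanup (linux/cleanup.h): the __free(put_cxlmd) annotation in
 * __devm_cxl_add_memdev() below drops the device reference on any early
 * return, while no_free_ptr() transfers ownership out of the scope on
 * success.
 */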
1118 
1119 static bool cxl_memdev_attach_failed(struct cxl_memdev *cxlmd)
1120 {
1121 	/*
1122 	 * If @attach is provided, fail if the driver is not attached upon
1123 	 * return. Note that failure here could be the result of a race to
1124 	 * teardown the CXL port topology. I.e. cxl_mem_probe() could have
1125 	 * succeeded and then cxl_mem unbound before the lock is acquired.
1126 	 */
1127 	guard(device)(&cxlmd->dev);
1128 	return (cxlmd->attach && !cxlmd->dev.driver);
1129 }
1130 
1131 static struct cxl_memdev *cxl_memdev_autoremove(struct cxl_memdev *cxlmd)
1132 {
1133 	int rc;
1134 
1135 	if (cxl_memdev_attach_failed(cxlmd)) {
1136 		cxl_memdev_unregister(cxlmd);
1137 		return ERR_PTR(-ENXIO);
1138 	}
1139 
1140 	rc = devm_add_action_or_reset(cxlmd->cxlds->dev, cxl_memdev_unregister,
1141 				      cxlmd);
1142 	if (rc)
1143 		return ERR_PTR(rc);
1144 
1145 	return cxlmd;
1146 }
1147 
1148 /*
1149  * Core helper for devm_cxl_add_memdev(): create the device and assert to the
1150  * caller that cxl_mem::probe() has been invoked by the time this returns.
1151  */
1152 struct cxl_memdev *__devm_cxl_add_memdev(struct cxl_dev_state *cxlds,
1153 					 const struct cxl_memdev_attach *attach)
1154 {
1155 	struct device *dev;
1156 	int rc;
1157 
1158 	struct cxl_memdev *cxlmd __free(put_cxlmd) =
1159 		cxl_memdev_alloc(cxlds, &cxl_memdev_fops, attach);
1160 	if (IS_ERR(cxlmd))
1161 		return cxlmd;
1162 
1163 	dev = &cxlmd->dev;
1164 	rc = dev_set_name(dev, "mem%d", cxlmd->id);
1165 	if (rc)
1166 		return ERR_PTR(rc);
1167 
1168 	rc = cxlmd_add(cxlmd, cxlds);
1169 	if (rc)
1170 		return ERR_PTR(rc);
1171 
1172 	return cxl_memdev_autoremove(no_free_ptr(cxlmd));
1173 }
1174 EXPORT_SYMBOL_FOR_MODULES(__devm_cxl_add_memdev, "cxl_mem");
1175 
1176 static void sanitize_teardown_notifier(void *data)
1177 {
1178 	struct cxl_memdev_state *mds = data;
1179 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1180 	struct kernfs_node *state;
1181 
1182 	/*
1183 	 * Prevent new irq-triggered invocations of the workqueue and
1184 	 * flush inflight invocations.
1185 	 */
1186 	mutex_lock(&cxl_mbox->mbox_mutex);
1187 	state = mds->security.sanitize_node;
1188 	mds->security.sanitize_node = NULL;
1189 	mutex_unlock(&cxl_mbox->mbox_mutex);
1190 
1191 	cancel_delayed_work_sync(&mds->security.poll_dwork);
1192 	sysfs_put(state);
1193 }
1194 
1195 int devm_cxl_sanitize_setup_notifier(struct device *host,
1196 				     struct cxl_memdev *cxlmd)
1197 {
1198 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
1199 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
1200 	struct kernfs_node *sec;
1201 
1202 	if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
1203 		return 0;
1204 
1205 	/*
1206 	 * Note, the expectation is that @cxlmd would have failed to be
1207 	 * created if these sysfs_get_dirent calls fail.
1208 	 */
1209 	sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security");
1210 	if (!sec)
1211 		return -ENOENT;
1212 	mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
1213 	sysfs_put(sec);
1214 	if (!mds->security.sanitize_node)
1215 		return -ENOENT;
1216 
1217 	return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds);
1218 }
1219 EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, "CXL");
1220 
1221 __init int cxl_memdev_init(void)
1222 {
1223 	dev_t devt;
1224 	int rc;
1225 
1226 	rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
1227 	if (rc)
1228 		return rc;
1229 
1230 	cxl_mem_major = MAJOR(devt);
1231 
1232 	return 0;
1233 }
1234 
1235 void cxl_memdev_exit(void)
1236 {
1237 	unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
1238 }
1239