xref: /linux/drivers/cxl/core/memdev.c (revision f2546eba53bbe38c4bb950f78625ccf4b1a2cbc8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. */
3 
4 #include <linux/io-64-nonatomic-lo-hi.h>
5 #include <linux/firmware.h>
6 #include <linux/device.h>
7 #include <linux/slab.h>
8 #include <linux/idr.h>
9 #include <linux/pci.h>
10 #include <cxlmem.h>
11 #include "trace.h"
12 #include "core.h"
13 
14 static DECLARE_RWSEM(cxl_memdev_rwsem);
15 
16 /*
17  * An entire PCI topology full of devices should be enough for any
18  * config
19  */
20 #define CXL_MEM_MAX_DEVS 65536
21 
22 static int cxl_mem_major;
23 static DEFINE_IDA(cxl_memdev_ida);
24 
25 static void cxl_memdev_release(struct device *dev)
26 {
27 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
28 
29 	ida_free(&cxl_memdev_ida, cxlmd->id);
30 	kfree(cxlmd);
31 }
32 
33 static char *cxl_memdev_devnode(const struct device *dev, umode_t *mode, kuid_t *uid,
34 				kgid_t *gid)
35 {
36 	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
37 }
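
/*
 * For example, with the "mem%d" device name assigned in
 * __devm_cxl_add_memdev(), and assuming devtmpfs is mounted, the node
 * for the first memdev appears as /dev/cxl/mem0.
 */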
38 
39 static ssize_t firmware_version_show(struct device *dev,
40 				     struct device_attribute *attr, char *buf)
41 {
42 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
43 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
44 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
45 
46 	if (!mds)
47 		return sysfs_emit(buf, "\n");
48 	return sysfs_emit(buf, "%.16s\n", mds->firmware_version);
49 }
50 static DEVICE_ATTR_RO(firmware_version);
51 
52 static ssize_t payload_max_show(struct device *dev,
53 				struct device_attribute *attr, char *buf)
54 {
55 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
56 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
57 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
58 
59 	if (!mds)
60 		return sysfs_emit(buf, "\n");
61 	return sysfs_emit(buf, "%zu\n", cxlds->cxl_mbox.payload_size);
62 }
63 static DEVICE_ATTR_RO(payload_max);
64 
65 static ssize_t label_storage_size_show(struct device *dev,
66 				       struct device_attribute *attr, char *buf)
67 {
68 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
69 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
70 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
71 
72 	if (!mds)
73 		return sysfs_emit(buf, "\n");
74 	return sysfs_emit(buf, "%zu\n", mds->lsa_size);
75 }
76 static DEVICE_ATTR_RO(label_storage_size);
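
/*
 * These attributes land in the memdev's sysfs directory; assuming a
 * device named mem0, userspace reads them as, e.g.:
 *
 *	/sys/bus/cxl/devices/mem0/firmware_version
 *	/sys/bus/cxl/devices/mem0/payload_max
 *	/sys/bus/cxl/devices/mem0/label_storage_size
 */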
77 
78 static resource_size_t cxl_ram_size(struct cxl_dev_state *cxlds)
79 {
80 	/* Static RAM is only expected at partition 0. */
81 	if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
82 		return 0;
83 	return resource_size(&cxlds->part[0].res);
84 }
85 
86 static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
87 			     char *buf)
88 {
89 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
90 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
91 	unsigned long long len = cxl_ram_size(cxlds);
92 
93 	return sysfs_emit(buf, "%#llx\n", len);
94 }
95 
96 static struct device_attribute dev_attr_ram_size =
97 	__ATTR(size, 0444, ram_size_show, NULL);
98 
99 static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
100 			      char *buf)
101 {
102 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
103 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
104 	unsigned long long len = cxl_pmem_size(cxlds);
105 
106 	return sysfs_emit(buf, "%#llx\n", len);
107 }
108 
109 static struct device_attribute dev_attr_pmem_size =
110 	__ATTR(size, 0444, pmem_size_show, NULL);
111 
112 static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
113 			   char *buf)
114 {
115 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
116 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
117 
118 	return sysfs_emit(buf, "%#llx\n", cxlds->serial);
119 }
120 static DEVICE_ATTR_RO(serial);
121 
122 static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
123 			      char *buf)
124 {
125 	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
126 }
127 static DEVICE_ATTR_RO(numa_node);
128 
129 static ssize_t security_state_show(struct device *dev,
130 				   struct device_attribute *attr,
131 				   char *buf)
132 {
133 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
134 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
135 	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
136 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
137 	unsigned long state = mds->security.state;
138 	int rc = 0;
139 
140 	/* sync with latest submission state */
141 	mutex_lock(&cxl_mbox->mbox_mutex);
142 	if (mds->security.sanitize_active)
143 		rc = sysfs_emit(buf, "sanitize\n");
144 	mutex_unlock(&cxl_mbox->mbox_mutex);
145 	if (rc)
146 		return rc;
147 
148 	if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
149 		return sysfs_emit(buf, "disabled\n");
150 	if (state & CXL_PMEM_SEC_STATE_FROZEN ||
151 	    state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
152 	    state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
153 		return sysfs_emit(buf, "frozen\n");
154 	if (state & CXL_PMEM_SEC_STATE_LOCKED)
155 		return sysfs_emit(buf, "locked\n");
156 
157 	return sysfs_emit(buf, "unlocked\n");
158 }
159 static struct device_attribute dev_attr_security_state =
160 	__ATTR(state, 0444, security_state_show, NULL);
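
/*
 * The states emitted above, in order of precedence:
 *
 *	"sanitize" - a sanitize operation is in flight
 *	"disabled" - no user passphrase is set
 *	"frozen"   - the security state is frozen, or a passphrase
 *		     attempt limit has been reached
 *	"locked"   - a user passphrase is set and the device is locked
 *	"unlocked" - a user passphrase is set and the device is unlocked
 */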
161 
162 static ssize_t security_sanitize_store(struct device *dev,
163 				       struct device_attribute *attr,
164 				       const char *buf, size_t len)
165 {
166 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
167 	bool sanitize;
168 	ssize_t rc;
169 
170 	if (kstrtobool(buf, &sanitize) || !sanitize)
171 		return -EINVAL;
172 
173 	rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE);
174 	if (rc)
175 		return rc;
176 
177 	return len;
178 }
179 static struct device_attribute dev_attr_security_sanitize =
180 	__ATTR(sanitize, 0200, NULL, security_sanitize_store);
181 
182 static ssize_t security_erase_store(struct device *dev,
183 				    struct device_attribute *attr,
184 				    const char *buf, size_t len)
185 {
186 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
187 	ssize_t rc;
188 	bool erase;
189 
190 	if (kstrtobool(buf, &erase) || !erase)
191 		return -EINVAL;
192 
193 	rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SECURE_ERASE);
194 	if (rc)
195 		return rc;
196 
197 	return len;
198 }
199 static struct device_attribute dev_attr_security_erase =
200 	__ATTR(erase, 0200, NULL, security_erase_store);
201 
202 bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
203 			       enum poison_cmd_enabled_bits cmd)
204 {
205 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
206 
207 	return test_bit(cmd, mds->poison.enabled_cmds);
208 }
209 
210 static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
211 {
212 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
213 	u64 offset, length;
214 	int rc = 0;
215 
216 	/* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */
217 	for (int i = 0; i < cxlds->nr_partitions; i++) {
218 		const struct resource *res = &cxlds->part[i].res;
219 
220 		offset = res->start;
221 		length = resource_size(res);
222 		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
223 		/*
224 		 * Invalid Physical Address is not an error for
225 		 * volatile addresses. Device support is optional.
226 		 */
227 		if (rc == -EFAULT && cxlds->part[i].mode == CXL_PARTMODE_RAM)
228 			rc = 0;
229 	}
230 	return rc;
231 }
232 
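/*
 * The ACQUIRE()/ACQUIRE_ERR() pairs below take the rwsems in a scoped,
 * interruptible fashion; the locks are dropped automatically when the
 * guards go out of scope. A rough open-coded equivalent of one pair
 * (sketch only, assuming the conditional guard wraps
 * down_read_interruptible()):
 *
 *	rc = down_read_interruptible(&cxl_rwsem.region);
 *	if (rc)
 *		return rc;
 *	...
 *	up_read(&cxl_rwsem.region);
 */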
233 int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
234 {
235 	struct cxl_port *port;
236 	int rc;
237 
238 	port = cxlmd->endpoint;
239 	if (!port || !is_cxl_endpoint(port))
240 		return -EINVAL;
241 
242 	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
243 	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
244 		return rc;
245 
246 	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
247 	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
248 		return rc;
249 
250 	if (cxl_num_decoders_committed(port) == 0) {
251 		/* No regions mapped to this memdev */
252 		rc = cxl_get_poison_by_memdev(cxlmd);
253 	} else {
254 		/* Regions mapped, collect poison by endpoint */
255 		rc = cxl_get_poison_by_endpoint(port);
256 	}
257 
258 	return rc;
259 }
260 EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, "CXL");
261 
262 static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
263 {
264 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
265 
266 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
267 		return 0;
268 
269 	if (!resource_size(&cxlds->dpa_res)) {
270 		dev_dbg(cxlds->dev, "device has no dpa resource\n");
271 		return -EINVAL;
272 	}
273 	if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa)) {
274 		dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
275 			dpa, &cxlds->dpa_res);
276 		return -EINVAL;
277 	}
278 	if (!IS_ALIGNED(dpa, 64)) {
279 		dev_dbg(cxlds->dev, "dpa:0x%llx is not 64-byte aligned\n", dpa);
280 		return -EINVAL;
281 	}
282 
283 	return 0;
284 }
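
/*
 * For example, a dpa of 0x40 that falls within dpa_res passes the
 * checks above, while 0x41 fails the 64-byte alignment check.
 */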
285 
286 int cxl_inject_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
287 {
288 	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
289 	struct cxl_mbox_inject_poison inject;
290 	struct cxl_poison_record record;
291 	struct cxl_mbox_cmd mbox_cmd;
292 	struct cxl_region *cxlr;
293 	int rc;
294 
295 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
296 		return 0;
297 
298 	lockdep_assert_held(&cxl_rwsem.dpa);
299 	lockdep_assert_held(&cxl_rwsem.region);
300 
301 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
302 	if (rc)
303 		return rc;
304 
305 	inject.address = cpu_to_le64(dpa);
306 	mbox_cmd = (struct cxl_mbox_cmd) {
307 		.opcode = CXL_MBOX_OP_INJECT_POISON,
308 		.size_in = sizeof(inject),
309 		.payload_in = &inject,
310 	};
311 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
312 	if (rc)
313 		return rc;
314 
315 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
316 	if (cxlr)
317 		dev_warn_once(cxl_mbox->host,
318 			      "poison inject dpa:%#llx region: %s\n", dpa,
319 			      dev_name(&cxlr->dev));
320 
321 	record = (struct cxl_poison_record) {
322 		.address = cpu_to_le64(dpa),
323 		.length = cpu_to_le32(1),
324 	};
325 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
326 
327 	return 0;
328 }
329 
330 int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
331 {
332 	int rc;
333 
334 	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
335 	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
336 		return rc;
337 
338 	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
339 	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
340 		return rc;
341 
342 	return cxl_inject_poison_locked(cxlmd, dpa);
343 }
344 EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");
345 
346 int cxl_clear_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
347 {
348 	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
349 	struct cxl_mbox_clear_poison clear;
350 	struct cxl_poison_record record;
351 	struct cxl_mbox_cmd mbox_cmd;
352 	struct cxl_region *cxlr;
353 	int rc;
354 
355 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
356 		return 0;
357 
358 	lockdep_assert_held(&cxl_rwsem.dpa);
359 	lockdep_assert_held(&cxl_rwsem.region);
360 
361 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
362 	if (rc)
363 		return rc;
364 
365 	/*
366 	 * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
367 	 * is defined to accept 64 bytes of write-data, along with the
368 	 * address to clear. This driver uses zeroes as write-data.
369 	 */
370 	clear = (struct cxl_mbox_clear_poison) {
371 		.address = cpu_to_le64(dpa)
372 	};
373 
374 	mbox_cmd = (struct cxl_mbox_cmd) {
375 		.opcode = CXL_MBOX_OP_CLEAR_POISON,
376 		.size_in = sizeof(clear),
377 		.payload_in = &clear,
378 	};
379 
380 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
381 	if (rc)
382 		return rc;
383 
384 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
385 	if (cxlr)
386 		dev_warn_once(cxl_mbox->host,
387 			      "poison clear dpa:%#llx region: %s\n", dpa,
388 			      dev_name(&cxlr->dev));
389 
390 	record = (struct cxl_poison_record) {
391 		.address = cpu_to_le64(dpa),
392 		.length = cpu_to_le32(1),
393 	};
394 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
395 
396 	return 0;
397 }
398 
399 int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
400 {
401 	int rc;
402 
403 	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
404 	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
405 		return rc;
406 
407 	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
408 	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
409 		return rc;
410 
411 	return cxl_clear_poison_locked(cxlmd, dpa);
412 }
413 EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");
414 
415 static struct attribute *cxl_memdev_attributes[] = {
416 	&dev_attr_serial.attr,
417 	&dev_attr_firmware_version.attr,
418 	&dev_attr_payload_max.attr,
419 	&dev_attr_label_storage_size.attr,
420 	&dev_attr_numa_node.attr,
421 	NULL,
422 };
423 
424 static struct cxl_dpa_perf *to_pmem_perf(struct cxl_dev_state *cxlds)
425 {
426 	for (int i = 0; i < cxlds->nr_partitions; i++)
427 		if (cxlds->part[i].mode == CXL_PARTMODE_PMEM)
428 			return &cxlds->part[i].perf;
429 	return NULL;
430 }
431 
432 static ssize_t pmem_qos_class_show(struct device *dev,
433 				   struct device_attribute *attr, char *buf)
434 {
435 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
436 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
437 
438 	return sysfs_emit(buf, "%d\n", to_pmem_perf(cxlds)->qos_class);
439 }
440 
441 static struct device_attribute dev_attr_pmem_qos_class =
442 	__ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
443 
444 static struct attribute *cxl_memdev_pmem_attributes[] = {
445 	&dev_attr_pmem_size.attr,
446 	&dev_attr_pmem_qos_class.attr,
447 	NULL,
448 };
449 
450 static struct cxl_dpa_perf *to_ram_perf(struct cxl_dev_state *cxlds)
451 {
452 	if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
453 		return NULL;
454 	return &cxlds->part[0].perf;
455 }
456 
457 static ssize_t ram_qos_class_show(struct device *dev,
458 				  struct device_attribute *attr, char *buf)
459 {
460 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
461 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
462 
463 	return sysfs_emit(buf, "%d\n", to_ram_perf(cxlds)->qos_class);
464 }
465 
466 static struct device_attribute dev_attr_ram_qos_class =
467 	__ATTR(qos_class, 0444, ram_qos_class_show, NULL);
468 
469 static struct attribute *cxl_memdev_ram_attributes[] = {
470 	&dev_attr_ram_size.attr,
471 	&dev_attr_ram_qos_class.attr,
472 	NULL,
473 };
474 
475 static struct attribute *cxl_memdev_security_attributes[] = {
476 	&dev_attr_security_state.attr,
477 	&dev_attr_security_sanitize.attr,
478 	&dev_attr_security_erase.attr,
479 	NULL,
480 };
481 
482 static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
483 				  int n)
484 {
485 	if (!IS_ENABLED(CONFIG_NUMA) && a == &dev_attr_numa_node.attr)
486 		return 0;
487 	return a->mode;
488 }
489 
490 static struct attribute_group cxl_memdev_attribute_group = {
491 	.attrs = cxl_memdev_attributes,
492 	.is_visible = cxl_memdev_visible,
493 };
494 
495 static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
496 {
497 	struct device *dev = kobj_to_dev(kobj);
498 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
499 	struct cxl_dpa_perf *perf = to_ram_perf(cxlmd->cxlds);
500 
501 	if (a == &dev_attr_ram_qos_class.attr &&
502 	    (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
503 		return 0;
504 
505 	return a->mode;
506 }
507 
508 static struct attribute_group cxl_memdev_ram_attribute_group = {
509 	.name = "ram",
510 	.attrs = cxl_memdev_ram_attributes,
511 	.is_visible = cxl_ram_visible,
512 };
513 
514 static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
515 {
516 	struct device *dev = kobj_to_dev(kobj);
517 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
518 	struct cxl_dpa_perf *perf = to_pmem_perf(cxlmd->cxlds);
519 
520 	if (a == &dev_attr_pmem_qos_class.attr &&
521 	    (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
522 		return 0;
523 
524 	return a->mode;
525 }
526 
527 static struct attribute_group cxl_memdev_pmem_attribute_group = {
528 	.name = "pmem",
529 	.attrs = cxl_memdev_pmem_attributes,
530 	.is_visible = cxl_pmem_visible,
531 };
532 
533 static umode_t cxl_memdev_security_visible(struct kobject *kobj,
534 					   struct attribute *a, int n)
535 {
536 	struct device *dev = kobj_to_dev(kobj);
537 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
538 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
539 
540 	if (a == &dev_attr_security_sanitize.attr &&
541 	    !test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
542 		return 0;
543 
544 	if (a == &dev_attr_security_erase.attr &&
545 	    !test_bit(CXL_SEC_ENABLED_SECURE_ERASE, mds->security.enabled_cmds))
546 		return 0;
547 
548 	return a->mode;
549 }
550 
551 static struct attribute_group cxl_memdev_security_attribute_group = {
552 	.name = "security",
553 	.attrs = cxl_memdev_security_attributes,
554 	.is_visible = cxl_memdev_security_visible,
555 };
556 
557 static const struct attribute_group *cxl_memdev_attribute_groups[] = {
558 	&cxl_memdev_attribute_group,
559 	&cxl_memdev_ram_attribute_group,
560 	&cxl_memdev_pmem_attribute_group,
561 	&cxl_memdev_security_attribute_group,
562 	NULL,
563 };
564 
565 void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
566 {
567 	sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
568 	sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
569 }
570 EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, "CXL");
571 
572 static const struct device_type cxl_memdev_type = {
573 	.name = "cxl_memdev",
574 	.release = cxl_memdev_release,
575 	.devnode = cxl_memdev_devnode,
576 	.groups = cxl_memdev_attribute_groups,
577 };
578 
579 bool is_cxl_memdev(const struct device *dev)
580 {
581 	return dev->type == &cxl_memdev_type;
582 }
583 EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, "CXL");
584 
585 /**
586  * set_exclusive_cxl_commands() - atomically disable user cxl commands
587  * @mds: The device state to operate on
588  * @cmds: bitmap of commands to mark exclusive
589  *
590  * Grab the cxl_memdev_rwsem in write mode to flush in-flight
591  * invocations of the ioctl path and then disable future execution of
592  * commands with the command ids set in @cmds.
593  */
594 void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
595 				unsigned long *cmds)
596 {
597 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
598 
599 	guard(rwsem_write)(&cxl_memdev_rwsem);
600 	bitmap_or(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
601 		  cmds, CXL_MEM_COMMAND_ID_MAX);
602 }
603 EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, "CXL");
604 
605 /**
606  * clear_exclusive_cxl_commands() - atomically enable user cxl commands
607  * @mds: The device state to modify
608  * @cmds: bitmap of commands to mark available for userspace
609  */
610 void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
611 				  unsigned long *cmds)
612 {
613 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
614 
615 	guard(rwsem_write)(&cxl_memdev_rwsem);
616 	bitmap_andnot(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
617 		      cmds, CXL_MEM_COMMAND_ID_MAX);
618 }
619 EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, "CXL");
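
/*
 * Typical usage (a sketch based on the LSA-provisioning case): a kernel
 * consumer such as the cxl_pmem driver marks label commands exclusive
 * while it owns nvdimm label manipulation:
 *
 *	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
 *	set_exclusive_cxl_commands(mds, exclusive_cmds);
 *	...
 *	clear_exclusive_cxl_commands(mds, exclusive_cmds);
 */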
620 
621 static void cxl_memdev_shutdown(struct device *dev)
622 {
623 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
624 
625 	guard(rwsem_write)(&cxl_memdev_rwsem);
626 	cxlmd->cxlds = NULL;
627 }
628 
629 static void cxl_memdev_unregister(void *_cxlmd)
630 {
631 	struct cxl_memdev *cxlmd = _cxlmd;
632 	struct device *dev = &cxlmd->dev;
633 
634 	cdev_device_del(&cxlmd->cdev, dev);
635 	cxl_memdev_shutdown(dev);
636 	put_device(dev);
637 }
638 
639 static void detach_memdev(struct work_struct *work)
640 {
641 	struct cxl_memdev *cxlmd;
642 
643 	cxlmd = container_of(work, typeof(*cxlmd), detach_work);
644 	device_release_driver(&cxlmd->dev);
645 	put_device(&cxlmd->dev);
646 }
647 
648 static struct lock_class_key cxl_memdev_key;
649 
650 static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
651 					   const struct file_operations *fops)
652 {
653 	struct cxl_memdev *cxlmd;
654 	struct device *dev;
655 	struct cdev *cdev;
656 	int rc;
657 
658 	cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
659 	if (!cxlmd)
660 		return ERR_PTR(-ENOMEM);
661 
662 	rc = ida_alloc_max(&cxl_memdev_ida, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
663 	if (rc < 0)
664 		goto err;
665 	cxlmd->id = rc;
666 	cxlmd->depth = -1;
667 
668 	dev = &cxlmd->dev;
669 	device_initialize(dev);
670 	lockdep_set_class(&dev->mutex, &cxl_memdev_key);
671 	dev->parent = cxlds->dev;
672 	dev->bus = &cxl_bus_type;
673 	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
674 	dev->type = &cxl_memdev_type;
675 	device_set_pm_not_required(dev);
676 	INIT_WORK(&cxlmd->detach_work, detach_memdev);
677 
678 	cdev = &cxlmd->cdev;
679 	cdev_init(cdev, fops);
680 	return cxlmd;
681 
682 err:
683 	kfree(cxlmd);
684 	return ERR_PTR(rc);
685 }
686 
687 static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
688 			       unsigned long arg)
689 {
690 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
691 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
692 
693 	switch (cmd) {
694 	case CXL_MEM_QUERY_COMMANDS:
695 		return cxl_query_cmd(cxl_mbox, (void __user *)arg);
696 	case CXL_MEM_SEND_COMMAND:
697 		return cxl_send_cmd(cxl_mbox, (void __user *)arg);
698 	default:
699 		return -ENOTTY;
700 	}
701 }
702 
703 static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
704 			     unsigned long arg)
705 {
706 	struct cxl_memdev *cxlmd = file->private_data;
707 	struct cxl_dev_state *cxlds;
708 
709 	guard(rwsem_read)(&cxl_memdev_rwsem);
710 	cxlds = cxlmd->cxlds;
711 	if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM)
712 		return __cxl_memdev_ioctl(cxlmd, cmd, arg);
713 
714 	return -ENXIO;
715 }
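
/*
 * Userspace sketch (assuming the UAPI in include/uapi/linux/cxl_mem.h):
 * passing n_commands == 0 asks the kernel to report how many commands
 * are available, so a caller can size a second query:
 *
 *	struct cxl_mem_query_commands q = { .n_commands = 0 };
 *	int fd = open("/dev/cxl/mem0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, CXL_MEM_QUERY_COMMANDS, &q) == 0)
 *		printf("%u commands supported\n", q.n_commands);
 */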
716 
717 static int cxl_memdev_open(struct inode *inode, struct file *file)
718 {
719 	struct cxl_memdev *cxlmd =
720 		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
721 
722 	get_device(&cxlmd->dev);
723 	file->private_data = cxlmd;
724 
725 	return 0;
726 }
727 
728 static int cxl_memdev_release_file(struct inode *inode, struct file *file)
729 {
730 	struct cxl_memdev *cxlmd =
731 		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
732 
733 	put_device(&cxlmd->dev);
734 
735 	return 0;
736 }
737 
738 /**
739  * cxl_mem_get_fw_info - Get Firmware info
740  * @mds: The device data for the operation
741  *
742  * Retrieve firmware info for the device specified.
743  *
744  * Return: 0 if no error; otherwise the result of the mailbox command.
745  *
746  * See CXL-3.0 8.2.9.3.1 Get FW Info
747  */
748 static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
749 {
750 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
751 	struct cxl_mbox_get_fw_info info;
752 	struct cxl_mbox_cmd mbox_cmd;
753 	int rc;
754 
755 	mbox_cmd = (struct cxl_mbox_cmd) {
756 		.opcode = CXL_MBOX_OP_GET_FW_INFO,
757 		.size_out = sizeof(info),
758 		.payload_out = &info,
759 	};
760 
761 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
762 	if (rc < 0)
763 		return rc;
764 
765 	mds->fw.num_slots = info.num_slots;
766 	mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK,
767 				       info.slot_info);
768 
769 	return 0;
770 }
771 
772 /**
773  * cxl_mem_activate_fw - Activate Firmware
774  * @mds: The device data for the operation
775  * @slot: slot number to activate
776  *
777  * Activate firmware in a given slot for the device specified.
778  *
779  * Return: 0 if no error: or the result of the mailbox command.
780  *
781  * See CXL-3.0 8.2.9.3.3 Activate FW
782  */
783 static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
784 {
785 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
786 	struct cxl_mbox_activate_fw activate;
787 	struct cxl_mbox_cmd mbox_cmd;
788 
789 	if (slot == 0 || slot > mds->fw.num_slots)
790 		return -EINVAL;
791 
792 	mbox_cmd = (struct cxl_mbox_cmd) {
793 		.opcode = CXL_MBOX_OP_ACTIVATE_FW,
794 		.size_in = sizeof(activate),
795 		.payload_in = &activate,
796 	};
797 
798 	/* Only offline activation supported for now */
799 	activate.action = CXL_FW_ACTIVATE_OFFLINE;
800 	activate.slot = slot;
801 
802 	return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
803 }
804 
805 /**
806  * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer
807  * @mds: The device data for the operation
808  *
809  * Abort an in-progress firmware transfer for the device specified.
810  *
811  * Return: 0 if no error: or the result of the mailbox command.
812  *
813  * See CXL-3.0 8.2.9.3.2 Transfer FW
814  */
815 static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
816 {
817 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
818 	struct cxl_mbox_transfer_fw *transfer;
819 	struct cxl_mbox_cmd mbox_cmd;
820 	int rc;
821 
822 	transfer = kzalloc(struct_size(transfer, data, 0), GFP_KERNEL);
823 	if (!transfer)
824 		return -ENOMEM;
825 
826 	/* Set a 1s poll interval and a total wait time of 30s */
827 	mbox_cmd = (struct cxl_mbox_cmd) {
828 		.opcode = CXL_MBOX_OP_TRANSFER_FW,
829 		.size_in = sizeof(*transfer),
830 		.payload_in = transfer,
831 		.poll_interval_ms = 1000,
832 		.poll_count = 30,
833 	};
834 
835 	transfer->action = CXL_FW_TRANSFER_ACTION_ABORT;
836 
837 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
838 	kfree(transfer);
839 	return rc;
840 }
841 
842 static void cxl_fw_cleanup(struct fw_upload *fwl)
843 {
844 	struct cxl_memdev_state *mds = fwl->dd_handle;
845 
846 	mds->fw.next_slot = 0;
847 }
848 
849 static int cxl_fw_do_cancel(struct fw_upload *fwl)
850 {
851 	struct cxl_memdev_state *mds = fwl->dd_handle;
852 	struct cxl_dev_state *cxlds = &mds->cxlds;
853 	struct cxl_memdev *cxlmd = cxlds->cxlmd;
854 	int rc;
855 
856 	rc = cxl_mem_abort_fw_xfer(mds);
857 	if (rc < 0)
858 		dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc);
859 
860 	return FW_UPLOAD_ERR_CANCELED;
861 }
862 
863 static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
864 					 u32 size)
865 {
866 	struct cxl_memdev_state *mds = fwl->dd_handle;
867 	struct cxl_mbox_transfer_fw *transfer;
868 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
869 
870 	if (!size)
871 		return FW_UPLOAD_ERR_INVALID_SIZE;
872 
873 	mds->fw.oneshot = struct_size(transfer, data, size) <
874 			    cxl_mbox->payload_size;
875 
876 	if (cxl_mem_get_fw_info(mds))
877 		return FW_UPLOAD_ERR_HW_ERROR;
878 
879 	/*
880 	 * So far no state has been changed, hence no other cleanup is
881 	 * necessary. Simply return the cancelled status.
882 	 */
883 	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
884 		return FW_UPLOAD_ERR_CANCELED;
885 
886 	return FW_UPLOAD_ERR_NONE;
887 }
888 
889 static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
890 				       u32 offset, u32 size, u32 *written)
891 {
892 	struct cxl_memdev_state *mds = fwl->dd_handle;
893 	struct cxl_dev_state *cxlds = &mds->cxlds;
894 	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
895 	struct cxl_memdev *cxlmd = cxlds->cxlmd;
896 	struct cxl_mbox_transfer_fw *transfer;
897 	struct cxl_mbox_cmd mbox_cmd;
898 	u32 cur_size, remaining;
899 	size_t size_in;
900 	int rc;
901 
902 	*written = 0;
903 
904 	/* Offset has to be aligned to 128B (CXL-3.0 8.2.9.3.2 Table 8-57) */
905 	if (!IS_ALIGNED(offset, CXL_FW_TRANSFER_ALIGNMENT)) {
906 		dev_err(&cxlmd->dev,
907 			"misaligned offset for FW transfer slice (%u)\n",
908 			offset);
909 		return FW_UPLOAD_ERR_RW_ERROR;
910 	}
911 
912 	/*
913 	 * Pick transfer size based on cxl_mbox->payload_size. @size must be
914 	 * 128-byte aligned, ->payload_size is a power of 2 starting at 256
915 	 * bytes, and sizeof(*transfer) is 128. These constraints imply that
916 	 * @cur_size will always be 128-byte aligned.
917 	 */
918 	cur_size = min_t(size_t, size, cxl_mbox->payload_size - sizeof(*transfer));
919 
920 	remaining = size - cur_size;
921 	size_in = struct_size(transfer, data, cur_size);
922 
923 	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
924 		return cxl_fw_do_cancel(fwl);
925 
926 	/*
927 	 * Slot numbers are 1-indexed. Treating cur_slot as the 0-indexed
928 	 * next_slot, the modulo handles rollover and the '+ 1' restores
929 	 * 1-indexing: e.g. with num_slots == 3, cur_slot == 3 yields
930 	 * next_slot == (3 % 3) + 1 == 1.
930 	 */
931 	mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1;
932 
933 	/* Do the transfer via mailbox cmd */
934 	transfer = kzalloc(size_in, GFP_KERNEL);
935 	if (!transfer)
936 		return FW_UPLOAD_ERR_RW_ERROR;
937 
938 	transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT);
939 	memcpy(transfer->data, data + offset, cur_size);
940 	if (mds->fw.oneshot) {
941 		transfer->action = CXL_FW_TRANSFER_ACTION_FULL;
942 		transfer->slot = mds->fw.next_slot;
943 	} else {
944 		if (offset == 0) {
945 			transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE;
946 		} else if (remaining == 0) {
947 			transfer->action = CXL_FW_TRANSFER_ACTION_END;
948 			transfer->slot = mds->fw.next_slot;
949 		} else {
950 			transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE;
951 		}
952 	}
953 
954 	mbox_cmd = (struct cxl_mbox_cmd) {
955 		.opcode = CXL_MBOX_OP_TRANSFER_FW,
956 		.size_in = size_in,
957 		.payload_in = transfer,
958 		.poll_interval_ms = 1000,
959 		.poll_count = 30,
960 	};
961 
962 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
963 	if (rc < 0) {
964 		rc = FW_UPLOAD_ERR_RW_ERROR;
965 		goto out_free;
966 	}
967 
968 	*written = cur_size;
969 
970 	/* Activate FW if oneshot or if the last slice was written */
971 	if (mds->fw.oneshot || remaining == 0) {
972 		dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n",
973 			mds->fw.next_slot);
974 		rc = cxl_mem_activate_fw(mds, mds->fw.next_slot);
975 		if (rc < 0) {
976 			dev_err(&cxlmd->dev, "Error activating firmware: %d\n",
977 				rc);
978 			rc = FW_UPLOAD_ERR_HW_ERROR;
979 			goto out_free;
980 		}
981 	}
982 
983 	rc = FW_UPLOAD_ERR_NONE;
984 
985 out_free:
986 	kfree(transfer);
987 	return rc;
988 }
989 
990 static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl)
991 {
992 	struct cxl_memdev_state *mds = fwl->dd_handle;
993 
994 	/*
995 	 * cxl_internal_send_cmd() handles background operations synchronously.
996 	 * No need to wait for completions here - any errors would've been
997 	 * reported and handled during the ->write() call(s).
998 	 * Just check if a cancel request was received, and return success.
999 	 */
1000 	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
1001 		return cxl_fw_do_cancel(fwl);
1002 
1003 	return FW_UPLOAD_ERR_NONE;
1004 }
1005 
1006 static void cxl_fw_cancel(struct fw_upload *fwl)
1007 {
1008 	struct cxl_memdev_state *mds = fwl->dd_handle;
1009 
1010 	set_bit(CXL_FW_CANCEL, mds->fw.state);
1011 }
1012 
1013 static const struct fw_upload_ops cxl_memdev_fw_ops = {
1014 	.prepare = cxl_fw_prepare,
1015 	.write = cxl_fw_write,
1016 	.poll_complete = cxl_fw_poll_complete,
1017 	.cancel = cxl_fw_cancel,
1018 	.cleanup = cxl_fw_cleanup,
1019 };
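
/*
 * These ops plug into the firmware upload API
 * (Documentation/driver-api/firmware/fw_upload.rst). Assuming a memdev
 * registered as "mem0", an upload is driven from sysfs roughly as:
 *
 *	echo 1 > /sys/class/firmware/mem0/loading
 *	cat fw.bin > /sys/class/firmware/mem0/data
 *	echo 0 > /sys/class/firmware/mem0/loading
 *
 * which invokes ->prepare() once, ->write() per chunk, then
 * ->poll_complete(), with ->cancel() and ->cleanup() on the abort path.
 */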
1020 
1021 static void cxl_remove_fw_upload(void *fwl)
1022 {
1023 	firmware_upload_unregister(fwl);
1024 }
1025 
1026 int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
1027 {
1028 	struct cxl_dev_state *cxlds = &mds->cxlds;
1029 	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
1030 	struct device *dev = &cxlds->cxlmd->dev;
1031 	struct fw_upload *fwl;
1032 
1033 	if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, cxl_mbox->enabled_cmds))
1034 		return 0;
1035 
1036 	fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
1037 				       &cxl_memdev_fw_ops, mds);
1038 	if (IS_ERR(fwl))
1039 		return PTR_ERR(fwl);
1040 	return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl);
1041 }
1042 EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, "CXL");
1043 
1044 static const struct file_operations cxl_memdev_fops = {
1045 	.owner = THIS_MODULE,
1046 	.unlocked_ioctl = cxl_memdev_ioctl,
1047 	.open = cxl_memdev_open,
1048 	.release = cxl_memdev_release_file,
1049 	.compat_ioctl = compat_ptr_ioctl,
1050 	.llseek = noop_llseek,
1051 };
1052 
1053 /*
1054  * Activate ioctl operations; no cxl_memdev_rwsem manipulation is needed as
1055  * this is ordered with cdev_add() publishing the device.
1056  */
1057 static int cxlmd_add(struct cxl_memdev *cxlmd, struct cxl_dev_state *cxlds)
1058 {
1059 	int rc;
1060 
1061 	cxlmd->cxlds = cxlds;
1062 	cxlds->cxlmd = cxlmd;
1063 
1064 	rc = cdev_device_add(&cxlmd->cdev, &cxlmd->dev);
1065 	if (rc) {
1066 		/*
1067 		 * The cdev was briefly live; shut down any ioctl operations
1068 		 * that saw that state.
1069 		 */
1070 		cxl_memdev_shutdown(&cxlmd->dev);
1071 		return rc;
1072 	}
1073 
1074 	return 0;
1075 }
1076 
1077 DEFINE_FREE(put_cxlmd, struct cxl_memdev *,
1078 	    if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
1079 
1080 static struct cxl_memdev *cxl_memdev_autoremove(struct cxl_memdev *cxlmd)
1081 {
1082 	int rc;
1083 
1084 	rc = devm_add_action_or_reset(cxlmd->cxlds->dev, cxl_memdev_unregister,
1085 				      cxlmd);
1086 	if (rc)
1087 		return ERR_PTR(rc);
1088 
1089 	return cxlmd;
1090 }
1091 
1092 /*
1093  * Core helper for devm_cxl_add_memdev() that wants to both create a device and
1094  * assert to the caller that upon return cxl_mem::probe() has been invoked.
1095  */
1096 struct cxl_memdev *__devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
1097 {
1098 	struct device *dev;
1099 	int rc;
1100 
1101 	struct cxl_memdev *cxlmd __free(put_cxlmd) =
1102 		cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
1103 	if (IS_ERR(cxlmd))
1104 		return cxlmd;
1105 
1106 	dev = &cxlmd->dev;
1107 	rc = dev_set_name(dev, "mem%d", cxlmd->id);
1108 	if (rc)
1109 		return ERR_PTR(rc);
1110 
1111 	rc = cxlmd_add(cxlmd, cxlds);
1112 	if (rc)
1113 		return ERR_PTR(rc);
1114 
1115 	return cxl_memdev_autoremove(no_free_ptr(cxlmd));
1116 }
1117 EXPORT_SYMBOL_FOR_MODULES(__devm_cxl_add_memdev, "cxl_mem");
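
/*
 * Note the cleanup idiom above: __free(put_cxlmd) drops the device
 * reference automatically on any error return, while no_free_ptr()
 * transfers ownership out of the scope on success.
 */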
1118 
1119 static void sanitize_teardown_notifier(void *data)
1120 {
1121 	struct cxl_memdev_state *mds = data;
1122 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1123 	struct kernfs_node *state;
1124 
1125 	/*
1126 	 * Prevent new irq triggered invocations of the workqueue and
1127 	 * flush inflight invocations.
1128 	 */
1129 	mutex_lock(&cxl_mbox->mbox_mutex);
1130 	state = mds->security.sanitize_node;
1131 	mds->security.sanitize_node = NULL;
1132 	mutex_unlock(&cxl_mbox->mbox_mutex);
1133 
1134 	cancel_delayed_work_sync(&mds->security.poll_dwork);
1135 	sysfs_put(state);
1136 }
1137 
1138 int devm_cxl_sanitize_setup_notifier(struct device *host,
1139 				     struct cxl_memdev *cxlmd)
1140 {
1141 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
1142 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
1143 	struct kernfs_node *sec;
1144 
1145 	if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
1146 		return 0;
1147 
1148 	/*
1149 	 * Note: the expectation is that @cxlmd would have failed to be
1150 	 * created if these sysfs_get_dirent() calls were to fail.
1151 	 */
1152 	sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security");
1153 	if (!sec)
1154 		return -ENOENT;
1155 	mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
1156 	sysfs_put(sec);
1157 	if (!mds->security.sanitize_node)
1158 		return -ENOENT;
1159 
1160 	return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds);
1161 }
1162 EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, "CXL");
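
/*
 * With the "security/state" dirent cached, the sanitize completion path
 * can sysfs_notify_dirent() that attribute so userspace pollers wake up
 * when the state leaves "sanitize" (the notify itself lives in the
 * mailbox sanitize handling).
 */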
1163 
1164 __init int cxl_memdev_init(void)
1165 {
1166 	dev_t devt;
1167 	int rc;
1168 
1169 	rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
1170 	if (rc)
1171 		return rc;
1172 
1173 	cxl_mem_major = MAJOR(devt);
1174 
1175 	return 0;
1176 }
1177 
1178 void cxl_memdev_exit(void)
1179 {
1180 	unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
1181 }
1182