xref: /linux/tools/testing/cxl/test/mem.c (revision 1729808c544a7edf6e8eed83e923d8082fb24512)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/vmalloc.h>
7 #include <linux/module.h>
8 #include <linux/delay.h>
9 #include <linux/sizes.h>
10 #include <linux/bits.h>
11 #include <cxl/mailbox.h>
12 #include <linux/unaligned.h>
13 #include <crypto/sha2.h>
14 #include <cxlmem.h>
15 
16 #include "trace.h"
17 
#define LSA_SIZE SZ_128K
#define FW_SIZE SZ_64M
#define FW_SLOTS 3
#define DEV_SIZE SZ_2G
/* Build one command-effects bit; (x) is parenthesized so expression args work */
#define EFFECT(x) (1U << (x))

#define MOCK_INJECT_DEV_MAX 8
#define MOCK_INJECT_TEST_MAX 128

/* Per-device cap on injected poison records (device-reported limit) */
static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
28 
/* Bit positions combined via EFFECT() to build a CEL entry's effect mask */
enum cxl_command_effects {
	CONF_CHANGE_COLD_RESET = 0,
	CONF_CHANGE_IMMEDIATE,
	DATA_CHANGE_IMMEDIATE,
	POLICY_CHANGE_IMMEDIATE,
	LOG_CHANGE_IMMEDIATE,
	SECURITY_CHANGE_IMMEDIATE,
	BACKGROUND_OP,
	SECONDARY_MBOX_SUPPORTED,
};
39 
#define CXL_CMD_EFFECT_NONE cpu_to_le16(0)

/*
 * Mock Command Effects Log: the mailbox opcodes this emulated device
 * advertises, each paired with the command-effects mask it reports.
 */
static struct cxl_cel_entry mock_cel[] = {
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_FEATURES),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FEATURE),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_FEATURE),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
				      EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(BACKGROUND_OP)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(CONF_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SANITIZE),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE) |
				      EFFECT(SECURITY_CHANGE_IMMEDIATE) |
				      EFFECT(BACKGROUND_OP)),
	},
};
113 
/* See CXL 2.0 Table 181 Get Health Info Output Payload */
struct cxl_mbox_health_info {
	u8 health_status;	/* overall device health flags */
	u8 media_status;	/* media state (e.g. data loss) */
	u8 ext_status;		/* extended status bit-pairs */
	u8 life_used;		/* percentage of rated life consumed */
	__le16 temperature;	/* NOTE(review): presumably degrees C — confirm against spec */
	__le32 dirty_shutdowns;
	__le32 volatile_errors;
	__le32 pmem_errors;
} __packed;
125 
/* Get Supported Logs response: a single entry describing the mock CEL */
static struct {
	struct cxl_mbox_get_supported_logs gsl;
	struct cxl_gsl_entry entry;
} mock_gsl_payload = {
	.gsl = {
		.entries = cpu_to_le16(1),
	},
	.entry = {
		.uuid = DEFINE_CXL_CEL_UUID,
		.size = cpu_to_le32(sizeof(mock_cel)),
	},
};
138 
/* Failed passphrase attempts allowed before the PLIMIT state latches */
#define PASS_TRY_LIMIT 3

/* Capacity of each mock event log */
#define CXL_TEST_EVENT_CNT_MAX 15

/* Set a number of events to return at a time for simulation.  */
#define CXL_TEST_EVENT_RET_MAX 4
145 
/*
 * One mock event log: a fixed array of records with separate read
 * (cur_idx) and clear (clear_idx) cursors, both 0-based.
 */
struct mock_event_log {
	u16 clear_idx;		/* next record expected to be cleared */
	u16 cur_idx;		/* next record to hand to the host */
	u16 nr_events;		/* number of valid entries in events[] */
	u16 nr_overflow;	/* records dropped because the log was full */
	u16 overflow_reset;	/* overflow value restored on log reset */
	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
};
154 
/* All mock logs for a device plus the aggregate event-status bits */
struct mock_event_store {
	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
	u32 ev_status;
};
159 
/* Payload for the mock vendor-specific feature (Get/Set Feature tests) */
struct vendor_test_feat {
	__le32 data;
} __packed;
163 
/* Per-device state for the mock memdev, stashed in drvdata */
struct cxl_mockmem_data {
	void *lsa;		/* backing store for the Label Storage Area */
	void *fw;		/* backing store for firmware transfers */
	int fw_slot;		/* currently active firmware slot */
	int fw_staged;		/* slot staged by a transfer, pre-activation */
	size_t fw_size;
	u32 security_state;	/* CXL_PMEM_SEC_STATE_* flags */
	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
	int user_limit;		/* failed user-passphrase attempts */
	int master_limit;	/* failed master-passphrase attempts */
	struct mock_event_store mes;
	struct cxl_memdev_state *mds;
	u8 event_buf[SZ_4K];
	u64 timestamp;		/* value set via Set Timestamp */
	unsigned long sanitize_timeout;	/* ms before sanitize "completes" */
	struct vendor_test_feat test_feat;
};
182 
183 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
184 {
185 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
186 
187 	if (log_type >= CXL_EVENT_TYPE_MAX)
188 		return NULL;
189 	return &mdata->mes.mock_logs[log_type];
190 }
191 
192 static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
193 {
194 	return log->events[log->cur_idx];
195 }
196 
197 static void event_reset_log(struct mock_event_log *log)
198 {
199 	log->cur_idx = 0;
200 	log->clear_idx = 0;
201 	log->nr_overflow = log->overflow_reset;
202 }
203 
204 /* Handle can never be 0 use 1 based indexing for handle */
205 static u16 event_get_clear_handle(struct mock_event_log *log)
206 {
207 	return log->clear_idx + 1;
208 }
209 
210 /* Handle can never be 0 use 1 based indexing for handle */
211 static __le16 event_get_cur_event_handle(struct mock_event_log *log)
212 {
213 	u16 cur_handle = log->cur_idx + 1;
214 
215 	return cpu_to_le16(cur_handle);
216 }
217 
218 static bool event_log_empty(struct mock_event_log *log)
219 {
220 	return log->cur_idx == log->nr_events;
221 }
222 
223 static void mes_add_event(struct mock_event_store *mes,
224 			  enum cxl_event_log_type log_type,
225 			  struct cxl_event_record_raw *event)
226 {
227 	struct mock_event_log *log;
228 
229 	if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
230 		return;
231 
232 	log = &mes->mock_logs[log_type];
233 
234 	if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
235 		log->nr_overflow++;
236 		log->overflow_reset = log->nr_overflow;
237 		return;
238 	}
239 
240 	log->events[log->nr_events] = event;
241 	log->nr_events++;
242 }
243 
/*
 * Vary the number of events returned to simulate events occurring while the
 * logs are being read.
 */
248 static int ret_limit = 0;
249 
250 static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
251 {
252 	struct cxl_get_event_payload *pl;
253 	struct mock_event_log *log;
254 	u16 nr_overflow;
255 	u8 log_type;
256 	int i;
257 
258 	if (cmd->size_in != sizeof(log_type))
259 		return -EINVAL;
260 
261 	ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX;
262 	if (!ret_limit)
263 		ret_limit = 1;
264 
265 	if (cmd->size_out < struct_size(pl, records, ret_limit))
266 		return -EINVAL;
267 
268 	log_type = *((u8 *)cmd->payload_in);
269 	if (log_type >= CXL_EVENT_TYPE_MAX)
270 		return -EINVAL;
271 
272 	memset(cmd->payload_out, 0, struct_size(pl, records, 0));
273 
274 	log = event_find_log(dev, log_type);
275 	if (!log || event_log_empty(log))
276 		return 0;
277 
278 	pl = cmd->payload_out;
279 
280 	for (i = 0; i < ret_limit && !event_log_empty(log); i++) {
281 		memcpy(&pl->records[i], event_get_current(log),
282 		       sizeof(pl->records[i]));
283 		pl->records[i].event.generic.hdr.handle =
284 				event_get_cur_event_handle(log);
285 		log->cur_idx++;
286 	}
287 
288 	cmd->size_out = struct_size(pl, records, i);
289 	pl->record_count = cpu_to_le16(i);
290 	if (!event_log_empty(log))
291 		pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
292 
293 	if (log->nr_overflow) {
294 		u64 ns;
295 
296 		pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
297 		pl->overflow_err_count = cpu_to_le16(nr_overflow);
298 		ns = ktime_get_real_ns();
299 		ns -= 5000000000; /* 5s ago */
300 		pl->first_overflow_timestamp = cpu_to_le64(ns);
301 		ns = ktime_get_real_ns();
302 		ns -= 1000000000; /* 1s ago */
303 		pl->last_overflow_timestamp = cpu_to_le64(ns);
304 	}
305 
306 	return 0;
307 }
308 
309 static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
310 {
311 	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
312 	struct mock_event_log *log;
313 	u8 log_type = pl->event_log;
314 	u16 handle;
315 	int nr;
316 
317 	if (log_type >= CXL_EVENT_TYPE_MAX)
318 		return -EINVAL;
319 
320 	log = event_find_log(dev, log_type);
321 	if (!log)
322 		return 0; /* No mock data in this log */
323 
324 	/*
325 	 * This check is technically not invalid per the specification AFAICS.
326 	 * (The host could 'guess' handles and clear them in order).
327 	 * However, this is not good behavior for the host so test it.
328 	 */
329 	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
330 		dev_err(dev,
331 			"Attempting to clear more events than returned!\n");
332 		return -EINVAL;
333 	}
334 
335 	/* Check handle order prior to clearing events */
336 	for (nr = 0, handle = event_get_clear_handle(log);
337 	     nr < pl->nr_recs;
338 	     nr++, handle++) {
339 		if (handle != le16_to_cpu(pl->handles[nr])) {
340 			dev_err(dev, "Clearing events out of order\n");
341 			return -EINVAL;
342 		}
343 	}
344 
345 	if (log->nr_overflow)
346 		log->nr_overflow = 0;
347 
348 	/* Clear events */
349 	log->clear_idx += pl->nr_recs;
350 	return 0;
351 }
352 
353 static void cxl_mock_event_trigger(struct device *dev)
354 {
355 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
356 	struct mock_event_store *mes = &mdata->mes;
357 	int i;
358 
359 	for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
360 		struct mock_event_log *log;
361 
362 		log = event_find_log(dev, i);
363 		if (log)
364 			event_reset_log(log);
365 	}
366 
367 	cxl_mem_get_event_records(mdata->mds, mes->ev_status);
368 }
369 
/* Raw event record flagged "maintenance needed" (arbitrary test UUID) */
struct cxl_event_record_raw maint_needed = {
	.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xa5b6),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
383 
/* Raw event record flagged "hardware replacement needed" (arbitrary test UUID) */
struct cxl_event_record_raw hardware_replace = {
	.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xb6a5),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
397 
/* UUID + General Media event record, packed as carried in a raw payload */
struct cxl_test_gen_media {
	uuid_t id;
	struct cxl_event_gen_media rec;
} __packed;
402 
/* Canned General Media event; validity_flags filled in at init time */
struct cxl_test_gen_media gen_media = {
	.id = CXL_EVENT_GEN_MEDIA_UUID,
	.rec = {
		.media_hdr = {
			.hdr = {
				.length = sizeof(struct cxl_test_gen_media),
				.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
				/* .handle = Set dynamically */
				.related_handle = cpu_to_le16(0),
			},
			.phys_addr = cpu_to_le64(0x2000),
			.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
			.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
			.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
			/* .validity_flags = <set below> */
			.channel = 1,
			.rank = 30,
		},
		.component_id = { 0x3, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
		.cme_threshold_ev_flags = 3,
		.cme_count = { 33, 0, 0 },
		.sub_type = 0x2,
	},
};
427 
/* UUID + DRAM event record, packed as carried in a raw payload */
struct cxl_test_dram {
	uuid_t id;
	struct cxl_event_dram rec;
} __packed;
432 
/* Canned DRAM event; validity_flags filled in at init time */
struct cxl_test_dram dram = {
	.id = CXL_EVENT_DRAM_UUID,
	.rec = {
		.media_hdr = {
			.hdr = {
				.length = sizeof(struct cxl_test_dram),
				.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
				/* .handle = Set dynamically */
				.related_handle = cpu_to_le16(0),
			},
			.phys_addr = cpu_to_le64(0x8000),
			.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
			.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
			.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
			/* .validity_flags = <set below> */
			.channel = 1,
		},
		.bank_group = 5,
		.bank = 2,
		.column = {0xDE, 0xAD},
		.component_id = { 0x1, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
		.sub_channel = 8,
		.cme_threshold_ev_flags = 2,
		.cvme_count = { 14, 0, 0 },
		.sub_type = 0x5,
	},
};
460 
/* UUID + Memory Module event record, packed as carried in a raw payload */
struct cxl_test_mem_module {
	uuid_t id;
	struct cxl_event_mem_module rec;
} __packed;
465 
/* Canned Memory Module event; validity_flags filled in at init time */
struct cxl_test_mem_module mem_module = {
	.id = CXL_EVENT_MEM_MODULE_UUID,
	.rec = {
		.hdr = {
			.length = sizeof(struct cxl_test_mem_module),
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0),
		},
		.event_type = CXL_MMER_TEMP_CHANGE,
		.info = {
			.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
			.media_status = CXL_DHI_MS_ALL_DATA_LOST,
			.add_status = (CXL_DHI_AS_CRITICAL << 2) |
				      (CXL_DHI_AS_WARNING << 4) |
				      (CXL_DHI_AS_WARNING << 5),
			.device_temp = { 0xDE, 0xAD},
			.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
		},
		/* .validity_flags = <set below> */
		.component_id = { 0x2, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
		.event_sub_type = 0x3,
	},
};
491 
492 static int mock_set_timestamp(struct cxl_dev_state *cxlds,
493 			      struct cxl_mbox_cmd *cmd)
494 {
495 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
496 	struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
497 
498 	if (cmd->size_in != sizeof(*ts))
499 		return -EINVAL;
500 
501 	if (cmd->size_out != 0)
502 		return -EINVAL;
503 
504 	mdata->timestamp = le64_to_cpu(ts->timestamp);
505 	return 0;
506 }
507 
/*
 * Populate the mock event logs: finish the canned records' validity flags,
 * then queue a known mix of events on the INFO, FAIL (deliberately
 * overflowed), and FATAL logs, accumulating the event-status bits.
 */
static void cxl_mock_add_event_logs(struct mock_event_store *mes)
{
	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK |
			   CXL_GMER_VALID_COMPONENT | CXL_GMER_VALID_COMPONENT_ID_FORMAT,
			   &gen_media.rec.media_hdr.validity_flags);

	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN | CXL_DER_VALID_SUB_CHANNEL |
			   CXL_DER_VALID_COMPONENT | CXL_DER_VALID_COMPONENT_ID_FORMAT,
			   &dram.rec.media_hdr.validity_flags);

	put_unaligned_le16(CXL_MMER_VALID_COMPONENT | CXL_MMER_VALID_COMPONENT_ID_FORMAT,
			   &mem_module.rec.validity_flags);

	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&mem_module);
	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;

	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&mem_module);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	/* Overflow this log */
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;

	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
		      (struct cxl_event_record_raw *)&dram);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
}
558 
559 static int mock_gsl(struct cxl_mbox_cmd *cmd)
560 {
561 	if (cmd->size_out < sizeof(mock_gsl_payload))
562 		return -EINVAL;
563 
564 	memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
565 	cmd->size_out = sizeof(mock_gsl_payload);
566 
567 	return 0;
568 }
569 
570 static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
571 {
572 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
573 	struct cxl_mbox_get_log *gl = cmd->payload_in;
574 	u32 offset = le32_to_cpu(gl->offset);
575 	u32 length = le32_to_cpu(gl->length);
576 	uuid_t uuid = DEFINE_CXL_CEL_UUID;
577 	void *data = &mock_cel;
578 
579 	if (cmd->size_in < sizeof(*gl))
580 		return -EINVAL;
581 	if (length > cxl_mbox->payload_size)
582 		return -EINVAL;
583 	if (offset + length > sizeof(mock_cel))
584 		return -EINVAL;
585 	if (!uuid_equal(&gl->uuid, &uuid))
586 		return -EINVAL;
587 	if (length > cmd->size_out)
588 		return -EINVAL;
589 
590 	memcpy(cmd->payload_out, data + offset, length);
591 
592 	return 0;
593 }
594 
595 static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
596 {
597 	struct cxl_mbox_identify id = {
598 		.fw_revision = { "mock fw v1 " },
599 		.total_capacity =
600 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
601 		.volatile_capacity =
602 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
603 	};
604 
605 	if (cmd->size_out < sizeof(id))
606 		return -EINVAL;
607 
608 	memcpy(cmd->payload_out, &id, sizeof(id));
609 
610 	return 0;
611 }
612 
613 static int mock_id(struct cxl_mbox_cmd *cmd)
614 {
615 	struct cxl_mbox_identify id = {
616 		.fw_revision = { "mock fw v1 " },
617 		.lsa_size = cpu_to_le32(LSA_SIZE),
618 		.partition_align =
619 			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
620 		.total_capacity =
621 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
622 		.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
623 	};
624 
625 	put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
626 
627 	if (cmd->size_out < sizeof(id))
628 		return -EINVAL;
629 
630 	memcpy(cmd->payload_out, &id, sizeof(id));
631 
632 	return 0;
633 }
634 
635 static int mock_partition_info(struct cxl_mbox_cmd *cmd)
636 {
637 	struct cxl_mbox_get_partition_info pi = {
638 		.active_volatile_cap =
639 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
640 		.active_persistent_cap =
641 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
642 	};
643 
644 	if (cmd->size_out < sizeof(pi))
645 		return -EINVAL;
646 
647 	memcpy(cmd->payload_out, &pi, sizeof(pi));
648 
649 	return 0;
650 }
651 
652 void cxl_mockmem_sanitize_work(struct work_struct *work)
653 {
654 	struct cxl_memdev_state *mds =
655 		container_of(work, typeof(*mds), security.poll_dwork.work);
656 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
657 
658 	mutex_lock(&cxl_mbox->mbox_mutex);
659 	if (mds->security.sanitize_node)
660 		sysfs_notify_dirent(mds->security.sanitize_node);
661 	mds->security.sanitize_active = false;
662 	mutex_unlock(&cxl_mbox->mbox_mutex);
663 
664 	dev_dbg(mds->cxlds.dev, "sanitize complete\n");
665 }
666 
667 static int mock_sanitize(struct cxl_mockmem_data *mdata,
668 			 struct cxl_mbox_cmd *cmd)
669 {
670 	struct cxl_memdev_state *mds = mdata->mds;
671 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
672 	int rc = 0;
673 
674 	if (cmd->size_in != 0)
675 		return -EINVAL;
676 
677 	if (cmd->size_out != 0)
678 		return -EINVAL;
679 
680 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
681 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
682 		return -ENXIO;
683 	}
684 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
685 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
686 		return -ENXIO;
687 	}
688 
689 	mutex_lock(&cxl_mbox->mbox_mutex);
690 	if (schedule_delayed_work(&mds->security.poll_dwork,
691 				  msecs_to_jiffies(mdata->sanitize_timeout))) {
692 		mds->security.sanitize_active = true;
693 		dev_dbg(mds->cxlds.dev, "sanitize issued\n");
694 	} else
695 		rc = -EBUSY;
696 	mutex_unlock(&cxl_mbox->mbox_mutex);
697 
698 	return rc;
699 }
700 
701 static int mock_secure_erase(struct cxl_mockmem_data *mdata,
702 			     struct cxl_mbox_cmd *cmd)
703 {
704 	if (cmd->size_in != 0)
705 		return -EINVAL;
706 
707 	if (cmd->size_out != 0)
708 		return -EINVAL;
709 
710 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
711 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
712 		return -ENXIO;
713 	}
714 
715 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
716 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
717 		return -ENXIO;
718 	}
719 
720 	return 0;
721 }
722 
723 static int mock_get_security_state(struct cxl_mockmem_data *mdata,
724 				   struct cxl_mbox_cmd *cmd)
725 {
726 	if (cmd->size_in)
727 		return -EINVAL;
728 
729 	if (cmd->size_out != sizeof(u32))
730 		return -EINVAL;
731 
732 	memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
733 
734 	return 0;
735 }
736 
737 static void master_plimit_check(struct cxl_mockmem_data *mdata)
738 {
739 	if (mdata->master_limit == PASS_TRY_LIMIT)
740 		return;
741 	mdata->master_limit++;
742 	if (mdata->master_limit == PASS_TRY_LIMIT)
743 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
744 }
745 
746 static void user_plimit_check(struct cxl_mockmem_data *mdata)
747 {
748 	if (mdata->user_limit == PASS_TRY_LIMIT)
749 		return;
750 	mdata->user_limit++;
751 	if (mdata->user_limit == PASS_TRY_LIMIT)
752 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
753 }
754 
/*
 * Simulate Set Passphrase for the master or user passphrase. Rejects a
 * frozen device, an exhausted attempt limit, or a wrong old passphrase
 * (which also counts toward the limit); on success stores the new
 * passphrase and sets the corresponding PASS_SET state bit.
 */
static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
			       struct cxl_mbox_cmd *cmd)
{
	struct cxl_set_pass *set_pass;

	if (cmd->size_in != sizeof(*set_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	set_pass = cmd->payload_in;
	switch (set_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/*
		 * CXL spec rev3.0 8.2.9.8.6.2, The master passphrase shall only be set in
		 * the security disabled state when the user passphrase is not set.
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
	}
	return -EINVAL;
}
814 
/*
 * Simulate Disable Passphrase for the master or user passphrase. Rejects
 * a frozen device, an exhausted attempt limit, a passphrase that is not
 * currently set, or a mismatch (which counts toward the limit). On
 * success zeroes the stored passphrase, resets the attempt counter, and
 * clears the PASS_SET (and, for user, LOCKED) state bits.
 */
static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
				   struct cxl_mbox_cmd *cmd)
{
	struct cxl_disable_pass *dis_pass;

	if (cmd->size_in != sizeof(*dis_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	dis_pass = cmd->payload_in;
	switch (dis_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}

		mdata->master_limit = 0;
		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}

		/* Disabling the user passphrase also unlocks the device */
		mdata->user_limit = 0;
		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
					   CXL_PMEM_SEC_STATE_LOCKED);
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
		return -EINVAL;
	}

	/* Not reachable: every switch case returns above */
	return 0;
}
885 
886 static int mock_freeze_security(struct cxl_mockmem_data *mdata,
887 				struct cxl_mbox_cmd *cmd)
888 {
889 	if (cmd->size_in != 0)
890 		return -EINVAL;
891 
892 	if (cmd->size_out != 0)
893 		return -EINVAL;
894 
895 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
896 		return 0;
897 
898 	mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
899 	return 0;
900 }
901 
902 static int mock_unlock_security(struct cxl_mockmem_data *mdata,
903 				struct cxl_mbox_cmd *cmd)
904 {
905 	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
906 		return -EINVAL;
907 
908 	if (cmd->size_out != 0)
909 		return -EINVAL;
910 
911 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
912 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
913 		return -ENXIO;
914 	}
915 
916 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
917 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
918 		return -ENXIO;
919 	}
920 
921 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
922 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
923 		return -ENXIO;
924 	}
925 
926 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
927 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
928 		return -ENXIO;
929 	}
930 
931 	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
932 		if (++mdata->user_limit == PASS_TRY_LIMIT)
933 			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
934 		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
935 		return -ENXIO;
936 	}
937 
938 	mdata->user_limit = 0;
939 	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
940 	return 0;
941 }
942 
/*
 * Simulate Passphrase Secure Erase. Rejects a frozen device or an
 * exhausted attempt limit for the passphrase type in use; a wrong
 * passphrase counts toward the limit. On success the relevant passphrase
 * state is cleared (the data "erase" itself is a no-op in this mock).
 */
static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
					struct cxl_mbox_cmd *cmd)
{
	struct cxl_pass_erase *erase;

	if (cmd->size_in != sizeof(*erase))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	erase = cmd->payload_in;
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_USER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	switch (erase->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a master passphrase is passed in while the master
		 * passphrase is not set and user passphrase is not set. The
		 * code will take the assumption that it will behave the same
		 * as a CXL secure erase command without passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
			if (memcmp(mdata->master_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				master_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			/* Master erase also wipes user passphrase state and unlocks */
			mdata->master_limit = 0;
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
		} else {
			/*
			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
			 * When master passphrase is disabled, the device shall
			 * return Invalid Input for the Passphrase Secure Erase
			 * command with master passphrase.
			 */
			return -EINVAL;
		}
		/* Scramble encryption keys so that data is effectively erased */
		break;
	case CXL_PMEM_SEC_PASS_USER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a user passphrase is passed in while the user
		 * passphrase is not set. The code will take the assumption that
		 * it will behave the same as a CXL secure erase command without
		 * passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			if (memcmp(mdata->user_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				user_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		}

		/*
		 * CXL rev3 Table 8-118
		 * If user passphrase is not set or supported by device, current
		 * passphrase value is ignored. Will make the assumption that
		 * the operation will proceed as secure erase w/o passphrase
		 * since spec is not explicit.
		 */

		/* Scramble encryption keys so that data is effectively erased */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1040 
1041 static int mock_get_lsa(struct cxl_mockmem_data *mdata,
1042 			struct cxl_mbox_cmd *cmd)
1043 {
1044 	struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
1045 	void *lsa = mdata->lsa;
1046 	u32 offset, length;
1047 
1048 	if (sizeof(*get_lsa) > cmd->size_in)
1049 		return -EINVAL;
1050 	offset = le32_to_cpu(get_lsa->offset);
1051 	length = le32_to_cpu(get_lsa->length);
1052 	if (offset + length > LSA_SIZE)
1053 		return -EINVAL;
1054 	if (length > cmd->size_out)
1055 		return -EINVAL;
1056 
1057 	memcpy(cmd->payload_out, lsa + offset, length);
1058 	return 0;
1059 }
1060 
1061 static int mock_set_lsa(struct cxl_mockmem_data *mdata,
1062 			struct cxl_mbox_cmd *cmd)
1063 {
1064 	struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
1065 	void *lsa = mdata->lsa;
1066 	u32 offset, length;
1067 
1068 	if (sizeof(*set_lsa) > cmd->size_in)
1069 		return -EINVAL;
1070 	offset = le32_to_cpu(set_lsa->offset);
1071 	length = cmd->size_in - sizeof(*set_lsa);
1072 	if (offset + length > LSA_SIZE)
1073 		return -EINVAL;
1074 
1075 	memcpy(lsa + offset, &set_lsa->data[0], length);
1076 	return 0;
1077 }
1078 
1079 static int mock_health_info(struct cxl_mbox_cmd *cmd)
1080 {
1081 	struct cxl_mbox_health_info health_info = {
1082 		/* set flags for maint needed, perf degraded, hw replacement */
1083 		.health_status = 0x7,
1084 		/* set media status to "All Data Lost" */
1085 		.media_status = 0x3,
1086 		/*
1087 		 * set ext_status flags for:
1088 		 *  ext_life_used: normal,
1089 		 *  ext_temperature: critical,
1090 		 *  ext_corrected_volatile: warning,
1091 		 *  ext_corrected_persistent: normal,
1092 		 */
1093 		.ext_status = 0x18,
1094 		.life_used = 15,
1095 		.temperature = cpu_to_le16(25),
1096 		.dirty_shutdowns = cpu_to_le32(10),
1097 		.volatile_errors = cpu_to_le32(20),
1098 		.pmem_errors = cpu_to_le32(30),
1099 	};
1100 
1101 	if (cmd->size_out < sizeof(health_info))
1102 		return -EINVAL;
1103 
1104 	memcpy(cmd->payload_out, &health_info, sizeof(health_info));
1105 	return 0;
1106 }
1107 
/* Global registry of injected poison records; a NULL cxlds marks a free slot */
static struct mock_poison {
	struct cxl_dev_state *cxlds;	/* owning device, NULL when slot unused */
	u64 dpa;			/* poisoned device physical address */
} mock_poison_list[MOCK_INJECT_TEST_MAX];
1112 
1113 static struct cxl_mbox_poison_out *
1114 cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
1115 {
1116 	struct cxl_mbox_poison_out *po;
1117 	int nr_records = 0;
1118 	u64 dpa;
1119 
1120 	po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
1121 	if (!po)
1122 		return NULL;
1123 
1124 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1125 		if (mock_poison_list[i].cxlds != cxlds)
1126 			continue;
1127 		if (mock_poison_list[i].dpa < offset ||
1128 		    mock_poison_list[i].dpa > offset + length - 1)
1129 			continue;
1130 
1131 		dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
1132 		po->record[nr_records].address = cpu_to_le64(dpa);
1133 		po->record[nr_records].length = cpu_to_le32(1);
1134 		nr_records++;
1135 		if (nr_records == poison_inject_dev_max)
1136 			break;
1137 	}
1138 
1139 	/* Always return count, even when zero */
1140 	po->count = cpu_to_le16(nr_records);
1141 
1142 	return po;
1143 }
1144 
1145 static int mock_get_poison(struct cxl_dev_state *cxlds,
1146 			   struct cxl_mbox_cmd *cmd)
1147 {
1148 	struct cxl_mbox_poison_in *pi = cmd->payload_in;
1149 	struct cxl_mbox_poison_out *po;
1150 	u64 offset = le64_to_cpu(pi->offset);
1151 	u64 length = le64_to_cpu(pi->length);
1152 	int nr_records;
1153 
1154 	po = cxl_get_injected_po(cxlds, offset, length);
1155 	if (!po)
1156 		return -ENOMEM;
1157 	nr_records = le16_to_cpu(po->count);
1158 	memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
1159 	cmd->size_out = struct_size(po, record, nr_records);
1160 	kfree(po);
1161 
1162 	return 0;
1163 }
1164 
1165 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
1166 {
1167 	int count = 0;
1168 
1169 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1170 		if (mock_poison_list[i].cxlds == cxlds)
1171 			count++;
1172 	}
1173 	return (count >= poison_inject_dev_max);
1174 }
1175 
1176 static int mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
1177 {
1178 	/* Return EBUSY to match the CXL driver handling */
1179 	if (mock_poison_dev_max_injected(cxlds)) {
1180 		dev_dbg(cxlds->dev,
1181 			"Device poison injection limit has been reached: %d\n",
1182 			poison_inject_dev_max);
1183 		return -EBUSY;
1184 	}
1185 
1186 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1187 		if (!mock_poison_list[i].cxlds) {
1188 			mock_poison_list[i].cxlds = cxlds;
1189 			mock_poison_list[i].dpa = dpa;
1190 			return 0;
1191 		}
1192 	}
1193 	dev_dbg(cxlds->dev,
1194 		"Mock test poison injection limit has been reached: %d\n",
1195 		MOCK_INJECT_TEST_MAX);
1196 
1197 	return -ENXIO;
1198 }
1199 
1200 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
1201 {
1202 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1203 		if (mock_poison_list[i].cxlds == cxlds &&
1204 		    mock_poison_list[i].dpa == dpa)
1205 			return true;
1206 	}
1207 	return false;
1208 }
1209 
1210 static int mock_inject_poison(struct cxl_dev_state *cxlds,
1211 			      struct cxl_mbox_cmd *cmd)
1212 {
1213 	struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1214 	u64 dpa = le64_to_cpu(pi->address);
1215 
1216 	if (mock_poison_found(cxlds, dpa)) {
1217 		/* Not an error to inject poison if already poisoned */
1218 		dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1219 		return 0;
1220 	}
1221 
1222 	return mock_poison_add(cxlds, dpa);
1223 }
1224 
1225 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
1226 {
1227 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1228 		if (mock_poison_list[i].cxlds == cxlds &&
1229 		    mock_poison_list[i].dpa == dpa) {
1230 			mock_poison_list[i].cxlds = NULL;
1231 			return true;
1232 		}
1233 	}
1234 	return false;
1235 }
1236 
1237 static int mock_clear_poison(struct cxl_dev_state *cxlds,
1238 			     struct cxl_mbox_cmd *cmd)
1239 {
1240 	struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1241 	u64 dpa = le64_to_cpu(pi->address);
1242 
1243 	/*
1244 	 * A real CXL device will write pi->write_data to the address
1245 	 * being cleared. In this mock, just delete this address from
1246 	 * the mock poison list.
1247 	 */
1248 	if (!mock_poison_del(cxlds, dpa))
1249 		dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1250 
1251 	return 0;
1252 }
1253 
1254 static bool mock_poison_list_empty(void)
1255 {
1256 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1257 		if (mock_poison_list[i].cxlds)
1258 			return false;
1259 	}
1260 	return true;
1261 }
1262 
/* sysfs: report the current per-device poison injection limit */
static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
{
	return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
}
1267 
1268 static ssize_t poison_inject_max_store(struct device_driver *drv,
1269 				       const char *buf, size_t len)
1270 {
1271 	int val;
1272 
1273 	if (kstrtoint(buf, 0, &val) < 0)
1274 		return -EINVAL;
1275 
1276 	if (!mock_poison_list_empty())
1277 		return -EBUSY;
1278 
1279 	if (val <= MOCK_INJECT_TEST_MAX)
1280 		poison_inject_dev_max = val;
1281 	else
1282 		return -EINVAL;
1283 
1284 	return len;
1285 }
1286 
static DRIVER_ATTR_RW(poison_inject_max);

/* Driver-level sysfs attributes shared by all mock memdev instances */
static struct attribute *cxl_mock_mem_core_attrs[] = {
	&driver_attr_poison_inject_max.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem_core);
1294 
/*
 * Emulate Get FW Info: report FW_SLOTS slots with fixed revision strings.
 * slot_info packs the active slot in bits [2:0] and the staged slot in
 * bits [5:3].
 */
static int mock_fw_info(struct cxl_mockmem_data *mdata,
			struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_get_fw_info fw_info = {
		.num_slots = FW_SLOTS,
		.slot_info = (mdata->fw_slot & 0x7) |
			     ((mdata->fw_staged & 0x7) << 3),
		.activation_cap = 0,
	};

	strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
	strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
	strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
	strcpy(fw_info.slot_4_revision, "");

	if (cmd->size_out < sizeof(fw_info))
		return -EINVAL;

	memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
	return 0;
}
1316 
1317 static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
1318 			    struct cxl_mbox_cmd *cmd)
1319 {
1320 	struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
1321 	void *fw = mdata->fw;
1322 	size_t offset, length;
1323 
1324 	offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
1325 	length = cmd->size_in - sizeof(*transfer);
1326 	if (offset + length > FW_SIZE)
1327 		return -EINVAL;
1328 
1329 	switch (transfer->action) {
1330 	case CXL_FW_TRANSFER_ACTION_FULL:
1331 		if (offset != 0)
1332 			return -EINVAL;
1333 		fallthrough;
1334 	case CXL_FW_TRANSFER_ACTION_END:
1335 		if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
1336 			return -EINVAL;
1337 		mdata->fw_size = offset + length;
1338 		break;
1339 	case CXL_FW_TRANSFER_ACTION_INITIATE:
1340 	case CXL_FW_TRANSFER_ACTION_CONTINUE:
1341 		break;
1342 	case CXL_FW_TRANSFER_ACTION_ABORT:
1343 		return 0;
1344 	default:
1345 		return -EINVAL;
1346 	}
1347 
1348 	memcpy(fw + offset, transfer->data, length);
1349 	usleep_range(1500, 2000);
1350 	return 0;
1351 }
1352 
1353 static int mock_activate_fw(struct cxl_mockmem_data *mdata,
1354 			    struct cxl_mbox_cmd *cmd)
1355 {
1356 	struct cxl_mbox_activate_fw *activate = cmd->payload_in;
1357 
1358 	if (activate->slot == 0 || activate->slot > FW_SLOTS)
1359 		return -EINVAL;
1360 
1361 	switch (activate->action) {
1362 	case CXL_FW_ACTIVATE_ONLINE:
1363 		mdata->fw_slot = activate->slot;
1364 		mdata->fw_staged = 0;
1365 		return 0;
1366 	case CXL_FW_ACTIVATE_OFFLINE:
1367 		mdata->fw_staged = activate->slot;
1368 		return 0;
1369 	}
1370 
1371 	return -EINVAL;
1372 }
1373 
/* All-ones UUID reserved for the mock vendor-specific test feature */
#define CXL_VENDOR_FEATURE_TEST							\
	UUID_INIT(0xffffffff, 0xffff, 0xffff, 0xff, 0xff, 0xff, 0xff, 0xff,	\
		  0xff, 0xff, 0xff)

/* Populate one supported-features table entry for the vendor test feature */
static void fill_feature_vendor_test(struct cxl_feat_entry *feat)
{
	feat->uuid = CXL_VENDOR_FEATURE_TEST;
	feat->id = 0;
	/* 4-byte get/set payloads — the feature carries a single u32 */
	feat->get_feat_size = cpu_to_le16(0x4);
	feat->set_feat_size = cpu_to_le16(0x4);
	feat->flags = cpu_to_le32(CXL_FEATURE_F_CHANGEABLE |
				  CXL_FEATURE_F_DEFAULT_SEL |
				  CXL_FEATURE_F_SAVED_SEL);
	feat->get_feat_ver = 1;
	feat->set_feat_ver = 1;
	feat->effects = cpu_to_le16(CXL_CMD_CONFIG_CHANGE_COLD_RESET |
				    CXL_CMD_EFFECTS_VALID);
}
1392 
1393 #define MAX_CXL_TEST_FEATS	1
1394 
1395 static int mock_get_test_feature(struct cxl_mockmem_data *mdata,
1396 				 struct cxl_mbox_cmd *cmd)
1397 {
1398 	struct vendor_test_feat *output = cmd->payload_out;
1399 	struct cxl_mbox_get_feat_in *input = cmd->payload_in;
1400 	u16 offset = le16_to_cpu(input->offset);
1401 	u16 count = le16_to_cpu(input->count);
1402 	u8 *ptr;
1403 
1404 	if (offset > sizeof(*output)) {
1405 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1406 		return -EINVAL;
1407 	}
1408 
1409 	if (offset + count > sizeof(*output)) {
1410 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1411 		return -EINVAL;
1412 	}
1413 
1414 	ptr = (u8 *)&mdata->test_feat + offset;
1415 	memcpy((u8 *)output + offset, ptr, count);
1416 
1417 	return 0;
1418 }
1419 
1420 static int mock_get_feature(struct cxl_mockmem_data *mdata,
1421 			    struct cxl_mbox_cmd *cmd)
1422 {
1423 	struct cxl_mbox_get_feat_in *input = cmd->payload_in;
1424 
1425 	if (uuid_equal(&input->uuid, &CXL_VENDOR_FEATURE_TEST))
1426 		return mock_get_test_feature(mdata, cmd);
1427 
1428 	cmd->return_code = CXL_MBOX_CMD_RC_UNSUPPORTED;
1429 
1430 	return -EOPNOTSUPP;
1431 }
1432 
1433 static int mock_set_test_feature(struct cxl_mockmem_data *mdata,
1434 				 struct cxl_mbox_cmd *cmd)
1435 {
1436 	struct cxl_mbox_set_feat_in *input = cmd->payload_in;
1437 	struct vendor_test_feat *test =
1438 		(struct vendor_test_feat *)input->feat_data;
1439 	u32 action;
1440 
1441 	action = FIELD_GET(CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK,
1442 			   le32_to_cpu(input->hdr.flags));
1443 	/*
1444 	 * While it is spec compliant to support other set actions, it is not
1445 	 * necessary to add the complication in the emulation currently. Reject
1446 	 * anything besides full xfer.
1447 	 */
1448 	if (action != CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER) {
1449 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1450 		return -EINVAL;
1451 	}
1452 
1453 	/* Offset should be reserved when doing full transfer */
1454 	if (input->hdr.offset) {
1455 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1456 		return -EINVAL;
1457 	}
1458 
1459 	memcpy(&mdata->test_feat.data, &test->data, sizeof(u32));
1460 
1461 	return 0;
1462 }
1463 
1464 static int mock_set_feature(struct cxl_mockmem_data *mdata,
1465 			    struct cxl_mbox_cmd *cmd)
1466 {
1467 	struct cxl_mbox_set_feat_in *input = cmd->payload_in;
1468 
1469 	if (uuid_equal(&input->hdr.uuid, &CXL_VENDOR_FEATURE_TEST))
1470 		return mock_set_test_feature(mdata, cmd);
1471 
1472 	cmd->return_code = CXL_MBOX_CMD_RC_UNSUPPORTED;
1473 
1474 	return -EOPNOTSUPP;
1475 }
1476 
1477 static int mock_get_supported_features(struct cxl_mockmem_data *mdata,
1478 				       struct cxl_mbox_cmd *cmd)
1479 {
1480 	struct cxl_mbox_get_sup_feats_in *in = cmd->payload_in;
1481 	struct cxl_mbox_get_sup_feats_out *out = cmd->payload_out;
1482 	struct cxl_feat_entry *feat;
1483 	u16 start_idx, count;
1484 
1485 	if (cmd->size_out < sizeof(*out)) {
1486 		cmd->return_code = CXL_MBOX_CMD_RC_PAYLOADLEN;
1487 		return -EINVAL;
1488 	}
1489 
1490 	/*
1491 	 * Current emulation only supports 1 feature
1492 	 */
1493 	start_idx = le16_to_cpu(in->start_idx);
1494 	if (start_idx != 0) {
1495 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1496 		return -EINVAL;
1497 	}
1498 
1499 	count = le16_to_cpu(in->count);
1500 	if (count < struct_size(out, ents, 0)) {
1501 		cmd->return_code = CXL_MBOX_CMD_RC_PAYLOADLEN;
1502 		return -EINVAL;
1503 	}
1504 
1505 	out->supported_feats = cpu_to_le16(MAX_CXL_TEST_FEATS);
1506 	cmd->return_code = 0;
1507 	if (count < struct_size(out, ents, MAX_CXL_TEST_FEATS)) {
1508 		out->num_entries = 0;
1509 		return 0;
1510 	}
1511 
1512 	out->num_entries = cpu_to_le16(MAX_CXL_TEST_FEATS);
1513 	feat = out->ents;
1514 	fill_feature_vendor_test(feat);
1515 
1516 	return 0;
1517 }
1518 
/*
 * Central mailbox dispatcher for the mock memdev: route each CXL mailbox
 * opcode to its emulation handler. Opcodes with no handler fall through
 * the switch and return the initial -EIO.
 */
static int cxl_mock_mbox_send(struct cxl_mailbox *cxl_mbox,
			      struct cxl_mbox_cmd *cmd)
{
	struct device *dev = cxl_mbox->host;
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	struct cxl_memdev_state *mds = mdata->mds;
	struct cxl_dev_state *cxlds = &mds->cxlds;
	int rc = -EIO;

	switch (cmd->opcode) {
	case CXL_MBOX_OP_SET_TIMESTAMP:
		rc = mock_set_timestamp(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
		rc = mock_gsl(cmd);
		break;
	case CXL_MBOX_OP_GET_LOG:
		rc = mock_get_log(mds, cmd);
		break;
	case CXL_MBOX_OP_IDENTIFY:
		/* RCD devices identify differently from full memdevs */
		if (cxlds->rcd)
			rc = mock_rcd_id(cmd);
		else
			rc = mock_id(cmd);
		break;
	case CXL_MBOX_OP_GET_LSA:
		rc = mock_get_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_PARTITION_INFO:
		rc = mock_partition_info(cmd);
		break;
	case CXL_MBOX_OP_GET_EVENT_RECORD:
		rc = mock_get_event(dev, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
		rc = mock_clear_event(dev, cmd);
		break;
	case CXL_MBOX_OP_SET_LSA:
		rc = mock_set_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_HEALTH_INFO:
		rc = mock_health_info(cmd);
		break;
	case CXL_MBOX_OP_SANITIZE:
		rc = mock_sanitize(mdata, cmd);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		rc = mock_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		rc = mock_get_security_state(mdata, cmd);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		rc = mock_set_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		rc = mock_disable_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		rc = mock_freeze_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_UNLOCK:
		rc = mock_unlock_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		rc = mock_passphrase_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_POISON:
		rc = mock_get_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		rc = mock_inject_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		rc = mock_clear_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_FW_INFO:
		rc = mock_fw_info(mdata, cmd);
		break;
	case CXL_MBOX_OP_TRANSFER_FW:
		rc = mock_transfer_fw(mdata, cmd);
		break;
	case CXL_MBOX_OP_ACTIVATE_FW:
		rc = mock_activate_fw(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
		rc = mock_get_supported_features(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_FEATURE:
		rc = mock_get_feature(mdata, cmd);
		break;
	case CXL_MBOX_OP_SET_FEATURE:
		rc = mock_set_feature(mdata, cmd);
		break;
	default:
		/* unhandled opcode: leave rc at -EIO */
		break;
	}

	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
		cmd->size_in, cmd->size_out, rc);

	return rc;
}
1622 
/* devm action callback: free the vmalloc()'d label storage area */
static void label_area_release(void *lsa)
{
	vfree(lsa);
}
1627 
/* devm action callback: free the vmalloc()'d firmware staging buffer */
static void fw_buf_release(void *buf)
{
	vfree(buf);
}
1632 
1633 static bool is_rcd(struct platform_device *pdev)
1634 {
1635 	const struct platform_device_id *id = platform_get_device_id(pdev);
1636 
1637 	return !!id->driver_data;
1638 }
1639 
/* sysfs: any write fires the mock event trigger for this device */
static ssize_t event_trigger_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	cxl_mock_event_trigger(dev);
	return count;
}
static DEVICE_ATTR_WO(event_trigger);
1648 
1649 static int cxl_mock_mailbox_create(struct cxl_dev_state *cxlds)
1650 {
1651 	int rc;
1652 
1653 	rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev);
1654 	if (rc)
1655 		return rc;
1656 
1657 	return 0;
1658 }
1659 
/* Seed the vendor test feature with a recognizable default payload */
static void cxl_mock_test_feat_init(struct cxl_mockmem_data *mdata)
{
	mdata->test_feat.data = cpu_to_le32(0xdeadbeef);
}
1664 
/*
 * Probe the mock memdev: allocate backing stores for the label area and
 * firmware image, wire up the mock mailbox, then run the standard CXL
 * memdev enumeration sequence against the emulated command set. All
 * allocations are devm-managed, so error paths simply return.
 */
static int cxl_mock_mem_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cxl_memdev *cxlmd;
	struct cxl_memdev_state *mds;
	struct cxl_dev_state *cxlds;
	struct cxl_mockmem_data *mdata;
	struct cxl_mailbox *cxl_mbox;
	int rc;

	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;
	dev_set_drvdata(dev, mdata);

	/* Backing stores for the LSA and FW emulations */
	mdata->lsa = vmalloc(LSA_SIZE);
	if (!mdata->lsa)
		return -ENOMEM;
	mdata->fw = vmalloc(FW_SIZE);
	if (!mdata->fw)
		return -ENOMEM;
	/* Slot 2 is the "active" firmware slot by default */
	mdata->fw_slot = 2;

	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
	if (rc)
		return rc;

	mds = cxl_memdev_state_create(dev);
	if (IS_ERR(mds))
		return PTR_ERR(mds);

	cxlds = &mds->cxlds;
	rc = cxl_mock_mailbox_create(cxlds);
	if (rc)
		return rc;

	/* Route all mailbox traffic through the mock dispatcher */
	cxl_mbox = &mds->cxlds.cxl_mbox;
	mdata->mds = mds;
	cxl_mbox->mbox_send = cxl_mock_mbox_send;
	cxl_mbox->payload_size = SZ_4K;
	mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);

	cxlds->serial = pdev->id;
	if (is_rcd(pdev))
		cxlds->rcd = true;

	rc = cxl_enumerate_cmds(mds);
	if (rc)
		return rc;

	rc = cxl_poison_state_init(mds);
	if (rc)
		return rc;

	rc = cxl_set_timestamp(mds);
	if (rc)
		return rc;

	cxlds->media_ready = true;
	rc = cxl_dev_state_identify(mds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(mds);
	if (rc)
		return rc;

	/* Features are optional; a failure here is not fatal */
	rc = devm_cxl_setup_features(cxlds);
	if (rc)
		dev_dbg(dev, "No CXL Features discovered\n");

	cxl_mock_add_event_logs(&mdata->mes);

	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
	if (rc)
		return rc;

	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
	if (rc)
		return rc;

	/* fwctl is optional; a failure here is not fatal */
	rc = devm_cxl_setup_fwctl(cxlmd);
	if (rc)
		dev_dbg(dev, "No CXL FWCTL setup\n");

	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
	cxl_mock_test_feat_init(mdata);

	return 0;
}
1764 
1765 static ssize_t security_lock_show(struct device *dev,
1766 				  struct device_attribute *attr, char *buf)
1767 {
1768 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1769 
1770 	return sysfs_emit(buf, "%u\n",
1771 			  !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1772 }
1773 
1774 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1775 				   const char *buf, size_t count)
1776 {
1777 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1778 	u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1779 		   CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1780 	int val;
1781 
1782 	if (kstrtoint(buf, 0, &val) < 0)
1783 		return -EINVAL;
1784 
1785 	if (val == 1) {
1786 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1787 			return -ENXIO;
1788 		mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1789 		mdata->security_state &= ~mask;
1790 	} else {
1791 		return -EINVAL;
1792 	}
1793 	return count;
1794 }
1795 
1796 static DEVICE_ATTR_RW(security_lock);
1797 
1798 static ssize_t fw_buf_checksum_show(struct device *dev,
1799 				    struct device_attribute *attr, char *buf)
1800 {
1801 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1802 	u8 hash[SHA256_DIGEST_SIZE];
1803 	unsigned char *hstr, *hptr;
1804 	struct sha256_state sctx;
1805 	ssize_t written = 0;
1806 	int i;
1807 
1808 	sha256_init(&sctx);
1809 	sha256_update(&sctx, mdata->fw, mdata->fw_size);
1810 	sha256_final(&sctx, hash);
1811 
1812 	hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
1813 	if (!hstr)
1814 		return -ENOMEM;
1815 
1816 	hptr = hstr;
1817 	for (i = 0; i < SHA256_DIGEST_SIZE; i++)
1818 		hptr += sprintf(hptr, "%02x", hash[i]);
1819 
1820 	written = sysfs_emit(buf, "%s\n", hstr);
1821 
1822 	kfree(hstr);
1823 	return written;
1824 }
1825 
1826 static DEVICE_ATTR_RO(fw_buf_checksum);
1827 
1828 static ssize_t sanitize_timeout_show(struct device *dev,
1829 				  struct device_attribute *attr, char *buf)
1830 {
1831 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1832 
1833 	return sysfs_emit(buf, "%lu\n", mdata->sanitize_timeout);
1834 }
1835 
1836 static ssize_t sanitize_timeout_store(struct device *dev,
1837 				      struct device_attribute *attr,
1838 				      const char *buf, size_t count)
1839 {
1840 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1841 	unsigned long val;
1842 	int rc;
1843 
1844 	rc = kstrtoul(buf, 0, &val);
1845 	if (rc)
1846 		return rc;
1847 
1848 	mdata->sanitize_timeout = val;
1849 
1850 	return count;
1851 }
1852 
1853 static DEVICE_ATTR_RW(sanitize_timeout);
1854 
/* Per-device sysfs attributes exposed by each mock memdev instance */
static struct attribute *cxl_mock_mem_attrs[] = {
	&dev_attr_security_lock.attr,
	&dev_attr_event_trigger.attr,
	&dev_attr_fw_buf_checksum.attr,
	&dev_attr_sanitize_timeout.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem);
1863 
/* driver_data flags the restricted (RCD) variant; see is_rcd() */
static const struct platform_device_id cxl_mock_mem_ids[] = {
	{ .name = "cxl_mem", 0 },
	{ .name = "cxl_rcd", 1 },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1870 
/* Platform driver glue binding the mock memdev ids to the probe above */
static struct platform_driver cxl_mock_mem_driver = {
	.probe = cxl_mock_mem_probe,
	.id_table = cxl_mock_mem_ids,
	.driver = {
		.name = KBUILD_MODNAME,
		.dev_groups = cxl_mock_mem_groups,
		.groups = cxl_mock_mem_core_groups,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_platform_driver(cxl_mock_mem_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("CXL");
1885