xref: /linux/tools/testing/cxl/test/mem.c (revision 24168c5e6dfbdd5b414f048f47f75d64533296ca)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/module.h>
7 #include <linux/delay.h>
8 #include <linux/sizes.h>
9 #include <linux/bits.h>
10 #include <asm/unaligned.h>
11 #include <crypto/sha2.h>
12 #include <cxlmem.h>
13 
14 #include "trace.h"
15 
16 #define LSA_SIZE SZ_128K
17 #define FW_SIZE SZ_64M
18 #define FW_SLOTS 3
19 #define DEV_SIZE SZ_2G
20 #define EFFECT(x) (1U << x)
21 
22 #define MOCK_INJECT_DEV_MAX 8
23 #define MOCK_INJECT_TEST_MAX 128
24 
/* Max poison records one mock device reports per Get Poison List reply */
static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
26 
/*
 * Bit positions for the Command Effects Log "effect" field; used via the
 * EFFECT() macro when building mock_cel entries below.
 */
enum cxl_command_effects {
	CONF_CHANGE_COLD_RESET = 0,
	CONF_CHANGE_IMMEDIATE,
	DATA_CHANGE_IMMEDIATE,
	POLICY_CHANGE_IMMEDIATE,
	LOG_CHANGE_IMMEDIATE,
	SECURITY_CHANGE_IMMEDIATE,
	BACKGROUND_OP,
	SECONDARY_MBOX_SUPPORTED,
};
37 
38 #define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
39 
/*
 * Command Effects Log advertised by the mock device: one entry per
 * supported mailbox opcode, with its command-effect bits.
 */
static struct cxl_cel_entry mock_cel[] = {
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
				      EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(BACKGROUND_OP)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(CONF_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SANITIZE),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE) |
				      EFFECT(SECURITY_CHANGE_IMMEDIATE) |
				      EFFECT(BACKGROUND_OP)),
	},
};
99 
/* See CXL 2.0 Table 181 Get Health Info Output Payload */
struct cxl_mbox_health_info {
	u8 health_status;	/* overall device health flags */
	u8 media_status;	/* media state code */
	u8 ext_status;		/* extended status bit fields */
	u8 life_used;		/* percentage of rated life consumed */
	__le16 temperature;
	__le32 dirty_shutdowns;
	__le32 volatile_errors;
	__le32 pmem_errors;
} __packed;
111 
/*
 * Canned Get Supported Logs response: exactly one log entry, the
 * Command Effects Log (CEL) defined by mock_cel above.
 */
static struct {
	struct cxl_mbox_get_supported_logs gsl;
	struct cxl_gsl_entry entry;
} mock_gsl_payload = {
	.gsl = {
		.entries = cpu_to_le16(1),
	},
	.entry = {
		.uuid = DEFINE_CXL_CEL_UUID,
		.size = cpu_to_le32(sizeof(mock_cel)),
	},
};
124 
125 #define PASS_TRY_LIMIT 3
126 
127 #define CXL_TEST_EVENT_CNT_MAX 15
128 
129 /* Set a number of events to return at a time for simulation.  */
130 #define CXL_TEST_EVENT_RET_MAX 4
131 
/*
 * One mock event log: a fixed array of pointers to canned event records.
 * cur_idx is the next record handed out by Get Event Records; clear_idx is
 * the next record expected by Clear Event Records (handles are idx + 1).
 */
struct mock_event_log {
	u16 clear_idx;
	u16 cur_idx;
	u16 nr_events;
	u16 nr_overflow;	/* overflow count reported to the host */
	u16 overflow_reset;	/* value nr_overflow is restored to on log reset */
	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
};
140 
/* Per-device collection of mock logs plus the aggregate event status bits */
struct mock_event_store {
	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
	u32 ev_status;
};
145 
/* Per mock-device driver data, stored in the platform device's drvdata */
struct cxl_mockmem_data {
	void *lsa;		/* Label Storage Area backing buffer */
	void *fw;		/* firmware transfer staging buffer */
	int fw_slot;
	int fw_staged;
	size_t fw_size;
	u32 security_state;	/* CXL_PMEM_SEC_STATE_* bits */
	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
	int user_limit;		/* failed user passphrase attempts */
	int master_limit;	/* failed master passphrase attempts */
	struct mock_event_store mes;
	struct cxl_memdev_state *mds;
	u8 event_buf[SZ_4K];
	u64 timestamp;
	unsigned long sanitize_timeout;	/* ms until mock sanitize completes */
};
163 
164 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
165 {
166 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
167 
168 	if (log_type >= CXL_EVENT_TYPE_MAX)
169 		return NULL;
170 	return &mdata->mes.mock_logs[log_type];
171 }
172 
/*
 * Return the record at the current read position. No bounds check:
 * callers first verify the log is non-empty via event_log_empty().
 */
static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
{
	return log->events[log->cur_idx];
}
177 
/* Rewind a log so all its events are reported again on the next read */
static void event_reset_log(struct mock_event_log *log)
{
	log->cur_idx = 0;
	log->clear_idx = 0;
	/* Restore the overflow count recorded when events were first added */
	log->nr_overflow = log->overflow_reset;
}
184 
/* Handles are 1-based (a handle of 0 is invalid), hence the +1 */
static u16 event_get_clear_handle(struct mock_event_log *log)
{
	return log->clear_idx + 1;
}
190 
191 /* Handle can never be 0 use 1 based indexing for handle */
192 static __le16 event_get_cur_event_handle(struct mock_event_log *log)
193 {
194 	u16 cur_handle = log->cur_idx + 1;
195 
196 	return cpu_to_le16(cur_handle);
197 }
198 
/* True when every stored event has already been returned to the host */
static bool event_log_empty(struct mock_event_log *log)
{
	return log->cur_idx == log->nr_events;
}
203 
204 static void mes_add_event(struct mock_event_store *mes,
205 			  enum cxl_event_log_type log_type,
206 			  struct cxl_event_record_raw *event)
207 {
208 	struct mock_event_log *log;
209 
210 	if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
211 		return;
212 
213 	log = &mes->mock_logs[log_type];
214 
215 	if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
216 		log->nr_overflow++;
217 		log->overflow_reset = log->nr_overflow;
218 		return;
219 	}
220 
221 	log->events[log->nr_events] = event;
222 	log->nr_events++;
223 }
224 
/*
 * Vary the number of events returned to simulate events occurring while the
 * logs are being read.
 */
static int ret_limit = 0;
230 
231 static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
232 {
233 	struct cxl_get_event_payload *pl;
234 	struct mock_event_log *log;
235 	u16 nr_overflow;
236 	u8 log_type;
237 	int i;
238 
239 	if (cmd->size_in != sizeof(log_type))
240 		return -EINVAL;
241 
242 	ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX;
243 	if (!ret_limit)
244 		ret_limit = 1;
245 
246 	if (cmd->size_out < struct_size(pl, records, ret_limit))
247 		return -EINVAL;
248 
249 	log_type = *((u8 *)cmd->payload_in);
250 	if (log_type >= CXL_EVENT_TYPE_MAX)
251 		return -EINVAL;
252 
253 	memset(cmd->payload_out, 0, struct_size(pl, records, 0));
254 
255 	log = event_find_log(dev, log_type);
256 	if (!log || event_log_empty(log))
257 		return 0;
258 
259 	pl = cmd->payload_out;
260 
261 	for (i = 0; i < ret_limit && !event_log_empty(log); i++) {
262 		memcpy(&pl->records[i], event_get_current(log),
263 		       sizeof(pl->records[i]));
264 		pl->records[i].event.generic.hdr.handle =
265 				event_get_cur_event_handle(log);
266 		log->cur_idx++;
267 	}
268 
269 	cmd->size_out = struct_size(pl, records, i);
270 	pl->record_count = cpu_to_le16(i);
271 	if (!event_log_empty(log))
272 		pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
273 
274 	if (log->nr_overflow) {
275 		u64 ns;
276 
277 		pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
278 		pl->overflow_err_count = cpu_to_le16(nr_overflow);
279 		ns = ktime_get_real_ns();
280 		ns -= 5000000000; /* 5s ago */
281 		pl->first_overflow_timestamp = cpu_to_le64(ns);
282 		ns = ktime_get_real_ns();
283 		ns -= 1000000000; /* 1s ago */
284 		pl->last_overflow_timestamp = cpu_to_le64(ns);
285 	}
286 
287 	return 0;
288 }
289 
/*
 * Clear Event Records handler. Validates that the host clears only events
 * it has already read, and clears them strictly in handle order, before
 * advancing clear_idx and dropping the overflow count.
 */
static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
	struct mock_event_log *log;
	u8 log_type = pl->event_log;
	u16 handle;
	int nr;

	if (log_type >= CXL_EVENT_TYPE_MAX)
		return -EINVAL;

	log = event_find_log(dev, log_type);
	if (!log)
		return 0; /* No mock data in this log */

	/*
	 * This check is technically not invalid per the specification AFAICS.
	 * (The host could 'guess' handles and clear them in order).
	 * However, this is not good behavior for the host so test it.
	 */
	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
		dev_err(dev,
			"Attempting to clear more events than returned!\n");
		return -EINVAL;
	}

	/* Check handle order prior to clearing events */
	for (nr = 0, handle = event_get_clear_handle(log);
	     nr < pl->nr_recs;
	     nr++, handle++) {
		if (handle != le16_to_cpu(pl->handles[nr])) {
			dev_err(dev, "Clearing events out of order\n");
			return -EINVAL;
		}
	}

	/* A successful clear also acknowledges any recorded overflow */
	if (log->nr_overflow)
		log->nr_overflow = 0;

	/* Clear events */
	log->clear_idx += pl->nr_recs;
	return 0;
}
333 
334 static void cxl_mock_event_trigger(struct device *dev)
335 {
336 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
337 	struct mock_event_store *mes = &mdata->mes;
338 	int i;
339 
340 	for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
341 		struct mock_event_log *log;
342 
343 		log = event_find_log(dev, i);
344 		if (log)
345 			event_reset_log(log);
346 	}
347 
348 	cxl_mem_get_event_records(mdata->mds, mes->ev_status);
349 }
350 
/* Vendor-specific (arbitrary UUID) event flagged "maintenance needed" */
struct cxl_event_record_raw maint_needed = {
	.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xa5b6),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
364 
/* Vendor-specific (arbitrary UUID) event flagged "hardware replacement" */
struct cxl_event_record_raw hardware_replace = {
	.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xb6a5),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
378 
/* UUID-prefixed General Media event, laid out as a raw record on the wire */
struct cxl_test_gen_media {
	uuid_t id;
	struct cxl_event_gen_media rec;
} __packed;
383 
/* Canned General Media event; validity_flags filled in at init time */
struct cxl_test_gen_media gen_media = {
	.id = CXL_EVENT_GEN_MEDIA_UUID,
	.rec = {
		.hdr = {
			.length = sizeof(struct cxl_test_gen_media),
			.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0),
		},
		.phys_addr = cpu_to_le64(0x2000),
		.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
		.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
		.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
		/* .validity_flags = <set below> */
		.channel = 1,
		.rank = 30
	},
};
402 
/* UUID-prefixed DRAM event, laid out as a raw record on the wire */
struct cxl_test_dram {
	uuid_t id;
	struct cxl_event_dram rec;
} __packed;
407 
/* Canned DRAM event; validity_flags filled in at init time */
struct cxl_test_dram dram = {
	.id = CXL_EVENT_DRAM_UUID,
	.rec = {
		.hdr = {
			.length = sizeof(struct cxl_test_dram),
			.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0),
		},
		.phys_addr = cpu_to_le64(0x8000),
		.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
		.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
		.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
		/* .validity_flags = <set below> */
		.channel = 1,
		.bank_group = 5,
		.bank = 2,
		.column = {0xDE, 0xAD},
	},
};
428 
/* UUID-prefixed Memory Module event, laid out as a raw record on the wire */
struct cxl_test_mem_module {
	uuid_t id;
	struct cxl_event_mem_module rec;
} __packed;
433 
/* Canned Memory Module event with a fully-populated device health payload */
struct cxl_test_mem_module mem_module = {
	.id = CXL_EVENT_MEM_MODULE_UUID,
	.rec = {
		.hdr = {
			.length = sizeof(struct cxl_test_mem_module),
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0),
		},
		.event_type = CXL_MMER_TEMP_CHANGE,
		.info = {
			.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
			.media_status = CXL_DHI_MS_ALL_DATA_LOST,
			.add_status = (CXL_DHI_AS_CRITICAL << 2) |
				      (CXL_DHI_AS_WARNING << 4) |
				      (CXL_DHI_AS_WARNING << 5),
			.device_temp = { 0xDE, 0xAD},
			.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
		}
	},
};
456 
457 static int mock_set_timestamp(struct cxl_dev_state *cxlds,
458 			      struct cxl_mbox_cmd *cmd)
459 {
460 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
461 	struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
462 
463 	if (cmd->size_in != sizeof(*ts))
464 		return -EINVAL;
465 
466 	if (cmd->size_out != 0)
467 		return -EINVAL;
468 
469 	mdata->timestamp = le64_to_cpu(ts->timestamp);
470 	return 0;
471 }
472 
/*
 * Populate the mock logs with canned events. The order and counts here are
 * load-bearing: the FAIL log is deliberately over-filled past
 * CXL_TEST_EVENT_CNT_MAX to exercise the overflow reporting path.
 */
static void cxl_mock_add_event_logs(struct mock_event_store *mes)
{
	/* validity_flags are unaligned __le16 fields, so set them at runtime */
	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
			   &gen_media.rec.validity_flags);

	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
			   &dram.rec.validity_flags);

	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&mem_module);
	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;

	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&mem_module);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	/* Overflow this log */
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;

	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
		      (struct cxl_event_record_raw *)&dram);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
}
518 
519 static int mock_gsl(struct cxl_mbox_cmd *cmd)
520 {
521 	if (cmd->size_out < sizeof(mock_gsl_payload))
522 		return -EINVAL;
523 
524 	memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
525 	cmd->size_out = sizeof(mock_gsl_payload);
526 
527 	return 0;
528 }
529 
530 static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
531 {
532 	struct cxl_mbox_get_log *gl = cmd->payload_in;
533 	u32 offset = le32_to_cpu(gl->offset);
534 	u32 length = le32_to_cpu(gl->length);
535 	uuid_t uuid = DEFINE_CXL_CEL_UUID;
536 	void *data = &mock_cel;
537 
538 	if (cmd->size_in < sizeof(*gl))
539 		return -EINVAL;
540 	if (length > mds->payload_size)
541 		return -EINVAL;
542 	if (offset + length > sizeof(mock_cel))
543 		return -EINVAL;
544 	if (!uuid_equal(&gl->uuid, &uuid))
545 		return -EINVAL;
546 	if (length > cmd->size_out)
547 		return -EINVAL;
548 
549 	memcpy(cmd->payload_out, data + offset, length);
550 
551 	return 0;
552 }
553 
554 static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
555 {
556 	struct cxl_mbox_identify id = {
557 		.fw_revision = { "mock fw v1 " },
558 		.total_capacity =
559 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
560 		.volatile_capacity =
561 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
562 	};
563 
564 	if (cmd->size_out < sizeof(id))
565 		return -EINVAL;
566 
567 	memcpy(cmd->payload_out, &id, sizeof(id));
568 
569 	return 0;
570 }
571 
572 static int mock_id(struct cxl_mbox_cmd *cmd)
573 {
574 	struct cxl_mbox_identify id = {
575 		.fw_revision = { "mock fw v1 " },
576 		.lsa_size = cpu_to_le32(LSA_SIZE),
577 		.partition_align =
578 			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
579 		.total_capacity =
580 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
581 		.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
582 	};
583 
584 	put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
585 
586 	if (cmd->size_out < sizeof(id))
587 		return -EINVAL;
588 
589 	memcpy(cmd->payload_out, &id, sizeof(id));
590 
591 	return 0;
592 }
593 
594 static int mock_partition_info(struct cxl_mbox_cmd *cmd)
595 {
596 	struct cxl_mbox_get_partition_info pi = {
597 		.active_volatile_cap =
598 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
599 		.active_persistent_cap =
600 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
601 	};
602 
603 	if (cmd->size_out < sizeof(pi))
604 		return -EINVAL;
605 
606 	memcpy(cmd->payload_out, &pi, sizeof(pi));
607 
608 	return 0;
609 }
610 
/*
 * Delayed-work handler simulating sanitize completion: under mbox_mutex,
 * notify the sysfs sanitize node (if registered) and mark the background
 * operation finished.
 */
void cxl_mockmem_sanitize_work(struct work_struct *work)
{
	struct cxl_memdev_state *mds =
		container_of(work, typeof(*mds), security.poll_dwork.work);

	mutex_lock(&mds->mbox_mutex);
	if (mds->security.sanitize_node)
		sysfs_notify_dirent(mds->security.sanitize_node);
	mds->security.sanitize_active = false;
	mutex_unlock(&mds->mbox_mutex);

	dev_dbg(mds->cxlds.dev, "sanitize complete\n");
}
624 
625 static int mock_sanitize(struct cxl_mockmem_data *mdata,
626 			 struct cxl_mbox_cmd *cmd)
627 {
628 	struct cxl_memdev_state *mds = mdata->mds;
629 	int rc = 0;
630 
631 	if (cmd->size_in != 0)
632 		return -EINVAL;
633 
634 	if (cmd->size_out != 0)
635 		return -EINVAL;
636 
637 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
638 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
639 		return -ENXIO;
640 	}
641 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
642 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
643 		return -ENXIO;
644 	}
645 
646 	mutex_lock(&mds->mbox_mutex);
647 	if (schedule_delayed_work(&mds->security.poll_dwork,
648 				  msecs_to_jiffies(mdata->sanitize_timeout))) {
649 		mds->security.sanitize_active = true;
650 		dev_dbg(mds->cxlds.dev, "sanitize issued\n");
651 	} else
652 		rc = -EBUSY;
653 	mutex_unlock(&mds->mbox_mutex);
654 
655 	return rc;
656 }
657 
658 static int mock_secure_erase(struct cxl_mockmem_data *mdata,
659 			     struct cxl_mbox_cmd *cmd)
660 {
661 	if (cmd->size_in != 0)
662 		return -EINVAL;
663 
664 	if (cmd->size_out != 0)
665 		return -EINVAL;
666 
667 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
668 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
669 		return -ENXIO;
670 	}
671 
672 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
673 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
674 		return -ENXIO;
675 	}
676 
677 	return 0;
678 }
679 
680 static int mock_get_security_state(struct cxl_mockmem_data *mdata,
681 				   struct cxl_mbox_cmd *cmd)
682 {
683 	if (cmd->size_in)
684 		return -EINVAL;
685 
686 	if (cmd->size_out != sizeof(u32))
687 		return -EINVAL;
688 
689 	memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
690 
691 	return 0;
692 }
693 
694 static void master_plimit_check(struct cxl_mockmem_data *mdata)
695 {
696 	if (mdata->master_limit == PASS_TRY_LIMIT)
697 		return;
698 	mdata->master_limit++;
699 	if (mdata->master_limit == PASS_TRY_LIMIT)
700 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
701 }
702 
703 static void user_plimit_check(struct cxl_mockmem_data *mdata)
704 {
705 	if (mdata->user_limit == PASS_TRY_LIMIT)
706 		return;
707 	mdata->user_limit++;
708 	if (mdata->user_limit == PASS_TRY_LIMIT)
709 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
710 }
711 
/*
 * Set Passphrase handler: verify the old passphrase, store the new one,
 * and update the corresponding PASS_SET state bit. Failed attempts count
 * toward the per-passphrase retry limit.
 */
static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
			       struct cxl_mbox_cmd *cmd)
{
	struct cxl_set_pass *set_pass;

	if (cmd->size_in != sizeof(*set_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	set_pass = cmd->payload_in;
	switch (set_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/*
		 * CXL spec rev3.0 8.2.9.8.6.2, The master passphrase shall only be set in
		 * the security disabled state when the user passphrase is not set.
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
	}
	return -EINVAL;
}
771 
772 static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
773 				   struct cxl_mbox_cmd *cmd)
774 {
775 	struct cxl_disable_pass *dis_pass;
776 
777 	if (cmd->size_in != sizeof(*dis_pass))
778 		return -EINVAL;
779 
780 	if (cmd->size_out != 0)
781 		return -EINVAL;
782 
783 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
784 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
785 		return -ENXIO;
786 	}
787 
788 	dis_pass = cmd->payload_in;
789 	switch (dis_pass->type) {
790 	case CXL_PMEM_SEC_PASS_MASTER:
791 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
792 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
793 			return -ENXIO;
794 		}
795 
796 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
797 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
798 			return -ENXIO;
799 		}
800 
801 		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
802 			master_plimit_check(mdata);
803 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
804 			return -ENXIO;
805 		}
806 
807 		mdata->master_limit = 0;
808 		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
809 		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
810 		return 0;
811 
812 	case CXL_PMEM_SEC_PASS_USER:
813 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
814 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
815 			return -ENXIO;
816 		}
817 
818 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
819 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
820 			return -ENXIO;
821 		}
822 
823 		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
824 			user_plimit_check(mdata);
825 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
826 			return -ENXIO;
827 		}
828 
829 		mdata->user_limit = 0;
830 		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
831 		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
832 					   CXL_PMEM_SEC_STATE_LOCKED);
833 		return 0;
834 
835 	default:
836 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
837 		return -EINVAL;
838 	}
839 
840 	return 0;
841 }
842 
843 static int mock_freeze_security(struct cxl_mockmem_data *mdata,
844 				struct cxl_mbox_cmd *cmd)
845 {
846 	if (cmd->size_in != 0)
847 		return -EINVAL;
848 
849 	if (cmd->size_out != 0)
850 		return -EINVAL;
851 
852 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
853 		return 0;
854 
855 	mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
856 	return 0;
857 }
858 
/*
 * Unlock handler: verify the user passphrase and clear the LOCKED state.
 * Only valid when a user passphrase is set, the device is locked, and
 * neither the frozen nor the retry-limit state applies.
 */
static int mock_unlock_security(struct cxl_mockmem_data *mdata,
				struct cxl_mbox_cmd *cmd)
{
	/* Input is the bare passphrase, no other payload */
	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	/* Wrong passphrase: count the attempt toward the retry limit */
	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
		if (++mdata->user_limit == PASS_TRY_LIMIT)
			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
		return -ENXIO;
	}

	mdata->user_limit = 0;
	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
	return 0;
}
899 
/*
 * Passphrase Secure Erase handler: verify the supplied master or user
 * passphrase and, on success, clear the user passphrase/lock state as the
 * spec (and its gray areas, noted inline) prescribe.
 */
static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
					struct cxl_mbox_cmd *cmd)
{
	struct cxl_pass_erase *erase;

	if (cmd->size_in != sizeof(*erase))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	erase = cmd->payload_in;
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	/* Reject when the retry limit for the selected passphrase is hit */
	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_USER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	switch (erase->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a master passphrase is passed in while the master
		 * passphrase is not set and user passphrase is not set. The
		 * code will take the assumption that it will behave the same
		 * as a CXL secure erase command without passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
			if (memcmp(mdata->master_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				master_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->master_limit = 0;
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
		} else {
			/*
			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
			 * When master passphrase is disabled, the device shall
			 * return Invalid Input for the Passphrase Secure Erase
			 * command with master passphrase.
			 */
			return -EINVAL;
		}
		/* Scramble encryption keys so that data is effectively erased */
		break;
	case CXL_PMEM_SEC_PASS_USER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a user passphrase is passed in while the user
		 * passphrase is not set. The code will take the assumption that
		 * it will behave the same as a CXL secure erase command without
		 * passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			if (memcmp(mdata->user_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				user_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		}

		/*
		 * CXL rev3 Table 8-118
		 * If user passphrase is not set or supported by device, current
		 * passphrase value is ignored. Will make the assumption that
		 * the operation will proceed as secure erase w/o passphrase
		 * since spec is not explicit.
		 */

		/* Scramble encryption keys so that data is effectively erased */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
997 
998 static int mock_get_lsa(struct cxl_mockmem_data *mdata,
999 			struct cxl_mbox_cmd *cmd)
1000 {
1001 	struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
1002 	void *lsa = mdata->lsa;
1003 	u32 offset, length;
1004 
1005 	if (sizeof(*get_lsa) > cmd->size_in)
1006 		return -EINVAL;
1007 	offset = le32_to_cpu(get_lsa->offset);
1008 	length = le32_to_cpu(get_lsa->length);
1009 	if (offset + length > LSA_SIZE)
1010 		return -EINVAL;
1011 	if (length > cmd->size_out)
1012 		return -EINVAL;
1013 
1014 	memcpy(cmd->payload_out, lsa + offset, length);
1015 	return 0;
1016 }
1017 
1018 static int mock_set_lsa(struct cxl_mockmem_data *mdata,
1019 			struct cxl_mbox_cmd *cmd)
1020 {
1021 	struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
1022 	void *lsa = mdata->lsa;
1023 	u32 offset, length;
1024 
1025 	if (sizeof(*set_lsa) > cmd->size_in)
1026 		return -EINVAL;
1027 	offset = le32_to_cpu(set_lsa->offset);
1028 	length = cmd->size_in - sizeof(*set_lsa);
1029 	if (offset + length > LSA_SIZE)
1030 		return -EINVAL;
1031 
1032 	memcpy(lsa + offset, &set_lsa->data[0], length);
1033 	return 0;
1034 }
1035 
1036 static int mock_health_info(struct cxl_mbox_cmd *cmd)
1037 {
1038 	struct cxl_mbox_health_info health_info = {
1039 		/* set flags for maint needed, perf degraded, hw replacement */
1040 		.health_status = 0x7,
1041 		/* set media status to "All Data Lost" */
1042 		.media_status = 0x3,
1043 		/*
1044 		 * set ext_status flags for:
1045 		 *  ext_life_used: normal,
1046 		 *  ext_temperature: critical,
1047 		 *  ext_corrected_volatile: warning,
1048 		 *  ext_corrected_persistent: normal,
1049 		 */
1050 		.ext_status = 0x18,
1051 		.life_used = 15,
1052 		.temperature = cpu_to_le16(25),
1053 		.dirty_shutdowns = cpu_to_le32(10),
1054 		.volatile_errors = cpu_to_le32(20),
1055 		.pmem_errors = cpu_to_le32(30),
1056 	};
1057 
1058 	if (cmd->size_out < sizeof(health_info))
1059 		return -EINVAL;
1060 
1061 	memcpy(cmd->payload_out, &health_info, sizeof(health_info));
1062 	return 0;
1063 }
1064 
/*
 * Global table of mock-injected poison records, shared by all mock
 * memdevs; a slot with a NULL cxlds pointer is free.
 */
static struct mock_poison {
	struct cxl_dev_state *cxlds;
	u64 dpa;
} mock_poison_list[MOCK_INJECT_TEST_MAX];
1069 
1070 static struct cxl_mbox_poison_out *
1071 cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
1072 {
1073 	struct cxl_mbox_poison_out *po;
1074 	int nr_records = 0;
1075 	u64 dpa;
1076 
1077 	po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
1078 	if (!po)
1079 		return NULL;
1080 
1081 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1082 		if (mock_poison_list[i].cxlds != cxlds)
1083 			continue;
1084 		if (mock_poison_list[i].dpa < offset ||
1085 		    mock_poison_list[i].dpa > offset + length - 1)
1086 			continue;
1087 
1088 		dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
1089 		po->record[nr_records].address = cpu_to_le64(dpa);
1090 		po->record[nr_records].length = cpu_to_le32(1);
1091 		nr_records++;
1092 		if (nr_records == poison_inject_dev_max)
1093 			break;
1094 	}
1095 
1096 	/* Always return count, even when zero */
1097 	po->count = cpu_to_le16(nr_records);
1098 
1099 	return po;
1100 }
1101 
1102 static int mock_get_poison(struct cxl_dev_state *cxlds,
1103 			   struct cxl_mbox_cmd *cmd)
1104 {
1105 	struct cxl_mbox_poison_in *pi = cmd->payload_in;
1106 	struct cxl_mbox_poison_out *po;
1107 	u64 offset = le64_to_cpu(pi->offset);
1108 	u64 length = le64_to_cpu(pi->length);
1109 	int nr_records;
1110 
1111 	po = cxl_get_injected_po(cxlds, offset, length);
1112 	if (!po)
1113 		return -ENOMEM;
1114 	nr_records = le16_to_cpu(po->count);
1115 	memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
1116 	cmd->size_out = struct_size(po, record, nr_records);
1117 	kfree(po);
1118 
1119 	return 0;
1120 }
1121 
1122 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
1123 {
1124 	int count = 0;
1125 
1126 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1127 		if (mock_poison_list[i].cxlds == cxlds)
1128 			count++;
1129 	}
1130 	return (count >= poison_inject_dev_max);
1131 }
1132 
1133 static bool mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
1134 {
1135 	if (mock_poison_dev_max_injected(cxlds)) {
1136 		dev_dbg(cxlds->dev,
1137 			"Device poison injection limit has been reached: %d\n",
1138 			MOCK_INJECT_DEV_MAX);
1139 		return false;
1140 	}
1141 
1142 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1143 		if (!mock_poison_list[i].cxlds) {
1144 			mock_poison_list[i].cxlds = cxlds;
1145 			mock_poison_list[i].dpa = dpa;
1146 			return true;
1147 		}
1148 	}
1149 	dev_dbg(cxlds->dev,
1150 		"Mock test poison injection limit has been reached: %d\n",
1151 		MOCK_INJECT_TEST_MAX);
1152 
1153 	return false;
1154 }
1155 
1156 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
1157 {
1158 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1159 		if (mock_poison_list[i].cxlds == cxlds &&
1160 		    mock_poison_list[i].dpa == dpa)
1161 			return true;
1162 	}
1163 	return false;
1164 }
1165 
1166 static int mock_inject_poison(struct cxl_dev_state *cxlds,
1167 			      struct cxl_mbox_cmd *cmd)
1168 {
1169 	struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1170 	u64 dpa = le64_to_cpu(pi->address);
1171 
1172 	if (mock_poison_found(cxlds, dpa)) {
1173 		/* Not an error to inject poison if already poisoned */
1174 		dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1175 		return 0;
1176 	}
1177 	if (!mock_poison_add(cxlds, dpa))
1178 		return -ENXIO;
1179 
1180 	return 0;
1181 }
1182 
1183 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
1184 {
1185 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1186 		if (mock_poison_list[i].cxlds == cxlds &&
1187 		    mock_poison_list[i].dpa == dpa) {
1188 			mock_poison_list[i].cxlds = NULL;
1189 			return true;
1190 		}
1191 	}
1192 	return false;
1193 }
1194 
1195 static int mock_clear_poison(struct cxl_dev_state *cxlds,
1196 			     struct cxl_mbox_cmd *cmd)
1197 {
1198 	struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1199 	u64 dpa = le64_to_cpu(pi->address);
1200 
1201 	/*
1202 	 * A real CXL device will write pi->write_data to the address
1203 	 * being cleared. In this mock, just delete this address from
1204 	 * the mock poison list.
1205 	 */
1206 	if (!mock_poison_del(cxlds, dpa))
1207 		dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1208 
1209 	return 0;
1210 }
1211 
1212 static bool mock_poison_list_empty(void)
1213 {
1214 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1215 		if (mock_poison_list[i].cxlds)
1216 			return false;
1217 	}
1218 	return true;
1219 }
1220 
/* sysfs: report the current per-device poison injection limit */
static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
{
	return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
}
1225 
1226 static ssize_t poison_inject_max_store(struct device_driver *drv,
1227 				       const char *buf, size_t len)
1228 {
1229 	int val;
1230 
1231 	if (kstrtoint(buf, 0, &val) < 0)
1232 		return -EINVAL;
1233 
1234 	if (!mock_poison_list_empty())
1235 		return -EBUSY;
1236 
1237 	if (val <= MOCK_INJECT_TEST_MAX)
1238 		poison_inject_dev_max = val;
1239 	else
1240 		return -EINVAL;
1241 
1242 	return len;
1243 }
1244 
/* Driver-level (module-wide) sysfs attributes for the mock driver */
static DRIVER_ATTR_RW(poison_inject_max);

static struct attribute *cxl_mock_mem_core_attrs[] = {
	&driver_attr_poison_inject_max.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem_core);
1252 
1253 static int mock_fw_info(struct cxl_mockmem_data *mdata,
1254 			struct cxl_mbox_cmd *cmd)
1255 {
1256 	struct cxl_mbox_get_fw_info fw_info = {
1257 		.num_slots = FW_SLOTS,
1258 		.slot_info = (mdata->fw_slot & 0x7) |
1259 			     ((mdata->fw_staged & 0x7) << 3),
1260 		.activation_cap = 0,
1261 	};
1262 
1263 	strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
1264 	strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
1265 	strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
1266 	strcpy(fw_info.slot_4_revision, "");
1267 
1268 	if (cmd->size_out < sizeof(fw_info))
1269 		return -EINVAL;
1270 
1271 	memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
1272 	return 0;
1273 }
1274 
1275 static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
1276 			    struct cxl_mbox_cmd *cmd)
1277 {
1278 	struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
1279 	void *fw = mdata->fw;
1280 	size_t offset, length;
1281 
1282 	offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
1283 	length = cmd->size_in - sizeof(*transfer);
1284 	if (offset + length > FW_SIZE)
1285 		return -EINVAL;
1286 
1287 	switch (transfer->action) {
1288 	case CXL_FW_TRANSFER_ACTION_FULL:
1289 		if (offset != 0)
1290 			return -EINVAL;
1291 		fallthrough;
1292 	case CXL_FW_TRANSFER_ACTION_END:
1293 		if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
1294 			return -EINVAL;
1295 		mdata->fw_size = offset + length;
1296 		break;
1297 	case CXL_FW_TRANSFER_ACTION_INITIATE:
1298 	case CXL_FW_TRANSFER_ACTION_CONTINUE:
1299 		break;
1300 	case CXL_FW_TRANSFER_ACTION_ABORT:
1301 		return 0;
1302 	default:
1303 		return -EINVAL;
1304 	}
1305 
1306 	memcpy(fw + offset, transfer->data, length);
1307 	usleep_range(1500, 2000);
1308 	return 0;
1309 }
1310 
1311 static int mock_activate_fw(struct cxl_mockmem_data *mdata,
1312 			    struct cxl_mbox_cmd *cmd)
1313 {
1314 	struct cxl_mbox_activate_fw *activate = cmd->payload_in;
1315 
1316 	if (activate->slot == 0 || activate->slot > FW_SLOTS)
1317 		return -EINVAL;
1318 
1319 	switch (activate->action) {
1320 	case CXL_FW_ACTIVATE_ONLINE:
1321 		mdata->fw_slot = activate->slot;
1322 		mdata->fw_staged = 0;
1323 		return 0;
1324 	case CXL_FW_ACTIVATE_OFFLINE:
1325 		mdata->fw_staged = activate->slot;
1326 		return 0;
1327 	}
1328 
1329 	return -EINVAL;
1330 }
1331 
/*
 * Mailbox entry point for the mock device: dispatch the opcode to the
 * matching mock_* handler. Unknown opcodes fall through with -EIO.
 */
static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
			      struct cxl_mbox_cmd *cmd)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct device *dev = cxlds->dev;
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	int rc = -EIO;

	switch (cmd->opcode) {
	case CXL_MBOX_OP_SET_TIMESTAMP:
		rc = mock_set_timestamp(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
		rc = mock_gsl(cmd);
		break;
	case CXL_MBOX_OP_GET_LOG:
		rc = mock_get_log(mds, cmd);
		break;
	case CXL_MBOX_OP_IDENTIFY:
		/* RCD (restricted CXL device) instances identify differently */
		if (cxlds->rcd)
			rc = mock_rcd_id(cmd);
		else
			rc = mock_id(cmd);
		break;
	case CXL_MBOX_OP_GET_LSA:
		rc = mock_get_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_PARTITION_INFO:
		rc = mock_partition_info(cmd);
		break;
	case CXL_MBOX_OP_GET_EVENT_RECORD:
		rc = mock_get_event(dev, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
		rc = mock_clear_event(dev, cmd);
		break;
	case CXL_MBOX_OP_SET_LSA:
		rc = mock_set_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_HEALTH_INFO:
		rc = mock_health_info(cmd);
		break;
	case CXL_MBOX_OP_SANITIZE:
		rc = mock_sanitize(mdata, cmd);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		rc = mock_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		rc = mock_get_security_state(mdata, cmd);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		rc = mock_set_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		rc = mock_disable_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		rc = mock_freeze_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_UNLOCK:
		rc = mock_unlock_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		rc = mock_passphrase_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_POISON:
		rc = mock_get_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		rc = mock_inject_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		rc = mock_clear_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_FW_INFO:
		rc = mock_fw_info(mdata, cmd);
		break;
	case CXL_MBOX_OP_TRANSFER_FW:
		rc = mock_transfer_fw(mdata, cmd);
		break;
	case CXL_MBOX_OP_ACTIVATE_FW:
		rc = mock_activate_fw(mdata, cmd);
		break;
	default:
		break;
	}

	/* Trace every command for test debugging */
	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
		cmd->size_in, cmd->size_out, rc);

	return rc;
}
1425 
/* devm action: free the vmalloc'd label storage area on device teardown */
static void label_area_release(void *lsa)
{
	vfree(lsa);
}
1430 
/* devm action: free the vmalloc'd firmware buffer on device teardown */
static void fw_buf_release(void *buf)
{
	vfree(buf);
}
1435 
1436 static bool is_rcd(struct platform_device *pdev)
1437 {
1438 	const struct platform_device_id *id = platform_get_device_id(pdev);
1439 
1440 	return !!id->driver_data;
1441 }
1442 
/* sysfs: any write fires the mock event-log notification machinery */
static ssize_t event_trigger_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	cxl_mock_event_trigger(dev);
	return count;
}
static DEVICE_ATTR_WO(event_trigger);
1451 
/*
 * Probe a mock CXL memdev: allocate the backing LSA and FW buffers, set
 * up a cxl_memdev_state wired to the mock mailbox, run the standard CXL
 * core enumeration sequence, and register the memdev. All unwinding is
 * devm-managed, so error paths simply return.
 */
static int cxl_mock_mem_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cxl_memdev *cxlmd;
	struct cxl_memdev_state *mds;
	struct cxl_dev_state *cxlds;
	struct cxl_mockmem_data *mdata;
	int rc;

	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;
	dev_set_drvdata(dev, mdata);

	/* Backing storage for the mock label area and firmware image */
	mdata->lsa = vmalloc(LSA_SIZE);
	if (!mdata->lsa)
		return -ENOMEM;
	mdata->fw = vmalloc(FW_SIZE);
	if (!mdata->fw)
		return -ENOMEM;
	/* Slot 2 starts out as the active firmware slot */
	mdata->fw_slot = 2;

	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
	if (rc)
		return rc;

	mds = cxl_memdev_state_create(dev);
	if (IS_ERR(mds))
		return PTR_ERR(mds);

	/* Route all mailbox traffic through the mock dispatcher */
	mdata->mds = mds;
	mds->mbox_send = cxl_mock_mbox_send;
	mds->payload_size = SZ_4K;
	mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);

	cxlds = &mds->cxlds;
	/* Use the platform device id as a unique serial number */
	cxlds->serial = pdev->id;
	if (is_rcd(pdev))
		cxlds->rcd = true;

	/* Standard CXL core bring-up sequence, driven by the mock mailbox */
	rc = cxl_enumerate_cmds(mds);
	if (rc)
		return rc;

	rc = cxl_poison_state_init(mds);
	if (rc)
		return rc;

	rc = cxl_set_timestamp(mds);
	if (rc)
		return rc;

	cxlds->media_ready = true;
	rc = cxl_dev_state_identify(mds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(mds);
	if (rc)
		return rc;

	cxl_mock_add_event_logs(&mdata->mes);

	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
	if (rc)
		return rc;

	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
	if (rc)
		return rc;

	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);

	return 0;
}
1536 
1537 static ssize_t security_lock_show(struct device *dev,
1538 				  struct device_attribute *attr, char *buf)
1539 {
1540 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1541 
1542 	return sysfs_emit(buf, "%u\n",
1543 			  !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1544 }
1545 
1546 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1547 				   const char *buf, size_t count)
1548 {
1549 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1550 	u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1551 		   CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1552 	int val;
1553 
1554 	if (kstrtoint(buf, 0, &val) < 0)
1555 		return -EINVAL;
1556 
1557 	if (val == 1) {
1558 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1559 			return -ENXIO;
1560 		mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1561 		mdata->security_state &= ~mask;
1562 	} else {
1563 		return -EINVAL;
1564 	}
1565 	return count;
1566 }
1567 
1568 static DEVICE_ATTR_RW(security_lock);
1569 
1570 static ssize_t fw_buf_checksum_show(struct device *dev,
1571 				    struct device_attribute *attr, char *buf)
1572 {
1573 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1574 	u8 hash[SHA256_DIGEST_SIZE];
1575 	unsigned char *hstr, *hptr;
1576 	struct sha256_state sctx;
1577 	ssize_t written = 0;
1578 	int i;
1579 
1580 	sha256_init(&sctx);
1581 	sha256_update(&sctx, mdata->fw, mdata->fw_size);
1582 	sha256_final(&sctx, hash);
1583 
1584 	hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
1585 	if (!hstr)
1586 		return -ENOMEM;
1587 
1588 	hptr = hstr;
1589 	for (i = 0; i < SHA256_DIGEST_SIZE; i++)
1590 		hptr += sprintf(hptr, "%02x", hash[i]);
1591 
1592 	written = sysfs_emit(buf, "%s\n", hstr);
1593 
1594 	kfree(hstr);
1595 	return written;
1596 }
1597 
1598 static DEVICE_ATTR_RO(fw_buf_checksum);
1599 
/* sysfs: report the simulated sanitize completion delay (seconds) */
static ssize_t sanitize_timeout_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", mdata->sanitize_timeout);
}
1607 
1608 static ssize_t sanitize_timeout_store(struct device *dev,
1609 				      struct device_attribute *attr,
1610 				      const char *buf, size_t count)
1611 {
1612 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1613 	unsigned long val;
1614 	int rc;
1615 
1616 	rc = kstrtoul(buf, 0, &val);
1617 	if (rc)
1618 		return rc;
1619 
1620 	mdata->sanitize_timeout = val;
1621 
1622 	return count;
1623 }
1624 
static DEVICE_ATTR_RW(sanitize_timeout);

/* Per-device sysfs attributes exposed by each mock memdev instance */
static struct attribute *cxl_mock_mem_attrs[] = {
	&dev_attr_security_lock.attr,
	&dev_attr_event_trigger.attr,
	&dev_attr_fw_buf_checksum.attr,
	&dev_attr_sanitize_timeout.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem);

/* driver_data = 1 marks the RCD (restricted CXL device) variant */
static const struct platform_device_id cxl_mock_mem_ids[] = {
	{ .name = "cxl_mem", 0 },
	{ .name = "cxl_rcd", 1 },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1642 
/* Platform driver binding the "cxl_mem"/"cxl_rcd" mock device ids */
static struct platform_driver cxl_mock_mem_driver = {
	.probe = cxl_mock_mem_probe,
	.id_table = cxl_mock_mem_ids,
	.driver = {
		.name = KBUILD_MODNAME,
		.dev_groups = cxl_mock_mem_groups,
		.groups = cxl_mock_mem_core_groups,
	},
};

module_platform_driver(cxl_mock_mem_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
1656