xref: /linux/tools/testing/cxl/test/mem.c (revision e04e2b760ddbe3d7b283a05898c3a029085cd8cd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/vmalloc.h>
7 #include <linux/module.h>
8 #include <linux/delay.h>
9 #include <linux/sizes.h>
10 #include <linux/bits.h>
11 #include <asm/unaligned.h>
12 #include <crypto/sha2.h>
13 #include <cxlmem.h>
14 
15 #include "trace.h"
16 
#define LSA_SIZE SZ_128K		/* size of the mock label storage area */
#define FW_SIZE SZ_64M			/* mock firmware image size */
#define FW_SLOTS 3			/* number of mock firmware slots */
#define DEV_SIZE SZ_2G			/* total mock device capacity */
/* CEL effect bit; argument parenthesized for macro hygiene */
#define EFFECT(x) (1U << (x))

#define MOCK_INJECT_DEV_MAX 8		/* poison records the mock device holds */
#define MOCK_INJECT_TEST_MAX 128	/* poison records the test may inject */

/* Runtime-adjustable cap on injected-poison records reported per device */
static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
27 
/*
 * Bit positions for the Command Effects Log "effect" field; used with
 * EFFECT() to build the per-opcode effect masks in mock_cel[] below.
 */
enum cxl_command_effects {
	CONF_CHANGE_COLD_RESET = 0,
	CONF_CHANGE_IMMEDIATE,
	DATA_CHANGE_IMMEDIATE,
	POLICY_CHANGE_IMMEDIATE,
	LOG_CHANGE_IMMEDIATE,
	SECURITY_CHANGE_IMMEDIATE,
	BACKGROUND_OP,
	SECONDARY_MBOX_SUPPORTED,
};
38 
39 #define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
40 
/*
 * Mock Command Effects Log: the mailbox opcodes this emulated device
 * advertises, each with its CEL effect bits.
 */
static struct cxl_cel_entry mock_cel[] = {
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
				      EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(BACKGROUND_OP)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(CONF_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SANITIZE),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE) |
				      EFFECT(SECURITY_CHANGE_IMMEDIATE) |
				      EFFECT(BACKGROUND_OP)),
	},
};
100 
/* See CXL 2.0 Table 181 Get Health Info Output Payload */
struct cxl_mbox_health_info {
	u8 health_status;	/* overall device health flags */
	u8 media_status;	/* media state, e.g. "All Data Lost" */
	u8 ext_status;		/* extended status bit-pairs */
	u8 life_used;		/* percentage of device life consumed */
	__le16 temperature;	/* device temperature in degrees C */
	__le32 dirty_shutdowns;
	__le32 volatile_errors;
	__le32 pmem_errors;
} __packed;
112 
/*
 * Get Supported Logs payload: this device advertises exactly one log,
 * the Command Effects Log (mock_cel[]).
 */
static struct {
	struct cxl_mbox_get_supported_logs gsl;
	struct cxl_gsl_entry entry;
} mock_gsl_payload = {
	.gsl = {
		.entries = cpu_to_le16(1),
	},
	.entry = {
		.uuid = DEFINE_CXL_CEL_UUID,
		.size = cpu_to_le32(sizeof(mock_cel)),
	},
};
125 
126 #define PASS_TRY_LIMIT 3
127 
128 #define CXL_TEST_EVENT_CNT_MAX 15
129 
130 /* Set a number of events to return at a time for simulation.  */
131 #define CXL_TEST_EVENT_RET_MAX 4
132 
/*
 * One mock event log. Records are stored once at init; cur_idx/clear_idx
 * walk the fixed events[] array to simulate read and clear progress.
 */
struct mock_event_log {
	u16 clear_idx;		/* next record index expected to be cleared */
	u16 cur_idx;		/* next record index to be returned */
	u16 nr_events;		/* number of valid entries in events[] */
	u16 nr_overflow;	/* current overflow count reported to host */
	u16 overflow_reset;	/* overflow count restored by event_reset_log() */
	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
};
141 
/* Per-device collection of mock event logs plus the aggregate status bits */
struct mock_event_store {
	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
	u32 ev_status;		/* CXLDEV_EVENT_STATUS_* bits for populated logs */
};
146 
/* Driver-private state for one mock memdev instance */
struct cxl_mockmem_data {
	void *lsa;		/* backing store for the label storage area */
	void *fw;		/* backing store for firmware slots */
	int fw_slot;
	int fw_staged;
	size_t fw_size;
	u32 security_state;	/* CXL_PMEM_SEC_STATE_* bitmask */
	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
	int user_limit;		/* failed user-passphrase attempts */
	int master_limit;	/* failed master-passphrase attempts */
	struct mock_event_store mes;
	struct cxl_memdev_state *mds;
	u8 event_buf[SZ_4K];
	u64 timestamp;
	unsigned long sanitize_timeout;
};
164 
165 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
166 {
167 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
168 
169 	if (log_type >= CXL_EVENT_TYPE_MAX)
170 		return NULL;
171 	return &mdata->mes.mock_logs[log_type];
172 }
173 
174 static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
175 {
176 	return log->events[log->cur_idx];
177 }
178 
179 static void event_reset_log(struct mock_event_log *log)
180 {
181 	log->cur_idx = 0;
182 	log->clear_idx = 0;
183 	log->nr_overflow = log->overflow_reset;
184 }
185 
186 /* Handle can never be 0 use 1 based indexing for handle */
187 static u16 event_get_clear_handle(struct mock_event_log *log)
188 {
189 	return log->clear_idx + 1;
190 }
191 
192 /* Handle can never be 0 use 1 based indexing for handle */
193 static __le16 event_get_cur_event_handle(struct mock_event_log *log)
194 {
195 	u16 cur_handle = log->cur_idx + 1;
196 
197 	return cpu_to_le16(cur_handle);
198 }
199 
200 static bool event_log_empty(struct mock_event_log *log)
201 {
202 	return log->cur_idx == log->nr_events;
203 }
204 
205 static void mes_add_event(struct mock_event_store *mes,
206 			  enum cxl_event_log_type log_type,
207 			  struct cxl_event_record_raw *event)
208 {
209 	struct mock_event_log *log;
210 
211 	if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
212 		return;
213 
214 	log = &mes->mock_logs[log_type];
215 
216 	if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
217 		log->nr_overflow++;
218 		log->overflow_reset = log->nr_overflow;
219 		return;
220 	}
221 
222 	log->events[log->nr_events] = event;
223 	log->nr_events++;
224 }
225 
226 /*
227  * Vary the number of events returned to simulate events occuring while the
228  * logs are being read.
229  */
230 static int ret_limit = 0;
231 
232 static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
233 {
234 	struct cxl_get_event_payload *pl;
235 	struct mock_event_log *log;
236 	u16 nr_overflow;
237 	u8 log_type;
238 	int i;
239 
240 	if (cmd->size_in != sizeof(log_type))
241 		return -EINVAL;
242 
243 	ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX;
244 	if (!ret_limit)
245 		ret_limit = 1;
246 
247 	if (cmd->size_out < struct_size(pl, records, ret_limit))
248 		return -EINVAL;
249 
250 	log_type = *((u8 *)cmd->payload_in);
251 	if (log_type >= CXL_EVENT_TYPE_MAX)
252 		return -EINVAL;
253 
254 	memset(cmd->payload_out, 0, struct_size(pl, records, 0));
255 
256 	log = event_find_log(dev, log_type);
257 	if (!log || event_log_empty(log))
258 		return 0;
259 
260 	pl = cmd->payload_out;
261 
262 	for (i = 0; i < ret_limit && !event_log_empty(log); i++) {
263 		memcpy(&pl->records[i], event_get_current(log),
264 		       sizeof(pl->records[i]));
265 		pl->records[i].event.generic.hdr.handle =
266 				event_get_cur_event_handle(log);
267 		log->cur_idx++;
268 	}
269 
270 	cmd->size_out = struct_size(pl, records, i);
271 	pl->record_count = cpu_to_le16(i);
272 	if (!event_log_empty(log))
273 		pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
274 
275 	if (log->nr_overflow) {
276 		u64 ns;
277 
278 		pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
279 		pl->overflow_err_count = cpu_to_le16(nr_overflow);
280 		ns = ktime_get_real_ns();
281 		ns -= 5000000000; /* 5s ago */
282 		pl->first_overflow_timestamp = cpu_to_le64(ns);
283 		ns = ktime_get_real_ns();
284 		ns -= 1000000000; /* 1s ago */
285 		pl->last_overflow_timestamp = cpu_to_le64(ns);
286 	}
287 
288 	return 0;
289 }
290 
/*
 * Mock CXL Clear Event Records: validate that the host clears exactly the
 * records it has read, in handle order, then advance the clear index.
 */
static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
	struct mock_event_log *log;
	u8 log_type = pl->event_log;
	u16 handle;
	int nr;

	if (log_type >= CXL_EVENT_TYPE_MAX)
		return -EINVAL;

	log = event_find_log(dev, log_type);
	if (!log)
		return 0; /* No mock data in this log */

	/*
	 * This check is technically not invalid per the specification AFAICS.
	 * (The host could 'guess' handles and clear them in order).
	 * However, this is not good behavior for the host so test it.
	 */
	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
		dev_err(dev,
			"Attempting to clear more events than returned!\n");
		return -EINVAL;
	}

	/* Check handle order prior to clearing events */
	for (nr = 0, handle = event_get_clear_handle(log);
	     nr < pl->nr_recs;
	     nr++, handle++) {
		if (handle != le16_to_cpu(pl->handles[nr])) {
			dev_err(dev, "Clearing events out of order\n");
			return -EINVAL;
		}
	}

	/* Any successful clear resets the reported overflow count */
	if (log->nr_overflow)
		log->nr_overflow = 0;

	/* Clear events */
	log->clear_idx += pl->nr_recs;
	return 0;
}
334 
335 static void cxl_mock_event_trigger(struct device *dev)
336 {
337 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
338 	struct mock_event_store *mes = &mdata->mes;
339 	int i;
340 
341 	for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
342 		struct mock_event_log *log;
343 
344 		log = event_find_log(dev, i);
345 		if (log)
346 			event_reset_log(log);
347 	}
348 
349 	cxl_mem_get_event_records(mdata->mds, mes->ev_status);
350 }
351 
/* Vendor-specific mock record flagged "maintenance needed" */
struct cxl_event_record_raw maint_needed = {
	.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xa5b6),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
365 
/* Vendor-specific mock record flagged "hardware replacement needed" */
struct cxl_event_record_raw hardware_replace = {
	.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xb6a5),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
379 
/* UUID-prefixed General Media event, matching the raw record wire layout */
struct cxl_test_gen_media {
	uuid_t id;
	struct cxl_event_gen_media rec;
} __packed;
384 
/* Mock General Media event: permanent uncorrectable host-write error */
struct cxl_test_gen_media gen_media = {
	.id = CXL_EVENT_GEN_MEDIA_UUID,
	.rec = {
		.media_hdr = {
			.hdr = {
				.length = sizeof(struct cxl_test_gen_media),
				.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
				/* .handle = Set dynamically */
				.related_handle = cpu_to_le16(0),
			},
			.phys_addr = cpu_to_le64(0x2000),
			.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
			.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
			.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
			/* .validity_flags = <set below> */
			.channel = 1,
			.rank = 30,
		},
	},
};
405 
/* UUID-prefixed DRAM event, matching the raw record wire layout */
struct cxl_test_dram {
	uuid_t id;
	struct cxl_event_dram rec;
} __packed;
410 
/* Mock DRAM event: perf-degraded threshold event found by media scrub */
struct cxl_test_dram dram = {
	.id = CXL_EVENT_DRAM_UUID,
	.rec = {
		.media_hdr = {
			.hdr = {
				.length = sizeof(struct cxl_test_dram),
				.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
				/* .handle = Set dynamically */
				.related_handle = cpu_to_le16(0),
			},
			.phys_addr = cpu_to_le64(0x8000),
			.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
			.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
			.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
			/* .validity_flags = <set below> */
			.channel = 1,
		},
		.bank_group = 5,
		.bank = 2,
		.column = {0xDE, 0xAD},
	},
};
433 
/* UUID-prefixed Memory Module event, matching the raw record wire layout */
struct cxl_test_mem_module {
	uuid_t id;
	struct cxl_event_mem_module rec;
} __packed;
438 
/* Mock Memory Module event: temperature change with degraded health info */
struct cxl_test_mem_module mem_module = {
	.id = CXL_EVENT_MEM_MODULE_UUID,
	.rec = {
		.hdr = {
			.length = sizeof(struct cxl_test_mem_module),
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0),
		},
		.event_type = CXL_MMER_TEMP_CHANGE,
		.info = {
			.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
			.media_status = CXL_DHI_MS_ALL_DATA_LOST,
			.add_status = (CXL_DHI_AS_CRITICAL << 2) |
				      (CXL_DHI_AS_WARNING << 4) |
				      (CXL_DHI_AS_WARNING << 5),
			.device_temp = { 0xDE, 0xAD},
			.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
		}
	},
};
461 
462 static int mock_set_timestamp(struct cxl_dev_state *cxlds,
463 			      struct cxl_mbox_cmd *cmd)
464 {
465 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
466 	struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
467 
468 	if (cmd->size_in != sizeof(*ts))
469 		return -EINVAL;
470 
471 	if (cmd->size_out != 0)
472 		return -EINVAL;
473 
474 	mdata->timestamp = le64_to_cpu(ts->timestamp);
475 	return 0;
476 }
477 
/*
 * Populate the INFO, FAIL and FATAL mock logs. The FAIL log is deliberately
 * over-filled so the overflow path in mock_get_event() is exercised.
 */
static void cxl_mock_add_event_logs(struct mock_event_store *mes)
{
	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
			   &gen_media.rec.media_hdr.validity_flags);

	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
			   &dram.rec.media_hdr.validity_flags);

	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&mem_module);
	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;

	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&mem_module);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	/* Overflow this log */
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;

	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
		      (struct cxl_event_record_raw *)&dram);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
}
523 
524 static int mock_gsl(struct cxl_mbox_cmd *cmd)
525 {
526 	if (cmd->size_out < sizeof(mock_gsl_payload))
527 		return -EINVAL;
528 
529 	memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
530 	cmd->size_out = sizeof(mock_gsl_payload);
531 
532 	return 0;
533 }
534 
535 static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
536 {
537 	struct cxl_mbox_get_log *gl = cmd->payload_in;
538 	u32 offset = le32_to_cpu(gl->offset);
539 	u32 length = le32_to_cpu(gl->length);
540 	uuid_t uuid = DEFINE_CXL_CEL_UUID;
541 	void *data = &mock_cel;
542 
543 	if (cmd->size_in < sizeof(*gl))
544 		return -EINVAL;
545 	if (length > mds->payload_size)
546 		return -EINVAL;
547 	if (offset + length > sizeof(mock_cel))
548 		return -EINVAL;
549 	if (!uuid_equal(&gl->uuid, &uuid))
550 		return -EINVAL;
551 	if (length > cmd->size_out)
552 		return -EINVAL;
553 
554 	memcpy(cmd->payload_out, data + offset, length);
555 
556 	return 0;
557 }
558 
559 static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
560 {
561 	struct cxl_mbox_identify id = {
562 		.fw_revision = { "mock fw v1 " },
563 		.total_capacity =
564 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
565 		.volatile_capacity =
566 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
567 	};
568 
569 	if (cmd->size_out < sizeof(id))
570 		return -EINVAL;
571 
572 	memcpy(cmd->payload_out, &id, sizeof(id));
573 
574 	return 0;
575 }
576 
577 static int mock_id(struct cxl_mbox_cmd *cmd)
578 {
579 	struct cxl_mbox_identify id = {
580 		.fw_revision = { "mock fw v1 " },
581 		.lsa_size = cpu_to_le32(LSA_SIZE),
582 		.partition_align =
583 			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
584 		.total_capacity =
585 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
586 		.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
587 	};
588 
589 	put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
590 
591 	if (cmd->size_out < sizeof(id))
592 		return -EINVAL;
593 
594 	memcpy(cmd->payload_out, &id, sizeof(id));
595 
596 	return 0;
597 }
598 
599 static int mock_partition_info(struct cxl_mbox_cmd *cmd)
600 {
601 	struct cxl_mbox_get_partition_info pi = {
602 		.active_volatile_cap =
603 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
604 		.active_persistent_cap =
605 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
606 	};
607 
608 	if (cmd->size_out < sizeof(pi))
609 		return -EINVAL;
610 
611 	memcpy(cmd->payload_out, &pi, sizeof(pi));
612 
613 	return 0;
614 }
615 
/*
 * Delayed-work handler standing in for sanitize completion: notify any
 * sysfs waiter and clear the active flag under the mailbox mutex.
 */
void cxl_mockmem_sanitize_work(struct work_struct *work)
{
	struct cxl_memdev_state *mds =
		container_of(work, typeof(*mds), security.poll_dwork.work);

	mutex_lock(&mds->mbox_mutex);
	if (mds->security.sanitize_node)
		sysfs_notify_dirent(mds->security.sanitize_node);
	mds->security.sanitize_active = false;
	mutex_unlock(&mds->mbox_mutex);

	dev_dbg(mds->cxlds.dev, "sanitize complete\n");
}
629 
630 static int mock_sanitize(struct cxl_mockmem_data *mdata,
631 			 struct cxl_mbox_cmd *cmd)
632 {
633 	struct cxl_memdev_state *mds = mdata->mds;
634 	int rc = 0;
635 
636 	if (cmd->size_in != 0)
637 		return -EINVAL;
638 
639 	if (cmd->size_out != 0)
640 		return -EINVAL;
641 
642 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
643 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
644 		return -ENXIO;
645 	}
646 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
647 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
648 		return -ENXIO;
649 	}
650 
651 	mutex_lock(&mds->mbox_mutex);
652 	if (schedule_delayed_work(&mds->security.poll_dwork,
653 				  msecs_to_jiffies(mdata->sanitize_timeout))) {
654 		mds->security.sanitize_active = true;
655 		dev_dbg(mds->cxlds.dev, "sanitize issued\n");
656 	} else
657 		rc = -EBUSY;
658 	mutex_unlock(&mds->mbox_mutex);
659 
660 	return rc;
661 }
662 
663 static int mock_secure_erase(struct cxl_mockmem_data *mdata,
664 			     struct cxl_mbox_cmd *cmd)
665 {
666 	if (cmd->size_in != 0)
667 		return -EINVAL;
668 
669 	if (cmd->size_out != 0)
670 		return -EINVAL;
671 
672 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
673 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
674 		return -ENXIO;
675 	}
676 
677 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
678 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
679 		return -ENXIO;
680 	}
681 
682 	return 0;
683 }
684 
685 static int mock_get_security_state(struct cxl_mockmem_data *mdata,
686 				   struct cxl_mbox_cmd *cmd)
687 {
688 	if (cmd->size_in)
689 		return -EINVAL;
690 
691 	if (cmd->size_out != sizeof(u32))
692 		return -EINVAL;
693 
694 	memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
695 
696 	return 0;
697 }
698 
699 static void master_plimit_check(struct cxl_mockmem_data *mdata)
700 {
701 	if (mdata->master_limit == PASS_TRY_LIMIT)
702 		return;
703 	mdata->master_limit++;
704 	if (mdata->master_limit == PASS_TRY_LIMIT)
705 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
706 }
707 
708 static void user_plimit_check(struct cxl_mockmem_data *mdata)
709 {
710 	if (mdata->user_limit == PASS_TRY_LIMIT)
711 		return;
712 	mdata->user_limit++;
713 	if (mdata->user_limit == PASS_TRY_LIMIT)
714 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
715 }
716 
/*
 * Mock CXL Set Passphrase: verify the old passphrase, then install the new
 * one for the requested scope (master or user), tracking retry limits.
 */
static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
			       struct cxl_mbox_cmd *cmd)
{
	struct cxl_set_pass *set_pass;

	if (cmd->size_in != sizeof(*set_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	set_pass = cmd->payload_in;
	switch (set_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/*
		 * CXL spec rev3.0 8.2.9.8.6.2, The master passphrase shall only be set in
		 * the security disabled state when the user passphrase is not set.
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
	}
	return -EINVAL;
}
776 
777 static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
778 				   struct cxl_mbox_cmd *cmd)
779 {
780 	struct cxl_disable_pass *dis_pass;
781 
782 	if (cmd->size_in != sizeof(*dis_pass))
783 		return -EINVAL;
784 
785 	if (cmd->size_out != 0)
786 		return -EINVAL;
787 
788 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
789 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
790 		return -ENXIO;
791 	}
792 
793 	dis_pass = cmd->payload_in;
794 	switch (dis_pass->type) {
795 	case CXL_PMEM_SEC_PASS_MASTER:
796 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
797 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
798 			return -ENXIO;
799 		}
800 
801 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
802 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
803 			return -ENXIO;
804 		}
805 
806 		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
807 			master_plimit_check(mdata);
808 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
809 			return -ENXIO;
810 		}
811 
812 		mdata->master_limit = 0;
813 		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
814 		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
815 		return 0;
816 
817 	case CXL_PMEM_SEC_PASS_USER:
818 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
819 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
820 			return -ENXIO;
821 		}
822 
823 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
824 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
825 			return -ENXIO;
826 		}
827 
828 		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
829 			user_plimit_check(mdata);
830 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
831 			return -ENXIO;
832 		}
833 
834 		mdata->user_limit = 0;
835 		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
836 		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
837 					   CXL_PMEM_SEC_STATE_LOCKED);
838 		return 0;
839 
840 	default:
841 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
842 		return -EINVAL;
843 	}
844 
845 	return 0;
846 }
847 
848 static int mock_freeze_security(struct cxl_mockmem_data *mdata,
849 				struct cxl_mbox_cmd *cmd)
850 {
851 	if (cmd->size_in != 0)
852 		return -EINVAL;
853 
854 	if (cmd->size_out != 0)
855 		return -EINVAL;
856 
857 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
858 		return 0;
859 
860 	mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
861 	return 0;
862 }
863 
864 static int mock_unlock_security(struct cxl_mockmem_data *mdata,
865 				struct cxl_mbox_cmd *cmd)
866 {
867 	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
868 		return -EINVAL;
869 
870 	if (cmd->size_out != 0)
871 		return -EINVAL;
872 
873 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
874 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
875 		return -ENXIO;
876 	}
877 
878 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
879 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
880 		return -ENXIO;
881 	}
882 
883 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
884 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
885 		return -ENXIO;
886 	}
887 
888 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
889 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
890 		return -ENXIO;
891 	}
892 
893 	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
894 		if (++mdata->user_limit == PASS_TRY_LIMIT)
895 			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
896 		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
897 		return -ENXIO;
898 	}
899 
900 	mdata->user_limit = 0;
901 	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
902 	return 0;
903 }
904 
/*
 * Mock CXL Passphrase Secure Erase: validate the passphrase for the
 * requested scope and simulate erasure by clearing user passphrase state.
 */
static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
					struct cxl_mbox_cmd *cmd)
{
	struct cxl_pass_erase *erase;

	if (cmd->size_in != sizeof(*erase))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	erase = cmd->payload_in;
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	/* Retry limits are enforced per passphrase scope */
	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_USER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	switch (erase->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a master passphrase is passed in while the master
		 * passphrase is not set and user passphrase is not set. The
		 * code will take the assumption that it will behave the same
		 * as a CXL secure erase command without passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
			if (memcmp(mdata->master_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				master_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->master_limit = 0;
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
		} else {
			/*
			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
			 * When master passphrase is disabled, the device shall
			 * return Invalid Input for the Passphrase Secure Erase
			 * command with master passphrase.
			 */
			return -EINVAL;
		}
		/* Scramble encryption keys so that data is effectively erased */
		break;
	case CXL_PMEM_SEC_PASS_USER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a user passphrase is passed in while the user
		 * passphrase is not set. The code will take the assumption that
		 * it will behave the same as a CXL secure erase command without
		 * passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			if (memcmp(mdata->user_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				user_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		}

		/*
		 * CXL rev3 Table 8-118
		 * If user passphrase is not set or supported by device, current
		 * passphrase value is ignored. Will make the assumption that
		 * the operation will proceed as secure erase w/o passphrase
		 * since spec is not explicit.
		 */

		/* Scramble encryption keys so that data is effectively erased */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1002 
1003 static int mock_get_lsa(struct cxl_mockmem_data *mdata,
1004 			struct cxl_mbox_cmd *cmd)
1005 {
1006 	struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
1007 	void *lsa = mdata->lsa;
1008 	u32 offset, length;
1009 
1010 	if (sizeof(*get_lsa) > cmd->size_in)
1011 		return -EINVAL;
1012 	offset = le32_to_cpu(get_lsa->offset);
1013 	length = le32_to_cpu(get_lsa->length);
1014 	if (offset + length > LSA_SIZE)
1015 		return -EINVAL;
1016 	if (length > cmd->size_out)
1017 		return -EINVAL;
1018 
1019 	memcpy(cmd->payload_out, lsa + offset, length);
1020 	return 0;
1021 }
1022 
1023 static int mock_set_lsa(struct cxl_mockmem_data *mdata,
1024 			struct cxl_mbox_cmd *cmd)
1025 {
1026 	struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
1027 	void *lsa = mdata->lsa;
1028 	u32 offset, length;
1029 
1030 	if (sizeof(*set_lsa) > cmd->size_in)
1031 		return -EINVAL;
1032 	offset = le32_to_cpu(set_lsa->offset);
1033 	length = cmd->size_in - sizeof(*set_lsa);
1034 	if (offset + length > LSA_SIZE)
1035 		return -EINVAL;
1036 
1037 	memcpy(lsa + offset, &set_lsa->data[0], length);
1038 	return 0;
1039 }
1040 
/* Mock CXL Get Health Info: return a fixed, fully-populated payload */
static int mock_health_info(struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_health_info health_info = {
		/* set flags for maint needed, perf degraded, hw replacement */
		.health_status = 0x7,
		/* set media status to "All Data Lost" */
		.media_status = 0x3,
		/*
		 * set ext_status flags for:
		 *  ext_life_used: normal,
		 *  ext_temperature: critical,
		 *  ext_corrected_volatile: warning,
		 *  ext_corrected_persistent: normal,
		 */
		.ext_status = 0x18,
		.life_used = 15,
		.temperature = cpu_to_le16(25),
		.dirty_shutdowns = cpu_to_le32(10),
		.volatile_errors = cpu_to_le32(20),
		.pmem_errors = cpu_to_le32(30),
	};

	if (cmd->size_out < sizeof(health_info))
		return -EINVAL;

	memcpy(cmd->payload_out, &health_info, sizeof(health_info));
	return 0;
}
1069 
/*
 * Registry of injected poison shared by all mock memdevs.  An entry is
 * free when its cxlds pointer is NULL; an in-use entry records one
 * poisoned DPA for the owning device.
 */
static struct mock_poison {
	struct cxl_dev_state *cxlds;
	u64 dpa;
} mock_poison_list[MOCK_INJECT_TEST_MAX];
1074 
1075 static struct cxl_mbox_poison_out *
1076 cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
1077 {
1078 	struct cxl_mbox_poison_out *po;
1079 	int nr_records = 0;
1080 	u64 dpa;
1081 
1082 	po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
1083 	if (!po)
1084 		return NULL;
1085 
1086 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1087 		if (mock_poison_list[i].cxlds != cxlds)
1088 			continue;
1089 		if (mock_poison_list[i].dpa < offset ||
1090 		    mock_poison_list[i].dpa > offset + length - 1)
1091 			continue;
1092 
1093 		dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
1094 		po->record[nr_records].address = cpu_to_le64(dpa);
1095 		po->record[nr_records].length = cpu_to_le32(1);
1096 		nr_records++;
1097 		if (nr_records == poison_inject_dev_max)
1098 			break;
1099 	}
1100 
1101 	/* Always return count, even when zero */
1102 	po->count = cpu_to_le16(nr_records);
1103 
1104 	return po;
1105 }
1106 
1107 static int mock_get_poison(struct cxl_dev_state *cxlds,
1108 			   struct cxl_mbox_cmd *cmd)
1109 {
1110 	struct cxl_mbox_poison_in *pi = cmd->payload_in;
1111 	struct cxl_mbox_poison_out *po;
1112 	u64 offset = le64_to_cpu(pi->offset);
1113 	u64 length = le64_to_cpu(pi->length);
1114 	int nr_records;
1115 
1116 	po = cxl_get_injected_po(cxlds, offset, length);
1117 	if (!po)
1118 		return -ENOMEM;
1119 	nr_records = le16_to_cpu(po->count);
1120 	memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
1121 	cmd->size_out = struct_size(po, record, nr_records);
1122 	kfree(po);
1123 
1124 	return 0;
1125 }
1126 
1127 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
1128 {
1129 	int count = 0;
1130 
1131 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1132 		if (mock_poison_list[i].cxlds == cxlds)
1133 			count++;
1134 	}
1135 	return (count >= poison_inject_dev_max);
1136 }
1137 
1138 static int mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
1139 {
1140 	/* Return EBUSY to match the CXL driver handling */
1141 	if (mock_poison_dev_max_injected(cxlds)) {
1142 		dev_dbg(cxlds->dev,
1143 			"Device poison injection limit has been reached: %d\n",
1144 			poison_inject_dev_max);
1145 		return -EBUSY;
1146 	}
1147 
1148 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1149 		if (!mock_poison_list[i].cxlds) {
1150 			mock_poison_list[i].cxlds = cxlds;
1151 			mock_poison_list[i].dpa = dpa;
1152 			return 0;
1153 		}
1154 	}
1155 	dev_dbg(cxlds->dev,
1156 		"Mock test poison injection limit has been reached: %d\n",
1157 		MOCK_INJECT_TEST_MAX);
1158 
1159 	return -ENXIO;
1160 }
1161 
1162 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
1163 {
1164 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1165 		if (mock_poison_list[i].cxlds == cxlds &&
1166 		    mock_poison_list[i].dpa == dpa)
1167 			return true;
1168 	}
1169 	return false;
1170 }
1171 
1172 static int mock_inject_poison(struct cxl_dev_state *cxlds,
1173 			      struct cxl_mbox_cmd *cmd)
1174 {
1175 	struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1176 	u64 dpa = le64_to_cpu(pi->address);
1177 
1178 	if (mock_poison_found(cxlds, dpa)) {
1179 		/* Not an error to inject poison if already poisoned */
1180 		dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1181 		return 0;
1182 	}
1183 
1184 	return mock_poison_add(cxlds, dpa);
1185 }
1186 
1187 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
1188 {
1189 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1190 		if (mock_poison_list[i].cxlds == cxlds &&
1191 		    mock_poison_list[i].dpa == dpa) {
1192 			mock_poison_list[i].cxlds = NULL;
1193 			return true;
1194 		}
1195 	}
1196 	return false;
1197 }
1198 
1199 static int mock_clear_poison(struct cxl_dev_state *cxlds,
1200 			     struct cxl_mbox_cmd *cmd)
1201 {
1202 	struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1203 	u64 dpa = le64_to_cpu(pi->address);
1204 
1205 	/*
1206 	 * A real CXL device will write pi->write_data to the address
1207 	 * being cleared. In this mock, just delete this address from
1208 	 * the mock poison list.
1209 	 */
1210 	if (!mock_poison_del(cxlds, dpa))
1211 		dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1212 
1213 	return 0;
1214 }
1215 
1216 static bool mock_poison_list_empty(void)
1217 {
1218 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1219 		if (mock_poison_list[i].cxlds)
1220 			return false;
1221 	}
1222 	return true;
1223 }
1224 
1225 static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
1226 {
1227 	return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
1228 }
1229 
1230 static ssize_t poison_inject_max_store(struct device_driver *drv,
1231 				       const char *buf, size_t len)
1232 {
1233 	int val;
1234 
1235 	if (kstrtoint(buf, 0, &val) < 0)
1236 		return -EINVAL;
1237 
1238 	if (!mock_poison_list_empty())
1239 		return -EBUSY;
1240 
1241 	if (val <= MOCK_INJECT_TEST_MAX)
1242 		poison_inject_dev_max = val;
1243 	else
1244 		return -EINVAL;
1245 
1246 	return len;
1247 }
1248 
/* Driver-level (not per-device) sysfs attribute */
static DRIVER_ATTR_RW(poison_inject_max);

/* Attribute group attached via the platform driver's .groups below */
static struct attribute *cxl_mock_mem_core_attrs[] = {
	&driver_attr_poison_inject_max.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem_core);
1256 
1257 static int mock_fw_info(struct cxl_mockmem_data *mdata,
1258 			struct cxl_mbox_cmd *cmd)
1259 {
1260 	struct cxl_mbox_get_fw_info fw_info = {
1261 		.num_slots = FW_SLOTS,
1262 		.slot_info = (mdata->fw_slot & 0x7) |
1263 			     ((mdata->fw_staged & 0x7) << 3),
1264 		.activation_cap = 0,
1265 	};
1266 
1267 	strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
1268 	strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
1269 	strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
1270 	strcpy(fw_info.slot_4_revision, "");
1271 
1272 	if (cmd->size_out < sizeof(fw_info))
1273 		return -EINVAL;
1274 
1275 	memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
1276 	return 0;
1277 }
1278 
/*
 * Mock CXL_MBOX_OP_TRANSFER_FW: copy a transfer chunk into the mock
 * firmware buffer.  The command's offset field is expressed in units
 * of CXL_FW_TRANSFER_ALIGNMENT; the data length is whatever follows
 * the fixed transfer header in the mailbox payload.
 */
static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
			    struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
	void *fw = mdata->fw;
	size_t offset, length;

	offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
	length = cmd->size_in - sizeof(*transfer);
	if (offset + length > FW_SIZE)
		return -EINVAL;

	switch (transfer->action) {
	case CXL_FW_TRANSFER_ACTION_FULL:
		/* A FULL transfer is a single-shot upload at offset 0 */
		if (offset != 0)
			return -EINVAL;
		fallthrough;
	case CXL_FW_TRANSFER_ACTION_END:
		/* Slots are numbered 1..FW_SLOTS */
		if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
			return -EINVAL;
		/* Record the final image size for fw_buf_checksum_show() */
		mdata->fw_size = offset + length;
		break;
	case CXL_FW_TRANSFER_ACTION_INITIATE:
	case CXL_FW_TRANSFER_ACTION_CONTINUE:
		break;
	case CXL_FW_TRANSFER_ACTION_ABORT:
		/* Nothing staged to discard in the mock; report success */
		return 0;
	default:
		return -EINVAL;
	}

	memcpy(fw + offset, transfer->data, length);
	/* Simulate device-side transfer latency */
	usleep_range(1500, 2000);
	return 0;
}
1314 
1315 static int mock_activate_fw(struct cxl_mockmem_data *mdata,
1316 			    struct cxl_mbox_cmd *cmd)
1317 {
1318 	struct cxl_mbox_activate_fw *activate = cmd->payload_in;
1319 
1320 	if (activate->slot == 0 || activate->slot > FW_SLOTS)
1321 		return -EINVAL;
1322 
1323 	switch (activate->action) {
1324 	case CXL_FW_ACTIVATE_ONLINE:
1325 		mdata->fw_slot = activate->slot;
1326 		mdata->fw_staged = 0;
1327 		return 0;
1328 	case CXL_FW_ACTIVATE_OFFLINE:
1329 		mdata->fw_staged = activate->slot;
1330 		return 0;
1331 	}
1332 
1333 	return -EINVAL;
1334 }
1335 
/*
 * Mailbox dispatch for the mock memdev: route each supported CXL
 * mailbox opcode to its mock handler.  Opcodes without a handler fall
 * through and return the default -EIO.
 */
static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
			      struct cxl_mbox_cmd *cmd)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct device *dev = cxlds->dev;
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	int rc = -EIO;

	switch (cmd->opcode) {
	case CXL_MBOX_OP_SET_TIMESTAMP:
		rc = mock_set_timestamp(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
		rc = mock_gsl(cmd);
		break;
	case CXL_MBOX_OP_GET_LOG:
		rc = mock_get_log(mds, cmd);
		break;
	case CXL_MBOX_OP_IDENTIFY:
		/* RCDs identify with a different payload than full memdevs */
		if (cxlds->rcd)
			rc = mock_rcd_id(cmd);
		else
			rc = mock_id(cmd);
		break;
	case CXL_MBOX_OP_GET_LSA:
		rc = mock_get_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_PARTITION_INFO:
		rc = mock_partition_info(cmd);
		break;
	case CXL_MBOX_OP_GET_EVENT_RECORD:
		rc = mock_get_event(dev, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
		rc = mock_clear_event(dev, cmd);
		break;
	case CXL_MBOX_OP_SET_LSA:
		rc = mock_set_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_HEALTH_INFO:
		rc = mock_health_info(cmd);
		break;
	case CXL_MBOX_OP_SANITIZE:
		rc = mock_sanitize(mdata, cmd);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		rc = mock_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		rc = mock_get_security_state(mdata, cmd);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		rc = mock_set_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		rc = mock_disable_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		rc = mock_freeze_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_UNLOCK:
		rc = mock_unlock_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		rc = mock_passphrase_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_POISON:
		rc = mock_get_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		rc = mock_inject_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		rc = mock_clear_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_FW_INFO:
		rc = mock_fw_info(mdata, cmd);
		break;
	case CXL_MBOX_OP_TRANSFER_FW:
		rc = mock_transfer_fw(mdata, cmd);
		break;
	case CXL_MBOX_OP_ACTIVATE_FW:
		rc = mock_activate_fw(mdata, cmd);
		break;
	default:
		break;
	}

	/* Trace every command, including unhandled ones, for test debug */
	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
		cmd->size_in, cmd->size_out, rc);

	return rc;
}
1429 
/* devm action callback: free the vmalloc()'d label storage area */
static void label_area_release(void *lsa)
{
	vfree(lsa);
}
1434 
/* devm action callback: free the vmalloc()'d mock firmware buffer */
static void fw_buf_release(void *buf)
{
	vfree(buf);
}
1439 
1440 static bool is_rcd(struct platform_device *pdev)
1441 {
1442 	const struct platform_device_id *id = platform_get_device_id(pdev);
1443 
1444 	return !!id->driver_data;
1445 }
1446 
/*
 * Writing any value to the event_trigger attribute fires the mock
 * event machinery via cxl_mock_event_trigger(); the value is ignored.
 */
static ssize_t event_trigger_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	cxl_mock_event_trigger(dev);
	return count;
}
static DEVICE_ATTR_WO(event_trigger);
1455 
/*
 * Instantiate a mock CXL memdev: allocate backing stores for the label
 * storage area and firmware image, create the memdev state, wire up
 * the mock mailbox, and register the device with the CXL core.  All
 * resources are devm-managed, so early returns need no cleanup.
 */
static int cxl_mock_mem_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cxl_memdev *cxlmd;
	struct cxl_memdev_state *mds;
	struct cxl_dev_state *cxlds;
	struct cxl_mockmem_data *mdata;
	int rc;

	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;
	dev_set_drvdata(dev, mdata);

	/* Backing stores for the LSA and firmware transfer commands */
	mdata->lsa = vmalloc(LSA_SIZE);
	if (!mdata->lsa)
		return -ENOMEM;
	mdata->fw = vmalloc(FW_SIZE);
	if (!mdata->fw)
		return -ENOMEM;
	/* Pretend firmware slot 2 is active (reported by mock_fw_info()) */
	mdata->fw_slot = 2;

	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
	if (rc)
		return rc;

	mds = cxl_memdev_state_create(dev);
	if (IS_ERR(mds))
		return PTR_ERR(mds);

	/* Route mailbox traffic through the mock dispatcher */
	mdata->mds = mds;
	mds->mbox_send = cxl_mock_mbox_send;
	mds->payload_size = SZ_4K;
	mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);

	cxlds = &mds->cxlds;
	cxlds->serial = pdev->id;
	if (is_rcd(pdev))
		cxlds->rcd = true;

	rc = cxl_enumerate_cmds(mds);
	if (rc)
		return rc;

	rc = cxl_poison_state_init(mds);
	if (rc)
		return rc;

	rc = cxl_set_timestamp(mds);
	if (rc)
		return rc;

	cxlds->media_ready = true;
	rc = cxl_dev_state_identify(mds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(mds);
	if (rc)
		return rc;

	cxl_mock_add_event_logs(&mdata->mes);

	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
	if (rc)
		return rc;

	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
	if (rc)
		return rc;

	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);

	return 0;
}
1540 
1541 static ssize_t security_lock_show(struct device *dev,
1542 				  struct device_attribute *attr, char *buf)
1543 {
1544 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1545 
1546 	return sysfs_emit(buf, "%u\n",
1547 			  !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1548 }
1549 
1550 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1551 				   const char *buf, size_t count)
1552 {
1553 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1554 	u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1555 		   CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1556 	int val;
1557 
1558 	if (kstrtoint(buf, 0, &val) < 0)
1559 		return -EINVAL;
1560 
1561 	if (val == 1) {
1562 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1563 			return -ENXIO;
1564 		mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1565 		mdata->security_state &= ~mask;
1566 	} else {
1567 		return -EINVAL;
1568 	}
1569 	return count;
1570 }
1571 
1572 static DEVICE_ATTR_RW(security_lock);
1573 
1574 static ssize_t fw_buf_checksum_show(struct device *dev,
1575 				    struct device_attribute *attr, char *buf)
1576 {
1577 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1578 	u8 hash[SHA256_DIGEST_SIZE];
1579 	unsigned char *hstr, *hptr;
1580 	struct sha256_state sctx;
1581 	ssize_t written = 0;
1582 	int i;
1583 
1584 	sha256_init(&sctx);
1585 	sha256_update(&sctx, mdata->fw, mdata->fw_size);
1586 	sha256_final(&sctx, hash);
1587 
1588 	hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
1589 	if (!hstr)
1590 		return -ENOMEM;
1591 
1592 	hptr = hstr;
1593 	for (i = 0; i < SHA256_DIGEST_SIZE; i++)
1594 		hptr += sprintf(hptr, "%02x", hash[i]);
1595 
1596 	written = sysfs_emit(buf, "%s\n", hstr);
1597 
1598 	kfree(hstr);
1599 	return written;
1600 }
1601 
1602 static DEVICE_ATTR_RO(fw_buf_checksum);
1603 
1604 static ssize_t sanitize_timeout_show(struct device *dev,
1605 				  struct device_attribute *attr, char *buf)
1606 {
1607 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1608 
1609 	return sysfs_emit(buf, "%lu\n", mdata->sanitize_timeout);
1610 }
1611 
1612 static ssize_t sanitize_timeout_store(struct device *dev,
1613 				      struct device_attribute *attr,
1614 				      const char *buf, size_t count)
1615 {
1616 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1617 	unsigned long val;
1618 	int rc;
1619 
1620 	rc = kstrtoul(buf, 0, &val);
1621 	if (rc)
1622 		return rc;
1623 
1624 	mdata->sanitize_timeout = val;
1625 
1626 	return count;
1627 }
1628 
1629 static DEVICE_ATTR_RW(sanitize_timeout);
1630 
/* Per-device sysfs attributes, attached via the driver's .dev_groups */
static struct attribute *cxl_mock_mem_attrs[] = {
	&dev_attr_security_lock.attr,
	&dev_attr_event_trigger.attr,
	&dev_attr_fw_buf_checksum.attr,
	&dev_attr_sanitize_timeout.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem);
1639 
1640 static const struct platform_device_id cxl_mock_mem_ids[] = {
1641 	{ .name = "cxl_mem", 0 },
1642 	{ .name = "cxl_rcd", 1 },
1643 	{ },
1644 };
1645 MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1646 
static struct platform_driver cxl_mock_mem_driver = {
	.probe = cxl_mock_mem_probe,
	.id_table = cxl_mock_mem_ids,
	.driver = {
		.name = KBUILD_MODNAME,
		/* per-device attributes (security_lock, event_trigger, ...) */
		.dev_groups = cxl_mock_mem_groups,
		/* driver-level attributes (poison_inject_max) */
		.groups = cxl_mock_mem_core_groups,
	},
};
1656 
/* Standard module registration for the mock platform driver */
module_platform_driver(cxl_mock_mem_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
1660