xref: /linux/tools/testing/cxl/test/mem.c (revision 3df692169e8486fc3dd91fcd5ea81c27a0bac033)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/module.h>
7 #include <linux/delay.h>
8 #include <linux/sizes.h>
9 #include <linux/bits.h>
10 #include <asm/unaligned.h>
11 #include <crypto/sha2.h>
12 #include <cxlmem.h>
13 
14 #include "trace.h"
15 
/* Mock device geometry */
#define LSA_SIZE SZ_128K
#define FW_SIZE SZ_64M
#define FW_SLOTS 3
#define DEV_SIZE SZ_2G
/*
 * Build a Command Effects Log effect mask from a bit position.
 * Fix: parenthesize the macro argument so expression arguments
 * (e.g. conditional expressions) expand with the intended precedence.
 */
#define EFFECT(x) (1U << (x))

/* Per-device poison injection limit enforced by the mock */
#define MOCK_INJECT_DEV_MAX 8
/* Capacity of the global mock poison list (shared by all mock devices) */
#define MOCK_INJECT_TEST_MAX 128

static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
26 
/*
 * Bit positions used with EFFECT() to build the per-command effect masks
 * reported in the mock Command Effects Log (mock_cel[] below).
 */
enum cxl_command_effects {
	CONF_CHANGE_COLD_RESET = 0,
	CONF_CHANGE_IMMEDIATE,
	DATA_CHANGE_IMMEDIATE,
	POLICY_CHANGE_IMMEDIATE,
	LOG_CHANGE_IMMEDIATE,
	SECURITY_CHANGE_IMMEDIATE,
	BACKGROUND_OP,
	SECONDARY_MBOX_SUPPORTED,
};

/* Effect mask for commands that report no side effects */
#define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
39 
/*
 * Mock Command Effects Log: the set of mailbox commands this mock device
 * advertises, each with the effect mask the command reports.
 */
static struct cxl_cel_entry mock_cel[] = {
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
				      EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(BACKGROUND_OP)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(CONF_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SANITIZE),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE) |
				      EFFECT(SECURITY_CHANGE_IMMEDIATE) |
				      EFFECT(BACKGROUND_OP)),
	},
};
99 
/* See CXL 2.0 Table 181 Get Health Info Output Payload */
struct cxl_mbox_health_info {
	u8 health_status;	/* overall device health flags */
	u8 media_status;	/* media state (e.g. data loss) */
	u8 ext_status;		/* extended status flag pairs */
	u8 life_used;		/* percentage of device life consumed */
	__le16 temperature;	/* device temperature */
	__le32 dirty_shutdowns;	/* count of unclean shutdowns */
	__le32 volatile_errors;	/* corrected volatile error count */
	__le32 pmem_errors;	/* corrected persistent error count */
} __packed;
111 
/*
 * Mock Get Supported Logs payload: advertises exactly one log, the
 * Command Effects Log, sized to cover every entry in mock_cel[].
 */
static struct {
	struct cxl_mbox_get_supported_logs gsl;
	struct cxl_gsl_entry entry;
} mock_gsl_payload = {
	.gsl = {
		.entries = cpu_to_le16(1),
	},
	.entry = {
		.uuid = DEFINE_CXL_CEL_UUID,
		.size = cpu_to_le32(sizeof(mock_cel)),
	},
};
124 
/* Wrong-passphrase attempts allowed before the pass-limit state bit is set */
#define PASS_TRY_LIMIT 3

/* Capacity of each mock event log */
#define CXL_TEST_EVENT_CNT_MAX 15

/* Set a number of events to return at a time for simulation.  */
#define CXL_TEST_EVENT_CNT 3
131 
/*
 * One mock event log: a fixed array of records plus read/clear cursors.
 * Event handles are 1-based indexes into events[].
 */
struct mock_event_log {
	u16 clear_idx;		/* index of the next record expected to be cleared */
	u16 cur_idx;		/* index of the next record to hand out */
	u16 nr_events;		/* number of valid entries in events[] */
	u16 nr_overflow;	/* events dropped because the log was full */
	u16 overflow_reset;	/* overflow count restored by event_reset_log() */
	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
};
140 
/* Per-device set of mock event logs plus the pending-event status mask */
struct mock_event_store {
	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
	u32 ev_status;		/* CXLDEV_EVENT_STATUS_* bits with queued events */
};
145 
/* Per-device driver data for the mock CXL memory device */
struct cxl_mockmem_data {
	void *lsa;		/* backing store for the Label Storage Area */
	void *fw;		/* backing store for firmware transfer */
	int fw_slot;		/* currently active firmware slot */
	int fw_staged;		/* slot staged for next activation */
	size_t fw_size;		/* bytes received in the current transfer */
	u32 security_state;	/* CXL_PMEM_SEC_STATE_* bits */
	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
	int user_limit;		/* failed user passphrase attempts */
	int master_limit;	/* failed master passphrase attempts */
	struct mock_event_store mes;
	struct cxl_memdev_state *mds;
	u8 event_buf[SZ_4K];
	u64 timestamp;		/* last value set via Set Timestamp */
	unsigned long sanitize_timeout;	/* simulated sanitize duration (ms) */
};
163 
164 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
165 {
166 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
167 
168 	if (log_type >= CXL_EVENT_TYPE_MAX)
169 		return NULL;
170 	return &mdata->mes.mock_logs[log_type];
171 }
172 
173 static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
174 {
175 	return log->events[log->cur_idx];
176 }
177 
178 static void event_reset_log(struct mock_event_log *log)
179 {
180 	log->cur_idx = 0;
181 	log->clear_idx = 0;
182 	log->nr_overflow = log->overflow_reset;
183 }
184 
185 /* Handle can never be 0 use 1 based indexing for handle */
186 static u16 event_get_clear_handle(struct mock_event_log *log)
187 {
188 	return log->clear_idx + 1;
189 }
190 
191 /* Handle can never be 0 use 1 based indexing for handle */
192 static __le16 event_get_cur_event_handle(struct mock_event_log *log)
193 {
194 	u16 cur_handle = log->cur_idx + 1;
195 
196 	return cpu_to_le16(cur_handle);
197 }
198 
199 static bool event_log_empty(struct mock_event_log *log)
200 {
201 	return log->cur_idx == log->nr_events;
202 }
203 
204 static void mes_add_event(struct mock_event_store *mes,
205 			  enum cxl_event_log_type log_type,
206 			  struct cxl_event_record_raw *event)
207 {
208 	struct mock_event_log *log;
209 
210 	if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
211 		return;
212 
213 	log = &mes->mock_logs[log_type];
214 
215 	if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
216 		log->nr_overflow++;
217 		log->overflow_reset = log->nr_overflow;
218 		return;
219 	}
220 
221 	log->events[log->nr_events] = event;
222 	log->nr_events++;
223 }
224 
225 static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
226 {
227 	struct cxl_get_event_payload *pl;
228 	struct mock_event_log *log;
229 	u16 nr_overflow;
230 	u8 log_type;
231 	int i;
232 
233 	if (cmd->size_in != sizeof(log_type))
234 		return -EINVAL;
235 
236 	if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT))
237 		return -EINVAL;
238 
239 	log_type = *((u8 *)cmd->payload_in);
240 	if (log_type >= CXL_EVENT_TYPE_MAX)
241 		return -EINVAL;
242 
243 	memset(cmd->payload_out, 0, cmd->size_out);
244 
245 	log = event_find_log(dev, log_type);
246 	if (!log || event_log_empty(log))
247 		return 0;
248 
249 	pl = cmd->payload_out;
250 
251 	for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) {
252 		memcpy(&pl->records[i], event_get_current(log),
253 		       sizeof(pl->records[i]));
254 		pl->records[i].hdr.handle = event_get_cur_event_handle(log);
255 		log->cur_idx++;
256 	}
257 
258 	pl->record_count = cpu_to_le16(i);
259 	if (!event_log_empty(log))
260 		pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
261 
262 	if (log->nr_overflow) {
263 		u64 ns;
264 
265 		pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
266 		pl->overflow_err_count = cpu_to_le16(nr_overflow);
267 		ns = ktime_get_real_ns();
268 		ns -= 5000000000; /* 5s ago */
269 		pl->first_overflow_timestamp = cpu_to_le64(ns);
270 		ns = ktime_get_real_ns();
271 		ns -= 1000000000; /* 1s ago */
272 		pl->last_overflow_timestamp = cpu_to_le64(ns);
273 	}
274 
275 	return 0;
276 }
277 
278 static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
279 {
280 	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
281 	struct mock_event_log *log;
282 	u8 log_type = pl->event_log;
283 	u16 handle;
284 	int nr;
285 
286 	if (log_type >= CXL_EVENT_TYPE_MAX)
287 		return -EINVAL;
288 
289 	log = event_find_log(dev, log_type);
290 	if (!log)
291 		return 0; /* No mock data in this log */
292 
293 	/*
294 	 * This check is technically not invalid per the specification AFAICS.
295 	 * (The host could 'guess' handles and clear them in order).
296 	 * However, this is not good behavior for the host so test it.
297 	 */
298 	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
299 		dev_err(dev,
300 			"Attempting to clear more events than returned!\n");
301 		return -EINVAL;
302 	}
303 
304 	/* Check handle order prior to clearing events */
305 	for (nr = 0, handle = event_get_clear_handle(log);
306 	     nr < pl->nr_recs;
307 	     nr++, handle++) {
308 		if (handle != le16_to_cpu(pl->handles[nr])) {
309 			dev_err(dev, "Clearing events out of order\n");
310 			return -EINVAL;
311 		}
312 	}
313 
314 	if (log->nr_overflow)
315 		log->nr_overflow = 0;
316 
317 	/* Clear events */
318 	log->clear_idx += pl->nr_recs;
319 	return 0;
320 }
321 
322 static void cxl_mock_event_trigger(struct device *dev)
323 {
324 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
325 	struct mock_event_store *mes = &mdata->mes;
326 	int i;
327 
328 	for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
329 		struct mock_event_log *log;
330 
331 		log = event_find_log(dev, i);
332 		if (log)
333 			event_reset_log(log);
334 	}
335 
336 	cxl_mem_get_event_records(mdata->mds, mes->ev_status);
337 }
338 
/* Raw mock record flagged "maintenance needed"; handle filled in on return */
struct cxl_event_record_raw maint_needed = {
	.hdr = {
		.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
		.length = sizeof(struct cxl_event_record_raw),
		.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0xa5b6),
	},
	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
};
350 
/* Raw mock record flagged "hardware replacement needed" */
struct cxl_event_record_raw hardware_replace = {
	.hdr = {
		.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
		.length = sizeof(struct cxl_event_record_raw),
		.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0xb6a5),
	},
	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
};
362 
/* Mock General Media event record (validity flags set at init time) */
struct cxl_event_gen_media gen_media = {
	.hdr = {
		.id = UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
				0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
		.length = sizeof(struct cxl_event_gen_media),
		.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0),
	},
	.phys_addr = cpu_to_le64(0x2000),
	.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
	.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
	.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
	/* .validity_flags = <set below> */
	.channel = 1,
	.rank = 30
};
380 
/* Mock DRAM event record (validity flags set at init time) */
struct cxl_event_dram dram = {
	.hdr = {
		.id = UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
				0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
		.length = sizeof(struct cxl_event_dram),
		.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0),
	},
	.phys_addr = cpu_to_le64(0x8000),
	.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
	.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
	.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
	/* .validity_flags = <set below> */
	.channel = 1,
	.bank_group = 5,
	.bank = 2,
	.column = {0xDE, 0xAD},
};
400 
/* Mock Memory Module event record (temperature change with degraded health) */
struct cxl_event_mem_module mem_module = {
	.hdr = {
		.id = UUID_INIT(0xfe927475, 0xdd59, 0x4339,
				0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
		.length = sizeof(struct cxl_event_mem_module),
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0),
	},
	.event_type = CXL_MMER_TEMP_CHANGE,
	.info = {
		.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
		.media_status = CXL_DHI_MS_ALL_DATA_LOST,
		.add_status = (CXL_DHI_AS_CRITICAL << 2) |
			      (CXL_DHI_AS_WARNING << 4) |
			      (CXL_DHI_AS_WARNING << 5),
		.device_temp = { 0xDE, 0xAD},
		.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
		.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
		.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
	}
};
422 
423 static int mock_set_timestamp(struct cxl_dev_state *cxlds,
424 			      struct cxl_mbox_cmd *cmd)
425 {
426 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
427 	struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
428 
429 	if (cmd->size_in != sizeof(*ts))
430 		return -EINVAL;
431 
432 	if (cmd->size_out != 0)
433 		return -EINVAL;
434 
435 	mdata->timestamp = le64_to_cpu(ts->timestamp);
436 	return 0;
437 }
438 
/*
 * Populate the mock event logs with a fixed set of records. The INFO log
 * holds a few records, the FAIL log is deliberately over-filled so the
 * overflow path is exercised, and the FATAL log holds two records.
 */
static void cxl_mock_add_event_logs(struct mock_event_store *mes)
{
	/* Mark which gen_media fields carry meaningful data */
	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
			   &gen_media.validity_flags);

	/* Mark which dram fields carry meaningful data */
	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
			   &dram.validity_flags);

	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&mem_module);
	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;

	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&mem_module);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	/* Overflow this log */
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;

	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
		      (struct cxl_event_record_raw *)&dram);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
}
484 
485 static int mock_gsl(struct cxl_mbox_cmd *cmd)
486 {
487 	if (cmd->size_out < sizeof(mock_gsl_payload))
488 		return -EINVAL;
489 
490 	memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
491 	cmd->size_out = sizeof(mock_gsl_payload);
492 
493 	return 0;
494 }
495 
496 static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
497 {
498 	struct cxl_mbox_get_log *gl = cmd->payload_in;
499 	u32 offset = le32_to_cpu(gl->offset);
500 	u32 length = le32_to_cpu(gl->length);
501 	uuid_t uuid = DEFINE_CXL_CEL_UUID;
502 	void *data = &mock_cel;
503 
504 	if (cmd->size_in < sizeof(*gl))
505 		return -EINVAL;
506 	if (length > mds->payload_size)
507 		return -EINVAL;
508 	if (offset + length > sizeof(mock_cel))
509 		return -EINVAL;
510 	if (!uuid_equal(&gl->uuid, &uuid))
511 		return -EINVAL;
512 	if (length > cmd->size_out)
513 		return -EINVAL;
514 
515 	memcpy(cmd->payload_out, data + offset, length);
516 
517 	return 0;
518 }
519 
520 static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
521 {
522 	struct cxl_mbox_identify id = {
523 		.fw_revision = { "mock fw v1 " },
524 		.total_capacity =
525 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
526 		.volatile_capacity =
527 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
528 	};
529 
530 	if (cmd->size_out < sizeof(id))
531 		return -EINVAL;
532 
533 	memcpy(cmd->payload_out, &id, sizeof(id));
534 
535 	return 0;
536 }
537 
538 static int mock_id(struct cxl_mbox_cmd *cmd)
539 {
540 	struct cxl_mbox_identify id = {
541 		.fw_revision = { "mock fw v1 " },
542 		.lsa_size = cpu_to_le32(LSA_SIZE),
543 		.partition_align =
544 			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
545 		.total_capacity =
546 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
547 		.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
548 	};
549 
550 	put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
551 
552 	if (cmd->size_out < sizeof(id))
553 		return -EINVAL;
554 
555 	memcpy(cmd->payload_out, &id, sizeof(id));
556 
557 	return 0;
558 }
559 
560 static int mock_partition_info(struct cxl_mbox_cmd *cmd)
561 {
562 	struct cxl_mbox_get_partition_info pi = {
563 		.active_volatile_cap =
564 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
565 		.active_persistent_cap =
566 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
567 	};
568 
569 	if (cmd->size_out < sizeof(pi))
570 		return -EINVAL;
571 
572 	memcpy(cmd->payload_out, &pi, sizeof(pi));
573 
574 	return 0;
575 }
576 
/*
 * Delayed-work handler simulating sanitize completion: notifies any sysfs
 * waiter and clears the active flag under the mbox mutex.
 * NOTE(review): not declared static — presumably referenced outside this
 * translation unit; confirm, otherwise it should be static.
 */
void cxl_mockmem_sanitize_work(struct work_struct *work)
{
	struct cxl_memdev_state *mds =
		container_of(work, typeof(*mds), security.poll_dwork.work);

	mutex_lock(&mds->mbox_mutex);
	/* Wake any poller waiting on the sanitize sysfs node */
	if (mds->security.sanitize_node)
		sysfs_notify_dirent(mds->security.sanitize_node);
	mds->security.sanitize_active = false;
	mutex_unlock(&mds->mbox_mutex);

	dev_dbg(mds->cxlds.dev, "sanitize complete\n");
}
590 
591 static int mock_sanitize(struct cxl_mockmem_data *mdata,
592 			 struct cxl_mbox_cmd *cmd)
593 {
594 	struct cxl_memdev_state *mds = mdata->mds;
595 	int rc = 0;
596 
597 	if (cmd->size_in != 0)
598 		return -EINVAL;
599 
600 	if (cmd->size_out != 0)
601 		return -EINVAL;
602 
603 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
604 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
605 		return -ENXIO;
606 	}
607 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
608 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
609 		return -ENXIO;
610 	}
611 
612 	mutex_lock(&mds->mbox_mutex);
613 	if (schedule_delayed_work(&mds->security.poll_dwork,
614 				  msecs_to_jiffies(mdata->sanitize_timeout))) {
615 		mds->security.sanitize_active = true;
616 		dev_dbg(mds->cxlds.dev, "sanitize issued\n");
617 	} else
618 		rc = -EBUSY;
619 	mutex_unlock(&mds->mbox_mutex);
620 
621 	return rc;
622 }
623 
624 static int mock_secure_erase(struct cxl_mockmem_data *mdata,
625 			     struct cxl_mbox_cmd *cmd)
626 {
627 	if (cmd->size_in != 0)
628 		return -EINVAL;
629 
630 	if (cmd->size_out != 0)
631 		return -EINVAL;
632 
633 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
634 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
635 		return -ENXIO;
636 	}
637 
638 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
639 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
640 		return -ENXIO;
641 	}
642 
643 	return 0;
644 }
645 
646 static int mock_get_security_state(struct cxl_mockmem_data *mdata,
647 				   struct cxl_mbox_cmd *cmd)
648 {
649 	if (cmd->size_in)
650 		return -EINVAL;
651 
652 	if (cmd->size_out != sizeof(u32))
653 		return -EINVAL;
654 
655 	memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
656 
657 	return 0;
658 }
659 
660 static void master_plimit_check(struct cxl_mockmem_data *mdata)
661 {
662 	if (mdata->master_limit == PASS_TRY_LIMIT)
663 		return;
664 	mdata->master_limit++;
665 	if (mdata->master_limit == PASS_TRY_LIMIT)
666 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
667 }
668 
669 static void user_plimit_check(struct cxl_mockmem_data *mdata)
670 {
671 	if (mdata->user_limit == PASS_TRY_LIMIT)
672 		return;
673 	mdata->user_limit++;
674 	if (mdata->user_limit == PASS_TRY_LIMIT)
675 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
676 }
677 
/*
 * Mock Set Passphrase: validates the old passphrase (tracking failed
 * attempts against PASS_TRY_LIMIT) and installs the new one, updating the
 * corresponding PASS_SET state bit. Returns 0 on success, -EINVAL on
 * malformed input, -ENXIO (with cmd->return_code set) on security errors.
 */
static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
			       struct cxl_mbox_cmd *cmd)
{
	struct cxl_set_pass *set_pass;

	if (cmd->size_in != sizeof(*set_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	/* No passphrase changes allowed once security is frozen */
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	set_pass = cmd->payload_in;
	switch (set_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/*
		 * CXL spec rev3.0 8.2.9.8.6.2, The master passphrase shall only be set in
		 * the security disabled state when the user passphrase is not set.
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/* Old passphrase must match; failures count toward the limit */
		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/* Old passphrase must match; failures count toward the limit */
		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
	}
	return -EINVAL;
}
737 
738 static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
739 				   struct cxl_mbox_cmd *cmd)
740 {
741 	struct cxl_disable_pass *dis_pass;
742 
743 	if (cmd->size_in != sizeof(*dis_pass))
744 		return -EINVAL;
745 
746 	if (cmd->size_out != 0)
747 		return -EINVAL;
748 
749 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
750 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
751 		return -ENXIO;
752 	}
753 
754 	dis_pass = cmd->payload_in;
755 	switch (dis_pass->type) {
756 	case CXL_PMEM_SEC_PASS_MASTER:
757 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
758 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
759 			return -ENXIO;
760 		}
761 
762 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
763 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
764 			return -ENXIO;
765 		}
766 
767 		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
768 			master_plimit_check(mdata);
769 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
770 			return -ENXIO;
771 		}
772 
773 		mdata->master_limit = 0;
774 		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
775 		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
776 		return 0;
777 
778 	case CXL_PMEM_SEC_PASS_USER:
779 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
780 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
781 			return -ENXIO;
782 		}
783 
784 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
785 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
786 			return -ENXIO;
787 		}
788 
789 		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
790 			user_plimit_check(mdata);
791 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
792 			return -ENXIO;
793 		}
794 
795 		mdata->user_limit = 0;
796 		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
797 		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
798 					   CXL_PMEM_SEC_STATE_LOCKED);
799 		return 0;
800 
801 	default:
802 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
803 		return -EINVAL;
804 	}
805 
806 	return 0;
807 }
808 
809 static int mock_freeze_security(struct cxl_mockmem_data *mdata,
810 				struct cxl_mbox_cmd *cmd)
811 {
812 	if (cmd->size_in != 0)
813 		return -EINVAL;
814 
815 	if (cmd->size_out != 0)
816 		return -EINVAL;
817 
818 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
819 		return 0;
820 
821 	mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
822 	return 0;
823 }
824 
825 static int mock_unlock_security(struct cxl_mockmem_data *mdata,
826 				struct cxl_mbox_cmd *cmd)
827 {
828 	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
829 		return -EINVAL;
830 
831 	if (cmd->size_out != 0)
832 		return -EINVAL;
833 
834 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
835 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
836 		return -ENXIO;
837 	}
838 
839 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
840 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
841 		return -ENXIO;
842 	}
843 
844 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
845 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
846 		return -ENXIO;
847 	}
848 
849 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
850 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
851 		return -ENXIO;
852 	}
853 
854 	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
855 		if (++mdata->user_limit == PASS_TRY_LIMIT)
856 			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
857 		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
858 		return -ENXIO;
859 	}
860 
861 	mdata->user_limit = 0;
862 	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
863 	return 0;
864 }
865 
/*
 * Mock Passphrase Secure Erase: validates the supplied passphrase against
 * the stored master or user passphrase (tracking failed attempts), then
 * simulates the erase by clearing the user passphrase / lock state. See
 * the inline comments for the spec-ambiguous cases this mock interprets.
 */
static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
					struct cxl_mbox_cmd *cmd)
{
	struct cxl_pass_erase *erase;

	if (cmd->size_in != sizeof(*erase))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	erase = cmd->payload_in;
	/* No security operations allowed once frozen */
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	/* Attempt limit exhausted for the passphrase type being used */
	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_USER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	switch (erase->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a master passphrase is passed in while the master
		 * passphrase is not set and user passphrase is not set. The
		 * code will take the assumption that it will behave the same
		 * as a CXL secure erase command without passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
			if (memcmp(mdata->master_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				master_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->master_limit = 0;
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
		} else {
			/*
			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
			 * When master passphrase is disabled, the device shall
			 * return Invalid Input for the Passphrase Secure Erase
			 * command with master passphrase.
			 */
			return -EINVAL;
		}
		/* Scramble encryption keys so that data is effectively erased */
		break;
	case CXL_PMEM_SEC_PASS_USER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a user passphrase is passed in while the user
		 * passphrase is not set. The code will take the assumption that
		 * it will behave the same as a CXL secure erase command without
		 * passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			if (memcmp(mdata->user_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				user_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		}

		/*
		 * CXL rev3 Table 8-118
		 * If user passphrase is not set or supported by device, current
		 * passphrase value is ignored. Will make the assumption that
		 * the operation will proceed as secure erase w/o passphrase
		 * since spec is not explicit.
		 */

		/* Scramble encryption keys so that data is effectively erased */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
963 
964 static int mock_get_lsa(struct cxl_mockmem_data *mdata,
965 			struct cxl_mbox_cmd *cmd)
966 {
967 	struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
968 	void *lsa = mdata->lsa;
969 	u32 offset, length;
970 
971 	if (sizeof(*get_lsa) > cmd->size_in)
972 		return -EINVAL;
973 	offset = le32_to_cpu(get_lsa->offset);
974 	length = le32_to_cpu(get_lsa->length);
975 	if (offset + length > LSA_SIZE)
976 		return -EINVAL;
977 	if (length > cmd->size_out)
978 		return -EINVAL;
979 
980 	memcpy(cmd->payload_out, lsa + offset, length);
981 	return 0;
982 }
983 
984 static int mock_set_lsa(struct cxl_mockmem_data *mdata,
985 			struct cxl_mbox_cmd *cmd)
986 {
987 	struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
988 	void *lsa = mdata->lsa;
989 	u32 offset, length;
990 
991 	if (sizeof(*set_lsa) > cmd->size_in)
992 		return -EINVAL;
993 	offset = le32_to_cpu(set_lsa->offset);
994 	length = cmd->size_in - sizeof(*set_lsa);
995 	if (offset + length > LSA_SIZE)
996 		return -EINVAL;
997 
998 	memcpy(lsa + offset, &set_lsa->data[0], length);
999 	return 0;
1000 }
1001 
/* Mock Get Health Info: return a fixed, deliberately-degraded payload. */
static int mock_health_info(struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_health_info health_info = {
		/* set flags for maint needed, perf degraded, hw replacement */
		.health_status = 0x7,
		/* set media status to "All Data Lost" */
		.media_status = 0x3,
		/*
		 * set ext_status flags for:
		 *  ext_life_used: normal,
		 *  ext_temperature: critical,
		 *  ext_corrected_volatile: warning,
		 *  ext_corrected_persistent: normal,
		 */
		.ext_status = 0x18,
		.life_used = 15,
		.temperature = cpu_to_le16(25),
		.dirty_shutdowns = cpu_to_le32(10),
		.volatile_errors = cpu_to_le32(20),
		.pmem_errors = cpu_to_le32(30),
	};

	if (cmd->size_out < sizeof(health_info))
		return -EINVAL;

	memcpy(cmd->payload_out, &health_info, sizeof(health_info));
	return 0;
}
1030 
/* Global pool of injected poison records, shared across all mock devices */
static struct mock_poison {
	struct cxl_dev_state *cxlds;	/* owning device; NULL means free slot */
	u64 dpa;			/* poisoned device physical address */
} mock_poison_list[MOCK_INJECT_TEST_MAX];
1035 
/*
 * Build a Get Poison List output payload containing this device's injected
 * records within [offset, offset + length - 1], capped at
 * poison_inject_dev_max entries. Caller frees. Returns NULL on allocation
 * failure.
 */
static struct cxl_mbox_poison_out *
cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
{
	struct cxl_mbox_poison_out *po;
	int nr_records = 0;
	u64 dpa;

	po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
	if (!po)
		return NULL;

	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
		if (mock_poison_list[i].cxlds != cxlds)
			continue;
		/*
		 * Range filter. NOTE(review): "offset + length - 1" wraps if
		 * length == 0 — presumably callers never pass a zero-length
		 * window; confirm against the mailbox input validation.
		 */
		if (mock_poison_list[i].dpa < offset ||
		    mock_poison_list[i].dpa > offset + length - 1)
			continue;

		/* Tag the address with the "injected" source encoding */
		dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
		po->record[nr_records].address = cpu_to_le64(dpa);
		po->record[nr_records].length = cpu_to_le32(1);
		nr_records++;
		if (nr_records == poison_inject_dev_max)
			break;
	}

	/* Always return count, even when zero */
	po->count = cpu_to_le16(nr_records);

	return po;
}
1067 
1068 static int mock_get_poison(struct cxl_dev_state *cxlds,
1069 			   struct cxl_mbox_cmd *cmd)
1070 {
1071 	struct cxl_mbox_poison_in *pi = cmd->payload_in;
1072 	struct cxl_mbox_poison_out *po;
1073 	u64 offset = le64_to_cpu(pi->offset);
1074 	u64 length = le64_to_cpu(pi->length);
1075 	int nr_records;
1076 
1077 	po = cxl_get_injected_po(cxlds, offset, length);
1078 	if (!po)
1079 		return -ENOMEM;
1080 	nr_records = le16_to_cpu(po->count);
1081 	memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
1082 	cmd->size_out = struct_size(po, record, nr_records);
1083 	kfree(po);
1084 
1085 	return 0;
1086 }
1087 
1088 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
1089 {
1090 	int count = 0;
1091 
1092 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1093 		if (mock_poison_list[i].cxlds == cxlds)
1094 			count++;
1095 	}
1096 	return (count >= poison_inject_dev_max);
1097 }
1098 
1099 static bool mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
1100 {
1101 	if (mock_poison_dev_max_injected(cxlds)) {
1102 		dev_dbg(cxlds->dev,
1103 			"Device poison injection limit has been reached: %d\n",
1104 			MOCK_INJECT_DEV_MAX);
1105 		return false;
1106 	}
1107 
1108 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1109 		if (!mock_poison_list[i].cxlds) {
1110 			mock_poison_list[i].cxlds = cxlds;
1111 			mock_poison_list[i].dpa = dpa;
1112 			return true;
1113 		}
1114 	}
1115 	dev_dbg(cxlds->dev,
1116 		"Mock test poison injection limit has been reached: %d\n",
1117 		MOCK_INJECT_TEST_MAX);
1118 
1119 	return false;
1120 }
1121 
1122 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
1123 {
1124 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1125 		if (mock_poison_list[i].cxlds == cxlds &&
1126 		    mock_poison_list[i].dpa == dpa)
1127 			return true;
1128 	}
1129 	return false;
1130 }
1131 
1132 static int mock_inject_poison(struct cxl_dev_state *cxlds,
1133 			      struct cxl_mbox_cmd *cmd)
1134 {
1135 	struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1136 	u64 dpa = le64_to_cpu(pi->address);
1137 
1138 	if (mock_poison_found(cxlds, dpa)) {
1139 		/* Not an error to inject poison if already poisoned */
1140 		dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1141 		return 0;
1142 	}
1143 	if (!mock_poison_add(cxlds, dpa))
1144 		return -ENXIO;
1145 
1146 	return 0;
1147 }
1148 
1149 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
1150 {
1151 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1152 		if (mock_poison_list[i].cxlds == cxlds &&
1153 		    mock_poison_list[i].dpa == dpa) {
1154 			mock_poison_list[i].cxlds = NULL;
1155 			return true;
1156 		}
1157 	}
1158 	return false;
1159 }
1160 
1161 static int mock_clear_poison(struct cxl_dev_state *cxlds,
1162 			     struct cxl_mbox_cmd *cmd)
1163 {
1164 	struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1165 	u64 dpa = le64_to_cpu(pi->address);
1166 
1167 	/*
1168 	 * A real CXL device will write pi->write_data to the address
1169 	 * being cleared. In this mock, just delete this address from
1170 	 * the mock poison list.
1171 	 */
1172 	if (!mock_poison_del(cxlds, dpa))
1173 		dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1174 
1175 	return 0;
1176 }
1177 
1178 static bool mock_poison_list_empty(void)
1179 {
1180 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1181 		if (mock_poison_list[i].cxlds)
1182 			return false;
1183 	}
1184 	return true;
1185 }
1186 
1187 static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
1188 {
1189 	return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
1190 }
1191 
1192 static ssize_t poison_inject_max_store(struct device_driver *drv,
1193 				       const char *buf, size_t len)
1194 {
1195 	int val;
1196 
1197 	if (kstrtoint(buf, 0, &val) < 0)
1198 		return -EINVAL;
1199 
1200 	if (!mock_poison_list_empty())
1201 		return -EBUSY;
1202 
1203 	if (val <= MOCK_INJECT_TEST_MAX)
1204 		poison_inject_dev_max = val;
1205 	else
1206 		return -EINVAL;
1207 
1208 	return len;
1209 }
1210 
/* Module-wide (driver-level) sysfs knob for the injection limit */
static DRIVER_ATTR_RW(poison_inject_max);

static struct attribute *cxl_mock_mem_core_attrs[] = {
	&driver_attr_poison_inject_max.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem_core);
1218 
1219 static int mock_fw_info(struct cxl_mockmem_data *mdata,
1220 			struct cxl_mbox_cmd *cmd)
1221 {
1222 	struct cxl_mbox_get_fw_info fw_info = {
1223 		.num_slots = FW_SLOTS,
1224 		.slot_info = (mdata->fw_slot & 0x7) |
1225 			     ((mdata->fw_staged & 0x7) << 3),
1226 		.activation_cap = 0,
1227 	};
1228 
1229 	strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
1230 	strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
1231 	strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
1232 	strcpy(fw_info.slot_4_revision, "");
1233 
1234 	if (cmd->size_out < sizeof(fw_info))
1235 		return -EINVAL;
1236 
1237 	memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
1238 	return 0;
1239 }
1240 
1241 static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
1242 			    struct cxl_mbox_cmd *cmd)
1243 {
1244 	struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
1245 	void *fw = mdata->fw;
1246 	size_t offset, length;
1247 
1248 	offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
1249 	length = cmd->size_in - sizeof(*transfer);
1250 	if (offset + length > FW_SIZE)
1251 		return -EINVAL;
1252 
1253 	switch (transfer->action) {
1254 	case CXL_FW_TRANSFER_ACTION_FULL:
1255 		if (offset != 0)
1256 			return -EINVAL;
1257 		fallthrough;
1258 	case CXL_FW_TRANSFER_ACTION_END:
1259 		if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
1260 			return -EINVAL;
1261 		mdata->fw_size = offset + length;
1262 		break;
1263 	case CXL_FW_TRANSFER_ACTION_INITIATE:
1264 	case CXL_FW_TRANSFER_ACTION_CONTINUE:
1265 		break;
1266 	case CXL_FW_TRANSFER_ACTION_ABORT:
1267 		return 0;
1268 	default:
1269 		return -EINVAL;
1270 	}
1271 
1272 	memcpy(fw + offset, transfer->data, length);
1273 	usleep_range(1500, 2000);
1274 	return 0;
1275 }
1276 
1277 static int mock_activate_fw(struct cxl_mockmem_data *mdata,
1278 			    struct cxl_mbox_cmd *cmd)
1279 {
1280 	struct cxl_mbox_activate_fw *activate = cmd->payload_in;
1281 
1282 	if (activate->slot == 0 || activate->slot > FW_SLOTS)
1283 		return -EINVAL;
1284 
1285 	switch (activate->action) {
1286 	case CXL_FW_ACTIVATE_ONLINE:
1287 		mdata->fw_slot = activate->slot;
1288 		mdata->fw_staged = 0;
1289 		return 0;
1290 	case CXL_FW_ACTIVATE_OFFLINE:
1291 		mdata->fw_staged = activate->slot;
1292 		return 0;
1293 	}
1294 
1295 	return -EINVAL;
1296 }
1297 
/*
 * Mock mailbox transport: dispatch an incoming mailbox command to the
 * matching mock_* handler by opcode. Unhandled opcodes fall through the
 * switch and return -EIO (rc's initial value).
 */
static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
			      struct cxl_mbox_cmd *cmd)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct device *dev = cxlds->dev;
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	int rc = -EIO;

	switch (cmd->opcode) {
	case CXL_MBOX_OP_SET_TIMESTAMP:
		rc = mock_set_timestamp(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
		rc = mock_gsl(cmd);
		break;
	case CXL_MBOX_OP_GET_LOG:
		rc = mock_get_log(mds, cmd);
		break;
	case CXL_MBOX_OP_IDENTIFY:
		/* RCD devices report a different identity payload */
		if (cxlds->rcd)
			rc = mock_rcd_id(cmd);
		else
			rc = mock_id(cmd);
		break;
	case CXL_MBOX_OP_GET_LSA:
		rc = mock_get_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_PARTITION_INFO:
		rc = mock_partition_info(cmd);
		break;
	case CXL_MBOX_OP_GET_EVENT_RECORD:
		rc = mock_get_event(dev, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
		rc = mock_clear_event(dev, cmd);
		break;
	case CXL_MBOX_OP_SET_LSA:
		rc = mock_set_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_HEALTH_INFO:
		rc = mock_health_info(cmd);
		break;
	case CXL_MBOX_OP_SANITIZE:
		rc = mock_sanitize(mdata, cmd);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		rc = mock_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		rc = mock_get_security_state(mdata, cmd);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		rc = mock_set_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		rc = mock_disable_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		rc = mock_freeze_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_UNLOCK:
		rc = mock_unlock_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		rc = mock_passphrase_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_POISON:
		rc = mock_get_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		rc = mock_inject_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		rc = mock_clear_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_FW_INFO:
		rc = mock_fw_info(mdata, cmd);
		break;
	case CXL_MBOX_OP_TRANSFER_FW:
		rc = mock_transfer_fw(mdata, cmd);
		break;
	case CXL_MBOX_OP_ACTIVATE_FW:
		rc = mock_activate_fw(mdata, cmd);
		break;
	default:
		break;
	}

	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
		cmd->size_in, cmd->size_out, rc);

	return rc;
}
1391 
/* devm action callback: free the vmalloc()'d label storage area */
static void label_area_release(void *mem)
{
	vfree(mem);
}
1396 
/* devm action callback: free the vmalloc()'d firmware staging buffer */
static void fw_buf_release(void *mem)
{
	vfree(mem);
}
1401 
1402 static bool is_rcd(struct platform_device *pdev)
1403 {
1404 	const struct platform_device_id *id = platform_get_device_id(pdev);
1405 
1406 	return !!id->driver_data;
1407 }
1408 
/* Writing anything to this attribute fires the mock event logs */
static ssize_t event_trigger_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	cxl_mock_event_trigger(dev);
	return count;
}
static DEVICE_ATTR_WO(event_trigger);
1417 
/*
 * Instantiate a mock CXL memory device: allocate backing stores for the
 * label area and firmware image, wire up the mock mailbox transport,
 * run the standard CXL state-initialization sequence, and register the
 * resulting memdev with the CXL core.
 */
static int cxl_mock_mem_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cxl_memdev *cxlmd;
	struct cxl_memdev_state *mds;
	struct cxl_dev_state *cxlds;
	struct cxl_mockmem_data *mdata;
	int rc;

	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;
	dev_set_drvdata(dev, mdata);

	/* Backing buffers; freed by the devm actions registered below */
	mdata->lsa = vmalloc(LSA_SIZE);
	if (!mdata->lsa)
		return -ENOMEM;
	mdata->fw = vmalloc(FW_SIZE);
	if (!mdata->fw)
		return -ENOMEM;
	/* Slot 2 is the active firmware slot at startup */
	mdata->fw_slot = 2;

	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
	if (rc)
		return rc;

	mds = cxl_memdev_state_create(dev);
	if (IS_ERR(mds))
		return PTR_ERR(mds);

	mdata->mds = mds;
	/* Route all mailbox traffic through the mock dispatcher */
	mds->mbox_send = cxl_mock_mbox_send;
	mds->payload_size = SZ_4K;
	mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);

	cxlds = &mds->cxlds;
	cxlds->serial = pdev->id;
	/* "cxl_rcd" platform ids carry non-zero driver_data */
	if (is_rcd(pdev))
		cxlds->rcd = true;

	rc = cxl_enumerate_cmds(mds);
	if (rc)
		return rc;

	rc = cxl_poison_state_init(mds);
	if (rc)
		return rc;

	rc = cxl_set_timestamp(mds);
	if (rc)
		return rc;

	cxlds->media_ready = true;
	rc = cxl_dev_state_identify(mds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(mds);
	if (rc)
		return rc;

	cxl_mock_add_event_logs(&mdata->mes);

	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
	if (rc)
		return rc;

	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
	if (rc)
		return rc;

	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);

	return 0;
}
1502 
1503 static ssize_t security_lock_show(struct device *dev,
1504 				  struct device_attribute *attr, char *buf)
1505 {
1506 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1507 
1508 	return sysfs_emit(buf, "%u\n",
1509 			  !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1510 }
1511 
1512 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1513 				   const char *buf, size_t count)
1514 {
1515 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1516 	u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1517 		   CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1518 	int val;
1519 
1520 	if (kstrtoint(buf, 0, &val) < 0)
1521 		return -EINVAL;
1522 
1523 	if (val == 1) {
1524 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1525 			return -ENXIO;
1526 		mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1527 		mdata->security_state &= ~mask;
1528 	} else {
1529 		return -EINVAL;
1530 	}
1531 	return count;
1532 }
1533 
1534 static DEVICE_ATTR_RW(security_lock);
1535 
1536 static ssize_t fw_buf_checksum_show(struct device *dev,
1537 				    struct device_attribute *attr, char *buf)
1538 {
1539 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1540 	u8 hash[SHA256_DIGEST_SIZE];
1541 	unsigned char *hstr, *hptr;
1542 	struct sha256_state sctx;
1543 	ssize_t written = 0;
1544 	int i;
1545 
1546 	sha256_init(&sctx);
1547 	sha256_update(&sctx, mdata->fw, mdata->fw_size);
1548 	sha256_final(&sctx, hash);
1549 
1550 	hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
1551 	if (!hstr)
1552 		return -ENOMEM;
1553 
1554 	hptr = hstr;
1555 	for (i = 0; i < SHA256_DIGEST_SIZE; i++)
1556 		hptr += sprintf(hptr, "%02x", hash[i]);
1557 
1558 	written = sysfs_emit(buf, "%s\n", hstr);
1559 
1560 	kfree(hstr);
1561 	return written;
1562 }
1563 
1564 static DEVICE_ATTR_RO(fw_buf_checksum);
1565 
1566 static ssize_t sanitize_timeout_show(struct device *dev,
1567 				  struct device_attribute *attr, char *buf)
1568 {
1569 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1570 
1571 	return sysfs_emit(buf, "%lu\n", mdata->sanitize_timeout);
1572 }
1573 
1574 static ssize_t sanitize_timeout_store(struct device *dev,
1575 				      struct device_attribute *attr,
1576 				      const char *buf, size_t count)
1577 {
1578 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1579 	unsigned long val;
1580 	int rc;
1581 
1582 	rc = kstrtoul(buf, 0, &val);
1583 	if (rc)
1584 		return rc;
1585 
1586 	mdata->sanitize_timeout = val;
1587 
1588 	return count;
1589 }
1590 
1591 static DEVICE_ATTR_RW(sanitize_timeout);
1592 
/* Per-device sysfs attributes exposed by each mock memdev */
static struct attribute *cxl_mock_mem_attrs[] = {
	&dev_attr_security_lock.attr,
	&dev_attr_event_trigger.attr,
	&dev_attr_fw_buf_checksum.attr,
	&dev_attr_sanitize_timeout.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem);
1601 
/*
 * The trailing positional initializer lands in .driver_data and doubles
 * as the RCD flag consumed by is_rcd(): 0 for a plain CXL memory
 * expander, 1 for a Restricted CXL Device.
 */
static const struct platform_device_id cxl_mock_mem_ids[] = {
	{ .name = "cxl_mem", 0 },
	{ .name = "cxl_rcd", 1 },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1608 
/*
 * Platform driver binding both mock device flavors; dev_groups carries
 * the per-device attributes, groups the module-wide ones.
 */
static struct platform_driver cxl_mock_mem_driver = {
	.probe = cxl_mock_mem_probe,
	.id_table = cxl_mock_mem_ids,
	.driver = {
		.name = KBUILD_MODNAME,
		.dev_groups = cxl_mock_mem_groups,
		.groups = cxl_mock_mem_core_groups,
	},
};
1618 
1619 module_platform_driver(cxl_mock_mem_driver);
1620 MODULE_LICENSE("GPL v2");
1621 MODULE_IMPORT_NS(CXL);
1622