xref: /linux/tools/testing/cxl/test/mem.c (revision 5797d10ea4fade7df0f920c368cad07f705afc63)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/vmalloc.h>
7 #include <linux/module.h>
8 #include <linux/delay.h>
9 #include <linux/sizes.h>
10 #include <linux/bits.h>
11 #include <cxl/mailbox.h>
12 #include <linux/unaligned.h>
13 #include <crypto/sha2.h>
14 #include <cxlmem.h>
15 
16 #include "trace.h"
17 
18 #define LSA_SIZE SZ_128K
19 #define FW_SIZE SZ_64M
20 #define FW_SLOTS 3
21 #define DEV_SIZE SZ_2G
22 #define EFFECT(x) (1U << x)
23 
24 #define MOCK_INJECT_DEV_MAX 8
25 #define MOCK_INJECT_TEST_MAX 128
26 
27 static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
28 
29 enum cxl_command_effects {
30 	CONF_CHANGE_COLD_RESET = 0,
31 	CONF_CHANGE_IMMEDIATE,
32 	DATA_CHANGE_IMMEDIATE,
33 	POLICY_CHANGE_IMMEDIATE,
34 	LOG_CHANGE_IMMEDIATE,
35 	SECURITY_CHANGE_IMMEDIATE,
36 	BACKGROUND_OP,
37 	SECONDARY_MBOX_SUPPORTED,
38 };
39 
40 #define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
41 
42 static struct cxl_cel_entry mock_cel[] = {
43 	{
44 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
45 		.effect = CXL_CMD_EFFECT_NONE,
46 	},
47 	{
48 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_FEATURES),
49 		.effect = CXL_CMD_EFFECT_NONE,
50 	},
51 	{
52 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FEATURE),
53 		.effect = CXL_CMD_EFFECT_NONE,
54 	},
55 	{
56 		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_FEATURE),
57 		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE)),
58 	},
59 	{
60 		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
61 		.effect = CXL_CMD_EFFECT_NONE,
62 	},
63 	{
64 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
65 		.effect = CXL_CMD_EFFECT_NONE,
66 	},
67 	{
68 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
69 		.effect = CXL_CMD_EFFECT_NONE,
70 	},
71 	{
72 		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
73 		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
74 				      EFFECT(DATA_CHANGE_IMMEDIATE)),
75 	},
76 	{
77 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
78 		.effect = CXL_CMD_EFFECT_NONE,
79 	},
80 	{
81 		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_SHUTDOWN_STATE),
82 		.effect = POLICY_CHANGE_IMMEDIATE,
83 	},
84 	{
85 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
86 		.effect = CXL_CMD_EFFECT_NONE,
87 	},
88 	{
89 		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
90 		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
91 	},
92 	{
93 		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
94 		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
95 	},
96 	{
97 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
98 		.effect = CXL_CMD_EFFECT_NONE,
99 	},
100 	{
101 		.opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
102 		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
103 				      EFFECT(BACKGROUND_OP)),
104 	},
105 	{
106 		.opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
107 		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
108 				      EFFECT(CONF_CHANGE_IMMEDIATE)),
109 	},
110 	{
111 		.opcode = cpu_to_le16(CXL_MBOX_OP_SANITIZE),
112 		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE) |
113 				      EFFECT(SECURITY_CHANGE_IMMEDIATE) |
114 				      EFFECT(BACKGROUND_OP)),
115 	},
116 };
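/*
 * How the CEL effects masks above are encoded: EFFECT(x) sets bit 'x' from
 * enum cxl_command_effects, and the result is stored little-endian.  For
 * example, the Set LSA entry combines
 *
 *	EFFECT(CONF_CHANGE_IMMEDIATE) | EFFECT(DATA_CHANGE_IMMEDIATE)
 *	  = (1U << 1) | (1U << 2) = 0x6
 *
 * so its .effect field is cpu_to_le16(0x6).
 */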
117 
118 /* See CXL 2.0 Table 181 Get Health Info Output Payload */
119 struct cxl_mbox_health_info {
120 	u8 health_status;
121 	u8 media_status;
122 	u8 ext_status;
123 	u8 life_used;
124 	__le16 temperature;
125 	__le32 dirty_shutdowns;
126 	__le32 volatile_errors;
127 	__le32 pmem_errors;
128 } __packed;
129 
130 static struct {
131 	struct cxl_mbox_get_supported_logs gsl;
132 	struct cxl_gsl_entry entry;
133 } mock_gsl_payload = {
134 	.gsl = {
135 		.entries = cpu_to_le16(1),
136 	},
137 	.entry = {
138 		.uuid = DEFINE_CXL_CEL_UUID,
139 		.size = cpu_to_le32(sizeof(mock_cel)),
140 	},
141 };
142 
143 #define PASS_TRY_LIMIT 3
144 
145 #define CXL_TEST_EVENT_CNT_MAX 15
146 
147 /* Maximum number of events to return at a time for the simulation */
148 #define CXL_TEST_EVENT_RET_MAX 4
149 
150 struct mock_event_log {
151 	u16 clear_idx;
152 	u16 cur_idx;
153 	u16 nr_events;
154 	u16 nr_overflow;
155 	u16 overflow_reset;
156 	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
157 };
158 
159 struct mock_event_store {
160 	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
161 	u32 ev_status;
162 };
163 
164 struct vendor_test_feat {
165 	__le32 data;
166 } __packed;
167 
168 struct cxl_mockmem_data {
169 	void *lsa;
170 	void *fw;
171 	int fw_slot;
172 	int fw_staged;
173 	size_t fw_size;
174 	u32 security_state;
175 	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
176 	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
177 	int user_limit;
178 	int master_limit;
179 	struct mock_event_store mes;
180 	struct cxl_memdev_state *mds;
181 	u8 event_buf[SZ_4K];
182 	u64 timestamp;
183 	unsigned long sanitize_timeout;
184 	struct vendor_test_feat test_feat;
185 	u8 shutdown_state;
186 };
187 
188 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
189 {
190 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
191 
192 	if (log_type >= CXL_EVENT_TYPE_MAX)
193 		return NULL;
194 	return &mdata->mes.mock_logs[log_type];
195 }
196 
197 static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
198 {
199 	return log->events[log->cur_idx];
200 }
201 
202 static void event_reset_log(struct mock_event_log *log)
203 {
204 	log->cur_idx = 0;
205 	log->clear_idx = 0;
206 	log->nr_overflow = log->overflow_reset;
207 }
208 
209 /* Handles can never be 0; use 1-based indexing for handles */
210 static u16 event_get_clear_handle(struct mock_event_log *log)
211 {
212 	return log->clear_idx + 1;
213 }
214 
215 /* Handles can never be 0; use 1-based indexing for handles */
216 static __le16 event_get_cur_event_handle(struct mock_event_log *log)
217 {
218 	u16 cur_handle = log->cur_idx + 1;
219 
220 	return cpu_to_le16(cur_handle);
221 }
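/*
 * Worked example of the 1-based handle scheme: with cur_idx == 2 and
 * clear_idx == 0, the next record returned by Get Event Records carries
 * handle 3, while handle 1 is the next one the host is expected to clear.
 */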
222 
223 static bool event_log_empty(struct mock_event_log *log)
224 {
225 	return log->cur_idx == log->nr_events;
226 }
227 
228 static void mes_add_event(struct mock_event_store *mes,
229 			  enum cxl_event_log_type log_type,
230 			  struct cxl_event_record_raw *event)
231 {
232 	struct mock_event_log *log;
233 
234 	if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
235 		return;
236 
237 	log = &mes->mock_logs[log_type];
238 
239 	if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
240 		log->nr_overflow++;
241 		log->overflow_reset = log->nr_overflow;
242 		return;
243 	}
244 
245 	log->events[log->nr_events] = event;
246 	log->nr_events++;
247 }
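/*
 * Once a log holds CXL_TEST_EVENT_CNT_MAX events, further additions are
 * dropped and only counted in nr_overflow.  overflow_reset snapshots that
 * count so event_reset_log() can replay the same overflow state when the
 * logs are re-armed via the event_trigger attribute.
 */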
248 
249 /*
250  * Vary the number of events returned to simulate events occurring while the
251  * logs are being read.
252  */
253 static atomic_t event_counter = ATOMIC_INIT(0);
254 
255 static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
256 {
257 	struct cxl_get_event_payload *pl;
258 	struct mock_event_log *log;
259 	int ret_limit;
260 	u8 log_type;
261 	int i;
262 
263 	if (cmd->size_in != sizeof(log_type))
264 		return -EINVAL;
265 
266 	/* Vary return limit from 1 to CXL_TEST_EVENT_RET_MAX */
267 	ret_limit = (atomic_inc_return(&event_counter) % CXL_TEST_EVENT_RET_MAX) + 1;
268 
269 	if (cmd->size_out < struct_size(pl, records, ret_limit))
270 		return -EINVAL;
271 
272 	log_type = *((u8 *)cmd->payload_in);
273 	if (log_type >= CXL_EVENT_TYPE_MAX)
274 		return -EINVAL;
275 
276 	memset(cmd->payload_out, 0, struct_size(pl, records, 0));
277 
278 	log = event_find_log(dev, log_type);
279 	if (!log || event_log_empty(log))
280 		return 0;
281 
282 	pl = cmd->payload_out;
283 
284 	for (i = 0; i < ret_limit && !event_log_empty(log); i++) {
285 		memcpy(&pl->records[i], event_get_current(log),
286 		       sizeof(pl->records[i]));
287 		pl->records[i].event.generic.hdr.handle =
288 				event_get_cur_event_handle(log);
289 		log->cur_idx++;
290 	}
291 
292 	cmd->size_out = struct_size(pl, records, i);
293 	pl->record_count = cpu_to_le16(i);
294 	if (!event_log_empty(log))
295 		pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
296 
297 	if (log->nr_overflow) {
298 		u64 ns;
299 
300 		pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
301 		pl->overflow_err_count = cpu_to_le16(log->nr_overflow);
302 		ns = ktime_get_real_ns();
303 		ns -= 5000000000; /* 5s ago */
304 		pl->first_overflow_timestamp = cpu_to_le64(ns);
305 		ns = ktime_get_real_ns();
306 		ns -= 1000000000; /* 1s ago */
307 		pl->last_overflow_timestamp = cpu_to_le64(ns);
308 	}
309 
310 	return 0;
311 }
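/*
 * mock_get_event() emulates Get Event Records: it returns between 1 and
 * CXL_TEST_EVENT_RET_MAX records per call (varying with event_counter),
 * sets CXL_GET_EVENT_FLAG_MORE_RECORDS while records remain, and, if the
 * log overflowed, reports the overflow count with synthetic first/last
 * overflow timestamps of roughly 5s and 1s in the past.
 */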
312 
313 static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
314 {
315 	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
316 	struct mock_event_log *log;
317 	u8 log_type = pl->event_log;
318 	u16 handle;
319 	int nr;
320 
321 	if (log_type >= CXL_EVENT_TYPE_MAX)
322 		return -EINVAL;
323 
324 	log = event_find_log(dev, log_type);
325 	if (!log)
326 		return 0; /* No mock data in this log */
327 
328 	/*
329 	 * Clearing events that have not been returned yet is technically not
330 	 * invalid per the specification AFAICS (the host could 'guess' handles
331 	 * and clear them in order), but it is not good host behavior, so test for it.
332 	 */
333 	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
334 		dev_err(dev,
335 			"Attempting to clear more events than returned!\n");
336 		return -EINVAL;
337 	}
338 
339 	/* Check handle order prior to clearing events */
340 	for (nr = 0, handle = event_get_clear_handle(log);
341 	     nr < pl->nr_recs;
342 	     nr++, handle++) {
343 		if (handle != le16_to_cpu(pl->handles[nr])) {
344 			dev_err(dev, "Clearing events out of order\n");
345 			return -EINVAL;
346 		}
347 	}
348 
349 	if (log->nr_overflow)
350 		log->nr_overflow = 0;
351 
352 	/* Clear events */
353 	log->clear_idx += pl->nr_recs;
354 	return 0;
355 }
356 
357 static void cxl_mock_event_trigger(struct device *dev)
358 {
359 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
360 	struct mock_event_store *mes = &mdata->mes;
361 	int i;
362 
363 	for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
364 		struct mock_event_log *log;
365 
366 		log = event_find_log(dev, i);
367 		if (log)
368 			event_reset_log(log);
369 	}
370 
371 	cxl_mem_get_event_records(mdata->mds, mes->ev_status);
372 }
373 
374 struct cxl_event_record_raw maint_needed = {
375 	.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
376 			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
377 	.event.generic = {
378 		.hdr = {
379 			.length = sizeof(struct cxl_event_record_raw),
380 			.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
381 			/* .handle = Set dynamically */
382 			.related_handle = cpu_to_le16(0xa5b6),
383 		},
384 		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
385 	},
386 };
387 
388 struct cxl_event_record_raw hardware_replace = {
389 	.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
390 			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
391 	.event.generic = {
392 		.hdr = {
393 			.length = sizeof(struct cxl_event_record_raw),
394 			.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
395 			/* .handle = Set dynamically */
396 			.related_handle = cpu_to_le16(0xb6a5),
397 		},
398 		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
399 	},
400 };
401 
402 struct cxl_test_gen_media {
403 	uuid_t id;
404 	struct cxl_event_gen_media rec;
405 } __packed;
406 
407 struct cxl_test_gen_media gen_media = {
408 	.id = CXL_EVENT_GEN_MEDIA_UUID,
409 	.rec = {
410 		.media_hdr = {
411 			.hdr = {
412 				.length = sizeof(struct cxl_test_gen_media),
413 				.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
414 				/* .handle = Set dynamically */
415 				.related_handle = cpu_to_le16(0),
416 			},
417 			.phys_addr = cpu_to_le64(0x2000),
418 			.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
419 			.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
420 			.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
421 			/* .validity_flags = <set below> */
422 			.channel = 1,
423 			.rank = 30,
424 		},
425 		.component_id = { 0x3, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
426 		.cme_threshold_ev_flags = 3,
427 		.cme_count = { 33, 0, 0 },
428 		.sub_type = 0x2,
429 	},
430 };
431 
432 struct cxl_test_dram {
433 	uuid_t id;
434 	struct cxl_event_dram rec;
435 } __packed;
436 
437 struct cxl_test_dram dram = {
438 	.id = CXL_EVENT_DRAM_UUID,
439 	.rec = {
440 		.media_hdr = {
441 			.hdr = {
442 				.length = sizeof(struct cxl_test_dram),
443 				.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
444 				/* .handle = Set dynamically */
445 				.related_handle = cpu_to_le16(0),
446 			},
447 			.phys_addr = cpu_to_le64(0x8000),
448 			.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
449 			.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
450 			.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
451 			/* .validity_flags = <set below> */
452 			.channel = 1,
453 		},
454 		.bank_group = 5,
455 		.bank = 2,
456 		.column = {0xDE, 0xAD},
457 		.component_id = { 0x1, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
458 		.sub_channel = 8,
459 		.cme_threshold_ev_flags = 2,
460 		.cvme_count = { 14, 0, 0 },
461 		.sub_type = 0x5,
462 	},
463 };
464 
465 struct cxl_test_mem_module {
466 	uuid_t id;
467 	struct cxl_event_mem_module rec;
468 } __packed;
469 
470 struct cxl_test_mem_module mem_module = {
471 	.id = CXL_EVENT_MEM_MODULE_UUID,
472 	.rec = {
473 		.hdr = {
474 			.length = sizeof(struct cxl_test_mem_module),
475 			/* .handle = Set dynamically */
476 			.related_handle = cpu_to_le16(0),
477 		},
478 		.event_type = CXL_MMER_TEMP_CHANGE,
479 		.info = {
480 			.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
481 			.media_status = CXL_DHI_MS_ALL_DATA_LOST,
482 			.add_status = (CXL_DHI_AS_CRITICAL << 2) |
483 				      (CXL_DHI_AS_WARNING << 4) |
484 				      (CXL_DHI_AS_WARNING << 5),
485 			.device_temp = { 0xDE, 0xAD},
486 			.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
487 			.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
488 			.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
489 		},
490 		/* .validity_flags = <set below> */
491 		.component_id = { 0x2, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
492 		.event_sub_type = 0x3,
493 	},
494 };
495 
496 static int mock_set_timestamp(struct cxl_dev_state *cxlds,
497 			      struct cxl_mbox_cmd *cmd)
498 {
499 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
500 	struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
501 
502 	if (cmd->size_in != sizeof(*ts))
503 		return -EINVAL;
504 
505 	if (cmd->size_out != 0)
506 		return -EINVAL;
507 
508 	mdata->timestamp = le64_to_cpu(ts->timestamp);
509 	return 0;
510 }
511 
512 static void cxl_mock_add_event_logs(struct mock_event_store *mes)
513 {
514 	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK |
515 			   CXL_GMER_VALID_COMPONENT | CXL_GMER_VALID_COMPONENT_ID_FORMAT,
516 			   &gen_media.rec.media_hdr.validity_flags);
517 
518 	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
519 			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN | CXL_DER_VALID_SUB_CHANNEL |
520 			   CXL_DER_VALID_COMPONENT | CXL_DER_VALID_COMPONENT_ID_FORMAT,
521 			   &dram.rec.media_hdr.validity_flags);
522 
523 	put_unaligned_le16(CXL_MMER_VALID_COMPONENT | CXL_MMER_VALID_COMPONENT_ID_FORMAT,
524 			   &mem_module.rec.validity_flags);
525 
526 	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
527 	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
528 		      (struct cxl_event_record_raw *)&gen_media);
529 	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
530 		      (struct cxl_event_record_raw *)&mem_module);
531 	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;
532 
533 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
534 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
535 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
536 		      (struct cxl_event_record_raw *)&dram);
537 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
538 		      (struct cxl_event_record_raw *)&gen_media);
539 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
540 		      (struct cxl_event_record_raw *)&mem_module);
541 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
542 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
543 		      (struct cxl_event_record_raw *)&dram);
544 	/* Overflow this log */
545 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
546 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
547 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
548 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
549 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
550 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
551 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
552 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
553 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
554 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
555 	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;
556 
557 	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
558 	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
559 		      (struct cxl_event_record_raw *)&dram);
560 	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
561 }
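/*
 * Resulting mock log population: the INFO log gets 3 records, the FAIL log
 * is intentionally overfilled (17 additions against a capacity of
 * CXL_TEST_EVENT_CNT_MAX == 15, so 2 land in nr_overflow), and the FATAL
 * log gets 2 records.
 */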
562 
563 static int mock_gsl(struct cxl_mbox_cmd *cmd)
564 {
565 	if (cmd->size_out < sizeof(mock_gsl_payload))
566 		return -EINVAL;
567 
568 	memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
569 	cmd->size_out = sizeof(mock_gsl_payload);
570 
571 	return 0;
572 }
573 
574 static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
575 {
576 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
577 	struct cxl_mbox_get_log *gl = cmd->payload_in;
578 	u32 offset = le32_to_cpu(gl->offset);
579 	u32 length = le32_to_cpu(gl->length);
580 	uuid_t uuid = DEFINE_CXL_CEL_UUID;
581 	void *data = &mock_cel;
582 
583 	if (cmd->size_in < sizeof(*gl))
584 		return -EINVAL;
585 	if (length > cxl_mbox->payload_size)
586 		return -EINVAL;
587 	if (offset + length > sizeof(mock_cel))
588 		return -EINVAL;
589 	if (!uuid_equal(&gl->uuid, &uuid))
590 		return -EINVAL;
591 	if (length > cmd->size_out)
592 		return -EINVAL;
593 
594 	memcpy(cmd->payload_out, data + offset, length);
595 
596 	return 0;
597 }
598 
599 static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
600 {
601 	struct cxl_mbox_identify id = {
602 		.fw_revision = { "mock fw v1 " },
603 		.total_capacity =
604 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
605 		.volatile_capacity =
606 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
607 	};
608 
609 	if (cmd->size_out < sizeof(id))
610 		return -EINVAL;
611 
612 	memcpy(cmd->payload_out, &id, sizeof(id));
613 
614 	return 0;
615 }
616 
617 static int mock_id(struct cxl_mbox_cmd *cmd)
618 {
619 	struct cxl_mbox_identify id = {
620 		.fw_revision = { "mock fw v1 " },
621 		.lsa_size = cpu_to_le32(LSA_SIZE),
622 		.partition_align =
623 			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
624 		.total_capacity =
625 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
626 		.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
627 	};
628 
629 	put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
630 
631 	if (cmd->size_out < sizeof(id))
632 		return -EINVAL;
633 
634 	memcpy(cmd->payload_out, &id, sizeof(id));
635 
636 	return 0;
637 }
638 
639 static int mock_partition_info(struct cxl_mbox_cmd *cmd)
640 {
641 	struct cxl_mbox_get_partition_info pi = {
642 		.active_volatile_cap =
643 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
644 		.active_persistent_cap =
645 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
646 	};
647 
648 	if (cmd->size_out < sizeof(pi))
649 		return -EINVAL;
650 
651 	memcpy(cmd->payload_out, &pi, sizeof(pi));
652 
653 	return 0;
654 }
655 
656 void cxl_mockmem_sanitize_work(struct work_struct *work)
657 {
658 	struct cxl_memdev_state *mds =
659 		container_of(work, typeof(*mds), security.poll_dwork.work);
660 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
661 
662 	mutex_lock(&cxl_mbox->mbox_mutex);
663 	if (mds->security.sanitize_node)
664 		sysfs_notify_dirent(mds->security.sanitize_node);
665 	mds->security.sanitize_active = false;
666 	mutex_unlock(&cxl_mbox->mbox_mutex);
667 
668 	dev_dbg(mds->cxlds.dev, "sanitize complete\n");
669 }
670 
671 static int mock_sanitize(struct cxl_mockmem_data *mdata,
672 			 struct cxl_mbox_cmd *cmd)
673 {
674 	struct cxl_memdev_state *mds = mdata->mds;
675 	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
676 	int rc = 0;
677 
678 	if (cmd->size_in != 0)
679 		return -EINVAL;
680 
681 	if (cmd->size_out != 0)
682 		return -EINVAL;
683 
684 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
685 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
686 		return -ENXIO;
687 	}
688 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
689 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
690 		return -ENXIO;
691 	}
692 
693 	mutex_lock(&cxl_mbox->mbox_mutex);
694 	if (schedule_delayed_work(&mds->security.poll_dwork,
695 				  msecs_to_jiffies(mdata->sanitize_timeout))) {
696 		mds->security.sanitize_active = true;
697 		dev_dbg(mds->cxlds.dev, "sanitize issued\n");
698 	} else
699 		rc = -EBUSY;
700 	mutex_unlock(&cxl_mbox->mbox_mutex);
701 
702 	return rc;
703 }
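/*
 * Sanitize is emulated as a background operation: the command schedules
 * security.poll_dwork after sanitize_timeout milliseconds, and
 * cxl_mockmem_sanitize_work() later notifies the sanitize sysfs node and
 * clears sanitize_active.  Sanitize is refused while a user passphrase is
 * set or the device is locked.
 */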
704 
705 static int mock_secure_erase(struct cxl_mockmem_data *mdata,
706 			     struct cxl_mbox_cmd *cmd)
707 {
708 	if (cmd->size_in != 0)
709 		return -EINVAL;
710 
711 	if (cmd->size_out != 0)
712 		return -EINVAL;
713 
714 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
715 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
716 		return -ENXIO;
717 	}
718 
719 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
720 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
721 		return -ENXIO;
722 	}
723 
724 	return 0;
725 }
726 
727 static int mock_get_security_state(struct cxl_mockmem_data *mdata,
728 				   struct cxl_mbox_cmd *cmd)
729 {
730 	if (cmd->size_in)
731 		return -EINVAL;
732 
733 	if (cmd->size_out != sizeof(u32))
734 		return -EINVAL;
735 
736 	memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
737 
738 	return 0;
739 }
740 
741 static void master_plimit_check(struct cxl_mockmem_data *mdata)
742 {
743 	if (mdata->master_limit == PASS_TRY_LIMIT)
744 		return;
745 	mdata->master_limit++;
746 	if (mdata->master_limit == PASS_TRY_LIMIT)
747 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
748 }
749 
750 static void user_plimit_check(struct cxl_mockmem_data *mdata)
751 {
752 	if (mdata->user_limit == PASS_TRY_LIMIT)
753 		return;
754 	mdata->user_limit++;
755 	if (mdata->user_limit == PASS_TRY_LIMIT)
756 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
757 }
758 
759 static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
760 			       struct cxl_mbox_cmd *cmd)
761 {
762 	struct cxl_set_pass *set_pass;
763 
764 	if (cmd->size_in != sizeof(*set_pass))
765 		return -EINVAL;
766 
767 	if (cmd->size_out != 0)
768 		return -EINVAL;
769 
770 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
771 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
772 		return -ENXIO;
773 	}
774 
775 	set_pass = cmd->payload_in;
776 	switch (set_pass->type) {
777 	case CXL_PMEM_SEC_PASS_MASTER:
778 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
779 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
780 			return -ENXIO;
781 		}
782 		/*
783 		 * CXL spec rev 3.0 section 8.2.9.8.6.2: the master passphrase shall only
784 		 * be set in the security disabled state when the user passphrase is not set.
785 		 */
786 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
787 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
788 			return -ENXIO;
789 		}
790 		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
791 			master_plimit_check(mdata);
792 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
793 			return -ENXIO;
794 		}
795 		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
796 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
797 		return 0;
798 
799 	case CXL_PMEM_SEC_PASS_USER:
800 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
801 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
802 			return -ENXIO;
803 		}
804 		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
805 			user_plimit_check(mdata);
806 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
807 			return -ENXIO;
808 		}
809 		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
810 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
811 		return 0;
812 
813 	default:
814 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
815 	}
816 	return -EINVAL;
817 }
818 
819 static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
820 				   struct cxl_mbox_cmd *cmd)
821 {
822 	struct cxl_disable_pass *dis_pass;
823 
824 	if (cmd->size_in != sizeof(*dis_pass))
825 		return -EINVAL;
826 
827 	if (cmd->size_out != 0)
828 		return -EINVAL;
829 
830 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
831 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
832 		return -ENXIO;
833 	}
834 
835 	dis_pass = cmd->payload_in;
836 	switch (dis_pass->type) {
837 	case CXL_PMEM_SEC_PASS_MASTER:
838 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
839 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
840 			return -ENXIO;
841 		}
842 
843 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
844 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
845 			return -ENXIO;
846 		}
847 
848 		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
849 			master_plimit_check(mdata);
850 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
851 			return -ENXIO;
852 		}
853 
854 		mdata->master_limit = 0;
855 		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
856 		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
857 		return 0;
858 
859 	case CXL_PMEM_SEC_PASS_USER:
860 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
861 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
862 			return -ENXIO;
863 		}
864 
865 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
866 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
867 			return -ENXIO;
868 		}
869 
870 		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
871 			user_plimit_check(mdata);
872 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
873 			return -ENXIO;
874 		}
875 
876 		mdata->user_limit = 0;
877 		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
878 		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
879 					   CXL_PMEM_SEC_STATE_LOCKED);
880 		return 0;
881 
882 	default:
883 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
884 		return -EINVAL;
885 	}
886 
887 	return 0;
888 }
889 
890 static int mock_freeze_security(struct cxl_mockmem_data *mdata,
891 				struct cxl_mbox_cmd *cmd)
892 {
893 	if (cmd->size_in != 0)
894 		return -EINVAL;
895 
896 	if (cmd->size_out != 0)
897 		return -EINVAL;
898 
899 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
900 		return 0;
901 
902 	mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
903 	return 0;
904 }
905 
906 static int mock_unlock_security(struct cxl_mockmem_data *mdata,
907 				struct cxl_mbox_cmd *cmd)
908 {
909 	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
910 		return -EINVAL;
911 
912 	if (cmd->size_out != 0)
913 		return -EINVAL;
914 
915 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
916 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
917 		return -ENXIO;
918 	}
919 
920 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
921 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
922 		return -ENXIO;
923 	}
924 
925 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
926 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
927 		return -ENXIO;
928 	}
929 
930 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
931 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
932 		return -ENXIO;
933 	}
934 
935 	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
936 		if (++mdata->user_limit == PASS_TRY_LIMIT)
937 			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
938 		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
939 		return -ENXIO;
940 	}
941 
942 	mdata->user_limit = 0;
943 	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
944 	return 0;
945 }
946 
947 static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
948 					struct cxl_mbox_cmd *cmd)
949 {
950 	struct cxl_pass_erase *erase;
951 
952 	if (cmd->size_in != sizeof(*erase))
953 		return -EINVAL;
954 
955 	if (cmd->size_out != 0)
956 		return -EINVAL;
957 
958 	erase = cmd->payload_in;
959 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
960 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
961 		return -ENXIO;
962 	}
963 
964 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
965 	    erase->type == CXL_PMEM_SEC_PASS_USER) {
966 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
967 		return -ENXIO;
968 	}
969 
970 	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
971 	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
972 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
973 		return -ENXIO;
974 	}
975 
976 	switch (erase->type) {
977 	case CXL_PMEM_SEC_PASS_MASTER:
978 		/*
979 		 * The spec does not clearly define the behavior of the scenario
980 		 * where a master passphrase is passed in while the master
981 		 * passphrase is not set and the user passphrase is not set. The
982 		 * code assumes this behaves the same as a CXL secure erase
983 		 * command without a passphrase (0x4401).
984 		 */
985 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
986 			if (memcmp(mdata->master_pass, erase->pass,
987 				   NVDIMM_PASSPHRASE_LEN)) {
988 				master_plimit_check(mdata);
989 				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
990 				return -ENXIO;
991 			}
992 			mdata->master_limit = 0;
993 			mdata->user_limit = 0;
994 			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
995 			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
996 			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
997 		} else {
998 			/*
999 			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
1000 			 * When master passphrase is disabled, the device shall
1001 			 * return Invalid Input for the Passphrase Secure Erase
1002 			 * command with master passphrase.
1003 			 */
1004 			return -EINVAL;
1005 		}
1006 		/* Scramble encryption keys so that data is effectively erased */
1007 		break;
1008 	case CXL_PMEM_SEC_PASS_USER:
1009 		/*
1010 		 * The spec does not clearly define the behavior of the scenario
1011 		 * where a user passphrase is passed in while the user
1012 		 * passphrase is not set. The code assumes this behaves the
1013 		 * same as a CXL secure erase command without a passphrase
1014 		 * (0x4401).
1015 		 */
1016 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
1017 			if (memcmp(mdata->user_pass, erase->pass,
1018 				   NVDIMM_PASSPHRASE_LEN)) {
1019 				user_plimit_check(mdata);
1020 				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
1021 				return -ENXIO;
1022 			}
1023 			mdata->user_limit = 0;
1024 			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
1025 			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
1026 		}
1027 
1028 		/*
1029 		 * CXL rev3 Table 8-118
1030 		 * If the user passphrase is not set or not supported by the device,
1031 		 * the current passphrase value is ignored. Assume the operation
1032 		 * proceeds as a secure erase without a passphrase, since the spec
1033 		 * is not explicit.
1034 		 */
1035 
1036 		/* Scramble encryption keys so that data is effectively erased */
1037 		break;
1038 	default:
1039 		return -EINVAL;
1040 	}
1041 
1042 	return 0;
1043 }
1044 
1045 static int mock_get_lsa(struct cxl_mockmem_data *mdata,
1046 			struct cxl_mbox_cmd *cmd)
1047 {
1048 	struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
1049 	void *lsa = mdata->lsa;
1050 	u32 offset, length;
1051 
1052 	if (sizeof(*get_lsa) > cmd->size_in)
1053 		return -EINVAL;
1054 	offset = le32_to_cpu(get_lsa->offset);
1055 	length = le32_to_cpu(get_lsa->length);
1056 	if (offset + length > LSA_SIZE)
1057 		return -EINVAL;
1058 	if (length > cmd->size_out)
1059 		return -EINVAL;
1060 
1061 	memcpy(cmd->payload_out, lsa + offset, length);
1062 	return 0;
1063 }
1064 
1065 static int mock_set_lsa(struct cxl_mockmem_data *mdata,
1066 			struct cxl_mbox_cmd *cmd)
1067 {
1068 	struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
1069 	void *lsa = mdata->lsa;
1070 	u32 offset, length;
1071 
1072 	if (sizeof(*set_lsa) > cmd->size_in)
1073 		return -EINVAL;
1074 	offset = le32_to_cpu(set_lsa->offset);
1075 	length = cmd->size_in - sizeof(*set_lsa);
1076 	if (offset + length > LSA_SIZE)
1077 		return -EINVAL;
1078 
1079 	memcpy(lsa + offset, &set_lsa->data[0], length);
1080 	return 0;
1081 }
1082 
1083 static int mock_health_info(struct cxl_mbox_cmd *cmd)
1084 {
1085 	struct cxl_mbox_health_info health_info = {
1086 		/* set flags for maint needed, perf degraded, hw replacement */
1087 		.health_status = 0x7,
1088 		/* set media status to "All Data Lost" */
1089 		.media_status = 0x3,
1090 		/*
1091 		 * set ext_status flags for:
1092 		 *  ext_life_used: normal,
1093 		 *  ext_temperature: critical,
1094 		 *  ext_corrected_volatile: warning,
1095 		 *  ext_corrected_persistent: normal,
1096 		 */
1097 		.ext_status = 0x18,
1098 		.life_used = 15,
1099 		.temperature = cpu_to_le16(25),
1100 		.dirty_shutdowns = cpu_to_le32(10),
1101 		.volatile_errors = cpu_to_le32(20),
1102 		.pmem_errors = cpu_to_le32(30),
1103 	};
1104 
1105 	if (cmd->size_out < sizeof(health_info))
1106 		return -EINVAL;
1107 
1108 	memcpy(cmd->payload_out, &health_info, sizeof(health_info));
1109 	return 0;
1110 }
1111 
1112 static int mock_set_shutdown_state(struct cxl_mockmem_data *mdata,
1113 				   struct cxl_mbox_cmd *cmd)
1114 {
1115 	struct cxl_mbox_set_shutdown_state_in *ss = cmd->payload_in;
1116 
1117 	if (cmd->size_in != sizeof(*ss))
1118 		return -EINVAL;
1119 
1120 	if (cmd->size_out != 0)
1121 		return -EINVAL;
1122 
1123 	mdata->shutdown_state = ss->state;
1124 	return 0;
1125 }
1126 
1127 static struct mock_poison {
1128 	struct cxl_dev_state *cxlds;
1129 	u64 dpa;
1130 } mock_poison_list[MOCK_INJECT_TEST_MAX];
1131 
1132 static struct cxl_mbox_poison_out *
1133 cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
1134 {
1135 	struct cxl_mbox_poison_out *po;
1136 	int nr_records = 0;
1137 	u64 dpa;
1138 
1139 	po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
1140 	if (!po)
1141 		return NULL;
1142 
1143 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1144 		if (mock_poison_list[i].cxlds != cxlds)
1145 			continue;
1146 		if (mock_poison_list[i].dpa < offset ||
1147 		    mock_poison_list[i].dpa > offset + length - 1)
1148 			continue;
1149 
1150 		dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
1151 		po->record[nr_records].address = cpu_to_le64(dpa);
1152 		po->record[nr_records].length = cpu_to_le32(1);
1153 		nr_records++;
1154 		if (nr_records == poison_inject_dev_max)
1155 			break;
1156 	}
1157 
1158 	/* Always return count, even when zero */
1159 	po->count = cpu_to_le16(nr_records);
1160 
1161 	return po;
1162 }
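/*
 * Each returned media error record has length 1 and folds
 * CXL_POISON_SOURCE_INJECTED into the low bits of the DPA, which is where
 * the Get Poison List record format carries the poison "source" field, so
 * the list reads back as "injected" poison.
 */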
1163 
1164 static int mock_get_poison(struct cxl_dev_state *cxlds,
1165 			   struct cxl_mbox_cmd *cmd)
1166 {
1167 	struct cxl_mbox_poison_in *pi = cmd->payload_in;
1168 	struct cxl_mbox_poison_out *po;
1169 	u64 offset = le64_to_cpu(pi->offset);
1170 	u64 length = le64_to_cpu(pi->length);
1171 	int nr_records;
1172 
1173 	po = cxl_get_injected_po(cxlds, offset, length);
1174 	if (!po)
1175 		return -ENOMEM;
1176 	nr_records = le16_to_cpu(po->count);
1177 	memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
1178 	cmd->size_out = struct_size(po, record, nr_records);
1179 	kfree(po);
1180 
1181 	return 0;
1182 }
1183 
1184 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
1185 {
1186 	int count = 0;
1187 
1188 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1189 		if (mock_poison_list[i].cxlds == cxlds)
1190 			count++;
1191 	}
1192 	return (count >= poison_inject_dev_max);
1193 }
1194 
1195 static int mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
1196 {
1197 	/* Return EBUSY to match the CXL driver handling */
1198 	if (mock_poison_dev_max_injected(cxlds)) {
1199 		dev_dbg(cxlds->dev,
1200 			"Device poison injection limit has been reached: %d\n",
1201 			poison_inject_dev_max);
1202 		return -EBUSY;
1203 	}
1204 
1205 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1206 		if (!mock_poison_list[i].cxlds) {
1207 			mock_poison_list[i].cxlds = cxlds;
1208 			mock_poison_list[i].dpa = dpa;
1209 			return 0;
1210 		}
1211 	}
1212 	dev_dbg(cxlds->dev,
1213 		"Mock test poison injection limit has been reached: %d\n",
1214 		MOCK_INJECT_TEST_MAX);
1215 
1216 	return -ENXIO;
1217 }
1218 
1219 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
1220 {
1221 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1222 		if (mock_poison_list[i].cxlds == cxlds &&
1223 		    mock_poison_list[i].dpa == dpa)
1224 			return true;
1225 	}
1226 	return false;
1227 }
1228 
1229 static int mock_inject_poison(struct cxl_dev_state *cxlds,
1230 			      struct cxl_mbox_cmd *cmd)
1231 {
1232 	struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1233 	u64 dpa = le64_to_cpu(pi->address);
1234 
1235 	if (mock_poison_found(cxlds, dpa)) {
1236 		/* Not an error to inject poison if already poisoned */
1237 		dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1238 		return 0;
1239 	}
1240 
1241 	return mock_poison_add(cxlds, dpa);
1242 }
1243 
1244 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
1245 {
1246 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1247 		if (mock_poison_list[i].cxlds == cxlds &&
1248 		    mock_poison_list[i].dpa == dpa) {
1249 			mock_poison_list[i].cxlds = NULL;
1250 			return true;
1251 		}
1252 	}
1253 	return false;
1254 }
1255 
1256 static int mock_clear_poison(struct cxl_dev_state *cxlds,
1257 			     struct cxl_mbox_cmd *cmd)
1258 {
1259 	struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1260 	u64 dpa = le64_to_cpu(pi->address);
1261 
1262 	/*
1263 	 * A real CXL device will write pi->write_data to the address
1264 	 * being cleared. In this mock, just delete this address from
1265 	 * the mock poison list.
1266 	 */
1267 	if (!mock_poison_del(cxlds, dpa))
1268 		dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1269 
1270 	return 0;
1271 }
1272 
1273 static bool mock_poison_list_empty(void)
1274 {
1275 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1276 		if (mock_poison_list[i].cxlds)
1277 			return false;
1278 	}
1279 	return true;
1280 }
1281 
1282 static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
1283 {
1284 	return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
1285 }
1286 
1287 static ssize_t poison_inject_max_store(struct device_driver *drv,
1288 				       const char *buf, size_t len)
1289 {
1290 	int val;
1291 
1292 	if (kstrtoint(buf, 0, &val) < 0)
1293 		return -EINVAL;
1294 
1295 	if (!mock_poison_list_empty())
1296 		return -EBUSY;
1297 
1298 	if (val <= MOCK_INJECT_TEST_MAX)
1299 		poison_inject_dev_max = val;
1300 	else
1301 		return -EINVAL;
1302 
1303 	return len;
1304 }
1305 
1306 static DRIVER_ATTR_RW(poison_inject_max);
1307 
1308 static struct attribute *cxl_mock_mem_core_attrs[] = {
1309 	&driver_attr_poison_inject_max.attr,
1310 	NULL
1311 };
1312 ATTRIBUTE_GROUPS(cxl_mock_mem_core);
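/*
 * poison_inject_max is a driver-level knob for the per-device injection
 * limit.  Assuming the mock driver registers under the name "cxl_mock_mem"
 * (KBUILD_MODNAME), a hypothetical shell session could look like:
 *
 *	# cat /sys/bus/platform/drivers/cxl_mock_mem/poison_inject_max
 *	8
 *	# echo 128 > /sys/bus/platform/drivers/cxl_mock_mem/poison_inject_max
 *
 * Writes fail with -EBUSY while any mock poison is still injected and with
 * -EINVAL for values above MOCK_INJECT_TEST_MAX.
 */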
1313 
1314 static int mock_fw_info(struct cxl_mockmem_data *mdata,
1315 			struct cxl_mbox_cmd *cmd)
1316 {
1317 	struct cxl_mbox_get_fw_info fw_info = {
1318 		.num_slots = FW_SLOTS,
1319 		.slot_info = (mdata->fw_slot & 0x7) |
1320 			     ((mdata->fw_staged & 0x7) << 3),
1321 		.activation_cap = 0,
1322 	};
1323 
1324 	strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
1325 	strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
1326 	strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
1327 	strcpy(fw_info.slot_4_revision, "");
1328 
1329 	if (cmd->size_out < sizeof(fw_info))
1330 		return -EINVAL;
1331 
1332 	memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
1333 	return 0;
1334 }
1335 
1336 static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
1337 			    struct cxl_mbox_cmd *cmd)
1338 {
1339 	struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
1340 	void *fw = mdata->fw;
1341 	size_t offset, length;
1342 
1343 	offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
1344 	length = cmd->size_in - sizeof(*transfer);
1345 	if (offset + length > FW_SIZE)
1346 		return -EINVAL;
1347 
1348 	switch (transfer->action) {
1349 	case CXL_FW_TRANSFER_ACTION_FULL:
1350 		if (offset != 0)
1351 			return -EINVAL;
1352 		fallthrough;
1353 	case CXL_FW_TRANSFER_ACTION_END:
1354 		if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
1355 			return -EINVAL;
1356 		mdata->fw_size = offset + length;
1357 		break;
1358 	case CXL_FW_TRANSFER_ACTION_INITIATE:
1359 	case CXL_FW_TRANSFER_ACTION_CONTINUE:
1360 		break;
1361 	case CXL_FW_TRANSFER_ACTION_ABORT:
1362 		return 0;
1363 	default:
1364 		return -EINVAL;
1365 	}
1366 
1367 	memcpy(fw + offset, transfer->data, length);
1368 	usleep_range(1500, 2000);
1369 	return 0;
1370 }
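/*
 * Transfer FW notes: transfer->offset is given in units of
 * CXL_FW_TRANSFER_ALIGNMENT, a FULL transfer must start at offset 0, and
 * FULL/END record the staged image size in fw_size for the fw_buf_checksum
 * attribute.  The usleep_range() adds a small artificial latency to each
 * chunk.
 */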
1371 
1372 static int mock_activate_fw(struct cxl_mockmem_data *mdata,
1373 			    struct cxl_mbox_cmd *cmd)
1374 {
1375 	struct cxl_mbox_activate_fw *activate = cmd->payload_in;
1376 
1377 	if (activate->slot == 0 || activate->slot > FW_SLOTS)
1378 		return -EINVAL;
1379 
1380 	switch (activate->action) {
1381 	case CXL_FW_ACTIVATE_ONLINE:
1382 		mdata->fw_slot = activate->slot;
1383 		mdata->fw_staged = 0;
1384 		return 0;
1385 	case CXL_FW_ACTIVATE_OFFLINE:
1386 		mdata->fw_staged = activate->slot;
1387 		return 0;
1388 	}
1389 
1390 	return -EINVAL;
1391 }
1392 
1393 #define CXL_VENDOR_FEATURE_TEST							\
1394 	UUID_INIT(0xffffffff, 0xffff, 0xffff, 0xff, 0xff, 0xff, 0xff, 0xff,	\
1395 		  0xff, 0xff, 0xff)
1396 
1397 static void fill_feature_vendor_test(struct cxl_feat_entry *feat)
1398 {
1399 	feat->uuid = CXL_VENDOR_FEATURE_TEST;
1400 	feat->id = 0;
1401 	feat->get_feat_size = cpu_to_le16(0x4);
1402 	feat->set_feat_size = cpu_to_le16(0x4);
1403 	feat->flags = cpu_to_le32(CXL_FEATURE_F_CHANGEABLE |
1404 				  CXL_FEATURE_F_DEFAULT_SEL |
1405 				  CXL_FEATURE_F_SAVED_SEL);
1406 	feat->get_feat_ver = 1;
1407 	feat->set_feat_ver = 1;
1408 	feat->effects = cpu_to_le16(CXL_CMD_CONFIG_CHANGE_COLD_RESET |
1409 				    CXL_CMD_EFFECTS_VALID);
1410 }
1411 
1412 #define MAX_CXL_TEST_FEATS	1
1413 
1414 static int mock_get_test_feature(struct cxl_mockmem_data *mdata,
1415 				 struct cxl_mbox_cmd *cmd)
1416 {
1417 	struct vendor_test_feat *output = cmd->payload_out;
1418 	struct cxl_mbox_get_feat_in *input = cmd->payload_in;
1419 	u16 offset = le16_to_cpu(input->offset);
1420 	u16 count = le16_to_cpu(input->count);
1421 	u8 *ptr;
1422 
1423 	if (offset > sizeof(*output)) {
1424 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1425 		return -EINVAL;
1426 	}
1427 
1428 	if (offset + count > sizeof(*output)) {
1429 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1430 		return -EINVAL;
1431 	}
1432 
1433 	ptr = (u8 *)&mdata->test_feat + offset;
1434 	memcpy((u8 *)output + offset, ptr, count);
1435 
1436 	return 0;
1437 }
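/*
 * Get Feature honors the (offset, count) window into the 4-byte vendor test
 * feature.  For example, offset == 2 and count == 2 copies only the upper
 * two bytes of test_feat into the output payload at the same offset.
 */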
1438 
1439 static int mock_get_feature(struct cxl_mockmem_data *mdata,
1440 			    struct cxl_mbox_cmd *cmd)
1441 {
1442 	struct cxl_mbox_get_feat_in *input = cmd->payload_in;
1443 
1444 	if (uuid_equal(&input->uuid, &CXL_VENDOR_FEATURE_TEST))
1445 		return mock_get_test_feature(mdata, cmd);
1446 
1447 	cmd->return_code = CXL_MBOX_CMD_RC_UNSUPPORTED;
1448 
1449 	return -EOPNOTSUPP;
1450 }
1451 
1452 static int mock_set_test_feature(struct cxl_mockmem_data *mdata,
1453 				 struct cxl_mbox_cmd *cmd)
1454 {
1455 	struct cxl_mbox_set_feat_in *input = cmd->payload_in;
1456 	struct vendor_test_feat *test =
1457 		(struct vendor_test_feat *)input->feat_data;
1458 	u32 action;
1459 
1460 	action = FIELD_GET(CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK,
1461 			   le32_to_cpu(input->hdr.flags));
1462 	/*
1463 	 * While it is spec compliant to support other set actions, it is not
1464 	 * necessary to add that complication to the emulation currently. Reject
1465 	 * anything besides a full transfer.
1466 	 */
1467 	if (action != CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER) {
1468 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1469 		return -EINVAL;
1470 	}
1471 
1472 	/* Offset should be reserved when doing full transfer */
1473 	if (input->hdr.offset) {
1474 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1475 		return -EINVAL;
1476 	}
1477 
1478 	memcpy(&mdata->test_feat.data, &test->data, sizeof(u32));
1479 
1480 	return 0;
1481 }
1482 
1483 static int mock_set_feature(struct cxl_mockmem_data *mdata,
1484 			    struct cxl_mbox_cmd *cmd)
1485 {
1486 	struct cxl_mbox_set_feat_in *input = cmd->payload_in;
1487 
1488 	if (uuid_equal(&input->hdr.uuid, &CXL_VENDOR_FEATURE_TEST))
1489 		return mock_set_test_feature(mdata, cmd);
1490 
1491 	cmd->return_code = CXL_MBOX_CMD_RC_UNSUPPORTED;
1492 
1493 	return -EOPNOTSUPP;
1494 }
1495 
1496 static int mock_get_supported_features(struct cxl_mockmem_data *mdata,
1497 				       struct cxl_mbox_cmd *cmd)
1498 {
1499 	struct cxl_mbox_get_sup_feats_in *in = cmd->payload_in;
1500 	struct cxl_mbox_get_sup_feats_out *out = cmd->payload_out;
1501 	struct cxl_feat_entry *feat;
1502 	u16 start_idx, count;
1503 
1504 	if (cmd->size_out < sizeof(*out)) {
1505 		cmd->return_code = CXL_MBOX_CMD_RC_PAYLOADLEN;
1506 		return -EINVAL;
1507 	}
1508 
1509 	/*
1510 	 * Current emulation only supports 1 feature
1511 	 */
1512 	start_idx = le16_to_cpu(in->start_idx);
1513 	if (start_idx != 0) {
1514 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1515 		return -EINVAL;
1516 	}
1517 
1518 	count = le16_to_cpu(in->count);
1519 	if (count < struct_size(out, ents, 0)) {
1520 		cmd->return_code = CXL_MBOX_CMD_RC_PAYLOADLEN;
1521 		return -EINVAL;
1522 	}
1523 
1524 	out->supported_feats = cpu_to_le16(MAX_CXL_TEST_FEATS);
1525 	cmd->return_code = 0;
1526 	if (count < struct_size(out, ents, MAX_CXL_TEST_FEATS)) {
1527 		out->num_entries = 0;
1528 		return 0;
1529 	}
1530 
1531 	out->num_entries = cpu_to_le16(MAX_CXL_TEST_FEATS);
1532 	feat = out->ents;
1533 	fill_feature_vendor_test(feat);
1534 
1535 	return 0;
1536 }
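/*
 * Get Supported Features emulates a device with a single (vendor test)
 * feature: start_idx must be 0, and if the requested count cannot hold one
 * full entry the command still succeeds with supported_feats == 1 and
 * num_entries == 0, letting the caller retry with a larger buffer.
 */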
1537 
1538 static int cxl_mock_mbox_send(struct cxl_mailbox *cxl_mbox,
1539 			      struct cxl_mbox_cmd *cmd)
1540 {
1541 	struct device *dev = cxl_mbox->host;
1542 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1543 	struct cxl_memdev_state *mds = mdata->mds;
1544 	struct cxl_dev_state *cxlds = &mds->cxlds;
1545 	int rc = -EIO;
1546 
1547 	switch (cmd->opcode) {
1548 	case CXL_MBOX_OP_SET_TIMESTAMP:
1549 		rc = mock_set_timestamp(cxlds, cmd);
1550 		break;
1551 	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
1552 		rc = mock_gsl(cmd);
1553 		break;
1554 	case CXL_MBOX_OP_GET_LOG:
1555 		rc = mock_get_log(mds, cmd);
1556 		break;
1557 	case CXL_MBOX_OP_IDENTIFY:
1558 		if (cxlds->rcd)
1559 			rc = mock_rcd_id(cmd);
1560 		else
1561 			rc = mock_id(cmd);
1562 		break;
1563 	case CXL_MBOX_OP_GET_LSA:
1564 		rc = mock_get_lsa(mdata, cmd);
1565 		break;
1566 	case CXL_MBOX_OP_GET_PARTITION_INFO:
1567 		rc = mock_partition_info(cmd);
1568 		break;
1569 	case CXL_MBOX_OP_GET_EVENT_RECORD:
1570 		rc = mock_get_event(dev, cmd);
1571 		break;
1572 	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
1573 		rc = mock_clear_event(dev, cmd);
1574 		break;
1575 	case CXL_MBOX_OP_SET_LSA:
1576 		rc = mock_set_lsa(mdata, cmd);
1577 		break;
1578 	case CXL_MBOX_OP_GET_HEALTH_INFO:
1579 		rc = mock_health_info(cmd);
1580 		break;
1581 	case CXL_MBOX_OP_SANITIZE:
1582 		rc = mock_sanitize(mdata, cmd);
1583 		break;
1584 	case CXL_MBOX_OP_SECURE_ERASE:
1585 		rc = mock_secure_erase(mdata, cmd);
1586 		break;
1587 	case CXL_MBOX_OP_GET_SECURITY_STATE:
1588 		rc = mock_get_security_state(mdata, cmd);
1589 		break;
1590 	case CXL_MBOX_OP_SET_PASSPHRASE:
1591 		rc = mock_set_passphrase(mdata, cmd);
1592 		break;
1593 	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
1594 		rc = mock_disable_passphrase(mdata, cmd);
1595 		break;
1596 	case CXL_MBOX_OP_FREEZE_SECURITY:
1597 		rc = mock_freeze_security(mdata, cmd);
1598 		break;
1599 	case CXL_MBOX_OP_UNLOCK:
1600 		rc = mock_unlock_security(mdata, cmd);
1601 		break;
1602 	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
1603 		rc = mock_passphrase_secure_erase(mdata, cmd);
1604 		break;
1605 	case CXL_MBOX_OP_SET_SHUTDOWN_STATE:
1606 		rc = mock_set_shutdown_state(mdata, cmd);
1607 		break;
1608 	case CXL_MBOX_OP_GET_POISON:
1609 		rc = mock_get_poison(cxlds, cmd);
1610 		break;
1611 	case CXL_MBOX_OP_INJECT_POISON:
1612 		rc = mock_inject_poison(cxlds, cmd);
1613 		break;
1614 	case CXL_MBOX_OP_CLEAR_POISON:
1615 		rc = mock_clear_poison(cxlds, cmd);
1616 		break;
1617 	case CXL_MBOX_OP_GET_FW_INFO:
1618 		rc = mock_fw_info(mdata, cmd);
1619 		break;
1620 	case CXL_MBOX_OP_TRANSFER_FW:
1621 		rc = mock_transfer_fw(mdata, cmd);
1622 		break;
1623 	case CXL_MBOX_OP_ACTIVATE_FW:
1624 		rc = mock_activate_fw(mdata, cmd);
1625 		break;
1626 	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
1627 		rc = mock_get_supported_features(mdata, cmd);
1628 		break;
1629 	case CXL_MBOX_OP_GET_FEATURE:
1630 		rc = mock_get_feature(mdata, cmd);
1631 		break;
1632 	case CXL_MBOX_OP_SET_FEATURE:
1633 		rc = mock_set_feature(mdata, cmd);
1634 		break;
1635 	default:
1636 		break;
1637 	}
1638 
1639 	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
1640 		cmd->size_in, cmd->size_out, rc);
1641 
1642 	return rc;
1643 }
1644 
1645 static void label_area_release(void *lsa)
1646 {
1647 	vfree(lsa);
1648 }
1649 
1650 static void fw_buf_release(void *buf)
1651 {
1652 	vfree(buf);
1653 }
1654 
1655 static bool is_rcd(struct platform_device *pdev)
1656 {
1657 	const struct platform_device_id *id = platform_get_device_id(pdev);
1658 
1659 	return !!id->driver_data;
1660 }
1661 
1662 static ssize_t event_trigger_store(struct device *dev,
1663 				   struct device_attribute *attr,
1664 				   const char *buf, size_t count)
1665 {
1666 	cxl_mock_event_trigger(dev);
1667 	return count;
1668 }
1669 static DEVICE_ATTR_WO(event_trigger);
1670 
1671 static int cxl_mock_mailbox_create(struct cxl_dev_state *cxlds)
1672 {
1673 	int rc;
1674 
1675 	rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev);
1676 	if (rc)
1677 		return rc;
1678 
1679 	return 0;
1680 }
1681 
1682 static void cxl_mock_test_feat_init(struct cxl_mockmem_data *mdata)
1683 {
1684 	mdata->test_feat.data = cpu_to_le32(0xdeadbeef);
1685 }
1686 
1687 static int cxl_mock_mem_probe(struct platform_device *pdev)
1688 {
1689 	struct device *dev = &pdev->dev;
1690 	struct cxl_memdev *cxlmd;
1691 	struct cxl_memdev_state *mds;
1692 	struct cxl_dev_state *cxlds;
1693 	struct cxl_mockmem_data *mdata;
1694 	struct cxl_mailbox *cxl_mbox;
1695 	struct cxl_dpa_info range_info = { 0 };
1696 	int rc;
1697 
1698 	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
1699 	if (!mdata)
1700 		return -ENOMEM;
1701 	dev_set_drvdata(dev, mdata);
1702 
1703 	mdata->lsa = vmalloc(LSA_SIZE);
1704 	if (!mdata->lsa)
1705 		return -ENOMEM;
1706 	mdata->fw = vmalloc(FW_SIZE);
1707 	if (!mdata->fw)
1708 		return -ENOMEM;
1709 	mdata->fw_slot = 2;
1710 
1711 	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
1712 	if (rc)
1713 		return rc;
1714 
1715 	rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
1716 	if (rc)
1717 		return rc;
1718 
1719 	mds = cxl_memdev_state_create(dev);
1720 	if (IS_ERR(mds))
1721 		return PTR_ERR(mds);
1722 
1723 	cxlds = &mds->cxlds;
1724 	rc = cxl_mock_mailbox_create(cxlds);
1725 	if (rc)
1726 		return rc;
1727 
1728 	cxl_mbox = &mds->cxlds.cxl_mbox;
1729 	mdata->mds = mds;
1730 	cxl_mbox->mbox_send = cxl_mock_mbox_send;
1731 	cxl_mbox->payload_size = SZ_4K;
1732 	mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
1733 	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);
1734 
1735 	cxlds->serial = pdev->id + 1;
1736 	if (is_rcd(pdev))
1737 		cxlds->rcd = true;
1738 
1739 	rc = cxl_enumerate_cmds(mds);
1740 	if (rc)
1741 		return rc;
1742 
1743 	rc = cxl_poison_state_init(mds);
1744 	if (rc)
1745 		return rc;
1746 
1747 	rc = cxl_set_timestamp(mds);
1748 	if (rc)
1749 		return rc;
1750 
1751 	cxlds->media_ready = true;
1752 	rc = cxl_dev_state_identify(mds);
1753 	if (rc)
1754 		return rc;
1755 
1756 	rc = cxl_mem_dpa_fetch(mds, &range_info);
1757 	if (rc)
1758 		return rc;
1759 
1760 	rc = cxl_dpa_setup(cxlds, &range_info);
1761 	if (rc)
1762 		return rc;
1763 
1764 	rc = devm_cxl_setup_features(cxlds);
1765 	if (rc)
1766 		dev_dbg(dev, "No CXL Features discovered\n");
1767 
1768 	cxl_mock_add_event_logs(&mdata->mes);
1769 
1770 	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
1771 	if (IS_ERR(cxlmd))
1772 		return PTR_ERR(cxlmd);
1773 
1774 	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
1775 	if (rc)
1776 		return rc;
1777 
1778 	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
1779 	if (rc)
1780 		return rc;
1781 
1782 	rc = devm_cxl_setup_fwctl(&pdev->dev, cxlmd);
1783 	if (rc)
1784 		dev_dbg(dev, "No CXL FWCTL setup\n");
1785 
1786 	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
1787 	cxl_mock_test_feat_init(mdata);
1788 
1789 	return 0;
1790 }
1791 
1792 static ssize_t security_lock_show(struct device *dev,
1793 				  struct device_attribute *attr, char *buf)
1794 {
1795 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1796 
1797 	return sysfs_emit(buf, "%u\n",
1798 			  !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1799 }
1800 
1801 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1802 				   const char *buf, size_t count)
1803 {
1804 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1805 	u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1806 		   CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1807 	int val;
1808 
1809 	if (kstrtoint(buf, 0, &val) < 0)
1810 		return -EINVAL;
1811 
1812 	if (val == 1) {
1813 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1814 			return -ENXIO;
1815 		mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1816 		mdata->security_state &= ~mask;
1817 	} else {
1818 		return -EINVAL;
1819 	}
1820 	return count;
1821 }
1822 
1823 static DEVICE_ATTR_RW(security_lock);
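/*
 * security_lock lets a test simulate the device coming up locked, which on
 * real hardware would normally require a power cycle.  Writing 1 succeeds
 * only while a user passphrase is set; it sets CXL_PMEM_SEC_STATE_LOCKED
 * and clears the frozen and passphrase-limit bits.  Assuming a mock device
 * named "cxl_mem.0", a hypothetical invocation would be:
 *
 *	# echo 1 > /sys/bus/platform/devices/cxl_mem.0/security_lock
 */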
1824 
1825 static ssize_t fw_buf_checksum_show(struct device *dev,
1826 				    struct device_attribute *attr, char *buf)
1827 {
1828 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1829 	u8 hash[SHA256_DIGEST_SIZE];
1830 
1831 	sha256(mdata->fw, mdata->fw_size, hash);
1832 
1833 	return sysfs_emit(buf, "%*phN\n", SHA256_DIGEST_SIZE, hash);
1834 }
1835 
1836 static DEVICE_ATTR_RO(fw_buf_checksum);
1837 
1838 static ssize_t sanitize_timeout_show(struct device *dev,
1839 				  struct device_attribute *attr, char *buf)
1840 {
1841 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1842 
1843 	return sysfs_emit(buf, "%lu\n", mdata->sanitize_timeout);
1844 }
1845 
1846 static ssize_t sanitize_timeout_store(struct device *dev,
1847 				      struct device_attribute *attr,
1848 				      const char *buf, size_t count)
1849 {
1850 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1851 	unsigned long val;
1852 	int rc;
1853 
1854 	rc = kstrtoul(buf, 0, &val);
1855 	if (rc)
1856 		return rc;
1857 
1858 	mdata->sanitize_timeout = val;
1859 
1860 	return count;
1861 }
1862 
1863 static DEVICE_ATTR_RW(sanitize_timeout);
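/*
 * sanitize_timeout sets how long, in milliseconds, the emulated sanitize
 * background operation takes before cxl_mockmem_sanitize_work() signals
 * completion.  A test would typically write this attribute before issuing
 * the sanitize command, e.g. (path assumed, as above):
 *
 *	# echo 3000 > /sys/bus/platform/devices/cxl_mem.0/sanitize_timeout
 */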
1864 
1865 static struct attribute *cxl_mock_mem_attrs[] = {
1866 	&dev_attr_security_lock.attr,
1867 	&dev_attr_event_trigger.attr,
1868 	&dev_attr_fw_buf_checksum.attr,
1869 	&dev_attr_sanitize_timeout.attr,
1870 	NULL
1871 };
1872 ATTRIBUTE_GROUPS(cxl_mock_mem);
1873 
1874 static const struct platform_device_id cxl_mock_mem_ids[] = {
1875 	{ .name = "cxl_mem", 0 },
1876 	{ .name = "cxl_rcd", 1 },
1877 	{ },
1878 };
1879 MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1880 
1881 static struct platform_driver cxl_mock_mem_driver = {
1882 	.probe = cxl_mock_mem_probe,
1883 	.id_table = cxl_mock_mem_ids,
1884 	.driver = {
1885 		.name = KBUILD_MODNAME,
1886 		.dev_groups = cxl_mock_mem_groups,
1887 		.groups = cxl_mock_mem_core_groups,
1888 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1889 	},
1890 };
1891 
1892 module_platform_driver(cxl_mock_mem_driver);
1893 MODULE_LICENSE("GPL v2");
1894 MODULE_DESCRIPTION("cxl_test: mem device mock module");
1895 MODULE_IMPORT_NS("CXL");
1896