xref: /linux/tools/testing/cxl/test/mem.c (revision 376b1446153ca67e7028e6b9555d9b17477f568b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/module.h>
7 #include <linux/delay.h>
8 #include <linux/sizes.h>
9 #include <linux/bits.h>
10 #include <cxlmem.h>
11 
12 #include "trace.h"
13 
#define LSA_SIZE SZ_128K
#define DEV_SIZE SZ_2G
/* Command Effects Log effect bitmask; argument parenthesized for expansion safety */
#define EFFECT(x) (1U << (x))
17 
/* Command Effects Log entries returned for Get Log with the CEL UUID */
static struct cxl_cel_entry mock_cel[] = {
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
		/* NOTE(review): bits 1,2 presumably Immediate Config/Data Change - confirm vs CXL spec */
		.effect = cpu_to_le16(EFFECT(1) | EFFECT(2)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
		.effect = cpu_to_le16(0),
	},
};
44 
/* See CXL 2.0 Table 181 Get Health Info Output Payload */
struct cxl_mbox_health_info {
	u8 health_status;	/* overall device health flags */
	u8 media_status;	/* media state, e.g. data-loss condition */
	u8 ext_status;		/* packed additional-status fields */
	u8 life_used;		/* device life consumed (percentage per Table 181) */
	__le16 temperature;	/* device temperature (units per Table 181 - confirm) */
	__le32 dirty_shutdowns;
	__le32 volatile_errors;
	__le32 pmem_errors;
} __packed;
56 
/* Get Supported Logs response: exactly one entry, advertising the CEL */
static struct {
	struct cxl_mbox_get_supported_logs gsl;
	struct cxl_gsl_entry entry;
} mock_gsl_payload = {
	.gsl = {
		.entries = cpu_to_le16(1),
	},
	.entry = {
		.uuid = DEFINE_CXL_CEL_UUID,
		.size = cpu_to_le32(sizeof(mock_cel)),
	},
};
69 
70 #define PASS_TRY_LIMIT 3
71 
72 #define CXL_TEST_EVENT_CNT_MAX 15
73 
74 /* Set a number of events to return at a time for simulation.  */
75 #define CXL_TEST_EVENT_CNT 3
76 
/*
 * State for one mock event log. Indices are 0-based; handles handed to
 * the host are index + 1 (handle 0 is never valid, see the handle
 * helpers below).
 */
struct mock_event_log {
	u16 clear_idx;		/* index of the next event expected to be cleared */
	u16 cur_idx;		/* index of the next event to return to the host */
	u16 nr_events;		/* number of events queued in events[] */
	u16 nr_overflow;	/* events dropped because the log was full */
	u16 overflow_reset;	/* overflow count restored by event_reset_log() */
	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
};
85 
/* All mock event logs for one device plus the summary status to report */
struct mock_event_store {
	struct cxl_dev_state *cxlds;
	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
	u32 ev_status;		/* CXLDEV_EVENT_STATUS_* bits for populated logs */
};
91 
/* Per-device driver data for the mock memdev */
struct cxl_mockmem_data {
	void *lsa;				/* vmalloc()'d label storage area, LSA_SIZE bytes */
	u32 security_state;			/* CXL_PMEM_SEC_STATE_* flags */
	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
	int user_limit;				/* failed user passphrase attempts */
	int master_limit;			/* failed master passphrase attempts */
	struct mock_event_store mes;
	u8 event_buf[SZ_4K];			/* backing store for cxlds->event.buf */
};
102 
103 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
104 {
105 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
106 
107 	if (log_type >= CXL_EVENT_TYPE_MAX)
108 		return NULL;
109 	return &mdata->mes.mock_logs[log_type];
110 }
111 
112 static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
113 {
114 	return log->events[log->cur_idx];
115 }
116 
117 static void event_reset_log(struct mock_event_log *log)
118 {
119 	log->cur_idx = 0;
120 	log->clear_idx = 0;
121 	log->nr_overflow = log->overflow_reset;
122 }
123 
124 /* Handle can never be 0 use 1 based indexing for handle */
125 static u16 event_get_clear_handle(struct mock_event_log *log)
126 {
127 	return log->clear_idx + 1;
128 }
129 
130 /* Handle can never be 0 use 1 based indexing for handle */
131 static __le16 event_get_cur_event_handle(struct mock_event_log *log)
132 {
133 	u16 cur_handle = log->cur_idx + 1;
134 
135 	return cpu_to_le16(cur_handle);
136 }
137 
138 static bool event_log_empty(struct mock_event_log *log)
139 {
140 	return log->cur_idx == log->nr_events;
141 }
142 
143 static void mes_add_event(struct mock_event_store *mes,
144 			  enum cxl_event_log_type log_type,
145 			  struct cxl_event_record_raw *event)
146 {
147 	struct mock_event_log *log;
148 
149 	if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
150 		return;
151 
152 	log = &mes->mock_logs[log_type];
153 
154 	if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
155 		log->nr_overflow++;
156 		log->overflow_reset = log->nr_overflow;
157 		return;
158 	}
159 
160 	log->events[log->nr_events] = event;
161 	log->nr_events++;
162 }
163 
164 static int mock_get_event(struct cxl_dev_state *cxlds,
165 			  struct cxl_mbox_cmd *cmd)
166 {
167 	struct cxl_get_event_payload *pl;
168 	struct mock_event_log *log;
169 	u16 nr_overflow;
170 	u8 log_type;
171 	int i;
172 
173 	if (cmd->size_in != sizeof(log_type))
174 		return -EINVAL;
175 
176 	if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT))
177 		return -EINVAL;
178 
179 	log_type = *((u8 *)cmd->payload_in);
180 	if (log_type >= CXL_EVENT_TYPE_MAX)
181 		return -EINVAL;
182 
183 	memset(cmd->payload_out, 0, cmd->size_out);
184 
185 	log = event_find_log(cxlds->dev, log_type);
186 	if (!log || event_log_empty(log))
187 		return 0;
188 
189 	pl = cmd->payload_out;
190 
191 	for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) {
192 		memcpy(&pl->records[i], event_get_current(log),
193 		       sizeof(pl->records[i]));
194 		pl->records[i].hdr.handle = event_get_cur_event_handle(log);
195 		log->cur_idx++;
196 	}
197 
198 	pl->record_count = cpu_to_le16(i);
199 	if (!event_log_empty(log))
200 		pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
201 
202 	if (log->nr_overflow) {
203 		u64 ns;
204 
205 		pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
206 		pl->overflow_err_count = cpu_to_le16(nr_overflow);
207 		ns = ktime_get_real_ns();
208 		ns -= 5000000000; /* 5s ago */
209 		pl->first_overflow_timestamp = cpu_to_le64(ns);
210 		ns = ktime_get_real_ns();
211 		ns -= 1000000000; /* 1s ago */
212 		pl->last_overflow_timestamp = cpu_to_le64(ns);
213 	}
214 
215 	return 0;
216 }
217 
/*
 * Clear Event Records: verify the host clears only handles that were
 * previously returned, and clears them in order, then advance clear_idx
 * past the cleared records. Clearing anything also drops the pending
 * overflow count.
 */
static int mock_clear_event(struct cxl_dev_state *cxlds,
			    struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
	struct mock_event_log *log;
	u8 log_type = pl->event_log;
	u16 handle;
	int nr;

	if (log_type >= CXL_EVENT_TYPE_MAX)
		return -EINVAL;

	log = event_find_log(cxlds->dev, log_type);
	if (!log)
		return 0; /* No mock data in this log */

	/*
	 * This check is technically not invalid per the specification AFAICS.
	 * (The host could 'guess' handles and clear them in order).
	 * However, this is not good behavior for the host so test it.
	 */
	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
		dev_err(cxlds->dev,
			"Attempting to clear more events than returned!\n");
		return -EINVAL;
	}

	/* Check handle order prior to clearing events */
	for (nr = 0, handle = event_get_clear_handle(log);
	     nr < pl->nr_recs;
	     nr++, handle++) {
		if (handle != le16_to_cpu(pl->handles[nr])) {
			dev_err(cxlds->dev, "Clearing events out of order\n");
			return -EINVAL;
		}
	}

	if (log->nr_overflow)
		log->nr_overflow = 0;

	/* Clear events */
	log->clear_idx += pl->nr_recs;
	return 0;
}
262 
263 static void cxl_mock_event_trigger(struct device *dev)
264 {
265 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
266 	struct mock_event_store *mes = &mdata->mes;
267 	int i;
268 
269 	for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
270 		struct mock_event_log *log;
271 
272 		log = event_find_log(dev, i);
273 		if (log)
274 			event_reset_log(log);
275 	}
276 
277 	cxl_mem_get_event_records(mes->cxlds, mes->ev_status);
278 }
279 
/*
 * Raw event flagged "maintenance needed"; queued on multiple logs below.
 * NOTE(review): could likely be static - confirm no users outside this file.
 */
struct cxl_event_record_raw maint_needed = {
	.hdr = {
		.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
		.length = sizeof(struct cxl_event_record_raw),
		.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0xa5b6),
	},
	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
};
291 
/* Raw event flagged "hardware replacement needed"; also used to overflow a log */
struct cxl_event_record_raw hardware_replace = {
	.hdr = {
		.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
		.length = sizeof(struct cxl_event_record_raw),
		.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0xb6a5),
	},
	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
};
303 
/* General Media event record (UUID presumably per CXL spec - confirm) */
struct cxl_event_gen_media gen_media = {
	.hdr = {
		.id = UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
				0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
		.length = sizeof(struct cxl_event_gen_media),
		.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0),
	},
	.phys_addr = cpu_to_le64(0x2000),
	.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
	.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
	.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
	/* .validity_flags = <set below> */
	.channel = 1,
	.rank = 30
};
321 
/* DRAM event record (UUID presumably per CXL spec - confirm) */
struct cxl_event_dram dram = {
	.hdr = {
		.id = UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
				0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
		.length = sizeof(struct cxl_event_dram),
		.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0),
	},
	.phys_addr = cpu_to_le64(0x8000),
	.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
	.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
	.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
	/* .validity_flags = <set below> */
	.channel = 1,
	.bank_group = 5,
	.bank = 2,
	.column = {0xDE, 0xAD},
};
341 
/* Memory Module event record carrying a synthetic device-health snapshot */
struct cxl_event_mem_module mem_module = {
	.hdr = {
		.id = UUID_INIT(0xfe927475, 0xdd59, 0x4339,
				0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
		.length = sizeof(struct cxl_event_mem_module),
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0),
	},
	.event_type = CXL_MMER_TEMP_CHANGE,
	.info = {
		.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
		.media_status = CXL_DHI_MS_ALL_DATA_LOST,
		/* pack three additional-status fields into one byte */
		.add_status = (CXL_DHI_AS_CRITICAL << 2) |
			      (CXL_DHI_AS_WARNING << 4) |
			      (CXL_DHI_AS_WARNING << 5),
		.device_temp = { 0xDE, 0xAD},
		.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
		.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
		.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
	}
};
363 
/*
 * Populate the INFO, FAIL, and FATAL mock logs. validity_flags are
 * written at runtime with put_unaligned_le16() (the fields sit at
 * unaligned offsets in the packed records). The FAIL log is
 * deliberately overfilled to exercise the overflow reporting path.
 */
static void cxl_mock_add_event_logs(struct mock_event_store *mes)
{
	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
			   &gen_media.validity_flags);

	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
			   &dram.validity_flags);

	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&mem_module);
	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;

	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&mem_module);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	/* Overflow this log */
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;

	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
		      (struct cxl_event_record_raw *)&dram);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
}
409 
410 static int mock_gsl(struct cxl_mbox_cmd *cmd)
411 {
412 	if (cmd->size_out < sizeof(mock_gsl_payload))
413 		return -EINVAL;
414 
415 	memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
416 	cmd->size_out = sizeof(mock_gsl_payload);
417 
418 	return 0;
419 }
420 
421 static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
422 {
423 	struct cxl_mbox_get_log *gl = cmd->payload_in;
424 	u32 offset = le32_to_cpu(gl->offset);
425 	u32 length = le32_to_cpu(gl->length);
426 	uuid_t uuid = DEFINE_CXL_CEL_UUID;
427 	void *data = &mock_cel;
428 
429 	if (cmd->size_in < sizeof(*gl))
430 		return -EINVAL;
431 	if (length > cxlds->payload_size)
432 		return -EINVAL;
433 	if (offset + length > sizeof(mock_cel))
434 		return -EINVAL;
435 	if (!uuid_equal(&gl->uuid, &uuid))
436 		return -EINVAL;
437 	if (length > cmd->size_out)
438 		return -EINVAL;
439 
440 	memcpy(cmd->payload_out, data + offset, length);
441 
442 	return 0;
443 }
444 
445 static int mock_rcd_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
446 {
447 	struct cxl_mbox_identify id = {
448 		.fw_revision = { "mock fw v1 " },
449 		.total_capacity =
450 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
451 		.volatile_capacity =
452 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
453 	};
454 
455 	if (cmd->size_out < sizeof(id))
456 		return -EINVAL;
457 
458 	memcpy(cmd->payload_out, &id, sizeof(id));
459 
460 	return 0;
461 }
462 
463 static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
464 {
465 	struct cxl_mbox_identify id = {
466 		.fw_revision = { "mock fw v1 " },
467 		.lsa_size = cpu_to_le32(LSA_SIZE),
468 		.partition_align =
469 			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
470 		.total_capacity =
471 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
472 	};
473 
474 	if (cmd->size_out < sizeof(id))
475 		return -EINVAL;
476 
477 	memcpy(cmd->payload_out, &id, sizeof(id));
478 
479 	return 0;
480 }
481 
482 static int mock_partition_info(struct cxl_dev_state *cxlds,
483 			       struct cxl_mbox_cmd *cmd)
484 {
485 	struct cxl_mbox_get_partition_info pi = {
486 		.active_volatile_cap =
487 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
488 		.active_persistent_cap =
489 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
490 	};
491 
492 	if (cmd->size_out < sizeof(pi))
493 		return -EINVAL;
494 
495 	memcpy(cmd->payload_out, &pi, sizeof(pi));
496 
497 	return 0;
498 }
499 
500 static int mock_get_security_state(struct cxl_dev_state *cxlds,
501 				   struct cxl_mbox_cmd *cmd)
502 {
503 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
504 
505 	if (cmd->size_in)
506 		return -EINVAL;
507 
508 	if (cmd->size_out != sizeof(u32))
509 		return -EINVAL;
510 
511 	memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
512 
513 	return 0;
514 }
515 
516 static void master_plimit_check(struct cxl_mockmem_data *mdata)
517 {
518 	if (mdata->master_limit == PASS_TRY_LIMIT)
519 		return;
520 	mdata->master_limit++;
521 	if (mdata->master_limit == PASS_TRY_LIMIT)
522 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
523 }
524 
525 static void user_plimit_check(struct cxl_mockmem_data *mdata)
526 {
527 	if (mdata->user_limit == PASS_TRY_LIMIT)
528 		return;
529 	mdata->user_limit++;
530 	if (mdata->user_limit == PASS_TRY_LIMIT)
531 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
532 }
533 
/*
 * Set Passphrase: install or change the user or master passphrase after
 * validating the old one. Errors are signalled via cmd->return_code
 * plus -ENXIO, matching the convention used throughout this file.
 */
static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	struct cxl_set_pass *set_pass;

	if (cmd->size_in != sizeof(*set_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	/* No security changes are allowed once the state is frozen */
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	set_pass = cmd->payload_in;
	switch (set_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		/* Refuse once the master retry limit has been exhausted */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/*
		 * CXL spec rev3.0 8.2.9.8.6.2, The master passphrase shall only be set in
		 * the security disabled state when the user passphrase is not set.
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/* A wrong old passphrase counts toward the retry limit */
		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		/* Refuse once the user retry limit has been exhausted */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/* A wrong old passphrase counts toward the retry limit */
		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
	}
	return -EINVAL;
}
593 
/*
 * Disable Passphrase: clear the user or master passphrase after
 * validating the supplied one. Disabling the user passphrase also
 * unlocks the device; disabling requires the passphrase to currently be
 * set and the retry limit not exhausted.
 */
static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	struct cxl_disable_pass *dis_pass;

	if (cmd->size_in != sizeof(*dis_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	/* No security changes are allowed once the state is frozen */
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	dis_pass = cmd->payload_in;
	switch (dis_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		/* Cannot disable a passphrase that was never set */
		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		/* A mismatch counts toward the retry limit */
		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}

		mdata->master_limit = 0;
		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		/* Cannot disable a passphrase that was never set */
		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		/* A mismatch counts toward the retry limit */
		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}

		mdata->user_limit = 0;
		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		/* Dropping the user passphrase also unlocks the device */
		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
					   CXL_PMEM_SEC_STATE_LOCKED);
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
		return -EINVAL;
	}

	/* Not reached: every switch arm above returns */
	return 0;
}
664 
665 static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
666 {
667 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
668 
669 	if (cmd->size_in != 0)
670 		return -EINVAL;
671 
672 	if (cmd->size_out != 0)
673 		return -EINVAL;
674 
675 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
676 		return 0;
677 
678 	mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
679 	return 0;
680 }
681 
/*
 * Unlock: requires an unfrozen device with a user passphrase set, the
 * retry limit not exhausted, and the device actually locked. A wrong
 * passphrase counts toward PASS_TRY_LIMIT and latches USER_PLIMIT when
 * the limit is reached.
 */
static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);

	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
		if (++mdata->user_limit == PASS_TRY_LIMIT)
			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
		return -ENXIO;
	}

	/* Success resets the retry counter and clears the lock */
	mdata->user_limit = 0;
	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
	return 0;
}
723 
/*
 * Passphrase Secure Erase: validate the supplied master or user
 * passphrase, then "erase" by resetting passphrase state. Mismatches
 * count toward the respective retry limit; frozen or limit-exhausted
 * states are rejected up front.
 */
static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds,
					struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	struct cxl_pass_erase *erase;

	if (cmd->size_in != sizeof(*erase))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	erase = cmd->payload_in;
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_USER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	switch (erase->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a master passphrase is passed in while the master
		 * passphrase is not set and user passphrase is not set. The
		 * code will take the assumption that it will behave the same
		 * as a CXL secure erase command without passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
			if (memcmp(mdata->master_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				master_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			/* Master-authorized erase also wipes user credentials */
			mdata->master_limit = 0;
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
		} else {
			/*
			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
			 * When master passphrase is disabled, the device shall
			 * return Invalid Input for the Passphrase Secure Erase
			 * command with master passphrase.
			 */
			return -EINVAL;
		}
		/* Scramble encryption keys so that data is effectively erased */
		break;
	case CXL_PMEM_SEC_PASS_USER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a user passphrase is passed in while the user
		 * passphrase is not set. The code will take the assumption that
		 * it will behave the same as a CXL secure erase command without
		 * passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			if (memcmp(mdata->user_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				user_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		}

		/*
		 * CXL rev3 Table 8-118
		 * If user passphrase is not set or supported by device, current
		 * passphrase value is ignored. Will make the assumption that
		 * the operation will proceed as secure erase w/o passphrase
		 * since spec is not explicit.
		 */

		/* Scramble encryption keys so that data is effectively erased */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
822 
823 static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
824 {
825 	struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
826 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
827 	void *lsa = mdata->lsa;
828 	u32 offset, length;
829 
830 	if (sizeof(*get_lsa) > cmd->size_in)
831 		return -EINVAL;
832 	offset = le32_to_cpu(get_lsa->offset);
833 	length = le32_to_cpu(get_lsa->length);
834 	if (offset + length > LSA_SIZE)
835 		return -EINVAL;
836 	if (length > cmd->size_out)
837 		return -EINVAL;
838 
839 	memcpy(cmd->payload_out, lsa + offset, length);
840 	return 0;
841 }
842 
843 static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
844 {
845 	struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
846 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
847 	void *lsa = mdata->lsa;
848 	u32 offset, length;
849 
850 	if (sizeof(*set_lsa) > cmd->size_in)
851 		return -EINVAL;
852 	offset = le32_to_cpu(set_lsa->offset);
853 	length = cmd->size_in - sizeof(*set_lsa);
854 	if (offset + length > LSA_SIZE)
855 		return -EINVAL;
856 
857 	memcpy(lsa + offset, &set_lsa->data[0], length);
858 	return 0;
859 }
860 
861 static int mock_health_info(struct cxl_dev_state *cxlds,
862 			    struct cxl_mbox_cmd *cmd)
863 {
864 	struct cxl_mbox_health_info health_info = {
865 		/* set flags for maint needed, perf degraded, hw replacement */
866 		.health_status = 0x7,
867 		/* set media status to "All Data Lost" */
868 		.media_status = 0x3,
869 		/*
870 		 * set ext_status flags for:
871 		 *  ext_life_used: normal,
872 		 *  ext_temperature: critical,
873 		 *  ext_corrected_volatile: warning,
874 		 *  ext_corrected_persistent: normal,
875 		 */
876 		.ext_status = 0x18,
877 		.life_used = 15,
878 		.temperature = cpu_to_le16(25),
879 		.dirty_shutdowns = cpu_to_le32(10),
880 		.volatile_errors = cpu_to_le32(20),
881 		.pmem_errors = cpu_to_le32(30),
882 	};
883 
884 	if (cmd->size_out < sizeof(health_info))
885 		return -EINVAL;
886 
887 	memcpy(cmd->payload_out, &health_info, sizeof(health_info));
888 	return 0;
889 }
890 
/*
 * Mailbox dispatch for the mock device: route each supported opcode to
 * its handler. Unhandled opcodes fall through and return -EIO.
 */
static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct device *dev = cxlds->dev;
	int rc = -EIO;

	switch (cmd->opcode) {
	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
		rc = mock_gsl(cmd);
		break;
	case CXL_MBOX_OP_GET_LOG:
		rc = mock_get_log(cxlds, cmd);
		break;
	case CXL_MBOX_OP_IDENTIFY:
		/* RCD mode reports a different (volatile-only) identity */
		if (cxlds->rcd)
			rc = mock_rcd_id(cxlds, cmd);
		else
			rc = mock_id(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_LSA:
		rc = mock_get_lsa(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_PARTITION_INFO:
		rc = mock_partition_info(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_EVENT_RECORD:
		rc = mock_get_event(cxlds, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
		rc = mock_clear_event(cxlds, cmd);
		break;
	case CXL_MBOX_OP_SET_LSA:
		rc = mock_set_lsa(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_HEALTH_INFO:
		rc = mock_health_info(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		rc = mock_get_security_state(cxlds, cmd);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		rc = mock_set_passphrase(cxlds, cmd);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		rc = mock_disable_passphrase(cxlds, cmd);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		rc = mock_freeze_security(cxlds, cmd);
		break;
	case CXL_MBOX_OP_UNLOCK:
		rc = mock_unlock_security(cxlds, cmd);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		rc = mock_passphrase_secure_erase(cxlds, cmd);
		break;
	default:
		break;
	}

	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
		cmd->size_in, cmd->size_out, rc);

	return rc;
}
954 
/* devm action callback: free the vmalloc()'d label storage area */
static void label_area_release(void *lsa)
{
	vfree(lsa);
}
959 
960 static bool is_rcd(struct platform_device *pdev)
961 {
962 	const struct platform_device_id *id = platform_get_device_id(pdev);
963 
964 	return !!id->driver_data;
965 }
966 
/* Writing anything to this sysfs node replays all mock event logs */
static ssize_t event_trigger_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	cxl_mock_event_trigger(dev);
	return count;
}
static DEVICE_ATTR_WO(event_trigger);
975 
/*
 * Instantiate a mock CXL memory device: allocate driver data and the
 * label storage area, build and parameterize the cxl_dev_state, run the
 * regular enumeration/identify flow, seed the mock event logs, register
 * the memdev, and kick off an initial event fetch.
 */
static int cxl_mock_mem_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cxl_memdev *cxlmd;
	struct cxl_dev_state *cxlds;
	struct cxl_mockmem_data *mdata;
	int rc;

	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;
	dev_set_drvdata(dev, mdata);

	/* LSA freed via devm action so unwind order is automatic */
	mdata->lsa = vmalloc(LSA_SIZE);
	if (!mdata->lsa)
		return -ENOMEM;
	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
	if (rc)
		return rc;

	cxlds = cxl_dev_state_create(dev);
	if (IS_ERR(cxlds))
		return PTR_ERR(cxlds);

	cxlds->serial = pdev->id;
	cxlds->mbox_send = cxl_mock_mbox_send;
	cxlds->payload_size = SZ_4K;
	/* Event buffer lives in driver data, sized to the mailbox payload */
	cxlds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
	if (is_rcd(pdev)) {
		cxlds->rcd = true;
		cxlds->component_reg_phys = CXL_RESOURCE_NONE;
	}

	rc = cxl_enumerate_cmds(cxlds);
	if (rc)
		return rc;

	rc = cxl_dev_state_identify(cxlds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlds);
	if (rc)
		return rc;

	mdata->mes.cxlds = cxlds;
	cxl_mock_add_event_logs(&mdata->mes);

	cxlmd = devm_cxl_add_memdev(cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);

	return 0;
}
1032 
1033 static ssize_t security_lock_show(struct device *dev,
1034 				  struct device_attribute *attr, char *buf)
1035 {
1036 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1037 
1038 	return sysfs_emit(buf, "%u\n",
1039 			  !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1040 }
1041 
1042 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1043 				   const char *buf, size_t count)
1044 {
1045 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1046 	u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1047 		   CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1048 	int val;
1049 
1050 	if (kstrtoint(buf, 0, &val) < 0)
1051 		return -EINVAL;
1052 
1053 	if (val == 1) {
1054 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1055 			return -ENXIO;
1056 		mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1057 		mdata->security_state &= ~mask;
1058 	} else {
1059 		return -EINVAL;
1060 	}
1061 	return count;
1062 }
1063 
static DEVICE_ATTR_RW(security_lock);

/* sysfs attributes exposed by the mock platform device */
static struct attribute *cxl_mock_mem_attrs[] = {
	&dev_attr_security_lock.attr,
	&dev_attr_event_trigger.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem);
1072 
/* Non-zero driver_data selects RCD emulation (consumed by is_rcd()) */
static const struct platform_device_id cxl_mock_mem_ids[] = {
	{ .name = "cxl_mem", 0 },
	{ .name = "cxl_rcd", 1 },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1079 
/* Platform driver glue for the mock memdev */
static struct platform_driver cxl_mock_mem_driver = {
	.probe = cxl_mock_mem_probe,
	.id_table = cxl_mock_mem_ids,
	.driver = {
		.name = KBUILD_MODNAME,
		.dev_groups = cxl_mock_mem_groups,
	},
};

module_platform_driver(cxl_mock_mem_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
1092