1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/vmalloc.h>
7 #include <linux/module.h>
8 #include <linux/delay.h>
9 #include <linux/sizes.h>
10 #include <linux/bits.h>
11 #include <cxl/mailbox.h>
12 #include <linux/unaligned.h>
13 #include <crypto/sha2.h>
14 #include <cxlmem.h>
15
16 #include "trace.h"
17
18 #define LSA_SIZE SZ_128K
19 #define FW_SIZE SZ_64M
20 #define FW_SLOTS 3
21 #define DEV_SIZE SZ_2G
22 #define EFFECT(x) (1U << x)
23
24 #define MOCK_INJECT_DEV_MAX 8
25 #define MOCK_INJECT_TEST_MAX 128
26
27 static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
28
/* Bit positions for Command Effects Log entry effects; see EFFECT() below. */
enum cxl_command_effects {
	CONF_CHANGE_COLD_RESET = 0,
	CONF_CHANGE_IMMEDIATE,
	DATA_CHANGE_IMMEDIATE,
	POLICY_CHANGE_IMMEDIATE,
	LOG_CHANGE_IMMEDIATE,
	SECURITY_CHANGE_IMMEDIATE,
	BACKGROUND_OP,
	SECONDARY_MBOX_SUPPORTED,
};
39
40 #define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
41
/* Command Effects Log advertised by the mock device via Get Log */
static struct cxl_cel_entry mock_cel[] = {
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
				      EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(BACKGROUND_OP)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(CONF_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SANITIZE),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE) |
				      EFFECT(SECURITY_CHANGE_IMMEDIATE) |
				      EFFECT(BACKGROUND_OP)),
	},
};
101
/* See CXL 2.0 Table 181 Get Health Info Output Payload */
struct cxl_mbox_health_info {
	u8 health_status;	/* device health summary flags */
	u8 media_status;
	u8 ext_status;		/* additional status bit-fields */
	u8 life_used;		/* percentage */
	__le16 temperature;
	__le32 dirty_shutdowns;
	__le32 volatile_errors;
	__le32 pmem_errors;
} __packed;
113
/* Get Supported Logs payload: one entry, the CEL, sized to mock_cel above */
static struct {
	struct cxl_mbox_get_supported_logs gsl;
	struct cxl_gsl_entry entry;
} mock_gsl_payload = {
	.gsl = {
		.entries = cpu_to_le16(1),
	},
	.entry = {
		.uuid = DEFINE_CXL_CEL_UUID,
		.size = cpu_to_le32(sizeof(mock_cel)),
	},
};
126
127 #define PASS_TRY_LIMIT 3
128
129 #define CXL_TEST_EVENT_CNT_MAX 15
130
131 /* Set a number of events to return at a time for simulation. */
132 #define CXL_TEST_EVENT_RET_MAX 4
133
/* One mock event log: a fixed array of records plus read/clear cursors */
struct mock_event_log {
	u16 clear_idx;		/* index of next record expected to be cleared */
	u16 cur_idx;		/* index of next record returned by Get Event */
	u16 nr_events;		/* number of valid entries in events[] */
	u16 nr_overflow;	/* events dropped because the log was full */
	u16 overflow_reset;	/* overflow count restored by event_reset_log() */
	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
};
142
/* All mock logs for one device plus a bitmask of logs with pending events */
struct mock_event_store {
	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
	u32 ev_status;		/* CXLDEV_EVENT_STATUS_* bits */
};
147
/* Per mock-device state, stashed in the platform device's drvdata */
struct cxl_mockmem_data {
	void *lsa;		/* label storage area backing buffer */
	void *fw;		/* firmware slot backing buffer */
	int fw_slot;
	int fw_staged;
	size_t fw_size;
	u32 security_state;	/* CXL_PMEM_SEC_STATE_* bits */
	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
	int user_limit;		/* failed user passphrase attempts */
	int master_limit;	/* failed master passphrase attempts */
	struct mock_event_store mes;
	struct cxl_memdev_state *mds;
	u8 event_buf[SZ_4K];
	u64 timestamp;		/* last value written by Set Timestamp */
	unsigned long sanitize_timeout;	/* ms delay for mock sanitize work */
};
165
event_find_log(struct device * dev,int log_type)166 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
167 {
168 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
169
170 if (log_type >= CXL_EVENT_TYPE_MAX)
171 return NULL;
172 return &mdata->mes.mock_logs[log_type];
173 }
174
event_get_current(struct mock_event_log * log)175 static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
176 {
177 return log->events[log->cur_idx];
178 }
179
event_reset_log(struct mock_event_log * log)180 static void event_reset_log(struct mock_event_log *log)
181 {
182 log->cur_idx = 0;
183 log->clear_idx = 0;
184 log->nr_overflow = log->overflow_reset;
185 }
186
187 /* Handle can never be 0 use 1 based indexing for handle */
event_get_clear_handle(struct mock_event_log * log)188 static u16 event_get_clear_handle(struct mock_event_log *log)
189 {
190 return log->clear_idx + 1;
191 }
192
193 /* Handle can never be 0 use 1 based indexing for handle */
event_get_cur_event_handle(struct mock_event_log * log)194 static __le16 event_get_cur_event_handle(struct mock_event_log *log)
195 {
196 u16 cur_handle = log->cur_idx + 1;
197
198 return cpu_to_le16(cur_handle);
199 }
200
event_log_empty(struct mock_event_log * log)201 static bool event_log_empty(struct mock_event_log *log)
202 {
203 return log->cur_idx == log->nr_events;
204 }
205
mes_add_event(struct mock_event_store * mes,enum cxl_event_log_type log_type,struct cxl_event_record_raw * event)206 static void mes_add_event(struct mock_event_store *mes,
207 enum cxl_event_log_type log_type,
208 struct cxl_event_record_raw *event)
209 {
210 struct mock_event_log *log;
211
212 if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
213 return;
214
215 log = &mes->mock_logs[log_type];
216
217 if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
218 log->nr_overflow++;
219 log->overflow_reset = log->nr_overflow;
220 return;
221 }
222
223 log->events[log->nr_events] = event;
224 log->nr_events++;
225 }
226
227 /*
228 * Vary the number of events returned to simulate events occuring while the
229 * logs are being read.
230 */
231 static int ret_limit = 0;
232
mock_get_event(struct device * dev,struct cxl_mbox_cmd * cmd)233 static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
234 {
235 struct cxl_get_event_payload *pl;
236 struct mock_event_log *log;
237 u16 nr_overflow;
238 u8 log_type;
239 int i;
240
241 if (cmd->size_in != sizeof(log_type))
242 return -EINVAL;
243
244 ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX;
245 if (!ret_limit)
246 ret_limit = 1;
247
248 if (cmd->size_out < struct_size(pl, records, ret_limit))
249 return -EINVAL;
250
251 log_type = *((u8 *)cmd->payload_in);
252 if (log_type >= CXL_EVENT_TYPE_MAX)
253 return -EINVAL;
254
255 memset(cmd->payload_out, 0, struct_size(pl, records, 0));
256
257 log = event_find_log(dev, log_type);
258 if (!log || event_log_empty(log))
259 return 0;
260
261 pl = cmd->payload_out;
262
263 for (i = 0; i < ret_limit && !event_log_empty(log); i++) {
264 memcpy(&pl->records[i], event_get_current(log),
265 sizeof(pl->records[i]));
266 pl->records[i].event.generic.hdr.handle =
267 event_get_cur_event_handle(log);
268 log->cur_idx++;
269 }
270
271 cmd->size_out = struct_size(pl, records, i);
272 pl->record_count = cpu_to_le16(i);
273 if (!event_log_empty(log))
274 pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
275
276 if (log->nr_overflow) {
277 u64 ns;
278
279 pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
280 pl->overflow_err_count = cpu_to_le16(nr_overflow);
281 ns = ktime_get_real_ns();
282 ns -= 5000000000; /* 5s ago */
283 pl->first_overflow_timestamp = cpu_to_le64(ns);
284 ns = ktime_get_real_ns();
285 ns -= 1000000000; /* 1s ago */
286 pl->last_overflow_timestamp = cpu_to_le64(ns);
287 }
288
289 return 0;
290 }
291
/*
 * Clear Event Records mailbox handler.  Requires the host to clear only
 * records it has already been given, in handle order, before the log's
 * clear cursor advances.
 */
static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
	struct mock_event_log *log;
	u8 log_type = pl->event_log;
	u16 handle;
	int nr;

	if (log_type >= CXL_EVENT_TYPE_MAX)
		return -EINVAL;

	log = event_find_log(dev, log_type);
	if (!log)
		return 0; /* No mock data in this log */

	/*
	 * This check is technically not invalid per the specification AFAICS.
	 * (The host could 'guess' handles and clear them in order).
	 * However, this is not good behavior for the host so test it.
	 */
	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
		dev_err(dev,
			"Attempting to clear more events than returned!\n");
		return -EINVAL;
	}

	/* Check handle order prior to clearing events */
	for (nr = 0, handle = event_get_clear_handle(log);
	     nr < pl->nr_recs;
	     nr++, handle++) {
		if (handle != le16_to_cpu(pl->handles[nr])) {
			dev_err(dev, "Clearing events out of order\n");
			return -EINVAL;
		}
	}

	/* A successful clear also resets the overflow tally */
	if (log->nr_overflow)
		log->nr_overflow = 0;

	/* Clear events */
	log->clear_idx += pl->nr_recs;
	return 0;
}
335
cxl_mock_event_trigger(struct device * dev)336 static void cxl_mock_event_trigger(struct device *dev)
337 {
338 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
339 struct mock_event_store *mes = &mdata->mes;
340 int i;
341
342 for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
343 struct mock_event_log *log;
344
345 log = event_find_log(dev, i);
346 if (log)
347 event_reset_log(log);
348 }
349
350 cxl_mem_get_event_records(mdata->mds, mes->ev_status);
351 }
352
/* Canned generic record flagged "maintenance needed" */
struct cxl_event_record_raw maint_needed = {
	.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xa5b6),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
366
/* Canned generic record flagged "hardware replacement needed" */
struct cxl_event_record_raw hardware_replace = {
	.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xb6a5),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
380
/* UUID plus payload layout for a General Media event record */
struct cxl_test_gen_media {
	uuid_t id;
	struct cxl_event_gen_media rec;
} __packed;

/* Canned General Media event; validity_flags filled in at init time */
struct cxl_test_gen_media gen_media = {
	.id = CXL_EVENT_GEN_MEDIA_UUID,
	.rec = {
		.media_hdr = {
			.hdr = {
				.length = sizeof(struct cxl_test_gen_media),
				.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
				/* .handle = Set dynamically */
				.related_handle = cpu_to_le16(0),
			},
			.phys_addr = cpu_to_le64(0x2000),
			.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
			.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
			.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
			/* .validity_flags = <set below> */
			.channel = 1,
			.rank = 30,
		},
	},
};
406
/* UUID plus payload layout for a DRAM event record */
struct cxl_test_dram {
	uuid_t id;
	struct cxl_event_dram rec;
} __packed;

/* Canned DRAM event; validity_flags filled in at init time */
struct cxl_test_dram dram = {
	.id = CXL_EVENT_DRAM_UUID,
	.rec = {
		.media_hdr = {
			.hdr = {
				.length = sizeof(struct cxl_test_dram),
				.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
				/* .handle = Set dynamically */
				.related_handle = cpu_to_le16(0),
			},
			.phys_addr = cpu_to_le64(0x8000),
			.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
			.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
			.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
			/* .validity_flags = <set below> */
			.channel = 1,
		},
		.bank_group = 5,
		.bank = 2,
		.column = {0xDE, 0xAD},
	},
};
434
/* UUID plus payload layout for a Memory Module event record */
struct cxl_test_mem_module {
	uuid_t id;
	struct cxl_event_mem_module rec;
} __packed;

/* Canned Memory Module event carrying mock device-health info */
struct cxl_test_mem_module mem_module = {
	.id = CXL_EVENT_MEM_MODULE_UUID,
	.rec = {
		.hdr = {
			.length = sizeof(struct cxl_test_mem_module),
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0),
		},
		.event_type = CXL_MMER_TEMP_CHANGE,
		.info = {
			.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
			.media_status = CXL_DHI_MS_ALL_DATA_LOST,
			.add_status = (CXL_DHI_AS_CRITICAL << 2) |
				      (CXL_DHI_AS_WARNING << 4) |
				      (CXL_DHI_AS_WARNING << 5),
			.device_temp = { 0xDE, 0xAD},
			.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
		}
	},
};
462
mock_set_timestamp(struct cxl_dev_state * cxlds,struct cxl_mbox_cmd * cmd)463 static int mock_set_timestamp(struct cxl_dev_state *cxlds,
464 struct cxl_mbox_cmd *cmd)
465 {
466 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
467 struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
468
469 if (cmd->size_in != sizeof(*ts))
470 return -EINVAL;
471
472 if (cmd->size_out != 0)
473 return -EINVAL;
474
475 mdata->timestamp = le64_to_cpu(ts->timestamp);
476 return 0;
477 }
478
/*
 * Populate the INFO, FAIL and FATAL mock logs with canned records and mark
 * which logs have data pending in mes->ev_status.  The FAIL log is
 * deliberately over-filled to exercise the overflow accounting path.
 */
static void cxl_mock_add_event_logs(struct mock_event_store *mes)
{
	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
			   &gen_media.rec.media_hdr.validity_flags);

	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
			   &dram.rec.media_hdr.validity_flags);

	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&mem_module);
	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;

	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&mem_module);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	/* Overflow this log */
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;

	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
		      (struct cxl_event_record_raw *)&dram);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
}
524
mock_gsl(struct cxl_mbox_cmd * cmd)525 static int mock_gsl(struct cxl_mbox_cmd *cmd)
526 {
527 if (cmd->size_out < sizeof(mock_gsl_payload))
528 return -EINVAL;
529
530 memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
531 cmd->size_out = sizeof(mock_gsl_payload);
532
533 return 0;
534 }
535
mock_get_log(struct cxl_memdev_state * mds,struct cxl_mbox_cmd * cmd)536 static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
537 {
538 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
539 struct cxl_mbox_get_log *gl = cmd->payload_in;
540 u32 offset = le32_to_cpu(gl->offset);
541 u32 length = le32_to_cpu(gl->length);
542 uuid_t uuid = DEFINE_CXL_CEL_UUID;
543 void *data = &mock_cel;
544
545 if (cmd->size_in < sizeof(*gl))
546 return -EINVAL;
547 if (length > cxl_mbox->payload_size)
548 return -EINVAL;
549 if (offset + length > sizeof(mock_cel))
550 return -EINVAL;
551 if (!uuid_equal(&gl->uuid, &uuid))
552 return -EINVAL;
553 if (length > cmd->size_out)
554 return -EINVAL;
555
556 memcpy(cmd->payload_out, data + offset, length);
557
558 return 0;
559 }
560
mock_rcd_id(struct cxl_mbox_cmd * cmd)561 static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
562 {
563 struct cxl_mbox_identify id = {
564 .fw_revision = { "mock fw v1 " },
565 .total_capacity =
566 cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
567 .volatile_capacity =
568 cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
569 };
570
571 if (cmd->size_out < sizeof(id))
572 return -EINVAL;
573
574 memcpy(cmd->payload_out, &id, sizeof(id));
575
576 return 0;
577 }
578
mock_id(struct cxl_mbox_cmd * cmd)579 static int mock_id(struct cxl_mbox_cmd *cmd)
580 {
581 struct cxl_mbox_identify id = {
582 .fw_revision = { "mock fw v1 " },
583 .lsa_size = cpu_to_le32(LSA_SIZE),
584 .partition_align =
585 cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
586 .total_capacity =
587 cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
588 .inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
589 };
590
591 put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
592
593 if (cmd->size_out < sizeof(id))
594 return -EINVAL;
595
596 memcpy(cmd->payload_out, &id, sizeof(id));
597
598 return 0;
599 }
600
mock_partition_info(struct cxl_mbox_cmd * cmd)601 static int mock_partition_info(struct cxl_mbox_cmd *cmd)
602 {
603 struct cxl_mbox_get_partition_info pi = {
604 .active_volatile_cap =
605 cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
606 .active_persistent_cap =
607 cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
608 };
609
610 if (cmd->size_out < sizeof(pi))
611 return -EINVAL;
612
613 memcpy(cmd->payload_out, &pi, sizeof(pi));
614
615 return 0;
616 }
617
/*
 * Delayed-work callback emulating sanitize completion: notify the sysfs
 * sanitize node (if registered) and mark the operation inactive, all under
 * the mailbox mutex.
 */
void cxl_mockmem_sanitize_work(struct work_struct *work)
{
	struct cxl_memdev_state *mds =
		container_of(work, typeof(*mds), security.poll_dwork.work);
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

	mutex_lock(&cxl_mbox->mbox_mutex);
	if (mds->security.sanitize_node)
		sysfs_notify_dirent(mds->security.sanitize_node);
	mds->security.sanitize_active = false;
	mutex_unlock(&cxl_mbox->mbox_mutex);

	dev_dbg(mds->cxlds.dev, "sanitize complete\n");
}
632
/*
 * Sanitize mailbox handler.  Refused while a user passphrase is set or the
 * device is locked; otherwise schedules the sanitize polling work after
 * mdata->sanitize_timeout milliseconds to emulate a background operation.
 */
static int mock_sanitize(struct cxl_mockmem_data *mdata,
			 struct cxl_mbox_cmd *cmd)
{
	struct cxl_memdev_state *mds = mdata->mds;
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	int rc = 0;

	/* Sanitize carries no input or output payload */
	if (cmd->size_in != 0)
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}
	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	/* -EBUSY if the sanitize work was already pending */
	mutex_lock(&cxl_mbox->mbox_mutex);
	if (schedule_delayed_work(&mds->security.poll_dwork,
				  msecs_to_jiffies(mdata->sanitize_timeout))) {
		mds->security.sanitize_active = true;
		dev_dbg(mds->cxlds.dev, "sanitize issued\n");
	} else
		rc = -EBUSY;
	mutex_unlock(&cxl_mbox->mbox_mutex);

	return rc;
}
666
mock_secure_erase(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)667 static int mock_secure_erase(struct cxl_mockmem_data *mdata,
668 struct cxl_mbox_cmd *cmd)
669 {
670 if (cmd->size_in != 0)
671 return -EINVAL;
672
673 if (cmd->size_out != 0)
674 return -EINVAL;
675
676 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
677 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
678 return -ENXIO;
679 }
680
681 if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
682 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
683 return -ENXIO;
684 }
685
686 return 0;
687 }
688
mock_get_security_state(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)689 static int mock_get_security_state(struct cxl_mockmem_data *mdata,
690 struct cxl_mbox_cmd *cmd)
691 {
692 if (cmd->size_in)
693 return -EINVAL;
694
695 if (cmd->size_out != sizeof(u32))
696 return -EINVAL;
697
698 memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
699
700 return 0;
701 }
702
master_plimit_check(struct cxl_mockmem_data * mdata)703 static void master_plimit_check(struct cxl_mockmem_data *mdata)
704 {
705 if (mdata->master_limit == PASS_TRY_LIMIT)
706 return;
707 mdata->master_limit++;
708 if (mdata->master_limit == PASS_TRY_LIMIT)
709 mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
710 }
711
user_plimit_check(struct cxl_mockmem_data * mdata)712 static void user_plimit_check(struct cxl_mockmem_data *mdata)
713 {
714 if (mdata->user_limit == PASS_TRY_LIMIT)
715 return;
716 mdata->user_limit++;
717 if (mdata->user_limit == PASS_TRY_LIMIT)
718 mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
719 }
720
/*
 * Set Passphrase handler.  Verifies the old passphrase (counting failures
 * against the per-type attempt limit) before installing the new one and
 * setting the corresponding *_PASS_SET state bit.  Refused when frozen.
 */
static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
			       struct cxl_mbox_cmd *cmd)
{
	struct cxl_set_pass *set_pass;

	if (cmd->size_in != sizeof(*set_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	set_pass = cmd->payload_in;
	switch (set_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/*
		 * CXL spec rev3.0 8.2.9.8.6.2, The master passphrase shall only be set in
		 * the security disabled state when the user passphrase is not set.
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
	}
	return -EINVAL;
}
780
mock_disable_passphrase(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)781 static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
782 struct cxl_mbox_cmd *cmd)
783 {
784 struct cxl_disable_pass *dis_pass;
785
786 if (cmd->size_in != sizeof(*dis_pass))
787 return -EINVAL;
788
789 if (cmd->size_out != 0)
790 return -EINVAL;
791
792 if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
793 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
794 return -ENXIO;
795 }
796
797 dis_pass = cmd->payload_in;
798 switch (dis_pass->type) {
799 case CXL_PMEM_SEC_PASS_MASTER:
800 if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
801 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
802 return -ENXIO;
803 }
804
805 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
806 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
807 return -ENXIO;
808 }
809
810 if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
811 master_plimit_check(mdata);
812 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
813 return -ENXIO;
814 }
815
816 mdata->master_limit = 0;
817 memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
818 mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
819 return 0;
820
821 case CXL_PMEM_SEC_PASS_USER:
822 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
823 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
824 return -ENXIO;
825 }
826
827 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
828 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
829 return -ENXIO;
830 }
831
832 if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
833 user_plimit_check(mdata);
834 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
835 return -ENXIO;
836 }
837
838 mdata->user_limit = 0;
839 memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
840 mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
841 CXL_PMEM_SEC_STATE_LOCKED);
842 return 0;
843
844 default:
845 cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
846 return -EINVAL;
847 }
848
849 return 0;
850 }
851
mock_freeze_security(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)852 static int mock_freeze_security(struct cxl_mockmem_data *mdata,
853 struct cxl_mbox_cmd *cmd)
854 {
855 if (cmd->size_in != 0)
856 return -EINVAL;
857
858 if (cmd->size_out != 0)
859 return -EINVAL;
860
861 if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
862 return 0;
863
864 mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
865 return 0;
866 }
867
mock_unlock_security(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)868 static int mock_unlock_security(struct cxl_mockmem_data *mdata,
869 struct cxl_mbox_cmd *cmd)
870 {
871 if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
872 return -EINVAL;
873
874 if (cmd->size_out != 0)
875 return -EINVAL;
876
877 if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
878 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
879 return -ENXIO;
880 }
881
882 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
883 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
884 return -ENXIO;
885 }
886
887 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
888 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
889 return -ENXIO;
890 }
891
892 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
893 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
894 return -ENXIO;
895 }
896
897 if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
898 if (++mdata->user_limit == PASS_TRY_LIMIT)
899 mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
900 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
901 return -ENXIO;
902 }
903
904 mdata->user_limit = 0;
905 mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
906 return 0;
907 }
908
/*
 * Passphrase Secure Erase handler.  Verifies the supplied passphrase of the
 * given type, clears the user passphrase/lock state on success, and mimics
 * the spec's ambiguous corner cases as documented inline.
 */
static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
					struct cxl_mbox_cmd *cmd)
{
	struct cxl_pass_erase *erase;

	if (cmd->size_in != sizeof(*erase))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	erase = cmd->payload_in;
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_USER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	switch (erase->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a master passphrase is passed in while the master
		 * passphrase is not set and user passphrase is not set. The
		 * code will take the assumption that it will behave the same
		 * as a CXL secure erase command without passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
			if (memcmp(mdata->master_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				master_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->master_limit = 0;
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
		} else {
			/*
			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
			 * When master passphrase is disabled, the device shall
			 * return Invalid Input for the Passphrase Secure Erase
			 * command with master passphrase.
			 */
			return -EINVAL;
		}
		/* Scramble encryption keys so that data is effectively erased */
		break;
	case CXL_PMEM_SEC_PASS_USER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a user passphrase is passed in while the user
		 * passphrase is not set. The code will take the assumption that
		 * it will behave the same as a CXL secure erase command without
		 * passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			if (memcmp(mdata->user_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				user_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		}

		/*
		 * CXL rev3 Table 8-118
		 * If user passphrase is not set or supported by device, current
		 * passphrase value is ignored. Will make the assumption that
		 * the operation will proceed as secure erase w/o passphrase
		 * since spec is not explicit.
		 */

		/* Scramble encryption keys so that data is effectively erased */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1006
mock_get_lsa(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)1007 static int mock_get_lsa(struct cxl_mockmem_data *mdata,
1008 struct cxl_mbox_cmd *cmd)
1009 {
1010 struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
1011 void *lsa = mdata->lsa;
1012 u32 offset, length;
1013
1014 if (sizeof(*get_lsa) > cmd->size_in)
1015 return -EINVAL;
1016 offset = le32_to_cpu(get_lsa->offset);
1017 length = le32_to_cpu(get_lsa->length);
1018 if (offset + length > LSA_SIZE)
1019 return -EINVAL;
1020 if (length > cmd->size_out)
1021 return -EINVAL;
1022
1023 memcpy(cmd->payload_out, lsa + offset, length);
1024 return 0;
1025 }
1026
mock_set_lsa(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)1027 static int mock_set_lsa(struct cxl_mockmem_data *mdata,
1028 struct cxl_mbox_cmd *cmd)
1029 {
1030 struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
1031 void *lsa = mdata->lsa;
1032 u32 offset, length;
1033
1034 if (sizeof(*set_lsa) > cmd->size_in)
1035 return -EINVAL;
1036 offset = le32_to_cpu(set_lsa->offset);
1037 length = cmd->size_in - sizeof(*set_lsa);
1038 if (offset + length > LSA_SIZE)
1039 return -EINVAL;
1040
1041 memcpy(lsa + offset, &set_lsa->data[0], length);
1042 return 0;
1043 }
1044
mock_health_info(struct cxl_mbox_cmd * cmd)1045 static int mock_health_info(struct cxl_mbox_cmd *cmd)
1046 {
1047 struct cxl_mbox_health_info health_info = {
1048 /* set flags for maint needed, perf degraded, hw replacement */
1049 .health_status = 0x7,
1050 /* set media status to "All Data Lost" */
1051 .media_status = 0x3,
1052 /*
1053 * set ext_status flags for:
1054 * ext_life_used: normal,
1055 * ext_temperature: critical,
1056 * ext_corrected_volatile: warning,
1057 * ext_corrected_persistent: normal,
1058 */
1059 .ext_status = 0x18,
1060 .life_used = 15,
1061 .temperature = cpu_to_le16(25),
1062 .dirty_shutdowns = cpu_to_le32(10),
1063 .volatile_errors = cpu_to_le32(20),
1064 .pmem_errors = cpu_to_le32(30),
1065 };
1066
1067 if (cmd->size_out < sizeof(health_info))
1068 return -EINVAL;
1069
1070 memcpy(cmd->payload_out, &health_info, sizeof(health_info));
1071 return 0;
1072 }
1073
/*
 * Global mock poison tracking: each entry records one injected DPA for a
 * given device. An entry with a NULL cxlds pointer is a free slot.
 */
static struct mock_poison {
	struct cxl_dev_state *cxlds;
	u64 dpa;
} mock_poison_list[MOCK_INJECT_TEST_MAX];
1078
1079 static struct cxl_mbox_poison_out *
cxl_get_injected_po(struct cxl_dev_state * cxlds,u64 offset,u64 length)1080 cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
1081 {
1082 struct cxl_mbox_poison_out *po;
1083 int nr_records = 0;
1084 u64 dpa;
1085
1086 po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
1087 if (!po)
1088 return NULL;
1089
1090 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1091 if (mock_poison_list[i].cxlds != cxlds)
1092 continue;
1093 if (mock_poison_list[i].dpa < offset ||
1094 mock_poison_list[i].dpa > offset + length - 1)
1095 continue;
1096
1097 dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
1098 po->record[nr_records].address = cpu_to_le64(dpa);
1099 po->record[nr_records].length = cpu_to_le32(1);
1100 nr_records++;
1101 if (nr_records == poison_inject_dev_max)
1102 break;
1103 }
1104
1105 /* Always return count, even when zero */
1106 po->count = cpu_to_le16(nr_records);
1107
1108 return po;
1109 }
1110
mock_get_poison(struct cxl_dev_state * cxlds,struct cxl_mbox_cmd * cmd)1111 static int mock_get_poison(struct cxl_dev_state *cxlds,
1112 struct cxl_mbox_cmd *cmd)
1113 {
1114 struct cxl_mbox_poison_in *pi = cmd->payload_in;
1115 struct cxl_mbox_poison_out *po;
1116 u64 offset = le64_to_cpu(pi->offset);
1117 u64 length = le64_to_cpu(pi->length);
1118 int nr_records;
1119
1120 po = cxl_get_injected_po(cxlds, offset, length);
1121 if (!po)
1122 return -ENOMEM;
1123 nr_records = le16_to_cpu(po->count);
1124 memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
1125 cmd->size_out = struct_size(po, record, nr_records);
1126 kfree(po);
1127
1128 return 0;
1129 }
1130
mock_poison_dev_max_injected(struct cxl_dev_state * cxlds)1131 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
1132 {
1133 int count = 0;
1134
1135 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1136 if (mock_poison_list[i].cxlds == cxlds)
1137 count++;
1138 }
1139 return (count >= poison_inject_dev_max);
1140 }
1141
/*
 * Record an injected poison address for @cxlds in the first free slot.
 * Returns 0 on success, -EBUSY when the per-device limit is reached,
 * -ENXIO when the global mock list is full.
 */
static int mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
{
	int slot = -1;

	/* Return EBUSY to match the CXL driver handling */
	if (mock_poison_dev_max_injected(cxlds)) {
		dev_dbg(cxlds->dev,
			"Device poison injection limit has been reached: %d\n",
			poison_inject_dev_max);
		return -EBUSY;
	}

	/* A NULL cxlds marks a free slot */
	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
		if (!mock_poison_list[i].cxlds) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		dev_dbg(cxlds->dev,
			"Mock test poison injection limit has been reached: %d\n",
			MOCK_INJECT_TEST_MAX);
		return -ENXIO;
	}

	mock_poison_list[slot].cxlds = cxlds;
	mock_poison_list[slot].dpa = dpa;
	return 0;
}
1165
mock_poison_found(struct cxl_dev_state * cxlds,u64 dpa)1166 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
1167 {
1168 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1169 if (mock_poison_list[i].cxlds == cxlds &&
1170 mock_poison_list[i].dpa == dpa)
1171 return true;
1172 }
1173 return false;
1174 }
1175
mock_inject_poison(struct cxl_dev_state * cxlds,struct cxl_mbox_cmd * cmd)1176 static int mock_inject_poison(struct cxl_dev_state *cxlds,
1177 struct cxl_mbox_cmd *cmd)
1178 {
1179 struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1180 u64 dpa = le64_to_cpu(pi->address);
1181
1182 if (mock_poison_found(cxlds, dpa)) {
1183 /* Not an error to inject poison if already poisoned */
1184 dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1185 return 0;
1186 }
1187
1188 return mock_poison_add(cxlds, dpa);
1189 }
1190
mock_poison_del(struct cxl_dev_state * cxlds,u64 dpa)1191 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
1192 {
1193 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1194 if (mock_poison_list[i].cxlds == cxlds &&
1195 mock_poison_list[i].dpa == dpa) {
1196 mock_poison_list[i].cxlds = NULL;
1197 return true;
1198 }
1199 }
1200 return false;
1201 }
1202
mock_clear_poison(struct cxl_dev_state * cxlds,struct cxl_mbox_cmd * cmd)1203 static int mock_clear_poison(struct cxl_dev_state *cxlds,
1204 struct cxl_mbox_cmd *cmd)
1205 {
1206 struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1207 u64 dpa = le64_to_cpu(pi->address);
1208
1209 /*
1210 * A real CXL device will write pi->write_data to the address
1211 * being cleared. In this mock, just delete this address from
1212 * the mock poison list.
1213 */
1214 if (!mock_poison_del(cxlds, dpa))
1215 dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1216
1217 return 0;
1218 }
1219
mock_poison_list_empty(void)1220 static bool mock_poison_list_empty(void)
1221 {
1222 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1223 if (mock_poison_list[i].cxlds)
1224 return false;
1225 }
1226 return true;
1227 }
1228
poison_inject_max_show(struct device_driver * drv,char * buf)1229 static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
1230 {
1231 return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
1232 }
1233
poison_inject_max_store(struct device_driver * drv,const char * buf,size_t len)1234 static ssize_t poison_inject_max_store(struct device_driver *drv,
1235 const char *buf, size_t len)
1236 {
1237 int val;
1238
1239 if (kstrtoint(buf, 0, &val) < 0)
1240 return -EINVAL;
1241
1242 if (!mock_poison_list_empty())
1243 return -EBUSY;
1244
1245 if (val <= MOCK_INJECT_TEST_MAX)
1246 poison_inject_dev_max = val;
1247 else
1248 return -EINVAL;
1249
1250 return len;
1251 }
1252
static DRIVER_ATTR_RW(poison_inject_max);

/* Driver-level sysfs attributes shared by all mock memdev instances */
static struct attribute *cxl_mock_mem_core_attrs[] = {
	&driver_attr_poison_inject_max.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem_core);
1260
mock_fw_info(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)1261 static int mock_fw_info(struct cxl_mockmem_data *mdata,
1262 struct cxl_mbox_cmd *cmd)
1263 {
1264 struct cxl_mbox_get_fw_info fw_info = {
1265 .num_slots = FW_SLOTS,
1266 .slot_info = (mdata->fw_slot & 0x7) |
1267 ((mdata->fw_staged & 0x7) << 3),
1268 .activation_cap = 0,
1269 };
1270
1271 strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
1272 strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
1273 strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
1274 strcpy(fw_info.slot_4_revision, "");
1275
1276 if (cmd->size_out < sizeof(fw_info))
1277 return -EINVAL;
1278
1279 memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
1280 return 0;
1281 }
1282
/*
 * Mock CXL Transfer FW: copy a firmware chunk into the mock firmware
 * buffer at the requested (alignment-scaled) offset.
 *
 * FULL and END validate the target slot and record the final image size;
 * INITIATE/CONTINUE just accept the chunk; ABORT succeeds without copying.
 * The short sleep simulates device-side transfer latency.
 */
static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
			    struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
	void *fw = mdata->fw;
	size_t offset, length;

	/* The wire offset is in units of CXL_FW_TRANSFER_ALIGNMENT bytes */
	offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
	length = cmd->size_in - sizeof(*transfer);
	if (offset + length > FW_SIZE)
		return -EINVAL;

	switch (transfer->action) {
	case CXL_FW_TRANSFER_ACTION_FULL:
		/* A one-shot transfer must start at the beginning ... */
		if (offset != 0)
			return -EINVAL;
		/* ... and, like END, names a valid slot and sets the size */
		fallthrough;
	case CXL_FW_TRANSFER_ACTION_END:
		/* Slots are 1-based */
		if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
			return -EINVAL;
		mdata->fw_size = offset + length;
		break;
	case CXL_FW_TRANSFER_ACTION_INITIATE:
	case CXL_FW_TRANSFER_ACTION_CONTINUE:
		break;
	case CXL_FW_TRANSFER_ACTION_ABORT:
		/* Abort succeeds without touching the firmware buffer */
		return 0;
	default:
		return -EINVAL;
	}

	memcpy(fw + offset, transfer->data, length);
	/* Simulate device transfer latency */
	usleep_range(1500, 2000);
	return 0;
}
1318
mock_activate_fw(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)1319 static int mock_activate_fw(struct cxl_mockmem_data *mdata,
1320 struct cxl_mbox_cmd *cmd)
1321 {
1322 struct cxl_mbox_activate_fw *activate = cmd->payload_in;
1323
1324 if (activate->slot == 0 || activate->slot > FW_SLOTS)
1325 return -EINVAL;
1326
1327 switch (activate->action) {
1328 case CXL_FW_ACTIVATE_ONLINE:
1329 mdata->fw_slot = activate->slot;
1330 mdata->fw_staged = 0;
1331 return 0;
1332 case CXL_FW_ACTIVATE_OFFLINE:
1333 mdata->fw_staged = activate->slot;
1334 return 0;
1335 }
1336
1337 return -EINVAL;
1338 }
1339
/*
 * Mock mailbox dispatcher: routes each CXL mailbox opcode to its mock
 * handler. Unknown opcodes fall through with the default -EIO, matching
 * an unsupported command. Installed as cxl_mbox->mbox_send in probe.
 */
static int cxl_mock_mbox_send(struct cxl_mailbox *cxl_mbox,
			      struct cxl_mbox_cmd *cmd)
{
	struct device *dev = cxl_mbox->host;
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	struct cxl_memdev_state *mds = mdata->mds;
	struct cxl_dev_state *cxlds = &mds->cxlds;
	int rc = -EIO;

	switch (cmd->opcode) {
	case CXL_MBOX_OP_SET_TIMESTAMP:
		rc = mock_set_timestamp(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
		rc = mock_gsl(cmd);
		break;
	case CXL_MBOX_OP_GET_LOG:
		rc = mock_get_log(mds, cmd);
		break;
	case CXL_MBOX_OP_IDENTIFY:
		/* RCD (CXL 1.1 style) devices identify differently */
		if (cxlds->rcd)
			rc = mock_rcd_id(cmd);
		else
			rc = mock_id(cmd);
		break;
	case CXL_MBOX_OP_GET_LSA:
		rc = mock_get_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_PARTITION_INFO:
		rc = mock_partition_info(cmd);
		break;
	case CXL_MBOX_OP_GET_EVENT_RECORD:
		rc = mock_get_event(dev, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
		rc = mock_clear_event(dev, cmd);
		break;
	case CXL_MBOX_OP_SET_LSA:
		rc = mock_set_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_HEALTH_INFO:
		rc = mock_health_info(cmd);
		break;
	case CXL_MBOX_OP_SANITIZE:
		rc = mock_sanitize(mdata, cmd);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		rc = mock_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		rc = mock_get_security_state(mdata, cmd);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		rc = mock_set_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		rc = mock_disable_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		rc = mock_freeze_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_UNLOCK:
		rc = mock_unlock_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		rc = mock_passphrase_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_POISON:
		rc = mock_get_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		rc = mock_inject_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		rc = mock_clear_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_FW_INFO:
		rc = mock_fw_info(mdata, cmd);
		break;
	case CXL_MBOX_OP_TRANSFER_FW:
		rc = mock_transfer_fw(mdata, cmd);
		break;
	case CXL_MBOX_OP_ACTIVATE_FW:
		rc = mock_activate_fw(mdata, cmd);
		break;
	default:
		break;
	}

	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
		cmd->size_in, cmd->size_out, rc);

	return rc;
}
1434
/* devm action: free the vmalloc()'d label storage area */
static void label_area_release(void *lsa)
{
	vfree(lsa);
}
1439
/* devm action: free the vmalloc()'d mock firmware buffer */
static void fw_buf_release(void *buf)
{
	vfree(buf);
}
1444
is_rcd(struct platform_device * pdev)1445 static bool is_rcd(struct platform_device *pdev)
1446 {
1447 const struct platform_device_id *id = platform_get_device_id(pdev);
1448
1449 return !!id->driver_data;
1450 }
1451
event_trigger_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1452 static ssize_t event_trigger_store(struct device *dev,
1453 struct device_attribute *attr,
1454 const char *buf, size_t count)
1455 {
1456 cxl_mock_event_trigger(dev);
1457 return count;
1458 }
1459 static DEVICE_ATTR_WO(event_trigger);
1460
cxl_mock_mailbox_create(struct cxl_dev_state * cxlds)1461 static int cxl_mock_mailbox_create(struct cxl_dev_state *cxlds)
1462 {
1463 int rc;
1464
1465 rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev);
1466 if (rc)
1467 return rc;
1468
1469 return 0;
1470 }
1471
/*
 * Probe a mock CXL memory device: allocate the emulated label and
 * firmware storage, create the memdev state and mailbox, then run the
 * same enumeration sequence a real cxl_pci device would (commands,
 * poison state, timestamp, identify, ranges) before registering the
 * memdev and its fw-upload/sanitize plumbing. All teardown is devm-based.
 */
static int cxl_mock_mem_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cxl_memdev *cxlmd;
	struct cxl_memdev_state *mds;
	struct cxl_dev_state *cxlds;
	struct cxl_mockmem_data *mdata;
	struct cxl_mailbox *cxl_mbox;
	int rc;

	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;
	dev_set_drvdata(dev, mdata);

	/* Backing stores for the mock LSA and firmware image */
	mdata->lsa = vmalloc(LSA_SIZE);
	if (!mdata->lsa)
		return -ENOMEM;
	mdata->fw = vmalloc(FW_SIZE);
	if (!mdata->fw)
		return -ENOMEM;
	/* Slot 2 starts out as the active firmware slot */
	mdata->fw_slot = 2;

	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
	if (rc)
		return rc;

	mds = cxl_memdev_state_create(dev);
	if (IS_ERR(mds))
		return PTR_ERR(mds);

	cxlds = &mds->cxlds;
	rc = cxl_mock_mailbox_create(cxlds);
	if (rc)
		return rc;

	/* Route all mailbox commands through the mock dispatcher */
	cxl_mbox = &mds->cxlds.cxl_mbox;
	mdata->mds = mds;
	cxl_mbox->mbox_send = cxl_mock_mbox_send;
	cxl_mbox->payload_size = SZ_4K;
	mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);

	cxlds->serial = pdev->id;
	/* "cxl_rcd" platform ids emulate an RCD (CXL 1.1 style) device */
	if (is_rcd(pdev))
		cxlds->rcd = true;

	rc = cxl_enumerate_cmds(mds);
	if (rc)
		return rc;

	rc = cxl_poison_state_init(mds);
	if (rc)
		return rc;

	rc = cxl_set_timestamp(mds);
	if (rc)
		return rc;

	cxlds->media_ready = true;
	rc = cxl_dev_state_identify(mds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(mds);
	if (rc)
		return rc;

	cxl_mock_add_event_logs(&mdata->mes);

	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
	if (rc)
		return rc;

	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
	if (rc)
		return rc;

	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);

	return 0;
}
1562
security_lock_show(struct device * dev,struct device_attribute * attr,char * buf)1563 static ssize_t security_lock_show(struct device *dev,
1564 struct device_attribute *attr, char *buf)
1565 {
1566 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1567
1568 return sysfs_emit(buf, "%u\n",
1569 !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1570 }
1571
security_lock_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1572 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1573 const char *buf, size_t count)
1574 {
1575 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1576 u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1577 CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1578 int val;
1579
1580 if (kstrtoint(buf, 0, &val) < 0)
1581 return -EINVAL;
1582
1583 if (val == 1) {
1584 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1585 return -ENXIO;
1586 mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1587 mdata->security_state &= ~mask;
1588 } else {
1589 return -EINVAL;
1590 }
1591 return count;
1592 }
1593
1594 static DEVICE_ATTR_RW(security_lock);
1595
fw_buf_checksum_show(struct device * dev,struct device_attribute * attr,char * buf)1596 static ssize_t fw_buf_checksum_show(struct device *dev,
1597 struct device_attribute *attr, char *buf)
1598 {
1599 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1600 u8 hash[SHA256_DIGEST_SIZE];
1601 unsigned char *hstr, *hptr;
1602 struct sha256_state sctx;
1603 ssize_t written = 0;
1604 int i;
1605
1606 sha256_init(&sctx);
1607 sha256_update(&sctx, mdata->fw, mdata->fw_size);
1608 sha256_final(&sctx, hash);
1609
1610 hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
1611 if (!hstr)
1612 return -ENOMEM;
1613
1614 hptr = hstr;
1615 for (i = 0; i < SHA256_DIGEST_SIZE; i++)
1616 hptr += sprintf(hptr, "%02x", hash[i]);
1617
1618 written = sysfs_emit(buf, "%s\n", hstr);
1619
1620 kfree(hstr);
1621 return written;
1622 }
1623
1624 static DEVICE_ATTR_RO(fw_buf_checksum);
1625
sanitize_timeout_show(struct device * dev,struct device_attribute * attr,char * buf)1626 static ssize_t sanitize_timeout_show(struct device *dev,
1627 struct device_attribute *attr, char *buf)
1628 {
1629 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1630
1631 return sysfs_emit(buf, "%lu\n", mdata->sanitize_timeout);
1632 }
1633
sanitize_timeout_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1634 static ssize_t sanitize_timeout_store(struct device *dev,
1635 struct device_attribute *attr,
1636 const char *buf, size_t count)
1637 {
1638 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1639 unsigned long val;
1640 int rc;
1641
1642 rc = kstrtoul(buf, 0, &val);
1643 if (rc)
1644 return rc;
1645
1646 mdata->sanitize_timeout = val;
1647
1648 return count;
1649 }
1650
1651 static DEVICE_ATTR_RW(sanitize_timeout);
1652
/* Per-device sysfs attributes exposed by each mock memdev instance */
static struct attribute *cxl_mock_mem_attrs[] = {
	&dev_attr_security_lock.attr,
	&dev_attr_event_trigger.attr,
	&dev_attr_fw_buf_checksum.attr,
	&dev_attr_sanitize_timeout.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem);
1661
1662 static const struct platform_device_id cxl_mock_mem_ids[] = {
1663 { .name = "cxl_mem", 0 },
1664 { .name = "cxl_rcd", 1 },
1665 { },
1666 };
1667 MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1668
static struct platform_driver cxl_mock_mem_driver = {
	.probe = cxl_mock_mem_probe,
	.id_table = cxl_mock_mem_ids,
	.driver = {
		.name = KBUILD_MODNAME,
		/* per-device attributes (security_lock, event_trigger, ...) */
		.dev_groups = cxl_mock_mem_groups,
		/* driver-level attributes (poison_inject_max) */
		.groups = cxl_mock_mem_core_groups,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_platform_driver(cxl_mock_mem_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);