1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/vmalloc.h>
7 #include <linux/module.h>
8 #include <linux/delay.h>
9 #include <linux/sizes.h>
10 #include <linux/bits.h>
11 #include <cxl/mailbox.h>
12 #include <linux/unaligned.h>
13 #include <crypto/sha2.h>
14 #include <cxlmem.h>
15
16 #include "trace.h"
17
18 #define LSA_SIZE SZ_128K
19 #define FW_SIZE SZ_64M
20 #define FW_SLOTS 3
21 #define DEV_SIZE SZ_2G
22 #define EFFECT(x) (1U << x)
23
24 #define MOCK_INJECT_DEV_MAX 8
25 #define MOCK_INJECT_TEST_MAX 128
26
27 static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
28
/*
 * Bit positions for the "effect" field of a Command Effects Log (CEL)
 * entry; combined via the EFFECT() mask macro in mock_cel[] below.
 */
enum cxl_command_effects {
	CONF_CHANGE_COLD_RESET = 0,
	CONF_CHANGE_IMMEDIATE,
	DATA_CHANGE_IMMEDIATE,
	POLICY_CHANGE_IMMEDIATE,
	LOG_CHANGE_IMMEDIATE,
	SECURITY_CHANGE_IMMEDIATE,
	BACKGROUND_OP,
	SECONDARY_MBOX_SUPPORTED,
};
39
40 #define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
41
/*
 * Mock Command Effects Log: the set of mailbox opcodes this emulated
 * device advertises, each with the effects bits a real device would
 * report for that command. Served to the host via mock_get_log().
 */
static struct cxl_cel_entry mock_cel[] = {
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
				      EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(BACKGROUND_OP)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(CONF_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SANITIZE),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE) |
				      EFFECT(SECURITY_CHANGE_IMMEDIATE) |
				      EFFECT(BACKGROUND_OP)),
	},
};
101
/* See CXL 2.0 Table 181 Get Health Info Output Payload */
struct cxl_mbox_health_info {
	u8 health_status;
	u8 media_status;
	u8 ext_status;
	u8 life_used;		/* percentage */
	__le16 temperature;
	__le32 dirty_shutdowns;
	__le32 volatile_errors;
	__le32 pmem_errors;
} __packed;
113
/*
 * Canned Get Supported Logs response: exactly one log (the CEL), whose
 * reported size matches mock_cel[] above. Returned by mock_gsl().
 */
static struct {
	struct cxl_mbox_get_supported_logs gsl;
	struct cxl_gsl_entry entry;
} mock_gsl_payload = {
	.gsl = {
		.entries = cpu_to_le16(1),
	},
	.entry = {
		.uuid = DEFINE_CXL_CEL_UUID,
		.size = cpu_to_le32(sizeof(mock_cel)),
	},
};
126
127 #define PASS_TRY_LIMIT 3
128
129 #define CXL_TEST_EVENT_CNT_MAX 15
130
131 /* Set a number of events to return at a time for simulation. */
132 #define CXL_TEST_EVENT_RET_MAX 4
133
/*
 * One mock event log. Events are appended at nr_events; the host's read
 * position is cur_idx and its acknowledge (clear) position is clear_idx.
 * overflow_reset preserves the overflow count so event_reset_log() can
 * replay the same overflow condition on a re-trigger.
 */
struct mock_event_log {
	u16 clear_idx;
	u16 cur_idx;
	u16 nr_events;
	u16 nr_overflow;
	u16 overflow_reset;
	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
};
142
/* Per-device collection of mock logs plus the aggregate event-status bits */
struct mock_event_store {
	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
	u32 ev_status;
};
147
/* Per mock-device driver data, stashed in drvdata by the probe path */
struct cxl_mockmem_data {
	void *lsa;		/* backing store for the Label Storage Area */
	void *fw;		/* backing store for firmware slots */
	int fw_slot;
	int fw_staged;
	size_t fw_size;
	u32 security_state;	/* CXL_PMEM_SEC_STATE_* bitmask */
	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
	int user_limit;		/* failed user passphrase attempts */
	int master_limit;	/* failed master passphrase attempts */
	struct mock_event_store mes;
	struct cxl_memdev_state *mds;
	u8 event_buf[SZ_4K];
	u64 timestamp;
	unsigned long sanitize_timeout;
};
165
event_find_log(struct device * dev,int log_type)166 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
167 {
168 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
169
170 if (log_type >= CXL_EVENT_TYPE_MAX)
171 return NULL;
172 return &mdata->mes.mock_logs[log_type];
173 }
174
event_get_current(struct mock_event_log * log)175 static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
176 {
177 return log->events[log->cur_idx];
178 }
179
event_reset_log(struct mock_event_log * log)180 static void event_reset_log(struct mock_event_log *log)
181 {
182 log->cur_idx = 0;
183 log->clear_idx = 0;
184 log->nr_overflow = log->overflow_reset;
185 }
186
187 /* Handle can never be 0 use 1 based indexing for handle */
event_get_clear_handle(struct mock_event_log * log)188 static u16 event_get_clear_handle(struct mock_event_log *log)
189 {
190 return log->clear_idx + 1;
191 }
192
193 /* Handle can never be 0 use 1 based indexing for handle */
event_get_cur_event_handle(struct mock_event_log * log)194 static __le16 event_get_cur_event_handle(struct mock_event_log *log)
195 {
196 u16 cur_handle = log->cur_idx + 1;
197
198 return cpu_to_le16(cur_handle);
199 }
200
event_log_empty(struct mock_event_log * log)201 static bool event_log_empty(struct mock_event_log *log)
202 {
203 return log->cur_idx == log->nr_events;
204 }
205
mes_add_event(struct mock_event_store * mes,enum cxl_event_log_type log_type,struct cxl_event_record_raw * event)206 static void mes_add_event(struct mock_event_store *mes,
207 enum cxl_event_log_type log_type,
208 struct cxl_event_record_raw *event)
209 {
210 struct mock_event_log *log;
211
212 if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
213 return;
214
215 log = &mes->mock_logs[log_type];
216
217 if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
218 log->nr_overflow++;
219 log->overflow_reset = log->nr_overflow;
220 return;
221 }
222
223 log->events[log->nr_events] = event;
224 log->nr_events++;
225 }
226
227 /*
228 * Vary the number of events returned to simulate events occuring while the
229 * logs are being read.
230 */
231 static int ret_limit = 0;
232
mock_get_event(struct device * dev,struct cxl_mbox_cmd * cmd)233 static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
234 {
235 struct cxl_get_event_payload *pl;
236 struct mock_event_log *log;
237 u16 nr_overflow;
238 u8 log_type;
239 int i;
240
241 if (cmd->size_in != sizeof(log_type))
242 return -EINVAL;
243
244 ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX;
245 if (!ret_limit)
246 ret_limit = 1;
247
248 if (cmd->size_out < struct_size(pl, records, ret_limit))
249 return -EINVAL;
250
251 log_type = *((u8 *)cmd->payload_in);
252 if (log_type >= CXL_EVENT_TYPE_MAX)
253 return -EINVAL;
254
255 memset(cmd->payload_out, 0, struct_size(pl, records, 0));
256
257 log = event_find_log(dev, log_type);
258 if (!log || event_log_empty(log))
259 return 0;
260
261 pl = cmd->payload_out;
262
263 for (i = 0; i < ret_limit && !event_log_empty(log); i++) {
264 memcpy(&pl->records[i], event_get_current(log),
265 sizeof(pl->records[i]));
266 pl->records[i].event.generic.hdr.handle =
267 event_get_cur_event_handle(log);
268 log->cur_idx++;
269 }
270
271 cmd->size_out = struct_size(pl, records, i);
272 pl->record_count = cpu_to_le16(i);
273 if (!event_log_empty(log))
274 pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
275
276 if (log->nr_overflow) {
277 u64 ns;
278
279 pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
280 pl->overflow_err_count = cpu_to_le16(nr_overflow);
281 ns = ktime_get_real_ns();
282 ns -= 5000000000; /* 5s ago */
283 pl->first_overflow_timestamp = cpu_to_le64(ns);
284 ns = ktime_get_real_ns();
285 ns -= 1000000000; /* 1s ago */
286 pl->last_overflow_timestamp = cpu_to_le64(ns);
287 }
288
289 return 0;
290 }
291
mock_clear_event(struct device * dev,struct cxl_mbox_cmd * cmd)292 static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
293 {
294 struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
295 struct mock_event_log *log;
296 u8 log_type = pl->event_log;
297 u16 handle;
298 int nr;
299
300 if (log_type >= CXL_EVENT_TYPE_MAX)
301 return -EINVAL;
302
303 log = event_find_log(dev, log_type);
304 if (!log)
305 return 0; /* No mock data in this log */
306
307 /*
308 * This check is technically not invalid per the specification AFAICS.
309 * (The host could 'guess' handles and clear them in order).
310 * However, this is not good behavior for the host so test it.
311 */
312 if (log->clear_idx + pl->nr_recs > log->cur_idx) {
313 dev_err(dev,
314 "Attempting to clear more events than returned!\n");
315 return -EINVAL;
316 }
317
318 /* Check handle order prior to clearing events */
319 for (nr = 0, handle = event_get_clear_handle(log);
320 nr < pl->nr_recs;
321 nr++, handle++) {
322 if (handle != le16_to_cpu(pl->handles[nr])) {
323 dev_err(dev, "Clearing events out of order\n");
324 return -EINVAL;
325 }
326 }
327
328 if (log->nr_overflow)
329 log->nr_overflow = 0;
330
331 /* Clear events */
332 log->clear_idx += pl->nr_recs;
333 return 0;
334 }
335
cxl_mock_event_trigger(struct device * dev)336 static void cxl_mock_event_trigger(struct device *dev)
337 {
338 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
339 struct mock_event_store *mes = &mdata->mes;
340 int i;
341
342 for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
343 struct mock_event_log *log;
344
345 log = event_find_log(dev, i);
346 if (log)
347 event_reset_log(log);
348 }
349
350 cxl_mem_get_event_records(mdata->mds, mes->ev_status);
351 }
352
/* Generic mock event flagged "maintenance needed" */
struct cxl_event_record_raw maint_needed = {
	.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xa5b6),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
366
/* Generic mock event flagged "hardware replacement needed" */
struct cxl_event_record_raw hardware_replace = {
	.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xb6a5),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
380
/* Raw-record layout: event UUID followed by a General Media record */
struct cxl_test_gen_media {
	uuid_t id;
	struct cxl_event_gen_media rec;
} __packed;

/* Mock General Media event; validity_flags is filled in at init time */
struct cxl_test_gen_media gen_media = {
	.id = CXL_EVENT_GEN_MEDIA_UUID,
	.rec = {
		.media_hdr = {
			.hdr = {
				.length = sizeof(struct cxl_test_gen_media),
				.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
				/* .handle = Set dynamically */
				.related_handle = cpu_to_le16(0),
			},
			.phys_addr = cpu_to_le64(0x2000),
			.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
			.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
			.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
			/* .validity_flags = <set below> */
			.channel = 1,
			.rank = 30,
		},
		.component_id = { 0x3, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
		.cme_threshold_ev_flags = 3,
		.cme_count = { 33, 0, 0 },
		.sub_type = 0x2,
	},
};
410
/* Raw-record layout: event UUID followed by a DRAM event record */
struct cxl_test_dram {
	uuid_t id;
	struct cxl_event_dram rec;
} __packed;

/* Mock DRAM event; validity_flags is filled in at init time */
struct cxl_test_dram dram = {
	.id = CXL_EVENT_DRAM_UUID,
	.rec = {
		.media_hdr = {
			.hdr = {
				.length = sizeof(struct cxl_test_dram),
				.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
				/* .handle = Set dynamically */
				.related_handle = cpu_to_le16(0),
			},
			.phys_addr = cpu_to_le64(0x8000),
			.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
			.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
			.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
			/* .validity_flags = <set below> */
			.channel = 1,
		},
		.bank_group = 5,
		.bank = 2,
		.column = {0xDE, 0xAD},
		.component_id = { 0x1, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
		.sub_channel = 8,
		.cme_threshold_ev_flags = 2,
		.cvme_count = { 14, 0, 0 },
		.sub_type = 0x5,
	},
};
443
/* Raw-record layout: event UUID followed by a Memory Module record */
struct cxl_test_mem_module {
	uuid_t id;
	struct cxl_event_mem_module rec;
} __packed;

/* Mock Memory Module event; validity_flags is filled in at init time */
struct cxl_test_mem_module mem_module = {
	.id = CXL_EVENT_MEM_MODULE_UUID,
	.rec = {
		.hdr = {
			.length = sizeof(struct cxl_test_mem_module),
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0),
		},
		.event_type = CXL_MMER_TEMP_CHANGE,
		.info = {
			.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
			.media_status = CXL_DHI_MS_ALL_DATA_LOST,
			.add_status = (CXL_DHI_AS_CRITICAL << 2) |
				      (CXL_DHI_AS_WARNING << 4) |
				      (CXL_DHI_AS_WARNING << 5),
			.device_temp = { 0xDE, 0xAD},
			.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
		},
		/* .validity_flags = <set below> */
		.component_id = { 0x2, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
		.event_sub_type = 0x3,
	},
};
474
mock_set_timestamp(struct cxl_dev_state * cxlds,struct cxl_mbox_cmd * cmd)475 static int mock_set_timestamp(struct cxl_dev_state *cxlds,
476 struct cxl_mbox_cmd *cmd)
477 {
478 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
479 struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
480
481 if (cmd->size_in != sizeof(*ts))
482 return -EINVAL;
483
484 if (cmd->size_out != 0)
485 return -EINVAL;
486
487 mdata->timestamp = le64_to_cpu(ts->timestamp);
488 return 0;
489 }
490
/*
 * Populate the mock event logs with a fixed scenario:
 *  - INFO log gets three distinct records,
 *  - FAIL log is filled past CXL_TEST_EVENT_CNT_MAX to exercise overflow,
 *  - FATAL log gets two records.
 * The order of mes_add_event() calls determines the event handles the
 * host observes, so do not reorder these.
 */
static void cxl_mock_add_event_logs(struct mock_event_store *mes)
{
	/* validity_flags are unaligned __le16 fields, hence put_unaligned */
	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK |
			   CXL_GMER_VALID_COMPONENT | CXL_GMER_VALID_COMPONENT_ID_FORMAT,
			   &gen_media.rec.media_hdr.validity_flags);

	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN | CXL_DER_VALID_SUB_CHANNEL |
			   CXL_DER_VALID_COMPONENT | CXL_DER_VALID_COMPONENT_ID_FORMAT,
			   &dram.rec.media_hdr.validity_flags);

	put_unaligned_le16(CXL_MMER_VALID_COMPONENT | CXL_MMER_VALID_COMPONENT_ID_FORMAT,
			   &mem_module.rec.validity_flags);

	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&mem_module);
	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;

	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&mem_module);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	/* Overflow this log */
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;

	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
		      (struct cxl_event_record_raw *)&dram);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
}
541
mock_gsl(struct cxl_mbox_cmd * cmd)542 static int mock_gsl(struct cxl_mbox_cmd *cmd)
543 {
544 if (cmd->size_out < sizeof(mock_gsl_payload))
545 return -EINVAL;
546
547 memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
548 cmd->size_out = sizeof(mock_gsl_payload);
549
550 return 0;
551 }
552
mock_get_log(struct cxl_memdev_state * mds,struct cxl_mbox_cmd * cmd)553 static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
554 {
555 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
556 struct cxl_mbox_get_log *gl = cmd->payload_in;
557 u32 offset = le32_to_cpu(gl->offset);
558 u32 length = le32_to_cpu(gl->length);
559 uuid_t uuid = DEFINE_CXL_CEL_UUID;
560 void *data = &mock_cel;
561
562 if (cmd->size_in < sizeof(*gl))
563 return -EINVAL;
564 if (length > cxl_mbox->payload_size)
565 return -EINVAL;
566 if (offset + length > sizeof(mock_cel))
567 return -EINVAL;
568 if (!uuid_equal(&gl->uuid, &uuid))
569 return -EINVAL;
570 if (length > cmd->size_out)
571 return -EINVAL;
572
573 memcpy(cmd->payload_out, data + offset, length);
574
575 return 0;
576 }
577
mock_rcd_id(struct cxl_mbox_cmd * cmd)578 static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
579 {
580 struct cxl_mbox_identify id = {
581 .fw_revision = { "mock fw v1 " },
582 .total_capacity =
583 cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
584 .volatile_capacity =
585 cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
586 };
587
588 if (cmd->size_out < sizeof(id))
589 return -EINVAL;
590
591 memcpy(cmd->payload_out, &id, sizeof(id));
592
593 return 0;
594 }
595
mock_id(struct cxl_mbox_cmd * cmd)596 static int mock_id(struct cxl_mbox_cmd *cmd)
597 {
598 struct cxl_mbox_identify id = {
599 .fw_revision = { "mock fw v1 " },
600 .lsa_size = cpu_to_le32(LSA_SIZE),
601 .partition_align =
602 cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
603 .total_capacity =
604 cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
605 .inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
606 };
607
608 put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
609
610 if (cmd->size_out < sizeof(id))
611 return -EINVAL;
612
613 memcpy(cmd->payload_out, &id, sizeof(id));
614
615 return 0;
616 }
617
mock_partition_info(struct cxl_mbox_cmd * cmd)618 static int mock_partition_info(struct cxl_mbox_cmd *cmd)
619 {
620 struct cxl_mbox_get_partition_info pi = {
621 .active_volatile_cap =
622 cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
623 .active_persistent_cap =
624 cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
625 };
626
627 if (cmd->size_out < sizeof(pi))
628 return -EINVAL;
629
630 memcpy(cmd->payload_out, &pi, sizeof(pi));
631
632 return 0;
633 }
634
/*
 * Delayed-work handler that completes a mock sanitize operation:
 * notify any sysfs waiter and mark the operation inactive. State is
 * updated under mbox_mutex to serialize with mock_sanitize().
 * NOTE(review): this looks file-local; consider making it static if no
 * external declaration exists — confirm against the rest of the file.
 */
void cxl_mockmem_sanitize_work(struct work_struct *work)
{
	struct cxl_memdev_state *mds =
		container_of(work, typeof(*mds), security.poll_dwork.work);
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

	mutex_lock(&cxl_mbox->mbox_mutex);
	if (mds->security.sanitize_node)
		sysfs_notify_dirent(mds->security.sanitize_node);
	mds->security.sanitize_active = false;
	mutex_unlock(&cxl_mbox->mbox_mutex);

	dev_dbg(mds->cxlds.dev, "sanitize complete\n");
}
649
mock_sanitize(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)650 static int mock_sanitize(struct cxl_mockmem_data *mdata,
651 struct cxl_mbox_cmd *cmd)
652 {
653 struct cxl_memdev_state *mds = mdata->mds;
654 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
655 int rc = 0;
656
657 if (cmd->size_in != 0)
658 return -EINVAL;
659
660 if (cmd->size_out != 0)
661 return -EINVAL;
662
663 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
664 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
665 return -ENXIO;
666 }
667 if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
668 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
669 return -ENXIO;
670 }
671
672 mutex_lock(&cxl_mbox->mbox_mutex);
673 if (schedule_delayed_work(&mds->security.poll_dwork,
674 msecs_to_jiffies(mdata->sanitize_timeout))) {
675 mds->security.sanitize_active = true;
676 dev_dbg(mds->cxlds.dev, "sanitize issued\n");
677 } else
678 rc = -EBUSY;
679 mutex_unlock(&cxl_mbox->mbox_mutex);
680
681 return rc;
682 }
683
mock_secure_erase(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)684 static int mock_secure_erase(struct cxl_mockmem_data *mdata,
685 struct cxl_mbox_cmd *cmd)
686 {
687 if (cmd->size_in != 0)
688 return -EINVAL;
689
690 if (cmd->size_out != 0)
691 return -EINVAL;
692
693 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
694 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
695 return -ENXIO;
696 }
697
698 if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
699 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
700 return -ENXIO;
701 }
702
703 return 0;
704 }
705
mock_get_security_state(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)706 static int mock_get_security_state(struct cxl_mockmem_data *mdata,
707 struct cxl_mbox_cmd *cmd)
708 {
709 if (cmd->size_in)
710 return -EINVAL;
711
712 if (cmd->size_out != sizeof(u32))
713 return -EINVAL;
714
715 memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
716
717 return 0;
718 }
719
master_plimit_check(struct cxl_mockmem_data * mdata)720 static void master_plimit_check(struct cxl_mockmem_data *mdata)
721 {
722 if (mdata->master_limit == PASS_TRY_LIMIT)
723 return;
724 mdata->master_limit++;
725 if (mdata->master_limit == PASS_TRY_LIMIT)
726 mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
727 }
728
user_plimit_check(struct cxl_mockmem_data * mdata)729 static void user_plimit_check(struct cxl_mockmem_data *mdata)
730 {
731 if (mdata->user_limit == PASS_TRY_LIMIT)
732 return;
733 mdata->user_limit++;
734 if (mdata->user_limit == PASS_TRY_LIMIT)
735 mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
736 }
737
/*
 * Mock Set Passphrase: install or change the master/user passphrase.
 * Rejected entirely when security is frozen; each passphrase type is
 * additionally gated by its attempt-limit (PLIMIT) bit, and a wrong
 * "old" passphrase counts against that limit.
 */
static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
			       struct cxl_mbox_cmd *cmd)
{
	struct cxl_set_pass *set_pass;

	if (cmd->size_in != sizeof(*set_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	set_pass = cmd->payload_in;
	switch (set_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/*
		 * CXL spec rev3.0 8.2.9.8.6.2, The master pasphrase shall only be set in
		 * the security disabled state when the user passphrase is not set.
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/* Wrong current master passphrase counts toward the limit */
		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/* Wrong current user passphrase counts toward the limit */
		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
	}
	return -EINVAL;
}
797
/*
 * Mock Disable Passphrase: clear the master/user passphrase after
 * verifying the supplied one. Rejected while frozen, while the type's
 * attempt limit is latched, or when that passphrase is not set; a wrong
 * passphrase counts toward the attempt limit. Disabling the user
 * passphrase also unlocks the device.
 */
static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
				   struct cxl_mbox_cmd *cmd)
{
	struct cxl_disable_pass *dis_pass;

	if (cmd->size_in != sizeof(*dis_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	dis_pass = cmd->payload_in;
	switch (dis_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}

		mdata->master_limit = 0;
		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}

		mdata->user_limit = 0;
		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		/* Dropping the user passphrase also clears the lock */
		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
					   CXL_PMEM_SEC_STATE_LOCKED);
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
		return -EINVAL;
	}

	/* Unreachable: every switch branch above returns */
	return 0;
}
868
mock_freeze_security(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)869 static int mock_freeze_security(struct cxl_mockmem_data *mdata,
870 struct cxl_mbox_cmd *cmd)
871 {
872 if (cmd->size_in != 0)
873 return -EINVAL;
874
875 if (cmd->size_out != 0)
876 return -EINVAL;
877
878 if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
879 return 0;
880
881 mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
882 return 0;
883 }
884
/*
 * Mock Unlock: verify the user passphrase and clear the LOCKED state.
 * Preconditions checked in order: not frozen, a user passphrase is set,
 * the attempt limit is not latched, and the device is actually locked.
 * A wrong passphrase increments the attempt counter and may latch the
 * USER_PLIMIT bit; a correct one resets the counter.
 */
static int mock_unlock_security(struct cxl_mockmem_data *mdata,
				struct cxl_mbox_cmd *cmd)
{
	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
		if (++mdata->user_limit == PASS_TRY_LIMIT)
			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
		return -ENXIO;
	}

	mdata->user_limit = 0;
	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
	return 0;
}
925
/*
 * Mock Passphrase Secure Erase: verify the supplied master or user
 * passphrase (subject to freeze and attempt-limit state) and then clear
 * the corresponding passphrase/lock state as a real erase would.
 */
static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
					struct cxl_mbox_cmd *cmd)
{
	struct cxl_pass_erase *erase;

	if (cmd->size_in != sizeof(*erase))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	erase = cmd->payload_in;
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_USER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	switch (erase->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a master passphrase is passed in while the master
		 * passphrase is not set and user passphrase is not set. The
		 * code will take the assumption that it will behave the same
		 * as a CXL secure erase command without passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
			if (memcmp(mdata->master_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				master_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			/* Erase via master clears user pass/lock state too */
			mdata->master_limit = 0;
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
		} else {
			/*
			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
			 * When master passphrase is disabled, the device shall
			 * return Invalid Input for the Passphrase Secure Erase
			 * command with master passphrase.
			 */
			return -EINVAL;
		}
		/* Scramble encryption keys so that data is effectively erased */
		break;
	case CXL_PMEM_SEC_PASS_USER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a user passphrase is passed in while the user
		 * passphrase is not set. The code will take the assumption that
		 * it will behave the same as a CXL secure erase command without
		 * passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			if (memcmp(mdata->user_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				user_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		}

		/*
		 * CXL rev3 Table 8-118
		 * If user passphrase is not set or supported by device, current
		 * passphrase value is ignored. Will make the assumption that
		 * the operation will proceed as secure erase w/o passphrase
		 * since spec is not explicit.
		 */

		/* Scramble encryption keys so that data is effectively erased */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1023
mock_get_lsa(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)1024 static int mock_get_lsa(struct cxl_mockmem_data *mdata,
1025 struct cxl_mbox_cmd *cmd)
1026 {
1027 struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
1028 void *lsa = mdata->lsa;
1029 u32 offset, length;
1030
1031 if (sizeof(*get_lsa) > cmd->size_in)
1032 return -EINVAL;
1033 offset = le32_to_cpu(get_lsa->offset);
1034 length = le32_to_cpu(get_lsa->length);
1035 if (offset + length > LSA_SIZE)
1036 return -EINVAL;
1037 if (length > cmd->size_out)
1038 return -EINVAL;
1039
1040 memcpy(cmd->payload_out, lsa + offset, length);
1041 return 0;
1042 }
1043
mock_set_lsa(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)1044 static int mock_set_lsa(struct cxl_mockmem_data *mdata,
1045 struct cxl_mbox_cmd *cmd)
1046 {
1047 struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
1048 void *lsa = mdata->lsa;
1049 u32 offset, length;
1050
1051 if (sizeof(*set_lsa) > cmd->size_in)
1052 return -EINVAL;
1053 offset = le32_to_cpu(set_lsa->offset);
1054 length = cmd->size_in - sizeof(*set_lsa);
1055 if (offset + length > LSA_SIZE)
1056 return -EINVAL;
1057
1058 memcpy(lsa + offset, &set_lsa->data[0], length);
1059 return 0;
1060 }
1061
mock_health_info(struct cxl_mbox_cmd * cmd)1062 static int mock_health_info(struct cxl_mbox_cmd *cmd)
1063 {
1064 struct cxl_mbox_health_info health_info = {
1065 /* set flags for maint needed, perf degraded, hw replacement */
1066 .health_status = 0x7,
1067 /* set media status to "All Data Lost" */
1068 .media_status = 0x3,
1069 /*
1070 * set ext_status flags for:
1071 * ext_life_used: normal,
1072 * ext_temperature: critical,
1073 * ext_corrected_volatile: warning,
1074 * ext_corrected_persistent: normal,
1075 */
1076 .ext_status = 0x18,
1077 .life_used = 15,
1078 .temperature = cpu_to_le16(25),
1079 .dirty_shutdowns = cpu_to_le32(10),
1080 .volatile_errors = cpu_to_le32(20),
1081 .pmem_errors = cpu_to_le32(30),
1082 };
1083
1084 if (cmd->size_out < sizeof(health_info))
1085 return -EINVAL;
1086
1087 memcpy(cmd->payload_out, &health_info, sizeof(health_info));
1088 return 0;
1089 }
1090
/*
 * Global registry of mock-injected poison records. A slot is in use
 * when its ->cxlds is non-NULL; entries are shared across all mock
 * devices and matched per-device by the cxlds pointer.
 */
static struct mock_poison {
	struct cxl_dev_state *cxlds;
	u64 dpa;
} mock_poison_list[MOCK_INJECT_TEST_MAX];
1095
1096 static struct cxl_mbox_poison_out *
cxl_get_injected_po(struct cxl_dev_state * cxlds,u64 offset,u64 length)1097 cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
1098 {
1099 struct cxl_mbox_poison_out *po;
1100 int nr_records = 0;
1101 u64 dpa;
1102
1103 po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
1104 if (!po)
1105 return NULL;
1106
1107 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1108 if (mock_poison_list[i].cxlds != cxlds)
1109 continue;
1110 if (mock_poison_list[i].dpa < offset ||
1111 mock_poison_list[i].dpa > offset + length - 1)
1112 continue;
1113
1114 dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
1115 po->record[nr_records].address = cpu_to_le64(dpa);
1116 po->record[nr_records].length = cpu_to_le32(1);
1117 nr_records++;
1118 if (nr_records == poison_inject_dev_max)
1119 break;
1120 }
1121
1122 /* Always return count, even when zero */
1123 po->count = cpu_to_le16(nr_records);
1124
1125 return po;
1126 }
1127
mock_get_poison(struct cxl_dev_state * cxlds,struct cxl_mbox_cmd * cmd)1128 static int mock_get_poison(struct cxl_dev_state *cxlds,
1129 struct cxl_mbox_cmd *cmd)
1130 {
1131 struct cxl_mbox_poison_in *pi = cmd->payload_in;
1132 struct cxl_mbox_poison_out *po;
1133 u64 offset = le64_to_cpu(pi->offset);
1134 u64 length = le64_to_cpu(pi->length);
1135 int nr_records;
1136
1137 po = cxl_get_injected_po(cxlds, offset, length);
1138 if (!po)
1139 return -ENOMEM;
1140 nr_records = le16_to_cpu(po->count);
1141 memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
1142 cmd->size_out = struct_size(po, record, nr_records);
1143 kfree(po);
1144
1145 return 0;
1146 }
1147
mock_poison_dev_max_injected(struct cxl_dev_state * cxlds)1148 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
1149 {
1150 int count = 0;
1151
1152 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1153 if (mock_poison_list[i].cxlds == cxlds)
1154 count++;
1155 }
1156 return (count >= poison_inject_dev_max);
1157 }
1158
/*
 * Record a poisoned @dpa for @cxlds in the first free global slot.
 * Fails with -EBUSY at the per-device limit and -ENXIO when the whole
 * mock list is exhausted.
 */
static int mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
{
	struct mock_poison *slot = NULL;

	/* Return EBUSY to match the CXL driver handling */
	if (mock_poison_dev_max_injected(cxlds)) {
		dev_dbg(cxlds->dev,
			"Device poison injection limit has been reached: %d\n",
			poison_inject_dev_max);
		return -EBUSY;
	}

	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
		if (!mock_poison_list[i].cxlds) {
			slot = &mock_poison_list[i];
			break;
		}
	}

	if (!slot) {
		dev_dbg(cxlds->dev,
			"Mock test poison injection limit has been reached: %d\n",
			MOCK_INJECT_TEST_MAX);
		return -ENXIO;
	}

	slot->cxlds = cxlds;
	slot->dpa = dpa;
	return 0;
}
1182
mock_poison_found(struct cxl_dev_state * cxlds,u64 dpa)1183 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
1184 {
1185 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1186 if (mock_poison_list[i].cxlds == cxlds &&
1187 mock_poison_list[i].dpa == dpa)
1188 return true;
1189 }
1190 return false;
1191 }
1192
mock_inject_poison(struct cxl_dev_state * cxlds,struct cxl_mbox_cmd * cmd)1193 static int mock_inject_poison(struct cxl_dev_state *cxlds,
1194 struct cxl_mbox_cmd *cmd)
1195 {
1196 struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1197 u64 dpa = le64_to_cpu(pi->address);
1198
1199 if (mock_poison_found(cxlds, dpa)) {
1200 /* Not an error to inject poison if already poisoned */
1201 dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1202 return 0;
1203 }
1204
1205 return mock_poison_add(cxlds, dpa);
1206 }
1207
mock_poison_del(struct cxl_dev_state * cxlds,u64 dpa)1208 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
1209 {
1210 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1211 if (mock_poison_list[i].cxlds == cxlds &&
1212 mock_poison_list[i].dpa == dpa) {
1213 mock_poison_list[i].cxlds = NULL;
1214 return true;
1215 }
1216 }
1217 return false;
1218 }
1219
mock_clear_poison(struct cxl_dev_state * cxlds,struct cxl_mbox_cmd * cmd)1220 static int mock_clear_poison(struct cxl_dev_state *cxlds,
1221 struct cxl_mbox_cmd *cmd)
1222 {
1223 struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1224 u64 dpa = le64_to_cpu(pi->address);
1225
1226 /*
1227 * A real CXL device will write pi->write_data to the address
1228 * being cleared. In this mock, just delete this address from
1229 * the mock poison list.
1230 */
1231 if (!mock_poison_del(cxlds, dpa))
1232 dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1233
1234 return 0;
1235 }
1236
mock_poison_list_empty(void)1237 static bool mock_poison_list_empty(void)
1238 {
1239 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1240 if (mock_poison_list[i].cxlds)
1241 return false;
1242 }
1243 return true;
1244 }
1245
poison_inject_max_show(struct device_driver * drv,char * buf)1246 static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
1247 {
1248 return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
1249 }
1250
poison_inject_max_store(struct device_driver * drv,const char * buf,size_t len)1251 static ssize_t poison_inject_max_store(struct device_driver *drv,
1252 const char *buf, size_t len)
1253 {
1254 int val;
1255
1256 if (kstrtoint(buf, 0, &val) < 0)
1257 return -EINVAL;
1258
1259 if (!mock_poison_list_empty())
1260 return -EBUSY;
1261
1262 if (val <= MOCK_INJECT_TEST_MAX)
1263 poison_inject_dev_max = val;
1264 else
1265 return -EINVAL;
1266
1267 return len;
1268 }
1269
static DRIVER_ATTR_RW(poison_inject_max);

/* driver-scope attributes shared by all mock memdev instances */
static struct attribute *cxl_mock_mem_core_attrs[] = {
	&driver_attr_poison_inject_max.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem_core);
1277
mock_fw_info(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)1278 static int mock_fw_info(struct cxl_mockmem_data *mdata,
1279 struct cxl_mbox_cmd *cmd)
1280 {
1281 struct cxl_mbox_get_fw_info fw_info = {
1282 .num_slots = FW_SLOTS,
1283 .slot_info = (mdata->fw_slot & 0x7) |
1284 ((mdata->fw_staged & 0x7) << 3),
1285 .activation_cap = 0,
1286 };
1287
1288 strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
1289 strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
1290 strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
1291 strcpy(fw_info.slot_4_revision, "");
1292
1293 if (cmd->size_out < sizeof(fw_info))
1294 return -EINVAL;
1295
1296 memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
1297 return 0;
1298 }
1299
mock_transfer_fw(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)1300 static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
1301 struct cxl_mbox_cmd *cmd)
1302 {
1303 struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
1304 void *fw = mdata->fw;
1305 size_t offset, length;
1306
1307 offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
1308 length = cmd->size_in - sizeof(*transfer);
1309 if (offset + length > FW_SIZE)
1310 return -EINVAL;
1311
1312 switch (transfer->action) {
1313 case CXL_FW_TRANSFER_ACTION_FULL:
1314 if (offset != 0)
1315 return -EINVAL;
1316 fallthrough;
1317 case CXL_FW_TRANSFER_ACTION_END:
1318 if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
1319 return -EINVAL;
1320 mdata->fw_size = offset + length;
1321 break;
1322 case CXL_FW_TRANSFER_ACTION_INITIATE:
1323 case CXL_FW_TRANSFER_ACTION_CONTINUE:
1324 break;
1325 case CXL_FW_TRANSFER_ACTION_ABORT:
1326 return 0;
1327 default:
1328 return -EINVAL;
1329 }
1330
1331 memcpy(fw + offset, transfer->data, length);
1332 usleep_range(1500, 2000);
1333 return 0;
1334 }
1335
mock_activate_fw(struct cxl_mockmem_data * mdata,struct cxl_mbox_cmd * cmd)1336 static int mock_activate_fw(struct cxl_mockmem_data *mdata,
1337 struct cxl_mbox_cmd *cmd)
1338 {
1339 struct cxl_mbox_activate_fw *activate = cmd->payload_in;
1340
1341 if (activate->slot == 0 || activate->slot > FW_SLOTS)
1342 return -EINVAL;
1343
1344 switch (activate->action) {
1345 case CXL_FW_ACTIVATE_ONLINE:
1346 mdata->fw_slot = activate->slot;
1347 mdata->fw_staged = 0;
1348 return 0;
1349 case CXL_FW_ACTIVATE_OFFLINE:
1350 mdata->fw_staged = activate->slot;
1351 return 0;
1352 }
1353
1354 return -EINVAL;
1355 }
1356
/*
 * Mock mailbox dispatcher installed as cxl_mbox->mbox_send: routes each
 * CXL mailbox command to the matching mock_* handler by opcode.
 *
 * Returns the handler's result, or -EIO for opcodes this mock does not
 * implement.
 */
static int cxl_mock_mbox_send(struct cxl_mailbox *cxl_mbox,
			      struct cxl_mbox_cmd *cmd)
{
	struct device *dev = cxl_mbox->host;
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	struct cxl_memdev_state *mds = mdata->mds;
	struct cxl_dev_state *cxlds = &mds->cxlds;
	int rc = -EIO;

	switch (cmd->opcode) {
	case CXL_MBOX_OP_SET_TIMESTAMP:
		rc = mock_set_timestamp(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
		rc = mock_gsl(cmd);
		break;
	case CXL_MBOX_OP_GET_LOG:
		rc = mock_get_log(mds, cmd);
		break;
	case CXL_MBOX_OP_IDENTIFY:
		/* an RCD (restricted CXL device) reports a distinct identity */
		if (cxlds->rcd)
			rc = mock_rcd_id(cmd);
		else
			rc = mock_id(cmd);
		break;
	case CXL_MBOX_OP_GET_LSA:
		rc = mock_get_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_PARTITION_INFO:
		rc = mock_partition_info(cmd);
		break;
	case CXL_MBOX_OP_GET_EVENT_RECORD:
		rc = mock_get_event(dev, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
		rc = mock_clear_event(dev, cmd);
		break;
	case CXL_MBOX_OP_SET_LSA:
		rc = mock_set_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_HEALTH_INFO:
		rc = mock_health_info(cmd);
		break;
	case CXL_MBOX_OP_SANITIZE:
		rc = mock_sanitize(mdata, cmd);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		rc = mock_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		rc = mock_get_security_state(mdata, cmd);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		rc = mock_set_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		rc = mock_disable_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		rc = mock_freeze_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_UNLOCK:
		rc = mock_unlock_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		rc = mock_passphrase_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_POISON:
		rc = mock_get_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		rc = mock_inject_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		rc = mock_clear_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_FW_INFO:
		rc = mock_fw_info(mdata, cmd);
		break;
	case CXL_MBOX_OP_TRANSFER_FW:
		rc = mock_transfer_fw(mdata, cmd);
		break;
	case CXL_MBOX_OP_ACTIVATE_FW:
		rc = mock_activate_fw(mdata, cmd);
		break;
	default:
		break;
	}

	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
		cmd->size_in, cmd->size_out, rc);

	return rc;
}
1451
/* devm action: free the vmalloc()'d label storage area */
static void label_area_release(void *lsa)
{
	vfree(lsa);
}

/* devm action: free the vmalloc()'d firmware staging buffer */
static void fw_buf_release(void *buf)
{
	vfree(buf);
}
1461
is_rcd(struct platform_device * pdev)1462 static bool is_rcd(struct platform_device *pdev)
1463 {
1464 const struct platform_device_id *id = platform_get_device_id(pdev);
1465
1466 return !!id->driver_data;
1467 }
1468
/* sysfs hook: any write to event_trigger fires the mock event machinery */
static ssize_t event_trigger_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	cxl_mock_event_trigger(dev);
	return count;
}
static DEVICE_ATTR_WO(event_trigger);
1477
cxl_mock_mailbox_create(struct cxl_dev_state * cxlds)1478 static int cxl_mock_mailbox_create(struct cxl_dev_state *cxlds)
1479 {
1480 int rc;
1481
1482 rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev);
1483 if (rc)
1484 return rc;
1485
1486 return 0;
1487 }
1488
/*
 * Instantiate a mock CXL memory device: allocate backing stores for the
 * label area and firmware buffer, create the memdev state and mailbox,
 * then run the standard cxl_core enumeration sequence against the mock
 * mailbox handlers. All resources are devm-managed; every early return
 * unwinds cleanly.
 */
static int cxl_mock_mem_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cxl_memdev *cxlmd;
	struct cxl_memdev_state *mds;
	struct cxl_dev_state *cxlds;
	struct cxl_mockmem_data *mdata;
	struct cxl_mailbox *cxl_mbox;
	int rc;

	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;
	dev_set_drvdata(dev, mdata);

	/* vmalloc: LSA_SIZE (128K) and FW_SIZE (64M) are too big for kmalloc */
	mdata->lsa = vmalloc(LSA_SIZE);
	if (!mdata->lsa)
		return -ENOMEM;
	mdata->fw = vmalloc(FW_SIZE);
	if (!mdata->fw)
		return -ENOMEM;
	mdata->fw_slot = 2;

	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
	if (rc)
		return rc;

	mds = cxl_memdev_state_create(dev);
	if (IS_ERR(mds))
		return PTR_ERR(mds);

	cxlds = &mds->cxlds;
	rc = cxl_mock_mailbox_create(cxlds);
	if (rc)
		return rc;

	/* route all mailbox traffic through the mock dispatcher */
	cxl_mbox = &mds->cxlds.cxl_mbox;
	mdata->mds = mds;
	cxl_mbox->mbox_send = cxl_mock_mbox_send;
	cxl_mbox->payload_size = SZ_4K;
	mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);

	cxlds->serial = pdev->id;
	if (is_rcd(pdev))
		cxlds->rcd = true;

	/* standard cxl_core bring-up sequence, order matters */
	rc = cxl_enumerate_cmds(mds);
	if (rc)
		return rc;

	rc = cxl_poison_state_init(mds);
	if (rc)
		return rc;

	rc = cxl_set_timestamp(mds);
	if (rc)
		return rc;

	cxlds->media_ready = true;
	rc = cxl_dev_state_identify(mds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(mds);
	if (rc)
		return rc;

	cxl_mock_add_event_logs(&mdata->mes);

	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
	if (rc)
		return rc;

	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
	if (rc)
		return rc;

	/* kick initial event retrieval so the pre-populated logs surface */
	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);

	return 0;
}
1579
security_lock_show(struct device * dev,struct device_attribute * attr,char * buf)1580 static ssize_t security_lock_show(struct device *dev,
1581 struct device_attribute *attr, char *buf)
1582 {
1583 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1584
1585 return sysfs_emit(buf, "%u\n",
1586 !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1587 }
1588
security_lock_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1589 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1590 const char *buf, size_t count)
1591 {
1592 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1593 u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1594 CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1595 int val;
1596
1597 if (kstrtoint(buf, 0, &val) < 0)
1598 return -EINVAL;
1599
1600 if (val == 1) {
1601 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1602 return -ENXIO;
1603 mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1604 mdata->security_state &= ~mask;
1605 } else {
1606 return -EINVAL;
1607 }
1608 return count;
1609 }
1610
1611 static DEVICE_ATTR_RW(security_lock);
1612
fw_buf_checksum_show(struct device * dev,struct device_attribute * attr,char * buf)1613 static ssize_t fw_buf_checksum_show(struct device *dev,
1614 struct device_attribute *attr, char *buf)
1615 {
1616 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1617 u8 hash[SHA256_DIGEST_SIZE];
1618 unsigned char *hstr, *hptr;
1619 struct sha256_state sctx;
1620 ssize_t written = 0;
1621 int i;
1622
1623 sha256_init(&sctx);
1624 sha256_update(&sctx, mdata->fw, mdata->fw_size);
1625 sha256_final(&sctx, hash);
1626
1627 hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
1628 if (!hstr)
1629 return -ENOMEM;
1630
1631 hptr = hstr;
1632 for (i = 0; i < SHA256_DIGEST_SIZE; i++)
1633 hptr += sprintf(hptr, "%02x", hash[i]);
1634
1635 written = sysfs_emit(buf, "%s\n", hstr);
1636
1637 kfree(hstr);
1638 return written;
1639 }
1640
1641 static DEVICE_ATTR_RO(fw_buf_checksum);
1642
sanitize_timeout_show(struct device * dev,struct device_attribute * attr,char * buf)1643 static ssize_t sanitize_timeout_show(struct device *dev,
1644 struct device_attribute *attr, char *buf)
1645 {
1646 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1647
1648 return sysfs_emit(buf, "%lu\n", mdata->sanitize_timeout);
1649 }
1650
sanitize_timeout_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1651 static ssize_t sanitize_timeout_store(struct device *dev,
1652 struct device_attribute *attr,
1653 const char *buf, size_t count)
1654 {
1655 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1656 unsigned long val;
1657 int rc;
1658
1659 rc = kstrtoul(buf, 0, &val);
1660 if (rc)
1661 return rc;
1662
1663 mdata->sanitize_timeout = val;
1664
1665 return count;
1666 }
1667
1668 static DEVICE_ATTR_RW(sanitize_timeout);
1669
/* per-device sysfs knobs exercising security, events, fw, and sanitize */
static struct attribute *cxl_mock_mem_attrs[] = {
	&dev_attr_security_lock.attr,
	&dev_attr_event_trigger.attr,
	&dev_attr_fw_buf_checksum.attr,
	&dev_attr_sanitize_timeout.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem);
1678
1679 static const struct platform_device_id cxl_mock_mem_ids[] = {
1680 { .name = "cxl_mem", 0 },
1681 { .name = "cxl_rcd", 1 },
1682 { },
1683 };
1684 MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1685
/* registers the mock driver for both "cxl_mem" and "cxl_rcd" devices */
static struct platform_driver cxl_mock_mem_driver = {
	.probe = cxl_mock_mem_probe,
	.id_table = cxl_mock_mem_ids,
	.driver = {
		.name = KBUILD_MODNAME,
		.dev_groups = cxl_mock_mem_groups,
		.groups = cxl_mock_mem_core_groups,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_platform_driver(cxl_mock_mem_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("CXL");
1700