// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/mutex.h>
#include <asm/unaligned.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"
#include "trace.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd)						\
	for ((cmd) = &cxl_mem_commands[0];				\
	     ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags)					\
	[CXL_MEM_COMMAND_ID_##_id] = {					\
	.info =	{							\
			.id = CXL_MEM_COMMAND_ID_##_id,			\
			.size_in = sin,					\
			.size_out = sout,				\
		},							\
	.opcode = CXL_MBOX_OP_##_id,					\
	.flags = _flags,						\
	}

#define CXL_VARIABLE_PAYLOAD	~0U
/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Non-negative values as parameters in the
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
};
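
/*
 * For reference, a fixed-size entry such as
 * CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE) expands roughly to:
 *
 *	[CXL_MEM_COMMAND_ID_IDENTIFY] = {
 *		.info = {
 *			.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *			.size_in = 0,
 *			.size_out = 0x43,
 *		},
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.flags = CXL_CMD_FLAG_FORCE_ENABLE,
 *	},
 *
 * i.e. the UAPI command id doubles as the array index and the hardware
 * opcode is derived from the same token.
 */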

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidates those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean, userspace can not make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 *
 * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel
 * driver orchestration for safety.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
	CXL_MBOX_OP_GET_POISON,
	CXL_MBOX_OP_INJECT_POISON,
	CXL_MBOX_OP_CLEAR_POISON,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI which allows for encrypted payloads in the UAPI
 */
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}
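
/*
 * The security command set is the upper byte of the opcode. For example,
 * assuming the opcode encoding from cxlmem.h (CXL_MBOX_OP_SANITIZE being
 * 0x4400), the check above behaves as:
 *
 *	cxl_is_security_command(0x4400);	returns true  (set 0x44)
 *	cxl_is_security_command(0x4100);	returns false (0x41 not listed)
 */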

static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
					 u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_SANITIZE:
		set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_UNLOCK:
		set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
			security->enabled_cmds);
		break;
	default:
		break;
	}
}

static bool cxl_is_poison_command(u16 opcode)
{
#define CXL_MBOX_OP_POISON_CMDS 0x43

	if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS)
		return true;

	return false;
}

static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison,
				       u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_GET_POISON:
		set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS:
		set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_SCAN_MEDIA:
		set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SCAN_MEDIA:
		set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds);
		break;
	default:
		break;
	}
}

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
	struct cxl_mem_command *c;

	c = cxl_mem_find_command(opcode);
	if (!c)
		return NULL;

	return cxl_command_names[c->info.id].name;
}

/**
 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
 * @mds: The driver data for the operation
 * @mbox_cmd: initialized command to execute
 *
 * Context: Any context.
 * Return:
 *  * %0	- Success.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT	- Hardware error occurred.
 *  * %-ENXIO	- Command completed, but device reported an error.
 *  * %-EIO	- Unexpected output size.
 *
 * Mailbox commands may execute successfully yet the device itself reported an
 * error. While this distinction can be useful for commands from userspace, the
 * kernel will only be able to use results when both are successful.
 */
int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
			  struct cxl_mbox_cmd *mbox_cmd)
{
	size_t out_size, min_out;
	int rc;

	if (mbox_cmd->size_in > mds->payload_size ||
	    mbox_cmd->size_out > mds->payload_size)
		return -E2BIG;

	out_size = mbox_cmd->size_out;
	min_out = mbox_cmd->min_out;
	rc = mds->mbox_send(mds, mbox_cmd);
	/*
	 * EIO is reserved for a payload size mismatch and mbox_send()
	 * may not return this error.
	 */
	if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO"))
		return -ENXIO;
	if (rc)
		return rc;

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
	    mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
		return cxl_mbox_cmd_rc2errno(mbox_cmd);

	if (!out_size)
		return 0;

	/*
	 * Variable sized output needs to at least satisfy the caller's
	 * minimum if not the fully requested size.
	 */
	if (min_out == 0)
		min_out = out_size;

	if (mbox_cmd->size_out < min_out)
		return -EIO;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL);
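
/*
 * A minimal sketch of the internal caller pattern (cxl_set_timestamp()
 * below follows the same shape): build a struct cxl_mbox_cmd on the stack
 * with payload buffers sized for the opcode, then check the return code.
 * The payload struct name here is only illustrative and may not match
 * cxlmem.h exactly:
 *
 *	struct cxl_mbox_get_health_info health;
 *	struct cxl_mbox_cmd mbox_cmd = {
 *		.opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
 *		.size_out = sizeof(health),
 *		.payload_out = &health,
 *	};
 *	int rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 *
 *	if (rc)
 *		return rc;
 */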

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}

/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true	- payload_in passes check for @opcode.
 *  * false	- payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
	switch (opcode) {
	case CXL_MBOX_OP_SET_PARTITION_INFO: {
		struct cxl_mbox_set_partition_info *pi = payload_in;

		if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
			return false;
		break;
	}
	default:
		break;
	}
	return true;
}

static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
			     struct cxl_memdev_state *mds, u16 opcode,
			     size_t in_size, size_t out_size, u64 in_payload)
{
	*mbox = (struct cxl_mbox_cmd) {
		.opcode = opcode,
		.size_in = in_size,
	};

	if (in_size) {
		mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						in_size);
		if (IS_ERR(mbox->payload_in))
			return PTR_ERR(mbox->payload_in);

		if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
			dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n",
				cxl_mem_opcode_to_name(opcode));
			kvfree(mbox->payload_in);
			return -EBUSY;
		}
	}

	/* Prepare to handle a full payload for variable sized output */
	if (out_size == CXL_VARIABLE_PAYLOAD)
		mbox->size_out = mds->payload_size;
	else
		mbox->size_out = out_size;

	if (mbox->size_out) {
		mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
		if (!mbox->payload_out) {
			kvfree(mbox->payload_in);
			return -ENOMEM;
		}
	}
	return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
	kvfree(mbox->payload_in);
	kvfree(mbox->payload_out);
}

static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
			      const struct cxl_send_command *send_cmd,
			      struct cxl_memdev_state *mds)
{
	if (send_cmd->raw.rsvd)
		return -EINVAL;

	/*
	 * Unlike supported commands, the output size of RAW commands
	 * gets passed along without further checking, so it must be
	 * validated here.
	 */
	if (send_cmd->out.size > mds->payload_size)
		return -EINVAL;

	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
		return -EPERM;

	dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n");

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = CXL_MEM_COMMAND_ID_RAW,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = send_cmd->raw.opcode
	};

	return 0;
}

static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
			  const struct cxl_send_command *send_cmd,
			  struct cxl_memdev_state *mds)
{
	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
	const struct cxl_command_info *info = &c->info;

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, mds->enabled_cmds))
		return -ENOTTY;

	/* Check that the command is not claimed for exclusive kernel use */
	if (test_bit(info->id, mds->exclusive_cmds))
		return -EBUSY;

	/* Check the input buffer is the expected size */
	if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
	    (info->size_in != send_cmd->in.size))
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
	    (send_cmd->out.size < info->size_out))
		return -ENOMEM;

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = info->id,
			.flags = info->flags,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = c->opcode
	};

	return 0;
}
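
/*
 * Example of how the fixed-size checks above apply (illustrative only):
 * GET_HEALTH_INFO is declared with size_in = 0 and size_out = 0x12 in
 * cxl_mem_commands[], so a conforming userspace request looks like:
 *
 *	struct cxl_send_command send = {
 *		.id = CXL_MEM_COMMAND_ID_GET_HEALTH_INFO,
 *		.in = { .size = 0 },
 *		.out = { .size = 0x12, .payload = (__u64)(uintptr_t)buf },
 *	};
 *
 * A smaller .out.size would be rejected with -ENOMEM above, while commands
 * declared with CXL_VARIABLE_PAYLOAD skip the corresponding size check.
 */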

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @mds: The driver data for the operation
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0	- @out_cmd is ready to send.
 *  * %-ENOTTY	- Invalid command specified.
 *  * %-EINVAL	- Reserved fields or invalid values were used.
 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 *  * %-EPERM	- Attempted to use a protected command.
 *  * %-EBUSY	- Kernel has claimed exclusive access to this opcode
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
				      struct cxl_memdev_state *mds,
				      const struct cxl_send_command *send_cmd)
{
	struct cxl_mem_command mem_cmd;
	int rc;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

	/*
	 * The user can never specify an input payload larger than what hardware
	 * supports, but output can be arbitrarily large (simply write out as
	 * much data as the hardware provides).
	 */
	if (send_cmd->in.size > mds->payload_size)
		return -EINVAL;

	/* Sanitize and construct a cxl_mem_command */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds);
	else
		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds);

	if (rc)
		return rc;

	/* Sanitize and construct a cxl_mbox_cmd */
	return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode,
				 mem_cmd.info.size_in, mem_cmd.info.size_out,
				 send_cmd->in.payload);
}

int cxl_query_cmd(struct cxl_memdev *cxlmd,
		  struct cxl_mem_query_commands __user *q)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct device *dev = &cxlmd->dev;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* returns the total number if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

	/*
	 * otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */
	cxl_for_each_cmd(cmd) {
		struct cxl_command_info info = cmd->info;

		if (test_bit(info.id, mds->enabled_cmds))
			info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
		if (test_bit(info.id, mds->exclusive_cmds))
			info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;

		if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}
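
/*
 * Userspace typically drives the query in two passes (a sketch, assuming
 * the CXL_MEM_QUERY_COMMANDS ioctl from the cxl_mem.h UAPI): a first call
 * with n_commands == 0 reports the total count, then a second call with a
 * suitably sized buffer fills in the cxl_command_info entries:
 *
 *	struct cxl_mem_query_commands *q = calloc(1, sizeof(*q));
 *
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);
 *	q = realloc(q, sizeof(*q) + q->n_commands * sizeof(q->commands[0]));
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);
 */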

/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @mds: The driver data for the operation
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 *		  protocol completed successfully not that the operation itself
 *		  was successful.
 *  * %-ENOMEM	- Couldn't allocate a bounce buffer.
 *  * %-EFAULT	- Something happened with copy_to/from_user.
 *  * %-EINTR	- Mailbox acquisition interrupted.
 *  * %-EXXX	- Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
					struct cxl_mbox_cmd *mbox_cmd,
					u64 out_payload, s32 *size_out,
					u32 *retval)
{
	struct device *dev = mds->cxlds.dev;
	int rc;

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: %x\n"
		"\tsize: %zx\n",
		cxl_mem_opcode_to_name(mbox_cmd->opcode),
		mbox_cmd->opcode, mbox_cmd->size_in);

	rc = mds->mbox_send(mds, mbox_cmd);
	if (rc)
		goto out;

	/*
	 * @size_out contains the max size that's allowed to be written back out
	 * to userspace. While the payload may have written more output than
	 * this it will have to be ignored.
	 */
	if (mbox_cmd->size_out) {
		dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd->payload_out, mbox_cmd->size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd->size_out;
	*retval = mbox_cmd->return_code;

out:
	cxl_mbox_cmd_dtor(mbox_cmd);
	return rc;
}

int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct device *dev = &cxlmd->dev;
	struct cxl_send_command send;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send);
	if (rc)
		return rc;

	rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload,
					  &send.out.size, &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}

static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
			u32 *size, u8 *out)
{
	u32 remaining = *size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, mds->payload_size);
		struct cxl_mbox_cmd mbox_cmd;
		struct cxl_mbox_get_log log;
		int rc;

		log = (struct cxl_mbox_get_log) {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size),
		};

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_LOG,
			.size_in = sizeof(log),
			.payload_in = &log,
			.size_out = xfer_size,
			.payload_out = out,
		};

		rc = cxl_internal_send_cmd(mds, &mbox_cmd);

		/*
		 * The output payload length that indicates the number
		 * of valid bytes can be smaller than the Log buffer
		 * size.
		 */
		if (rc == -EIO && mbox_cmd.size_out < xfer_size) {
			offset += mbox_cmd.size_out;
			break;
		}

		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	*size = offset;

	return 0;
}

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @mds: The driver data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: CEL
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
{
	struct cxl_cel_entry *cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	struct device *dev = mds->cxlds.dev;
	int i;

	cel_entry = (struct cxl_cel_entry *) cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
		int enabled = 0;

		if (cmd) {
			set_bit(cmd->info.id, mds->enabled_cmds);
			enabled++;
		}

		if (cxl_is_poison_command(opcode)) {
			cxl_set_poison_cmd_enabled(&mds->poison, opcode);
			enabled++;
		}

		if (cxl_is_security_command(opcode)) {
			cxl_set_security_cmd_enabled(&mds->security, opcode);
			enabled++;
		}

		dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
			enabled ? "enabled" : "unsupported by driver");
	}
}

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
{
	struct cxl_mbox_get_supported_logs *ret;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	ret = kvmalloc(mds->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
		.size_out = mds->payload_size,
		.payload_out = ret,
		/* At least the record number field must be valid */
		.min_out = 2,
	};
	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}

enum {
	CEL_UUID,
	VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @mds: The driver data for the operation
 *
 * Returns 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @mds.
 */
int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
{
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = mds->cxlds.dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(mds);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(mds, &uuid, &size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(mds, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, mds->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}
out:
	kvfree(gsl);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
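
/*
 * Typical probe-time ordering in a host driver (illustrative; see the
 * cxl_pci driver for the authoritative sequence): enumerate the command
 * set before issuing optional commands, then identify the device and
 * construct its DPA ranges:
 *
 *	rc = cxl_enumerate_cmds(mds);
 *	if (rc)
 *		return rc;
 *
 *	rc = cxl_dev_state_identify(mds);
 *	if (rc)
 *		return rc;
 *
 *	rc = cxl_mem_create_range_info(mds);
 */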

void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
			    enum cxl_event_log_type type,
			    enum cxl_event_type event_type,
			    const uuid_t *uuid, union cxl_event *evt)
{
	if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
		trace_cxl_general_media(cxlmd, type, &evt->gen_media);
	else if (event_type == CXL_CPER_EVENT_DRAM)
		trace_cxl_dram(cxlmd, type, &evt->dram);
	else if (event_type == CXL_CPER_EVENT_MEM_MODULE)
		trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
	else
		trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);

static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
				     enum cxl_event_log_type type,
				     struct cxl_event_record_raw *record)
{
	enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
	const uuid_t *uuid = &record->id;

	if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
		ev_type = CXL_CPER_EVENT_GEN_MEDIA;
	else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
		ev_type = CXL_CPER_EVENT_DRAM;
	else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
		ev_type = CXL_CPER_EVENT_MEM_MODULE;

	cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}

static int cxl_clear_event_record(struct cxl_memdev_state *mds,
				  enum cxl_event_log_type log,
				  struct cxl_get_event_payload *get_pl)
{
	struct cxl_mbox_clear_event_payload *payload;
	u16 total = le16_to_cpu(get_pl->record_count);
	u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
	size_t pl_size = struct_size(payload, handles, max_handles);
	struct cxl_mbox_cmd mbox_cmd;
	u16 cnt;
	int rc = 0;
	int i;

	/* Payload size may limit the max handles */
	if (pl_size > mds->payload_size) {
		max_handles = (mds->payload_size - sizeof(*payload)) /
			      sizeof(__le16);
		pl_size = struct_size(payload, handles, max_handles);
	}

	payload = kvzalloc(pl_size, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	*payload = (struct cxl_mbox_clear_event_payload) {
		.event_log = log,
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
		.payload_in = payload,
		.size_in = pl_size,
	};

	/*
	 * Clear Event Records uses u8 for the handle cnt while Get Event
	 * Record can return up to 0xffff records.
	 */
	i = 0;
	for (cnt = 0; cnt < total; cnt++) {
		struct cxl_event_record_raw *raw = &get_pl->records[cnt];
		struct cxl_event_generic *gen = &raw->event.generic;

		payload->handles[i++] = gen->hdr.handle;
		dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
			le16_to_cpu(payload->handles[i - 1]));

		if (i == max_handles) {
			payload->nr_recs = i;
			rc = cxl_internal_send_cmd(mds, &mbox_cmd);
			if (rc)
				goto free_pl;
			i = 0;
		}
	}

	/* Clear what is left if any */
	if (i) {
		payload->nr_recs = i;
		mbox_cmd.size_in = struct_size(payload, handles, i);
		rc = cxl_internal_send_cmd(mds, &mbox_cmd);
		if (rc)
			goto free_pl;
	}

free_pl:
	kvfree(payload);
	return rc;
}

static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
				    enum cxl_event_log_type type)
{
	struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
	struct device *dev = mds->cxlds.dev;
	struct cxl_get_event_payload *payload;
	u8 log_type = type;
	u16 nr_rec;

	mutex_lock(&mds->event.log_lock);
	payload = mds->event.buf;

	do {
		int rc, i;
		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
			.payload_in = &log_type,
			.size_in = sizeof(log_type),
			.payload_out = payload,
			.size_out = mds->payload_size,
			.min_out = struct_size(payload, records, 0),
		};

		rc = cxl_internal_send_cmd(mds, &mbox_cmd);
		if (rc) {
			dev_err_ratelimited(dev,
				"Event log '%d': Failed to query event records : %d",
				type, rc);
			break;
		}

		nr_rec = le16_to_cpu(payload->record_count);
		if (!nr_rec)
			break;

		for (i = 0; i < nr_rec; i++)
			__cxl_event_trace_record(cxlmd, type,
						 &payload->records[i]);

		if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
			trace_cxl_overflow(cxlmd, type, payload);

		rc = cxl_clear_event_record(mds, type, payload);
		if (rc) {
			dev_err_ratelimited(dev,
				"Event log '%d': Failed to clear events : %d",
				type, rc);
			break;
		}
	} while (nr_rec);

	mutex_unlock(&mds->event.log_lock);
}

/**
 * cxl_mem_get_event_records - Get Event Records from the device
 * @mds: The driver data for the operation
 * @status: Event Status register value identifying which events are available.
 *
 * Retrieve all event records available on the device, report them as trace
 * events, and clear them.
 *
 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
 */
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
{
	dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);

	if (status & CXLDEV_EVENT_STATUS_FATAL)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
	if (status & CXLDEV_EVENT_STATUS_FAIL)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
	if (status & CXLDEV_EVENT_STATUS_WARN)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
	if (status & CXLDEV_EVENT_STATUS_INFO)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);
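
/*
 * A schematic sketch of how an event notification path might use the
 * helper above; the register read is an assumption for illustration, the
 * real handler lives in the host driver:
 *
 *	u32 status = readl(status_reg);		status_reg is hypothetical
 *
 *	if (status)
 *		cxl_mem_get_event_records(mds, status);
 */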

/**
 * cxl_mem_get_partition_info - Get partition info
 * @mds: The driver data for the operation
 *
 * Retrieve the current partition info for the device specified. The active
 * values are the current capacity in bytes. If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
{
	struct cxl_mbox_get_partition_info pi;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
		.size_out = sizeof(pi),
		.payload_out = &pi,
	};
	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc)
		return rc;

	mds->active_volatile_bytes =
		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	mds->active_persistent_bytes =
		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
	mds->next_volatile_bytes =
		le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	mds->next_persistent_bytes =
		le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

	return 0;
}

/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @mds: The driver data for the operation
 *
 * Return: 0 if identify was executed successfully or media not ready.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_memdev_state *mds)
{
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify id;
	struct cxl_mbox_cmd mbox_cmd;
	u32 val;
	int rc;

	if (!mds->cxlds.media_ready)
		return 0;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_IDENTIFY,
		.size_out = sizeof(id),
		.payload_out = &id,
	};
	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0)
		return rc;

	mds->total_bytes =
		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->volatile_only_bytes =
		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->persistent_only_bytes =
		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->partition_align_bytes =
		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

	mds->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(mds->firmware_version, id.fw_revision,
	       sizeof(id.fw_revision));

	if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
		val = get_unaligned_le24(id.poison_list_max_mer);
		mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
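
/*
 * The capacity fields consumed above are in units of
 * CXL_CAPACITY_MULTIPLIER (256MB per the specification), so, for example,
 * a raw total_capacity of 0x40 works out to:
 *
 *	0x40 * CXL_CAPACITY_MULTIPLIER == 64 * SZ_256M == 16GB
 */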

static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
{
	int rc;
	u32 sec_out = 0;
	struct cxl_get_security_output {
		__le32 flags;
	} out;
	struct cxl_mbox_cmd sec_cmd = {
		.opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
		.payload_out = &out,
		.size_out = sizeof(out),
	};
	struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };
	struct cxl_dev_state *cxlds = &mds->cxlds;

	if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
		return -EINVAL;

	rc = cxl_internal_send_cmd(mds, &sec_cmd);
	if (rc < 0) {
		dev_err(cxlds->dev, "Failed to get security state : %d", rc);
		return rc;
	}

	/*
	 * Prior to using these commands, any security applied to
	 * the user data areas of the device shall be DISABLED (or
	 * UNLOCKED for secure erase case).
	 */
	sec_out = le32_to_cpu(out.flags);
	if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
		return -EINVAL;

	if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
	    sec_out & CXL_PMEM_SEC_STATE_LOCKED)
		return -EINVAL;

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0) {
		dev_err(cxlds->dev, "Failed to sanitize device : %d", rc);
		return rc;
	}

	return 0;
}

/**
 * cxl_mem_sanitize() - Send a sanitization command to the device.
 * @cxlmd: The device for the operation
 * @cmd: The specific sanitization command opcode
 *
 * Return: 0 if the command was executed successfully, regardless of
 * whether or not the actual security operation is done in the background,
 * such as for the Sanitize case.
 * Error return values can be the result of the mailbox command, -EINVAL
 * when security requirements are not met or invalid contexts, or -EBUSY
 * if the sanitize operation is already in flight.
 *
 * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
 */
int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_port *endpoint;
	int rc;

	/* synchronize with cxl_mem_probe() and decoder write operations */
	device_lock(&cxlmd->dev);
	endpoint = cxlmd->endpoint;
	down_read(&cxl_region_rwsem);
	/*
	 * Require an endpoint to be safe otherwise the driver can not
	 * be sure that the device is unmapped.
	 */
	if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
		rc = __cxl_mem_sanitize(mds, cmd);
	else
		rc = -EBUSY;
	up_read(&cxl_region_rwsem);
	device_unlock(&cxlmd->dev);

	return rc;
}

static int add_dpa_res(struct device *dev, struct resource *parent,
		       struct resource *res, resource_size_t start,
		       resource_size_t size, const char *type)
{
	int rc;

	res->name = type;
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (resource_size(res) == 0) {
		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
		return 0;
	}
	rc = request_resource(parent, res);
	if (rc) {
		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
			res, rc);
		return rc;
	}

	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);

	return 0;
}

int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct device *dev = cxlds->dev;
	int rc;

	if (!cxlds->media_ready) {
		cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
		cxlds->ram_res = DEFINE_RES_MEM(0, 0);
		cxlds->pmem_res = DEFINE_RES_MEM(0, 0);
		return 0;
	}

	cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);

	if (mds->partition_align_bytes == 0) {
		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
				 mds->volatile_only_bytes, "ram");
		if (rc)
			return rc;
		return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
				   mds->volatile_only_bytes,
				   mds->persistent_only_bytes, "pmem");
	}

	rc = cxl_mem_get_partition_info(mds);
	if (rc) {
		dev_err(dev, "Failed to query partition information\n");
		return rc;
	}

	rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
			 mds->active_volatile_bytes, "ram");
	if (rc)
		return rc;
	return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
			   mds->active_volatile_bytes,
			   mds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);

int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
	struct cxl_mbox_cmd mbox_cmd;
	struct cxl_mbox_set_timestamp_in pi;
	int rc;

	pi.timestamp = cpu_to_le64(ktime_get_real_ns());
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_SET_TIMESTAMP,
		.size_in = sizeof(pi),
		.payload_in = &pi,
	};

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	/*
	 * Command is optional. Devices may have another way of providing
	 * a timestamp, or may return all 0s in timestamp fields.
	 * Don't report an error if this command isn't supported
	 */
	if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
		return rc;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL);

int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
		       struct cxl_region *cxlr)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_mbox_poison_out *po;
	struct cxl_mbox_poison_in pi;
	int nr_records = 0;
	int rc;

	rc = mutex_lock_interruptible(&mds->poison.lock);
	if (rc)
		return rc;

	po = mds->poison.list_out;
	pi.offset = cpu_to_le64(offset);
	pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);

	do {
		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
			.opcode = CXL_MBOX_OP_GET_POISON,
			.size_in = sizeof(pi),
			.payload_in = &pi,
			.size_out = mds->payload_size,
			.payload_out = po,
			.min_out = struct_size(po, record, 0),
		};

		rc = cxl_internal_send_cmd(mds, &mbox_cmd);
		if (rc)
			break;

		for (int i = 0; i < le16_to_cpu(po->count); i++)
			trace_cxl_poison(cxlmd, cxlr, &po->record[i],
					 po->flags, po->overflow_ts,
					 CXL_POISON_TRACE_LIST);

		/* Protect against an uncleared _FLAG_MORE */
		nr_records = nr_records + le16_to_cpu(po->count);
		if (nr_records >= mds->poison.max_errors) {
			dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
				nr_records);
			break;
		}
	} while (po->flags & CXL_POISON_FLAG_MORE);

	mutex_unlock(&mds->poison.lock);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL);

static void free_poison_buf(void *buf)
{
	kvfree(buf);
}

/* Get Poison List output buffer is protected by mds->poison.lock */
static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
{
	mds->poison.list_out = kvmalloc(mds->payload_size, GFP_KERNEL);
	if (!mds->poison.list_out)
		return -ENOMEM;

	return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
					mds->poison.list_out);
}

int cxl_poison_state_init(struct cxl_memdev_state *mds)
{
	int rc;

	if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
		return 0;

	rc = cxl_poison_alloc_buf(mds);
	if (rc) {
		clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
		return rc;
	}

	mutex_init(&mds->poison.lock);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);

struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
	struct cxl_memdev_state *mds;

	mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
	if (!mds) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&mds->mbox_mutex);
	mutex_init(&mds->event.log_lock);
	mds->cxlds.dev = dev;
	mds->cxlds.reg_map.host = dev;
	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
	mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
	mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;

	return mds;
}
EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL);

void __init cxl_mbox_init(void)
{
	struct dentry *mbox_debugfs;

	mbox_debugfs = cxl_debugfs_create_dir("mbox");
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);
}