1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */ 3 #include <linux/security.h> 4 #include <linux/debugfs.h> 5 #include <linux/ktime.h> 6 #include <linux/mutex.h> 7 #include <asm/unaligned.h> 8 #include <cxlpci.h> 9 #include <cxlmem.h> 10 #include <cxl.h> 11 12 #include "core.h" 13 #include "trace.h" 14 15 static bool cxl_raw_allow_all; 16 17 /** 18 * DOC: cxl mbox 19 * 20 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The 21 * implementation is used by the cxl_pci driver to initialize the device 22 * and implement the cxl_mem.h IOCTL UAPI. It also implements the 23 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM. 24 */ 25 26 #define cxl_for_each_cmd(cmd) \ 27 for ((cmd) = &cxl_mem_commands[0]; \ 28 ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++) 29 30 #define CXL_CMD(_id, sin, sout, _flags) \ 31 [CXL_MEM_COMMAND_ID_##_id] = { \ 32 .info = { \ 33 .id = CXL_MEM_COMMAND_ID_##_id, \ 34 .size_in = sin, \ 35 .size_out = sout, \ 36 }, \ 37 .opcode = CXL_MBOX_OP_##_id, \ 38 .flags = _flags, \ 39 } 40 41 #define CXL_VARIABLE_PAYLOAD ~0U 42 /* 43 * This table defines the supported mailbox commands for the driver. This table 44 * is made up of a UAPI structure. Non-negative values as parameters in the 45 * table will be validated against the user's input. For example, if size_in is 46 * 0, and the user passed in 1, it is an error. 47 */ 48 static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { 49 CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE), 50 #ifdef CONFIG_CXL_MEM_RAW_COMMANDS 51 CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0), 52 #endif 53 CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE), 54 CXL_CMD(GET_FW_INFO, 0, 0x50, 0), 55 CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0), 56 CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0), 57 CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0), 58 CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE), 59 CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0), 60 CXL_CMD(CLEAR_LOG, 0x10, 0, 0), 61 CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0), 62 CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0), 63 CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0), 64 CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0), 65 CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0), 66 CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0), 67 CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0), 68 CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0), 69 CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0), 70 }; 71 72 /* 73 * Commands that RAW doesn't permit. The rationale for each: 74 * 75 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment / 76 * coordination of transaction timeout values at the root bridge level. 77 * 78 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live 79 * and needs to be coordinated with HDM updates. 80 * 81 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the 82 * driver and any writes from userspace invalidates those contents. 83 * 84 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes 85 * to the device after it is marked clean, userspace can not make that 86 * assertion. 87 * 88 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that 89 * is kept up to date with patrol notifications and error management. 90 * 91 * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel 92 * driver orchestration for safety. 93 */ 94 static u16 cxl_disabled_raw_commands[] = { 95 CXL_MBOX_OP_ACTIVATE_FW, 96 CXL_MBOX_OP_SET_PARTITION_INFO, 97 CXL_MBOX_OP_SET_LSA, 98 CXL_MBOX_OP_SET_SHUTDOWN_STATE, 99 CXL_MBOX_OP_SCAN_MEDIA, 100 CXL_MBOX_OP_GET_SCAN_MEDIA, 101 CXL_MBOX_OP_GET_POISON, 102 CXL_MBOX_OP_INJECT_POISON, 103 CXL_MBOX_OP_CLEAR_POISON, 104 }; 105 106 /* 107 * Command sets that RAW doesn't permit. All opcodes in this set are 108 * disabled because they pass plain text security payloads over the 109 * user/kernel boundary. This functionality is intended to be wrapped 110 * behind the keys ABI which allows for encrypted payloads in the UAPI 111 */ 112 static u8 security_command_sets[] = { 113 0x44, /* Sanitize */ 114 0x45, /* Persistent Memory Data-at-rest Security */ 115 0x46, /* Security Passthrough */ 116 }; 117 118 static bool cxl_is_security_command(u16 opcode) 119 { 120 int i; 121 122 for (i = 0; i < ARRAY_SIZE(security_command_sets); i++) 123 if (security_command_sets[i] == (opcode >> 8)) 124 return true; 125 return false; 126 } 127 128 static void cxl_set_security_cmd_enabled(struct cxl_security_state *security, 129 u16 opcode) 130 { 131 switch (opcode) { 132 case CXL_MBOX_OP_SANITIZE: 133 set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds); 134 break; 135 case CXL_MBOX_OP_SECURE_ERASE: 136 set_bit(CXL_SEC_ENABLED_SECURE_ERASE, 137 security->enabled_cmds); 138 break; 139 case CXL_MBOX_OP_GET_SECURITY_STATE: 140 set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE, 141 security->enabled_cmds); 142 break; 143 case CXL_MBOX_OP_SET_PASSPHRASE: 144 set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE, 145 security->enabled_cmds); 146 break; 147 case CXL_MBOX_OP_DISABLE_PASSPHRASE: 148 set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE, 149 security->enabled_cmds); 150 break; 151 case CXL_MBOX_OP_UNLOCK: 152 set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds); 153 break; 154 case CXL_MBOX_OP_FREEZE_SECURITY: 155 set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY, 156 security->enabled_cmds); 157 break; 158 case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE: 159 set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE, 160 security->enabled_cmds); 161 break; 162 default: 163 break; 164 } 165 } 166 167 static bool cxl_is_poison_command(u16 opcode) 168 { 169 #define CXL_MBOX_OP_POISON_CMDS 0x43 170 171 if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS) 172 return true; 173 174 return false; 175 } 176 177 static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison, 178 u16 opcode) 179 { 180 switch (opcode) { 181 case CXL_MBOX_OP_GET_POISON: 182 set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds); 183 break; 184 case CXL_MBOX_OP_INJECT_POISON: 185 set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds); 186 break; 187 case CXL_MBOX_OP_CLEAR_POISON: 188 set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds); 189 break; 190 case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS: 191 set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds); 192 break; 193 case CXL_MBOX_OP_SCAN_MEDIA: 194 set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds); 195 break; 196 case CXL_MBOX_OP_GET_SCAN_MEDIA: 197 set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds); 198 break; 199 default: 200 break; 201 } 202 } 203 204 static struct cxl_mem_command *cxl_mem_find_command(u16 opcode) 205 { 206 struct cxl_mem_command *c; 207 208 cxl_for_each_cmd(c) 209 if (c->opcode == opcode) 210 return c; 211 212 return NULL; 213 } 214 215 static const char *cxl_mem_opcode_to_name(u16 opcode) 216 { 217 struct cxl_mem_command *c; 218 219 c = cxl_mem_find_command(opcode); 220 if (!c) 221 return NULL; 222 223 return cxl_command_names[c->info.id].name; 224 } 225 226 /** 227 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command 228 * @mds: The driver data for the operation 229 * @mbox_cmd: initialized command to execute 230 * 231 * Context: Any context. 232 * Return: 233 * * %>=0 - Number of bytes returned in @out. 234 * * %-E2BIG - Payload is too large for hardware. 235 * * %-EBUSY - Couldn't acquire exclusive mailbox access. 236 * * %-EFAULT - Hardware error occurred. 237 * * %-ENXIO - Command completed, but device reported an error. 238 * * %-EIO - Unexpected output size. 239 * 240 * Mailbox commands may execute successfully yet the device itself reported an 241 * error. While this distinction can be useful for commands from userspace, the 242 * kernel will only be able to use results when both are successful. 243 */ 244 int cxl_internal_send_cmd(struct cxl_memdev_state *mds, 245 struct cxl_mbox_cmd *mbox_cmd) 246 { 247 size_t out_size, min_out; 248 int rc; 249 250 if (mbox_cmd->size_in > mds->payload_size || 251 mbox_cmd->size_out > mds->payload_size) 252 return -E2BIG; 253 254 out_size = mbox_cmd->size_out; 255 min_out = mbox_cmd->min_out; 256 rc = mds->mbox_send(mds, mbox_cmd); 257 /* 258 * EIO is reserved for a payload size mismatch and mbox_send() 259 * may not return this error. 260 */ 261 if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO")) 262 return -ENXIO; 263 if (rc) 264 return rc; 265 266 if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS && 267 mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND) 268 return cxl_mbox_cmd_rc2errno(mbox_cmd); 269 270 if (!out_size) 271 return 0; 272 273 /* 274 * Variable sized output needs to at least satisfy the caller's 275 * minimum if not the fully requested size. 276 */ 277 if (min_out == 0) 278 min_out = out_size; 279 280 if (mbox_cmd->size_out < min_out) 281 return -EIO; 282 return 0; 283 } 284 EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL); 285 286 static bool cxl_mem_raw_command_allowed(u16 opcode) 287 { 288 int i; 289 290 if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS)) 291 return false; 292 293 if (security_locked_down(LOCKDOWN_PCI_ACCESS)) 294 return false; 295 296 if (cxl_raw_allow_all) 297 return true; 298 299 if (cxl_is_security_command(opcode)) 300 return false; 301 302 for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++) 303 if (cxl_disabled_raw_commands[i] == opcode) 304 return false; 305 306 return true; 307 } 308 309 /** 310 * cxl_payload_from_user_allowed() - Check contents of in_payload. 311 * @opcode: The mailbox command opcode. 312 * @payload_in: Pointer to the input payload passed in from user space. 313 * 314 * Return: 315 * * true - payload_in passes check for @opcode. 316 * * false - payload_in contains invalid or unsupported values. 317 * 318 * The driver may inspect payload contents before sending a mailbox 319 * command from user space to the device. The intent is to reject 320 * commands with input payloads that are known to be unsafe. This 321 * check is not intended to replace the users careful selection of 322 * mailbox command parameters and makes no guarantee that the user 323 * command will succeed, nor that it is appropriate. 324 * 325 * The specific checks are determined by the opcode. 326 */ 327 static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in) 328 { 329 switch (opcode) { 330 case CXL_MBOX_OP_SET_PARTITION_INFO: { 331 struct cxl_mbox_set_partition_info *pi = payload_in; 332 333 if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG) 334 return false; 335 break; 336 } 337 case CXL_MBOX_OP_CLEAR_LOG: { 338 const uuid_t *uuid = (uuid_t *)payload_in; 339 340 /* 341 * Restrict the ‘Clear log’ action to only apply to 342 * Vendor debug logs. 343 */ 344 return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID); 345 } 346 default: 347 break; 348 } 349 return true; 350 } 351 352 static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox, 353 struct cxl_memdev_state *mds, u16 opcode, 354 size_t in_size, size_t out_size, u64 in_payload) 355 { 356 *mbox = (struct cxl_mbox_cmd) { 357 .opcode = opcode, 358 .size_in = in_size, 359 }; 360 361 if (in_size) { 362 mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload), 363 in_size); 364 if (IS_ERR(mbox->payload_in)) 365 return PTR_ERR(mbox->payload_in); 366 367 if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) { 368 dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n", 369 cxl_mem_opcode_to_name(opcode)); 370 kvfree(mbox->payload_in); 371 return -EBUSY; 372 } 373 } 374 375 /* Prepare to handle a full payload for variable sized output */ 376 if (out_size == CXL_VARIABLE_PAYLOAD) 377 mbox->size_out = mds->payload_size; 378 else 379 mbox->size_out = out_size; 380 381 if (mbox->size_out) { 382 mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL); 383 if (!mbox->payload_out) { 384 kvfree(mbox->payload_in); 385 return -ENOMEM; 386 } 387 } 388 return 0; 389 } 390 391 static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox) 392 { 393 kvfree(mbox->payload_in); 394 kvfree(mbox->payload_out); 395 } 396 397 static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd, 398 const struct cxl_send_command *send_cmd, 399 struct cxl_memdev_state *mds) 400 { 401 if (send_cmd->raw.rsvd) 402 return -EINVAL; 403 404 /* 405 * Unlike supported commands, the output size of RAW commands 406 * gets passed along without further checking, so it must be 407 * validated here. 408 */ 409 if (send_cmd->out.size > mds->payload_size) 410 return -EINVAL; 411 412 if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode)) 413 return -EPERM; 414 415 dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n"); 416 417 *mem_cmd = (struct cxl_mem_command) { 418 .info = { 419 .id = CXL_MEM_COMMAND_ID_RAW, 420 .size_in = send_cmd->in.size, 421 .size_out = send_cmd->out.size, 422 }, 423 .opcode = send_cmd->raw.opcode 424 }; 425 426 return 0; 427 } 428 429 static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd, 430 const struct cxl_send_command *send_cmd, 431 struct cxl_memdev_state *mds) 432 { 433 struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id]; 434 const struct cxl_command_info *info = &c->info; 435 436 if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK) 437 return -EINVAL; 438 439 if (send_cmd->rsvd) 440 return -EINVAL; 441 442 if (send_cmd->in.rsvd || send_cmd->out.rsvd) 443 return -EINVAL; 444 445 /* Check that the command is enabled for hardware */ 446 if (!test_bit(info->id, mds->enabled_cmds)) 447 return -ENOTTY; 448 449 /* Check that the command is not claimed for exclusive kernel use */ 450 if (test_bit(info->id, mds->exclusive_cmds)) 451 return -EBUSY; 452 453 /* Check the input buffer is the expected size */ 454 if ((info->size_in != CXL_VARIABLE_PAYLOAD) && 455 (info->size_in != send_cmd->in.size)) 456 return -ENOMEM; 457 458 /* Check the output buffer is at least large enough */ 459 if ((info->size_out != CXL_VARIABLE_PAYLOAD) && 460 (send_cmd->out.size < info->size_out)) 461 return -ENOMEM; 462 463 *mem_cmd = (struct cxl_mem_command) { 464 .info = { 465 .id = info->id, 466 .flags = info->flags, 467 .size_in = send_cmd->in.size, 468 .size_out = send_cmd->out.size, 469 }, 470 .opcode = c->opcode 471 }; 472 473 return 0; 474 } 475 476 /** 477 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND. 478 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd. 479 * @mds: The driver data for the operation 480 * @send_cmd: &struct cxl_send_command copied in from userspace. 481 * 482 * Return: 483 * * %0 - @out_cmd is ready to send. 484 * * %-ENOTTY - Invalid command specified. 485 * * %-EINVAL - Reserved fields or invalid values were used. 486 * * %-ENOMEM - Input or output buffer wasn't sized properly. 487 * * %-EPERM - Attempted to use a protected command. 488 * * %-EBUSY - Kernel has claimed exclusive access to this opcode 489 * 490 * The result of this command is a fully validated command in @mbox_cmd that is 491 * safe to send to the hardware. 492 */ 493 static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd, 494 struct cxl_memdev_state *mds, 495 const struct cxl_send_command *send_cmd) 496 { 497 struct cxl_mem_command mem_cmd; 498 int rc; 499 500 if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX) 501 return -ENOTTY; 502 503 /* 504 * The user can never specify an input payload larger than what hardware 505 * supports, but output can be arbitrarily large (simply write out as 506 * much data as the hardware provides). 507 */ 508 if (send_cmd->in.size > mds->payload_size) 509 return -EINVAL; 510 511 /* Sanitize and construct a cxl_mem_command */ 512 if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) 513 rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds); 514 else 515 rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds); 516 517 if (rc) 518 return rc; 519 520 /* Sanitize and construct a cxl_mbox_cmd */ 521 return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode, 522 mem_cmd.info.size_in, mem_cmd.info.size_out, 523 send_cmd->in.payload); 524 } 525 526 int cxl_query_cmd(struct cxl_memdev *cxlmd, 527 struct cxl_mem_query_commands __user *q) 528 { 529 struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 530 struct device *dev = &cxlmd->dev; 531 struct cxl_mem_command *cmd; 532 u32 n_commands; 533 int j = 0; 534 535 dev_dbg(dev, "Query IOCTL\n"); 536 537 if (get_user(n_commands, &q->n_commands)) 538 return -EFAULT; 539 540 /* returns the total number if 0 elements are requested. */ 541 if (n_commands == 0) 542 return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands); 543 544 /* 545 * otherwise, return max(n_commands, total commands) cxl_command_info 546 * structures. 547 */ 548 cxl_for_each_cmd(cmd) { 549 struct cxl_command_info info = cmd->info; 550 551 if (test_bit(info.id, mds->enabled_cmds)) 552 info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED; 553 if (test_bit(info.id, mds->exclusive_cmds)) 554 info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE; 555 556 if (copy_to_user(&q->commands[j++], &info, sizeof(info))) 557 return -EFAULT; 558 559 if (j == n_commands) 560 break; 561 } 562 563 return 0; 564 } 565 566 /** 567 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace. 568 * @mds: The driver data for the operation 569 * @mbox_cmd: The validated mailbox command. 570 * @out_payload: Pointer to userspace's output payload. 571 * @size_out: (Input) Max payload size to copy out. 572 * (Output) Payload size hardware generated. 573 * @retval: Hardware generated return code from the operation. 574 * 575 * Return: 576 * * %0 - Mailbox transaction succeeded. This implies the mailbox 577 * protocol completed successfully not that the operation itself 578 * was successful. 579 * * %-ENOMEM - Couldn't allocate a bounce buffer. 580 * * %-EFAULT - Something happened with copy_to/from_user. 581 * * %-EINTR - Mailbox acquisition interrupted. 582 * * %-EXXX - Transaction level failures. 583 * 584 * Dispatches a mailbox command on behalf of a userspace request. 585 * The output payload is copied to userspace. 586 * 587 * See cxl_send_cmd(). 588 */ 589 static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds, 590 struct cxl_mbox_cmd *mbox_cmd, 591 u64 out_payload, s32 *size_out, 592 u32 *retval) 593 { 594 struct device *dev = mds->cxlds.dev; 595 int rc; 596 597 dev_dbg(dev, 598 "Submitting %s command for user\n" 599 "\topcode: %x\n" 600 "\tsize: %zx\n", 601 cxl_mem_opcode_to_name(mbox_cmd->opcode), 602 mbox_cmd->opcode, mbox_cmd->size_in); 603 604 rc = mds->mbox_send(mds, mbox_cmd); 605 if (rc) 606 goto out; 607 608 /* 609 * @size_out contains the max size that's allowed to be written back out 610 * to userspace. While the payload may have written more output than 611 * this it will have to be ignored. 612 */ 613 if (mbox_cmd->size_out) { 614 dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out, 615 "Invalid return size\n"); 616 if (copy_to_user(u64_to_user_ptr(out_payload), 617 mbox_cmd->payload_out, mbox_cmd->size_out)) { 618 rc = -EFAULT; 619 goto out; 620 } 621 } 622 623 *size_out = mbox_cmd->size_out; 624 *retval = mbox_cmd->return_code; 625 626 out: 627 cxl_mbox_cmd_dtor(mbox_cmd); 628 return rc; 629 } 630 631 int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s) 632 { 633 struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 634 struct device *dev = &cxlmd->dev; 635 struct cxl_send_command send; 636 struct cxl_mbox_cmd mbox_cmd; 637 int rc; 638 639 dev_dbg(dev, "Send IOCTL\n"); 640 641 if (copy_from_user(&send, s, sizeof(send))) 642 return -EFAULT; 643 644 rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send); 645 if (rc) 646 return rc; 647 648 rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload, 649 &send.out.size, &send.retval); 650 if (rc) 651 return rc; 652 653 if (copy_to_user(s, &send, sizeof(send))) 654 return -EFAULT; 655 656 return 0; 657 } 658 659 static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid, 660 u32 *size, u8 *out) 661 { 662 u32 remaining = *size; 663 u32 offset = 0; 664 665 while (remaining) { 666 u32 xfer_size = min_t(u32, remaining, mds->payload_size); 667 struct cxl_mbox_cmd mbox_cmd; 668 struct cxl_mbox_get_log log; 669 int rc; 670 671 log = (struct cxl_mbox_get_log) { 672 .uuid = *uuid, 673 .offset = cpu_to_le32(offset), 674 .length = cpu_to_le32(xfer_size), 675 }; 676 677 mbox_cmd = (struct cxl_mbox_cmd) { 678 .opcode = CXL_MBOX_OP_GET_LOG, 679 .size_in = sizeof(log), 680 .payload_in = &log, 681 .size_out = xfer_size, 682 .payload_out = out, 683 }; 684 685 rc = cxl_internal_send_cmd(mds, &mbox_cmd); 686 687 /* 688 * The output payload length that indicates the number 689 * of valid bytes can be smaller than the Log buffer 690 * size. 691 */ 692 if (rc == -EIO && mbox_cmd.size_out < xfer_size) { 693 offset += mbox_cmd.size_out; 694 break; 695 } 696 697 if (rc < 0) 698 return rc; 699 700 out += xfer_size; 701 remaining -= xfer_size; 702 offset += xfer_size; 703 } 704 705 *size = offset; 706 707 return 0; 708 } 709 710 /** 711 * cxl_walk_cel() - Walk through the Command Effects Log. 712 * @mds: The driver data for the operation 713 * @size: Length of the Command Effects Log. 714 * @cel: CEL 715 * 716 * Iterate over each entry in the CEL and determine if the driver supports the 717 * command. If so, the command is enabled for the device and can be used later. 718 */ 719 static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel) 720 { 721 struct cxl_cel_entry *cel_entry; 722 const int cel_entries = size / sizeof(*cel_entry); 723 struct device *dev = mds->cxlds.dev; 724 int i; 725 726 cel_entry = (struct cxl_cel_entry *) cel; 727 728 for (i = 0; i < cel_entries; i++) { 729 u16 opcode = le16_to_cpu(cel_entry[i].opcode); 730 struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); 731 int enabled = 0; 732 733 if (cmd) { 734 set_bit(cmd->info.id, mds->enabled_cmds); 735 enabled++; 736 } 737 738 if (cxl_is_poison_command(opcode)) { 739 cxl_set_poison_cmd_enabled(&mds->poison, opcode); 740 enabled++; 741 } 742 743 if (cxl_is_security_command(opcode)) { 744 cxl_set_security_cmd_enabled(&mds->security, opcode); 745 enabled++; 746 } 747 748 dev_dbg(dev, "Opcode 0x%04x %s\n", opcode, 749 enabled ? "enabled" : "unsupported by driver"); 750 } 751 } 752 753 static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds) 754 { 755 struct cxl_mbox_get_supported_logs *ret; 756 struct cxl_mbox_cmd mbox_cmd; 757 int rc; 758 759 ret = kvmalloc(mds->payload_size, GFP_KERNEL); 760 if (!ret) 761 return ERR_PTR(-ENOMEM); 762 763 mbox_cmd = (struct cxl_mbox_cmd) { 764 .opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS, 765 .size_out = mds->payload_size, 766 .payload_out = ret, 767 /* At least the record number field must be valid */ 768 .min_out = 2, 769 }; 770 rc = cxl_internal_send_cmd(mds, &mbox_cmd); 771 if (rc < 0) { 772 kvfree(ret); 773 return ERR_PTR(rc); 774 } 775 776 777 return ret; 778 } 779 780 enum { 781 CEL_UUID, 782 VENDOR_DEBUG_UUID, 783 }; 784 785 /* See CXL 2.0 Table 170. Get Log Input Payload */ 786 static const uuid_t log_uuid[] = { 787 [CEL_UUID] = DEFINE_CXL_CEL_UUID, 788 [VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID, 789 }; 790 791 /** 792 * cxl_enumerate_cmds() - Enumerate commands for a device. 793 * @mds: The driver data for the operation 794 * 795 * Returns 0 if enumerate completed successfully. 796 * 797 * CXL devices have optional support for certain commands. This function will 798 * determine the set of supported commands for the hardware and update the 799 * enabled_cmds bitmap in the @mds. 800 */ 801 int cxl_enumerate_cmds(struct cxl_memdev_state *mds) 802 { 803 struct cxl_mbox_get_supported_logs *gsl; 804 struct device *dev = mds->cxlds.dev; 805 struct cxl_mem_command *cmd; 806 int i, rc; 807 808 gsl = cxl_get_gsl(mds); 809 if (IS_ERR(gsl)) 810 return PTR_ERR(gsl); 811 812 rc = -ENOENT; 813 for (i = 0; i < le16_to_cpu(gsl->entries); i++) { 814 u32 size = le32_to_cpu(gsl->entry[i].size); 815 uuid_t uuid = gsl->entry[i].uuid; 816 u8 *log; 817 818 dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size); 819 820 if (!uuid_equal(&uuid, &log_uuid[CEL_UUID])) 821 continue; 822 823 log = kvmalloc(size, GFP_KERNEL); 824 if (!log) { 825 rc = -ENOMEM; 826 goto out; 827 } 828 829 rc = cxl_xfer_log(mds, &uuid, &size, log); 830 if (rc) { 831 kvfree(log); 832 goto out; 833 } 834 835 cxl_walk_cel(mds, size, log); 836 kvfree(log); 837 838 /* In case CEL was bogus, enable some default commands. */ 839 cxl_for_each_cmd(cmd) 840 if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE) 841 set_bit(cmd->info.id, mds->enabled_cmds); 842 843 /* Found the required CEL */ 844 rc = 0; 845 } 846 out: 847 kvfree(gsl); 848 return rc; 849 } 850 EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL); 851 852 void cxl_event_trace_record(const struct cxl_memdev *cxlmd, 853 enum cxl_event_log_type type, 854 enum cxl_event_type event_type, 855 const uuid_t *uuid, union cxl_event *evt) 856 { 857 if (event_type == CXL_CPER_EVENT_MEM_MODULE) { 858 trace_cxl_memory_module(cxlmd, type, &evt->mem_module); 859 return; 860 } 861 if (event_type == CXL_CPER_EVENT_GENERIC) { 862 trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic); 863 return; 864 } 865 866 if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) { 867 u64 dpa, hpa = ULLONG_MAX; 868 struct cxl_region *cxlr; 869 870 /* 871 * These trace points are annotated with HPA and region 872 * translations. Take topology mutation locks and lookup 873 * { HPA, REGION } from { DPA, MEMDEV } in the event record. 874 */ 875 guard(rwsem_read)(&cxl_region_rwsem); 876 guard(rwsem_read)(&cxl_dpa_rwsem); 877 878 dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK; 879 cxlr = cxl_dpa_to_region(cxlmd, dpa); 880 if (cxlr) 881 hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa); 882 883 if (event_type == CXL_CPER_EVENT_GEN_MEDIA) 884 trace_cxl_general_media(cxlmd, type, cxlr, hpa, 885 &evt->gen_media); 886 else if (event_type == CXL_CPER_EVENT_DRAM) 887 trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram); 888 } 889 } 890 EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL); 891 892 static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd, 893 enum cxl_event_log_type type, 894 struct cxl_event_record_raw *record) 895 { 896 enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC; 897 const uuid_t *uuid = &record->id; 898 899 if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID)) 900 ev_type = CXL_CPER_EVENT_GEN_MEDIA; 901 else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID)) 902 ev_type = CXL_CPER_EVENT_DRAM; 903 else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID)) 904 ev_type = CXL_CPER_EVENT_MEM_MODULE; 905 906 cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event); 907 } 908 909 static int cxl_clear_event_record(struct cxl_memdev_state *mds, 910 enum cxl_event_log_type log, 911 struct cxl_get_event_payload *get_pl) 912 { 913 struct cxl_mbox_clear_event_payload *payload; 914 u16 total = le16_to_cpu(get_pl->record_count); 915 u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES; 916 size_t pl_size = struct_size(payload, handles, max_handles); 917 struct cxl_mbox_cmd mbox_cmd; 918 u16 cnt; 919 int rc = 0; 920 int i; 921 922 /* Payload size may limit the max handles */ 923 if (pl_size > mds->payload_size) { 924 max_handles = (mds->payload_size - sizeof(*payload)) / 925 sizeof(__le16); 926 pl_size = struct_size(payload, handles, max_handles); 927 } 928 929 payload = kvzalloc(pl_size, GFP_KERNEL); 930 if (!payload) 931 return -ENOMEM; 932 933 *payload = (struct cxl_mbox_clear_event_payload) { 934 .event_log = log, 935 }; 936 937 mbox_cmd = (struct cxl_mbox_cmd) { 938 .opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD, 939 .payload_in = payload, 940 .size_in = pl_size, 941 }; 942 943 /* 944 * Clear Event Records uses u8 for the handle cnt while Get Event 945 * Record can return up to 0xffff records. 946 */ 947 i = 0; 948 for (cnt = 0; cnt < total; cnt++) { 949 struct cxl_event_record_raw *raw = &get_pl->records[cnt]; 950 struct cxl_event_generic *gen = &raw->event.generic; 951 952 payload->handles[i++] = gen->hdr.handle; 953 dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log, 954 le16_to_cpu(payload->handles[i - 1])); 955 956 if (i == max_handles) { 957 payload->nr_recs = i; 958 rc = cxl_internal_send_cmd(mds, &mbox_cmd); 959 if (rc) 960 goto free_pl; 961 i = 0; 962 } 963 } 964 965 /* Clear what is left if any */ 966 if (i) { 967 payload->nr_recs = i; 968 mbox_cmd.size_in = struct_size(payload, handles, i); 969 rc = cxl_internal_send_cmd(mds, &mbox_cmd); 970 if (rc) 971 goto free_pl; 972 } 973 974 free_pl: 975 kvfree(payload); 976 return rc; 977 } 978 979 static void cxl_mem_get_records_log(struct cxl_memdev_state *mds, 980 enum cxl_event_log_type type) 981 { 982 struct cxl_memdev *cxlmd = mds->cxlds.cxlmd; 983 struct device *dev = mds->cxlds.dev; 984 struct cxl_get_event_payload *payload; 985 u8 log_type = type; 986 u16 nr_rec; 987 988 mutex_lock(&mds->event.log_lock); 989 payload = mds->event.buf; 990 991 do { 992 int rc, i; 993 struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) { 994 .opcode = CXL_MBOX_OP_GET_EVENT_RECORD, 995 .payload_in = &log_type, 996 .size_in = sizeof(log_type), 997 .payload_out = payload, 998 .size_out = mds->payload_size, 999 .min_out = struct_size(payload, records, 0), 1000 }; 1001 1002 rc = cxl_internal_send_cmd(mds, &mbox_cmd); 1003 if (rc) { 1004 dev_err_ratelimited(dev, 1005 "Event log '%d': Failed to query event records : %d", 1006 type, rc); 1007 break; 1008 } 1009 1010 nr_rec = le16_to_cpu(payload->record_count); 1011 if (!nr_rec) 1012 break; 1013 1014 for (i = 0; i < nr_rec; i++) 1015 __cxl_event_trace_record(cxlmd, type, 1016 &payload->records[i]); 1017 1018 if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW) 1019 trace_cxl_overflow(cxlmd, type, payload); 1020 1021 rc = cxl_clear_event_record(mds, type, payload); 1022 if (rc) { 1023 dev_err_ratelimited(dev, 1024 "Event log '%d': Failed to clear events : %d", 1025 type, rc); 1026 break; 1027 } 1028 } while (nr_rec); 1029 1030 mutex_unlock(&mds->event.log_lock); 1031 } 1032 1033 /** 1034 * cxl_mem_get_event_records - Get Event Records from the device 1035 * @mds: The driver data for the operation 1036 * @status: Event Status register value identifying which events are available. 1037 * 1038 * Retrieve all event records available on the device, report them as trace 1039 * events, and clear them. 1040 * 1041 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records 1042 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records 1043 */ 1044 void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status) 1045 { 1046 dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status); 1047 1048 if (status & CXLDEV_EVENT_STATUS_FATAL) 1049 cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL); 1050 if (status & CXLDEV_EVENT_STATUS_FAIL) 1051 cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL); 1052 if (status & CXLDEV_EVENT_STATUS_WARN) 1053 cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN); 1054 if (status & CXLDEV_EVENT_STATUS_INFO) 1055 cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO); 1056 } 1057 EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL); 1058 1059 /** 1060 * cxl_mem_get_partition_info - Get partition info 1061 * @mds: The driver data for the operation 1062 * 1063 * Retrieve the current partition info for the device specified. The active 1064 * values are the current capacity in bytes. If not 0, the 'next' values are 1065 * the pending values, in bytes, which take affect on next cold reset. 1066 * 1067 * Return: 0 if no error: or the result of the mailbox command. 1068 * 1069 * See CXL @8.2.9.5.2.1 Get Partition Info 1070 */ 1071 static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds) 1072 { 1073 struct cxl_mbox_get_partition_info pi; 1074 struct cxl_mbox_cmd mbox_cmd; 1075 int rc; 1076 1077 mbox_cmd = (struct cxl_mbox_cmd) { 1078 .opcode = CXL_MBOX_OP_GET_PARTITION_INFO, 1079 .size_out = sizeof(pi), 1080 .payload_out = &pi, 1081 }; 1082 rc = cxl_internal_send_cmd(mds, &mbox_cmd); 1083 if (rc) 1084 return rc; 1085 1086 mds->active_volatile_bytes = 1087 le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER; 1088 mds->active_persistent_bytes = 1089 le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER; 1090 mds->next_volatile_bytes = 1091 le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; 1092 mds->next_persistent_bytes = 1093 le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; 1094 1095 return 0; 1096 } 1097 1098 /** 1099 * cxl_dev_state_identify() - Send the IDENTIFY command to the device. 1100 * @mds: The driver data for the operation 1101 * 1102 * Return: 0 if identify was executed successfully or media not ready. 1103 * 1104 * This will dispatch the identify command to the device and on success populate 1105 * structures to be exported to sysfs. 1106 */ 1107 int cxl_dev_state_identify(struct cxl_memdev_state *mds) 1108 { 1109 /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ 1110 struct cxl_mbox_identify id; 1111 struct cxl_mbox_cmd mbox_cmd; 1112 u32 val; 1113 int rc; 1114 1115 if (!mds->cxlds.media_ready) 1116 return 0; 1117 1118 mbox_cmd = (struct cxl_mbox_cmd) { 1119 .opcode = CXL_MBOX_OP_IDENTIFY, 1120 .size_out = sizeof(id), 1121 .payload_out = &id, 1122 }; 1123 rc = cxl_internal_send_cmd(mds, &mbox_cmd); 1124 if (rc < 0) 1125 return rc; 1126 1127 mds->total_bytes = 1128 le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER; 1129 mds->volatile_only_bytes = 1130 le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER; 1131 mds->persistent_only_bytes = 1132 le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER; 1133 mds->partition_align_bytes = 1134 le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER; 1135 1136 mds->lsa_size = le32_to_cpu(id.lsa_size); 1137 memcpy(mds->firmware_version, id.fw_revision, 1138 sizeof(id.fw_revision)); 1139 1140 if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) { 1141 val = get_unaligned_le24(id.poison_list_max_mer); 1142 mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX); 1143 } 1144 1145 return 0; 1146 } 1147 EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL); 1148 1149 static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd) 1150 { 1151 int rc; 1152 u32 sec_out = 0; 1153 struct cxl_get_security_output { 1154 __le32 flags; 1155 } out; 1156 struct cxl_mbox_cmd sec_cmd = { 1157 .opcode = CXL_MBOX_OP_GET_SECURITY_STATE, 1158 .payload_out = &out, 1159 .size_out = sizeof(out), 1160 }; 1161 struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd }; 1162 struct cxl_dev_state *cxlds = &mds->cxlds; 1163 1164 if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE) 1165 return -EINVAL; 1166 1167 rc = cxl_internal_send_cmd(mds, &sec_cmd); 1168 if (rc < 0) { 1169 dev_err(cxlds->dev, "Failed to get security state : %d", rc); 1170 return rc; 1171 } 1172 1173 /* 1174 * Prior to using these commands, any security applied to 1175 * the user data areas of the device shall be DISABLED (or 1176 * UNLOCKED for secure erase case). 1177 */ 1178 sec_out = le32_to_cpu(out.flags); 1179 if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET) 1180 return -EINVAL; 1181 1182 if (cmd == CXL_MBOX_OP_SECURE_ERASE && 1183 sec_out & CXL_PMEM_SEC_STATE_LOCKED) 1184 return -EINVAL; 1185 1186 rc = cxl_internal_send_cmd(mds, &mbox_cmd); 1187 if (rc < 0) { 1188 dev_err(cxlds->dev, "Failed to sanitize device : %d", rc); 1189 return rc; 1190 } 1191 1192 return 0; 1193 } 1194 1195 1196 /** 1197 * cxl_mem_sanitize() - Send a sanitization command to the device. 1198 * @cxlmd: The device for the operation 1199 * @cmd: The specific sanitization command opcode 1200 * 1201 * Return: 0 if the command was executed successfully, regardless of 1202 * whether or not the actual security operation is done in the background, 1203 * such as for the Sanitize case. 1204 * Error return values can be the result of the mailbox command, -EINVAL 1205 * when security requirements are not met or invalid contexts, or -EBUSY 1206 * if the sanitize operation is already in flight. 1207 * 1208 * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase. 1209 */ 1210 int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd) 1211 { 1212 struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 1213 struct cxl_port *endpoint; 1214 int rc; 1215 1216 /* synchronize with cxl_mem_probe() and decoder write operations */ 1217 device_lock(&cxlmd->dev); 1218 endpoint = cxlmd->endpoint; 1219 down_read(&cxl_region_rwsem); 1220 /* 1221 * Require an endpoint to be safe otherwise the driver can not 1222 * be sure that the device is unmapped. 1223 */ 1224 if (endpoint && cxl_num_decoders_committed(endpoint) == 0) 1225 rc = __cxl_mem_sanitize(mds, cmd); 1226 else 1227 rc = -EBUSY; 1228 up_read(&cxl_region_rwsem); 1229 device_unlock(&cxlmd->dev); 1230 1231 return rc; 1232 } 1233 1234 static int add_dpa_res(struct device *dev, struct resource *parent, 1235 struct resource *res, resource_size_t start, 1236 resource_size_t size, const char *type) 1237 { 1238 int rc; 1239 1240 res->name = type; 1241 res->start = start; 1242 res->end = start + size - 1; 1243 res->flags = IORESOURCE_MEM; 1244 if (resource_size(res) == 0) { 1245 dev_dbg(dev, "DPA(%s): no capacity\n", res->name); 1246 return 0; 1247 } 1248 rc = request_resource(parent, res); 1249 if (rc) { 1250 dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name, 1251 res, rc); 1252 return rc; 1253 } 1254 1255 dev_dbg(dev, "DPA(%s): %pr\n", res->name, res); 1256 1257 return 0; 1258 } 1259 1260 int cxl_mem_create_range_info(struct cxl_memdev_state *mds) 1261 { 1262 struct cxl_dev_state *cxlds = &mds->cxlds; 1263 struct device *dev = cxlds->dev; 1264 int rc; 1265 1266 if (!cxlds->media_ready) { 1267 cxlds->dpa_res = DEFINE_RES_MEM(0, 0); 1268 cxlds->ram_res = DEFINE_RES_MEM(0, 0); 1269 cxlds->pmem_res = DEFINE_RES_MEM(0, 0); 1270 return 0; 1271 } 1272 1273 cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes); 1274 1275 if (mds->partition_align_bytes == 0) { 1276 rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0, 1277 mds->volatile_only_bytes, "ram"); 1278 if (rc) 1279 return rc; 1280 return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res, 1281 mds->volatile_only_bytes, 1282 mds->persistent_only_bytes, "pmem"); 1283 } 1284 1285 rc = cxl_mem_get_partition_info(mds); 1286 if (rc) { 1287 dev_err(dev, "Failed to query partition information\n"); 1288 return rc; 1289 } 1290 1291 rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0, 1292 mds->active_volatile_bytes, "ram"); 1293 if (rc) 1294 return rc; 1295 return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res, 1296 mds->active_volatile_bytes, 1297 mds->active_persistent_bytes, "pmem"); 1298 } 1299 EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL); 1300 1301 int cxl_set_timestamp(struct cxl_memdev_state *mds) 1302 { 1303 struct cxl_mbox_cmd mbox_cmd; 1304 struct cxl_mbox_set_timestamp_in pi; 1305 int rc; 1306 1307 pi.timestamp = cpu_to_le64(ktime_get_real_ns()); 1308 mbox_cmd = (struct cxl_mbox_cmd) { 1309 .opcode = CXL_MBOX_OP_SET_TIMESTAMP, 1310 .size_in = sizeof(pi), 1311 .payload_in = &pi, 1312 }; 1313 1314 rc = cxl_internal_send_cmd(mds, &mbox_cmd); 1315 /* 1316 * Command is optional. Devices may have another way of providing 1317 * a timestamp, or may return all 0s in timestamp fields. 1318 * Don't report an error if this command isn't supported 1319 */ 1320 if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED)) 1321 return rc; 1322 1323 return 0; 1324 } 1325 EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL); 1326 1327 int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, 1328 struct cxl_region *cxlr) 1329 { 1330 struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 1331 struct cxl_mbox_poison_out *po; 1332 struct cxl_mbox_poison_in pi; 1333 int nr_records = 0; 1334 int rc; 1335 1336 rc = mutex_lock_interruptible(&mds->poison.lock); 1337 if (rc) 1338 return rc; 1339 1340 po = mds->poison.list_out; 1341 pi.offset = cpu_to_le64(offset); 1342 pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT); 1343 1344 do { 1345 struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){ 1346 .opcode = CXL_MBOX_OP_GET_POISON, 1347 .size_in = sizeof(pi), 1348 .payload_in = &pi, 1349 .size_out = mds->payload_size, 1350 .payload_out = po, 1351 .min_out = struct_size(po, record, 0), 1352 }; 1353 1354 rc = cxl_internal_send_cmd(mds, &mbox_cmd); 1355 if (rc) 1356 break; 1357 1358 for (int i = 0; i < le16_to_cpu(po->count); i++) 1359 trace_cxl_poison(cxlmd, cxlr, &po->record[i], 1360 po->flags, po->overflow_ts, 1361 CXL_POISON_TRACE_LIST); 1362 1363 /* Protect against an uncleared _FLAG_MORE */ 1364 nr_records = nr_records + le16_to_cpu(po->count); 1365 if (nr_records >= mds->poison.max_errors) { 1366 dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n", 1367 nr_records); 1368 break; 1369 } 1370 } while (po->flags & CXL_POISON_FLAG_MORE); 1371 1372 mutex_unlock(&mds->poison.lock); 1373 return rc; 1374 } 1375 EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL); 1376 1377 static void free_poison_buf(void *buf) 1378 { 1379 kvfree(buf); 1380 } 1381 1382 /* Get Poison List output buffer is protected by mds->poison.lock */ 1383 static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds) 1384 { 1385 mds->poison.list_out = kvmalloc(mds->payload_size, GFP_KERNEL); 1386 if (!mds->poison.list_out) 1387 return -ENOMEM; 1388 1389 return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf, 1390 mds->poison.list_out); 1391 } 1392 1393 int cxl_poison_state_init(struct cxl_memdev_state *mds) 1394 { 1395 int rc; 1396 1397 if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) 1398 return 0; 1399 1400 rc = cxl_poison_alloc_buf(mds); 1401 if (rc) { 1402 clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds); 1403 return rc; 1404 } 1405 1406 mutex_init(&mds->poison.lock); 1407 return 0; 1408 } 1409 EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL); 1410 1411 struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev) 1412 { 1413 struct cxl_memdev_state *mds; 1414 1415 mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL); 1416 if (!mds) { 1417 dev_err(dev, "No memory available\n"); 1418 return ERR_PTR(-ENOMEM); 1419 } 1420 1421 mutex_init(&mds->mbox_mutex); 1422 mutex_init(&mds->event.log_lock); 1423 mds->cxlds.dev = dev; 1424 mds->cxlds.reg_map.host = dev; 1425 mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE; 1426 mds->cxlds.type = CXL_DEVTYPE_CLASSMEM; 1427 mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID; 1428 mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID; 1429 1430 return mds; 1431 } 1432 EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL); 1433 1434 void __init cxl_mbox_init(void) 1435 { 1436 struct dentry *mbox_debugfs; 1437 1438 mbox_debugfs = cxl_debugfs_create_dir("mbox"); 1439 debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs, 1440 &cxl_raw_allow_all); 1441 } 1442