// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Encrypted Virtualization (SEV) interface
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Brijesh Singh <brijesh.singh@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/ccp.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/cpufeature.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/psp.h>

#include <asm/smp.h>
#include <asm/cacheflush.h>

#include "psp-dev.h"
#include "sev-dev.h"

#define DEVICE_NAME		"sev"
#define SEV_FW_FILE		"amd/sev.fw"
#define SEV_FW_NAME_SIZE	64

static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;

static int psp_cmd_timeout = 100;
module_param(psp_cmd_timeout, int, 0644);
MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");

static int psp_probe_timeout = 5;
module_param(psp_probe_timeout, int, 0644);
MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");

static char *init_ex_path;
module_param(init_ex_path, charp, 0444);
MODULE_PARM_DESC(init_ex_path, " Path for INIT_EX data; if set try INIT_EX");

static bool psp_init_on_probe = true;
module_param(psp_init_on_probe, bool, 0444);
MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");

MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */

static bool psp_dead;
static int psp_timeout;

/* Trusted Memory Region (TMR):
 * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
 * to allocate the memory, which will return aligned memory for the specified
 * allocation order.
 */
#define SEV_ES_TMR_SIZE		(1024 * 1024)
static void *sev_es_tmr;

/* INIT_EX NV Storage:
 * The NV Storage is a 32KB area and must be 4KB page aligned. Use the page
 * allocator to allocate the memory, which will return aligned memory for the
 * specified allocation order.
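 * The buffer is handed to the INIT_EX command via its nv_address/nv_len
 * fields and is written back to init_ex_path after any command that
 * modifies the NV area.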
 */
#define NV_LENGTH (32 * 1024)
static void *sev_init_ex_buffer;

static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
	struct sev_device *sev = psp_master->sev_data;

	if (sev->api_major > maj)
		return true;

	if (sev->api_major == maj && sev->api_minor >= min)
		return true;

	return false;
}

static void sev_irq_handler(int irq, void *data, unsigned int status)
{
	struct sev_device *sev = data;
	int reg;

	/* Check if it is command completion: */
	if (!(status & SEV_CMD_COMPLETE))
		return;

	/* Check if it is SEV command completion: */
	reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
	if (reg & PSP_CMDRESP_RESP) {
		sev->int_rcvd = 1;
		wake_up(&sev->int_queue);
	}
}

static int sev_wait_cmd_ioc(struct sev_device *sev,
			    unsigned int *reg, unsigned int timeout)
{
	int ret;

	ret = wait_event_timeout(sev->int_queue,
				 sev->int_rcvd, timeout * HZ);
	if (!ret)
		return -ETIMEDOUT;

	*reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);

	return 0;
}

static int sev_cmd_buffer_len(int cmd)
{
	switch (cmd) {
	case SEV_CMD_INIT: return sizeof(struct sev_data_init);
	case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex);
	case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
	case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
	case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
	case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
	case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
	case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
	case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
	case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
	case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
	case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
	case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
	case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
	case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
	case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
	case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
	case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
	case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
	case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
	case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
	case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
	case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
	case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
	case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
	case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
	case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
	case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
	case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report);
	case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel);
	default: return 0;
	}

	return 0;
}

static void *sev_fw_alloc(unsigned long len)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL, get_order(len));
	if (!page)
		return NULL;

	return page_address(page);
}

static struct file *open_file_as_root(const char *filename, int flags, umode_t mode)
{
	struct file *fp;
	struct path root;
	struct cred *cred;
	const struct cred *old_cred;

	task_lock(&init_task);
	get_fs_root(init_task.fs, &root);
	task_unlock(&init_task);

	cred = prepare_creds();
	if (!cred)
		return ERR_PTR(-ENOMEM);
	cred->fsuid = GLOBAL_ROOT_UID;
	old_cred = override_creds(cred);

	fp = file_open_root(&root, filename, flags, mode);
	path_put(&root);

	revert_creds(old_cred);

	return fp;
}

static int sev_read_init_ex_file(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct file *fp;
	ssize_t nread;

	lockdep_assert_held(&sev_cmd_mutex);

	if (!sev_init_ex_buffer)
		return -EOPNOTSUPP;

	fp = open_file_as_root(init_ex_path, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		int ret = PTR_ERR(fp);

		if (ret == -ENOENT) {
			dev_info(sev->dev,
				 "SEV: %s does not exist and will be created later.\n",
				 init_ex_path);
			ret = 0;
		} else {
			dev_err(sev->dev,
				"SEV: could not open %s for read, error %d\n",
				init_ex_path, ret);
		}
		return ret;
	}

	nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL);
	if (nread != NV_LENGTH) {
		dev_info(sev->dev,
			 "SEV: could not read %u bytes to non volatile memory area, ret %ld\n",
			 NV_LENGTH, nread);
	}

	dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread);
	filp_close(fp, NULL);

	return 0;
}

static int sev_write_init_ex_file(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct file *fp;
	loff_t offset = 0;
	ssize_t nwrite;

	lockdep_assert_held(&sev_cmd_mutex);

	if (!sev_init_ex_buffer)
		return 0;

	fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600);
	if (IS_ERR(fp)) {
		int ret = PTR_ERR(fp);

		dev_err(sev->dev,
			"SEV: could not open file for write, error %d\n",
			ret);
		return ret;
	}

	nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset);
	vfs_fsync(fp, 0);
	filp_close(fp, NULL);

	if (nwrite != NV_LENGTH) {
		dev_err(sev->dev,
			"SEV: failed to write %u bytes to non volatile memory area, ret %ld\n",
			NV_LENGTH, nwrite);
		return -EIO;
	}

	dev_dbg(sev->dev, "SEV: write successful to NV file\n");

	return 0;
}

static int sev_write_init_ex_file_if_required(int cmd_id)
{
	lockdep_assert_held(&sev_cmd_mutex);

	if (!sev_init_ex_buffer)
		return 0;

	/*
	 * Only a few platform commands modify the SPI/NV area: FACTORY_RESET,
	 * INIT_EX, PDH_GEN, PEK_CERT_IMPORT and PEK_GEN. None of the
	 * non-platform commands do.
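	 * Keep this list in sync with the switch below.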
	 */
	switch (cmd_id) {
	case SEV_CMD_FACTORY_RESET:
	case SEV_CMD_INIT_EX:
	case SEV_CMD_PDH_GEN:
	case SEV_CMD_PEK_CERT_IMPORT:
	case SEV_CMD_PEK_GEN:
		break;
	default:
		return 0;
	}

	return sev_write_init_ex_file();
}

static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
	struct psp_device *psp = psp_master;
	struct sev_device *sev;
	unsigned int phys_lsb, phys_msb;
	unsigned int reg, ret = 0;
	int buf_len;

	if (!psp || !psp->sev_data)
		return -ENODEV;

	if (psp_dead)
		return -EBUSY;

	sev = psp->sev_data;

	buf_len = sev_cmd_buffer_len(cmd);
	if (WARN_ON_ONCE(!data != !buf_len))
		return -EINVAL;

	/*
	 * Copy the incoming data to driver's scratch buffer as __pa() will not
	 * work for some memory, e.g. vmalloc'd addresses, and @data may not be
	 * physically contiguous.
	 */
	if (data)
		memcpy(sev->cmd_buf, data, buf_len);

	/* Get the physical address of the command buffer */
	phys_lsb = data ? lower_32_bits(__psp_pa(sev->cmd_buf)) : 0;
	phys_msb = data ? upper_32_bits(__psp_pa(sev->cmd_buf)) : 0;

	dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
		cmd, phys_msb, phys_lsb, psp_timeout);

	print_hex_dump_debug("(in):  ", DUMP_PREFIX_OFFSET, 16, 2, data,
			     buf_len, false);

	iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
	iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);

	sev->int_rcvd = 0;

	reg = cmd;
	reg <<= SEV_CMDRESP_CMD_SHIFT;
	reg |= SEV_CMDRESP_IOC;
	iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);

	/* wait for command completion */
	ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout);
	if (ret) {
		if (psp_ret)
			*psp_ret = 0;

		dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
		psp_dead = true;

		return ret;
	}

	psp_timeout = psp_cmd_timeout;

	if (psp_ret)
		*psp_ret = reg & PSP_CMDRESP_ERR_MASK;

	if (reg & PSP_CMDRESP_ERR_MASK) {
		dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
			cmd, reg & PSP_CMDRESP_ERR_MASK);
		ret = -EIO;
	} else {
		ret = sev_write_init_ex_file_if_required(cmd);
	}

	print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
			     buf_len, false);

	/*
	 * Copy potential output from the PSP back to data. Do this even on
	 * failure in case the caller wants to glean something from the error.
	 */
	if (data)
		memcpy(data, sev->cmd_buf, buf_len);

	return ret;
}

static int sev_do_cmd(int cmd, void *data, int *psp_ret)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_do_cmd_locked(cmd, data, psp_ret);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}

static int __sev_init_locked(int *error)
{
	struct sev_data_init data;

	memset(&data, 0, sizeof(data));
	if (sev_es_tmr) {
		/*
		 * Do not include the encryption mask on the physical
		 * address of the TMR (firmware should clear it anyway).
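		 * That is why __pa() is used here rather than __psp_pa(),
		 * which would include the SME encryption mask when memory
		 * encryption is active.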
		 */
		data.tmr_address = __pa(sev_es_tmr);

		data.flags |= SEV_INIT_FLAGS_SEV_ES;
		data.tmr_len = SEV_ES_TMR_SIZE;
	}

	return __sev_do_cmd_locked(SEV_CMD_INIT, &data, error);
}

static int __sev_init_ex_locked(int *error)
{
	struct sev_data_init_ex data;

	memset(&data, 0, sizeof(data));
	data.length = sizeof(data);
	data.nv_address = __psp_pa(sev_init_ex_buffer);
	data.nv_len = NV_LENGTH;

	if (sev_es_tmr) {
		/*
		 * Do not include the encryption mask on the physical
		 * address of the TMR (firmware should clear it anyway).
		 */
		data.tmr_address = __pa(sev_es_tmr);

		data.flags |= SEV_INIT_FLAGS_SEV_ES;
		data.tmr_len = SEV_ES_TMR_SIZE;
	}

	return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error);
}

static int __sev_platform_init_locked(int *error)
{
	struct psp_device *psp = psp_master;
	struct sev_device *sev;
	int rc = 0, psp_ret = -1;
	int (*init_function)(int *error);

	if (!psp || !psp->sev_data)
		return -ENODEV;

	sev = psp->sev_data;

	if (sev->state == SEV_STATE_INIT)
		return 0;

	if (sev_init_ex_buffer) {
		init_function = __sev_init_ex_locked;
		rc = sev_read_init_ex_file();
		if (rc)
			return rc;
	} else {
		init_function = __sev_init_locked;
	}

	rc = init_function(&psp_ret);
	if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
		/*
		 * Initialization command returned an integrity check failure
		 * status code, meaning that firmware load and validation of SEV
		 * related persistent data has failed. Retrying the
		 * initialization function should succeed by replacing the state
		 * with a reset state.
		 */
		dev_err(sev->dev, "SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state.");
		rc = init_function(&psp_ret);
	}
	if (error)
		*error = psp_ret;

	if (rc)
		return rc;

	sev->state = SEV_STATE_INIT;

	/* Prepare for first SEV guest launch after INIT */
	wbinvd_on_all_cpus();
	rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
	if (rc)
		return rc;

	dev_dbg(sev->dev, "SEV firmware initialized\n");

	dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
		 sev->api_minor, sev->build);

	return 0;
}

int sev_platform_init(int *error)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_platform_init_locked(error);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(sev_platform_init);

static int __sev_platform_shutdown_locked(int *error)
{
	struct sev_device *sev = psp_master->sev_data;
	int ret;

	if (!sev || sev->state == SEV_STATE_UNINIT)
		return 0;

	ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
	if (ret)
		return ret;

	sev->state = SEV_STATE_UNINIT;
	dev_dbg(sev->dev, "SEV firmware shutdown\n");

	return ret;
}

static int sev_platform_shutdown(int *error)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_platform_shutdown_locked(error);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}

static int sev_get_platform_state(int *state, int *error)
{
	struct sev_user_data_status data;
	int rc;

	rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, error);
	if (rc)
		return rc;

	*state = data.state;
	return rc;
}

static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{
	int state, rc;

	if (!writable)
		return -EPERM;

	/*
	 * The SEV spec requires that FACTORY_RESET must be issued in
	 * UNINIT state. Before we go further, check whether any guest is
	 * active.
	 *
	 * If the FW is in the WORKING state, deny the request; otherwise issue
	 * a SHUTDOWN command to transition from INIT to UNINIT before issuing
	 * the FACTORY_RESET.
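	 * FACTORY_RESET then restores the persistent SEV state kept in SPI/NV
	 * to its factory defaults.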
	 *
	 */
	rc = sev_get_platform_state(&state, &argp->error);
	if (rc)
		return rc;

	if (state == SEV_STATE_WORKING)
		return -EBUSY;

	if (state == SEV_STATE_INIT) {
		rc = __sev_platform_shutdown_locked(&argp->error);
		if (rc)
			return rc;
	}

	return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
}

static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
{
	struct sev_user_data_status data;
	int ret;

	memset(&data, 0, sizeof(data));

	ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error);
	if (ret)
		return ret;

	if (copy_to_user((void __user *)argp->data, &data, sizeof(data)))
		ret = -EFAULT;

	return ret;
}

static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	int rc;

	if (!writable)
		return -EPERM;

	if (sev->state == SEV_STATE_UNINIT) {
		rc = __sev_platform_init_locked(&argp->error);
		if (rc)
			return rc;
	}

	return __sev_do_cmd_locked(cmd, NULL, &argp->error);
}

static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pek_csr input;
	struct sev_data_pek_csr data;
	void __user *input_address;
	void *blob = NULL;
	int ret;

	if (!writable)
		return -EPERM;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* userspace wants to query CSR length */
	if (!input.address || !input.length)
		goto cmd;

	/* allocate a physically contiguous buffer to store the CSR blob */
	input_address = (void __user *)input.address;
	if (input.length > SEV_FW_BLOB_MAX_SIZE)
		return -EFAULT;

	blob = kzalloc(input.length, GFP_KERNEL);
	if (!blob)
		return -ENOMEM;

	data.address = __psp_pa(blob);
	data.len = input.length;

cmd:
	if (sev->state == SEV_STATE_UNINIT) {
		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			goto e_free_blob;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, &data, &argp->error);

	/* If we query the CSR length, FW responded with expected data. */
	input.length = data.len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free_blob;
	}

	if (blob) {
		if (copy_to_user(input_address, blob, input.length))
			ret = -EFAULT;
	}

e_free_blob:
	kfree(blob);
	return ret;
}

void *psp_copy_user_blob(u64 uaddr, u32 len)
{
	if (!uaddr || !len)
		return ERR_PTR(-EINVAL);

	/* verify that blob length does not exceed our limit */
	if (len > SEV_FW_BLOB_MAX_SIZE)
		return ERR_PTR(-EINVAL);

	return memdup_user((void __user *)uaddr, len);
}
EXPORT_SYMBOL_GPL(psp_copy_user_blob);

static int sev_get_api_version(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_status status;
	int error = 0, ret;

	ret = sev_platform_status(&status, &error);
	if (ret) {
		dev_err(sev->dev,
			"SEV: failed to get status. Error: %#x\n", error);
		return 1;
	}

	sev->api_major = status.api_major;
	sev->api_minor = status.api_minor;
	sev->build = status.build;
	sev->state = status.state;

	return 0;
}

static int sev_get_firmware(struct device *dev,
			    const struct firmware **firmware)
{
	char fw_name_specific[SEV_FW_NAME_SIZE];
	char fw_name_subset[SEV_FW_NAME_SIZE];

	snprintf(fw_name_specific, sizeof(fw_name_specific),
		 "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
		 boot_cpu_data.x86, boot_cpu_data.x86_model);

	snprintf(fw_name_subset, sizeof(fw_name_subset),
		 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
		 boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);

	/* Check for SEV FW for a particular model.
	 * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
	 *
	 * or
	 *
	 * Check for SEV FW common to a subset of models.
	 * Ex. amd_sev_fam17h_model0xh.sbin for
	 *     Family 17h Model 00h -- Family 17h Model 0Fh
	 *
	 * or
	 *
	 * Fall-back to using generic name: sev.fw
	 */
	if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
	    (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
	    (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
		return 0;

	return -ENOENT;
}

/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
static int sev_update_firmware(struct device *dev)
{
	struct sev_data_download_firmware *data;
	const struct firmware *firmware;
	int ret, error, order;
	struct page *p;
	u64 data_size;

	if (!sev_version_greater_or_equal(0, 15)) {
		dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n");
		return -1;
	}

	if (sev_get_firmware(dev, &firmware) == -ENOENT) {
		dev_dbg(dev, "No SEV firmware file present\n");
		return -1;
	}

	/*
	 * SEV FW expects the physical address given to it to be 32
	 * byte aligned. Memory allocated has structure placed at the
	 * beginning followed by the firmware being passed to the SEV
	 * FW. Allocate enough memory for data structure + alignment
	 * padding + SEV FW.
	 */
	data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);

	order = get_order(firmware->size + data_size);
	p = alloc_pages(GFP_KERNEL, order);
	if (!p) {
		ret = -1;
		goto fw_err;
	}

	/*
	 * Copy firmware data to a kernel allocated contiguous
	 * memory region.
	 */
	data = page_address(p);
	memcpy(page_address(p) + data_size, firmware->data, firmware->size);

	data->address = __psp_pa(page_address(p) + data_size);
	data->len = firmware->size;

	ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);

	/*
	 * A quirk to fix the committed TCB version when upgrading from a
	 * firmware version earlier than 1.50.
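	 * The second DOWNLOAD_FIRMWARE call below, issued only when the
	 * pre-update firmware is older than 1.50, works around this.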
	 */
	if (!ret && !sev_version_greater_or_equal(1, 50))
		ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);

	if (ret)
		dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
	else
		dev_info(dev, "SEV firmware update successful\n");

	__free_pages(p, order);

fw_err:
	release_firmware(firmware);

	return ret;
}

static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pek_cert_import input;
	struct sev_data_pek_cert_import data;
	void *pek_blob, *oca_blob;
	int ret;

	if (!writable)
		return -EPERM;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	/* copy PEK certificate blob from userspace */
	pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
	if (IS_ERR(pek_blob))
		return PTR_ERR(pek_blob);

	data.reserved = 0;
	data.pek_cert_address = __psp_pa(pek_blob);
	data.pek_cert_len = input.pek_cert_len;

	/* copy OCA certificate blob from userspace */
	oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
	if (IS_ERR(oca_blob)) {
		ret = PTR_ERR(oca_blob);
		goto e_free_pek;
	}

	data.oca_cert_address = __psp_pa(oca_blob);
	data.oca_cert_len = input.oca_cert_len;

	/* If platform is not in INIT state then transition it to INIT */
	if (sev->state != SEV_STATE_INIT) {
		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			goto e_free_oca;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error);

e_free_oca:
	kfree(oca_blob);
e_free_pek:
	kfree(pek_blob);
	return ret;
}

static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
{
	struct sev_user_data_get_id2 input;
	struct sev_data_get_id data;
	void __user *input_address;
	void *id_blob = NULL;
	int ret;

	/* SEV GET_ID is available from SEV API v0.16 and up */
	if (!sev_version_greater_or_equal(0, 16))
		return -ENOTSUPP;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	input_address = (void __user *)input.address;

	if (input.address && input.length) {
		/*
		 * The length of the ID shouldn't be assumed by software since
		 * it may change in the future. The allocation size is limited
		 * to 1 << (PAGE_SHIFT + MAX_ORDER - 1) by the page allocator.
		 * If the allocation fails, simply return ENOMEM rather than
		 * warning in the kernel log.
		 */
		id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN);
		if (!id_blob)
			return -ENOMEM;

		data.address = __psp_pa(id_blob);
		data.len = input.length;
	} else {
		data.address = 0;
		data.len = 0;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, &data, &argp->error);

	/*
	 * Firmware will return the length of the ID value (either the minimum
	 * required length or the actual length written), return it to the user.
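	 * A caller that passed a NULL address or zero length is thus simply
	 * querying the required length.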
	 */
	input.length = data.len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free;
	}

	if (id_blob) {
		if (copy_to_user(input_address, id_blob, data.len)) {
			ret = -EFAULT;
			goto e_free;
		}
	}

e_free:
	kfree(id_blob);

	return ret;
}

static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
{
	struct sev_data_get_id *data;
	u64 data_size, user_size;
	void *id_blob, *mem;
	int ret;

	/* SEV GET_ID available from SEV API v0.16 and up */
	if (!sev_version_greater_or_equal(0, 16))
		return -ENOTSUPP;

	/* SEV FW expects the buffer it fills with the ID to be
	 * 8-byte aligned. Memory allocated should be enough to
	 * hold data structure + alignment padding + memory
	 * where SEV FW writes the ID.
	 */
	data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
	user_size = sizeof(struct sev_user_data_get_id);

	mem = kzalloc(data_size + user_size, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	data = mem;
	id_blob = mem + data_size;

	data->address = __psp_pa(id_blob);
	data->len = user_size;

	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
	if (!ret) {
		if (copy_to_user((void __user *)argp->data, id_blob, data->len))
			ret = -EFAULT;
	}

	kfree(mem);

	return ret;
}

static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pdh_cert_export input;
	void *pdh_blob = NULL, *cert_blob = NULL;
	struct sev_data_pdh_cert_export data;
	void __user *input_cert_chain_address;
	void __user *input_pdh_cert_address;
	int ret;

	/* If platform is not in INIT state then transition it to INIT. */
	if (sev->state != SEV_STATE_INIT) {
		if (!writable)
			return -EPERM;

		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			return ret;
	}

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* Userspace wants to query the certificate length. */
	if (!input.pdh_cert_address ||
	    !input.pdh_cert_len ||
	    !input.cert_chain_address)
		goto cmd;

	input_pdh_cert_address = (void __user *)input.pdh_cert_address;
	input_cert_chain_address = (void __user *)input.cert_chain_address;

	/* Allocate a physically contiguous buffer to store the PDH blob. */
	if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE)
		return -EFAULT;

	/* Allocate a physically contiguous buffer to store the cert chain blob. */
	if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE)
		return -EFAULT;

	pdh_blob = kzalloc(input.pdh_cert_len, GFP_KERNEL);
	if (!pdh_blob)
		return -ENOMEM;

	data.pdh_cert_address = __psp_pa(pdh_blob);
	data.pdh_cert_len = input.pdh_cert_len;

	cert_blob = kzalloc(input.cert_chain_len, GFP_KERNEL);
	if (!cert_blob) {
		ret = -ENOMEM;
		goto e_free_pdh;
	}

	data.cert_chain_address = __psp_pa(cert_blob);
	data.cert_chain_len = input.cert_chain_len;

cmd:
	ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error);

	/* If we query the length, FW responded with expected data. */
	input.cert_chain_len = data.cert_chain_len;
	input.pdh_cert_len = data.pdh_cert_len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free_cert;
	}

	if (pdh_blob) {
		if (copy_to_user(input_pdh_cert_address,
				 pdh_blob, input.pdh_cert_len)) {
			ret = -EFAULT;
			goto e_free_cert;
		}
	}

	if (cert_blob) {
		if (copy_to_user(input_cert_chain_address,
				 cert_blob, input.cert_chain_len))
			ret = -EFAULT;
	}

e_free_cert:
	kfree(cert_blob);
e_free_pdh:
	kfree(pdh_blob);
	return ret;
}

static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sev_issue_cmd input;
	int ret = -EFAULT;
	bool writable = file->f_mode & FMODE_WRITE;

	if (!psp_master || !psp_master->sev_data)
		return -ENODEV;

	if (ioctl != SEV_ISSUE_CMD)
		return -EINVAL;

	if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
		return -EFAULT;

	if (input.cmd > SEV_MAX)
		return -EINVAL;

	mutex_lock(&sev_cmd_mutex);

	switch (input.cmd) {

	case SEV_FACTORY_RESET:
		ret = sev_ioctl_do_reset(&input, writable);
		break;
	case SEV_PLATFORM_STATUS:
		ret = sev_ioctl_do_platform_status(&input);
		break;
	case SEV_PEK_GEN:
		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable);
		break;
	case SEV_PDH_GEN:
		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable);
		break;
	case SEV_PEK_CSR:
		ret = sev_ioctl_do_pek_csr(&input, writable);
		break;
	case SEV_PEK_CERT_IMPORT:
		ret = sev_ioctl_do_pek_import(&input, writable);
		break;
	case SEV_PDH_CERT_EXPORT:
		ret = sev_ioctl_do_pdh_export(&input, writable);
		break;
	case SEV_GET_ID:
		pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
		ret = sev_ioctl_do_get_id(&input);
		break;
	case SEV_GET_ID2:
		ret = sev_ioctl_do_get_id2(&input);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
		ret = -EFAULT;
out:
	mutex_unlock(&sev_cmd_mutex);

	return ret;
}

static const struct file_operations sev_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= sev_ioctl,
};

int sev_platform_status(struct sev_user_data_status *data, int *error)
{
	return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
}
EXPORT_SYMBOL_GPL(sev_platform_status);

int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
{
	return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_deactivate);

int sev_guest_activate(struct sev_data_activate *data, int *error)
{
	return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_activate);

int sev_guest_decommission(struct sev_data_decommission *data, int *error)
{
	return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_decommission);

int sev_guest_df_flush(int *error)
{
	return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
}
EXPORT_SYMBOL_GPL(sev_guest_df_flush);

static void sev_exit(struct kref *ref)
{
	misc_deregister(&misc_dev->misc);
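	/* Last reference is gone; free the shared miscdevice wrapper. */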
	kfree(misc_dev);
	misc_dev = NULL;
}

static int sev_misc_init(struct sev_device *sev)
{
	struct device *dev = sev->dev;
	int ret;

	/*
	 * SEV feature support can be detected on multiple devices but the SEV
	 * FW commands must be issued on the master. During probe, we do not
	 * know the master hence we create /dev/sev on the first device probe.
	 * sev_do_cmd() finds the right master device and issues the command to
	 * its firmware.
	 */
	if (!misc_dev) {
		struct miscdevice *misc;

		misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
		if (!misc_dev)
			return -ENOMEM;

		misc = &misc_dev->misc;
		misc->minor = MISC_DYNAMIC_MINOR;
		misc->name = DEVICE_NAME;
		misc->fops = &sev_fops;

		ret = misc_register(misc);
		if (ret) {
			kfree(misc_dev);
			misc_dev = NULL;
			return ret;
		}

		kref_init(&misc_dev->refcount);
	} else {
		kref_get(&misc_dev->refcount);
	}

	init_waitqueue_head(&sev->int_queue);
	sev->misc = misc_dev;
	dev_dbg(dev, "registered SEV device\n");

	return 0;
}

int sev_dev_init(struct psp_device *psp)
{
	struct device *dev = psp->dev;
	struct sev_device *sev;
	int ret = -ENOMEM;

	if (!boot_cpu_has(X86_FEATURE_SEV)) {
		dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n");
		return 0;
	}

	sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
	if (!sev)
		goto e_err;

	sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
	if (!sev->cmd_buf)
		goto e_sev;

	psp->sev_data = sev;

	sev->dev = dev;
	sev->psp = psp;

	sev->io_regs = psp->io_regs;

	sev->vdata = (struct sev_vdata *)psp->vdata->sev;
	if (!sev->vdata) {
		ret = -ENODEV;
		dev_err(dev, "sev: missing driver data\n");
		goto e_buf;
	}

	psp_set_sev_irq_handler(psp, sev_irq_handler, sev);

	ret = sev_misc_init(sev);
	if (ret)
		goto e_irq;

	dev_notice(dev, "sev enabled\n");

	return 0;

e_irq:
	psp_clear_sev_irq_handler(psp);
e_buf:
	devm_free_pages(dev, (unsigned long)sev->cmd_buf);
e_sev:
	devm_kfree(dev, sev);
e_err:
	psp->sev_data = NULL;

	dev_notice(dev, "sev initialization failed\n");

	return ret;
}

static void sev_firmware_shutdown(struct sev_device *sev)
{
	sev_platform_shutdown(NULL);

	if (sev_es_tmr) {
		/* The TMR area was encrypted, flush it from the cache */
		wbinvd_on_all_cpus();

		free_pages((unsigned long)sev_es_tmr,
			   get_order(SEV_ES_TMR_SIZE));
		sev_es_tmr = NULL;
	}

	if (sev_init_ex_buffer) {
		free_pages((unsigned long)sev_init_ex_buffer,
			   get_order(NV_LENGTH));
		sev_init_ex_buffer = NULL;
	}
}

void sev_dev_destroy(struct psp_device *psp)
{
	struct sev_device *sev = psp->sev_data;

	if (!sev)
		return;

	sev_firmware_shutdown(sev);

	if (sev->misc)
		kref_put(&misc_dev->refcount, sev_exit);

	psp_clear_sev_irq_handler(psp);
}

int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
				void *data, int *error)
{
	if (!filep || filep->f_op != &sev_fops)
		return -EBADF;

	return sev_do_cmd(cmd, data, error);
}
EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);

void sev_pci_init(void)
{
	struct sev_device *sev = psp_master->sev_data;
	int error, rc;

	if (!sev)
		return;

	psp_timeout = psp_probe_timeout;

	if (sev_get_api_version())
		goto err;

	if (sev_update_firmware(sev->dev) == 0)
		sev_get_api_version();

	/* If an init_ex_path is provided, rely on INIT_EX for PSP initialization
	 * instead of INIT.
	 */
	if (init_ex_path) {
		sev_init_ex_buffer = sev_fw_alloc(NV_LENGTH);
		if (!sev_init_ex_buffer) {
			dev_err(sev->dev,
				"SEV: INIT_EX NV memory allocation failed\n");
			goto err;
		}
	}

	/* Obtain the TMR memory area for SEV-ES use */
	sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE);
	if (sev_es_tmr)
		/* Must flush the cache before giving it to the firmware */
		clflush_cache_range(sev_es_tmr, SEV_ES_TMR_SIZE);
	else
		dev_warn(sev->dev,
			 "SEV: TMR allocation failed, SEV-ES support unavailable\n");

	if (!psp_init_on_probe)
		return;

	/* Initialize the platform */
	rc = sev_platform_init(&error);
	if (rc)
		dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
			error, rc);

	return;

err:
	psp_master->sev_data = NULL;
}

void sev_pci_exit(void)
{
	struct sev_device *sev = psp_master->sev_data;

	if (!sev)
		return;

	sev_firmware_shutdown(sev);
}