// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Encrypted Virtualization (SEV) interface
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Brijesh Singh <brijesh.singh@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/ccp.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/cpufeature.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>

#include <asm/smp.h>

#include "psp-dev.h"
#include "sev-dev.h"

#define DEVICE_NAME		"sev"
#define SEV_FW_FILE		"amd/sev.fw"
#define SEV_FW_NAME_SIZE	64

static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;

static int psp_cmd_timeout = 100;
module_param(psp_cmd_timeout, int, 0644);
MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");

static int psp_probe_timeout = 5;
module_param(psp_probe_timeout, int, 0644);
MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");

static char *init_ex_path;
module_param(init_ex_path, charp, 0444);
MODULE_PARM_DESC(init_ex_path, " Path for INIT_EX data; if set try INIT_EX");

static bool psp_init_on_probe = true;
module_param(psp_init_on_probe, bool, 0444);
MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");

MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
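
/*
 * psp_dead is set when a command times out so that no further commands are
 * sent to an unresponsive PSP. psp_timeout starts out as psp_probe_timeout
 * and is switched to psp_cmd_timeout once the first command has completed.
 */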
static bool psp_dead;
static int psp_timeout;

/* Trusted Memory Region (TMR):
 * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
 * to allocate the memory, which will return aligned memory for the specified
 * allocation order.
 */
#define SEV_ES_TMR_SIZE		(1024 * 1024)
static void *sev_es_tmr;

/* INIT_EX NV Storage:
 * The NV Storage is a 32KB area and must be 4KB page aligned. Use the page
 * allocator to allocate the memory, which will return aligned memory for the
 * specified allocation order.
 */
#define NV_LENGTH (32 * 1024)
static void *sev_init_ex_buffer;

static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
	struct sev_device *sev = psp_master->sev_data;

	if (sev->api_major > maj)
		return true;

	if (sev->api_major == maj && sev->api_minor >= min)
		return true;

	return false;
}

static void sev_irq_handler(int irq, void *data, unsigned int status)
{
	struct sev_device *sev = data;
	int reg;

	/* Check if it is command completion: */
	if (!(status & SEV_CMD_COMPLETE))
		return;

	/* Check if it is SEV command completion: */
	reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
	if (reg & PSP_CMDRESP_RESP) {
		sev->int_rcvd = 1;
		wake_up(&sev->int_queue);
	}
}

static int sev_wait_cmd_ioc(struct sev_device *sev,
			    unsigned int *reg, unsigned int timeout)
{
	int ret;

	ret = wait_event_timeout(sev->int_queue,
			sev->int_rcvd, timeout * HZ);
	if (!ret)
		return -ETIMEDOUT;

	*reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);

	return 0;
}

static int sev_cmd_buffer_len(int cmd)
{
	switch (cmd) {
	case SEV_CMD_INIT:			return sizeof(struct sev_data_init);
	case SEV_CMD_INIT_EX:			return sizeof(struct sev_data_init_ex);
	case SEV_CMD_PLATFORM_STATUS:		return sizeof(struct sev_user_data_status);
	case SEV_CMD_PEK_CSR:			return sizeof(struct sev_data_pek_csr);
	case SEV_CMD_PEK_CERT_IMPORT:		return sizeof(struct sev_data_pek_cert_import);
	case SEV_CMD_PDH_CERT_EXPORT:		return sizeof(struct sev_data_pdh_cert_export);
	case SEV_CMD_LAUNCH_START:		return sizeof(struct sev_data_launch_start);
	case SEV_CMD_LAUNCH_UPDATE_DATA:	return sizeof(struct sev_data_launch_update_data);
	case SEV_CMD_LAUNCH_UPDATE_VMSA:	return sizeof(struct sev_data_launch_update_vmsa);
	case SEV_CMD_LAUNCH_FINISH:		return sizeof(struct sev_data_launch_finish);
	case SEV_CMD_LAUNCH_MEASURE:		return sizeof(struct sev_data_launch_measure);
	case SEV_CMD_ACTIVATE:			return sizeof(struct sev_data_activate);
	case SEV_CMD_DEACTIVATE:		return sizeof(struct sev_data_deactivate);
	case SEV_CMD_DECOMMISSION:		return sizeof(struct sev_data_decommission);
	case SEV_CMD_GUEST_STATUS:		return sizeof(struct sev_data_guest_status);
	case SEV_CMD_DBG_DECRYPT:		return sizeof(struct sev_data_dbg);
	case SEV_CMD_DBG_ENCRYPT:		return sizeof(struct sev_data_dbg);
	case SEV_CMD_SEND_START:		return sizeof(struct sev_data_send_start);
	case SEV_CMD_SEND_UPDATE_DATA:		return sizeof(struct sev_data_send_update_data);
	case SEV_CMD_SEND_UPDATE_VMSA:		return sizeof(struct sev_data_send_update_vmsa);
	case SEV_CMD_SEND_FINISH:		return sizeof(struct sev_data_send_finish);
	case SEV_CMD_RECEIVE_START:		return sizeof(struct sev_data_receive_start);
	case SEV_CMD_RECEIVE_FINISH:		return sizeof(struct sev_data_receive_finish);
	case SEV_CMD_RECEIVE_UPDATE_DATA:	return sizeof(struct sev_data_receive_update_data);
	case SEV_CMD_RECEIVE_UPDATE_VMSA:	return sizeof(struct sev_data_receive_update_vmsa);
	case SEV_CMD_LAUNCH_UPDATE_SECRET:	return sizeof(struct sev_data_launch_secret);
	case SEV_CMD_DOWNLOAD_FIRMWARE:		return sizeof(struct sev_data_download_firmware);
	case SEV_CMD_GET_ID:			return sizeof(struct sev_data_get_id);
	case SEV_CMD_ATTESTATION_REPORT:	return sizeof(struct sev_data_attestation_report);
	case SEV_CMD_SEND_CANCEL:		return sizeof(struct sev_data_send_cancel);
	default:				return 0;
	}

	return 0;
}
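
/*
 * A length of zero from sev_cmd_buffer_len() means the command takes no
 * parameter buffer (e.g. SEV_CMD_DF_FLUSH or SEV_CMD_SHUTDOWN);
 * __sev_do_cmd_locked() cross-checks this against @data being NULL.
 */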

static void *sev_fw_alloc(unsigned long len)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL, get_order(len));
	if (!page)
		return NULL;

	return page_address(page);
}

static struct file *open_file_as_root(const char *filename, int flags, umode_t mode)
{
	struct file *fp;
	struct path root;
	struct cred *cred;
	const struct cred *old_cred;

	task_lock(&init_task);
	get_fs_root(init_task.fs, &root);
	task_unlock(&init_task);

	cred = prepare_creds();
	if (!cred)
		return ERR_PTR(-ENOMEM);
	cred->fsuid = GLOBAL_ROOT_UID;
	old_cred = override_creds(cred);

	fp = file_open_root(&root, filename, flags, mode);
	path_put(&root);

	revert_creds(old_cred);

	return fp;
}

static int sev_read_init_ex_file(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct file *fp;
	ssize_t nread;

	lockdep_assert_held(&sev_cmd_mutex);

	if (!sev_init_ex_buffer)
		return -EOPNOTSUPP;

	fp = open_file_as_root(init_ex_path, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		int ret = PTR_ERR(fp);

		if (ret == -ENOENT) {
			dev_info(sev->dev,
				"SEV: %s does not exist and will be created later.\n",
				init_ex_path);
			ret = 0;
		} else {
			dev_err(sev->dev,
				"SEV: could not open %s for read, error %d\n",
				init_ex_path, ret);
		}
		return ret;
	}

	nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL);
	if (nread != NV_LENGTH) {
		dev_info(sev->dev,
			"SEV: could not read %u bytes from non volatile memory area, ret %ld\n",
			NV_LENGTH, nread);
	}

	dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread);
	filp_close(fp, NULL);

	return 0;
}

static int sev_write_init_ex_file(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct file *fp;
	loff_t offset = 0;
	ssize_t nwrite;

	lockdep_assert_held(&sev_cmd_mutex);

	if (!sev_init_ex_buffer)
		return 0;

	fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600);
	if (IS_ERR(fp)) {
		int ret = PTR_ERR(fp);

		dev_err(sev->dev,
			"SEV: could not open file for write, error %d\n",
			ret);
		return ret;
	}

	nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset);
	vfs_fsync(fp, 0);
	filp_close(fp, NULL);

	if (nwrite != NV_LENGTH) {
		dev_err(sev->dev,
			"SEV: failed to write %u bytes to non volatile memory area, ret %ld\n",
			NV_LENGTH, nwrite);
		return -EIO;
	}

	dev_dbg(sev->dev, "SEV: write successful to NV file\n");

	return 0;
}
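
/*
 * The INIT_EX flow keeps the file at init_ex_path in sync with the firmware's
 * persistent state: the NV buffer is read from the file before INIT_EX is
 * issued and written back after any command that may have modified it.
 */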

static int sev_write_init_ex_file_if_required(int cmd_id)
{
	lockdep_assert_held(&sev_cmd_mutex);

	if (!sev_init_ex_buffer)
		return 0;

	/*
	 * Only a few platform commands modify the SPI/NV area: INIT(_EX),
	 * FACTORY_RESET (PLATFORM_RESET in the SEV spec), PEK_GEN,
	 * PEK_CERT_IMPORT, and PDH_GEN. None of the non-platform commands do.
	 */
	switch (cmd_id) {
	case SEV_CMD_FACTORY_RESET:
	case SEV_CMD_INIT_EX:
	case SEV_CMD_PDH_GEN:
	case SEV_CMD_PEK_CERT_IMPORT:
	case SEV_CMD_PEK_GEN:
		break;
	default:
		return 0;
	}

	return sev_write_init_ex_file();
}

static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
	struct psp_device *psp = psp_master;
	struct sev_device *sev;
	unsigned int phys_lsb, phys_msb;
	unsigned int reg, ret = 0;
	int buf_len;

	if (!psp || !psp->sev_data)
		return -ENODEV;

	if (psp_dead)
		return -EBUSY;

	sev = psp->sev_data;

	buf_len = sev_cmd_buffer_len(cmd);
	if (WARN_ON_ONCE(!data != !buf_len))
		return -EINVAL;

	/*
	 * Copy the incoming data to driver's scratch buffer as __pa() will not
	 * work for some memory, e.g. vmalloc'd addresses, and @data may not be
	 * physically contiguous.
	 */
	if (data)
		memcpy(sev->cmd_buf, data, buf_len);

	/* Get the physical address of the command buffer */
	phys_lsb = data ? lower_32_bits(__psp_pa(sev->cmd_buf)) : 0;
	phys_msb = data ? upper_32_bits(__psp_pa(sev->cmd_buf)) : 0;

	dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
		cmd, phys_msb, phys_lsb, psp_timeout);

	print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
			     buf_len, false);

	iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
	iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);

	sev->int_rcvd = 0;

	reg = cmd;
	reg <<= SEV_CMDRESP_CMD_SHIFT;
	reg |= SEV_CMDRESP_IOC;
	iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);

	/* wait for command completion */
	ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout);
	if (ret) {
		if (psp_ret)
			*psp_ret = 0;

		dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
		psp_dead = true;

		return ret;
	}

	psp_timeout = psp_cmd_timeout;

	if (psp_ret)
		*psp_ret = reg & PSP_CMDRESP_ERR_MASK;

	if (reg & PSP_CMDRESP_ERR_MASK) {
		dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
			cmd, reg & PSP_CMDRESP_ERR_MASK);
		ret = -EIO;
	} else {
		ret = sev_write_init_ex_file_if_required(cmd);
	}

	print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
			     buf_len, false);

	/*
	 * Copy potential output from the PSP back to data. Do this even on
	 * failure in case the caller wants to glean something from the error.
	 */
	if (data)
		memcpy(data, sev->cmd_buf, buf_len);

	return ret;
}

static int sev_do_cmd(int cmd, void *data, int *psp_ret)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_do_cmd_locked(cmd, data, psp_ret);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}
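
/*
 * Example (sketch): issuing a command whose parameter buffer is filled in by
 * the firmware. This is what sev_platform_status() below boils down to:
 *
 *	struct sev_user_data_status status;
 *	int error, rc;
 *
 *	rc = sev_do_cmd(SEV_CMD_PLATFORM_STATUS, &status, &error);
 *
 * On success @status holds the data returned by the PSP; on -EIO @error holds
 * the SEV status code read back from the command/response register.
 */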

static int __sev_init_locked(int *error)
{
	struct sev_data_init data;

	memset(&data, 0, sizeof(data));
	if (sev_es_tmr) {
		/*
		 * Do not include the encryption mask on the physical
		 * address of the TMR (firmware should clear it anyway).
		 */
		data.tmr_address = __pa(sev_es_tmr);

		data.flags |= SEV_INIT_FLAGS_SEV_ES;
		data.tmr_len = SEV_ES_TMR_SIZE;
	}

	return __sev_do_cmd_locked(SEV_CMD_INIT, &data, error);
}

static int __sev_init_ex_locked(int *error)
{
	struct sev_data_init_ex data;

	memset(&data, 0, sizeof(data));
	data.length = sizeof(data);
	data.nv_address = __psp_pa(sev_init_ex_buffer);
	data.nv_len = NV_LENGTH;

	if (sev_es_tmr) {
		/*
		 * Do not include the encryption mask on the physical
		 * address of the TMR (firmware should clear it anyway).
		 */
		data.tmr_address = __pa(sev_es_tmr);

		data.flags |= SEV_INIT_FLAGS_SEV_ES;
		data.tmr_len = SEV_ES_TMR_SIZE;
	}

	return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error);
}

static int __sev_platform_init_locked(int *error)
{
	struct psp_device *psp = psp_master;
	struct sev_device *sev;
	int rc = 0, psp_ret = -1;
	int (*init_function)(int *error);

	if (!psp || !psp->sev_data)
		return -ENODEV;

	sev = psp->sev_data;

	if (sev->state == SEV_STATE_INIT)
		return 0;

	if (sev_init_ex_buffer) {
		init_function = __sev_init_ex_locked;
		rc = sev_read_init_ex_file();
		if (rc)
			return rc;
	} else {
		init_function = __sev_init_locked;
	}

	rc = init_function(&psp_ret);
	if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
		/*
		 * Initialization command returned an integrity check failure
		 * status code, meaning that firmware load and validation of SEV
		 * related persistent data has failed. Retrying the
		 * initialization function should succeed by replacing the state
		 * with a reset state.
		 */
		dev_err(sev->dev,
			"SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state.");
		rc = init_function(&psp_ret);
	}
	if (error)
		*error = psp_ret;

	if (rc)
		return rc;

	sev->state = SEV_STATE_INIT;

	/* Prepare for first SEV guest launch after INIT */
	wbinvd_on_all_cpus();
	rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
	if (rc)
		return rc;

	dev_dbg(sev->dev, "SEV firmware initialized\n");

	dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
		 sev->api_minor, sev->build);

	return 0;
}

int sev_platform_init(int *error)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_platform_init_locked(error);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(sev_platform_init);

static int __sev_platform_shutdown_locked(int *error)
{
	struct sev_device *sev = psp_master->sev_data;
	int ret;

	if (!sev || sev->state == SEV_STATE_UNINIT)
		return 0;

	ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
	if (ret)
		return ret;

	sev->state = SEV_STATE_UNINIT;
	dev_dbg(sev->dev, "SEV firmware shutdown\n");

	return ret;
}

static int sev_platform_shutdown(int *error)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_platform_shutdown_locked(NULL);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}

static int sev_get_platform_state(int *state, int *error)
{
	struct sev_user_data_status data;
	int rc;

	rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, error);
	if (rc)
		return rc;

	*state = data.state;
	return rc;
}
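
/*
 * The platform state reported by PLATFORM_STATUS moves between UNINIT (no
 * SEV state), INIT (firmware initialized, no active guests) and WORKING (at
 * least one guest active). The ioctl handlers below use it to decide whether
 * an INIT or SHUTDOWN transition is required first.
 */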

static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{
	int state, rc;

	if (!writable)
		return -EPERM;

	/*
	 * The SEV spec requires that FACTORY_RESET must be issued in
	 * UNINIT state. Before we go further, check if any guest is
	 * active.
	 *
	 * If the FW is in the WORKING state then deny the request; otherwise
	 * issue a SHUTDOWN command to move from INIT to UNINIT before
	 * issuing the FACTORY_RESET.
	 */
	rc = sev_get_platform_state(&state, &argp->error);
	if (rc)
		return rc;

	if (state == SEV_STATE_WORKING)
		return -EBUSY;

	if (state == SEV_STATE_INIT) {
		rc = __sev_platform_shutdown_locked(&argp->error);
		if (rc)
			return rc;
	}

	return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
}

static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
{
	struct sev_user_data_status data;
	int ret;

	memset(&data, 0, sizeof(data));

	ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error);
	if (ret)
		return ret;

	if (copy_to_user((void __user *)argp->data, &data, sizeof(data)))
		ret = -EFAULT;

	return ret;
}

static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	int rc;

	if (!writable)
		return -EPERM;

	if (sev->state == SEV_STATE_UNINIT) {
		rc = __sev_platform_init_locked(&argp->error);
		if (rc)
			return rc;
	}

	return __sev_do_cmd_locked(cmd, NULL, &argp->error);
}

static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pek_csr input;
	struct sev_data_pek_csr data;
	void __user *input_address;
	void *blob = NULL;
	int ret;

	if (!writable)
		return -EPERM;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* userspace wants to query CSR length */
	if (!input.address || !input.length)
		goto cmd;

	/* allocate a physically contiguous buffer to store the CSR blob */
	input_address = (void __user *)input.address;
	if (input.length > SEV_FW_BLOB_MAX_SIZE)
		return -EFAULT;

	blob = kzalloc(input.length, GFP_KERNEL);
	if (!blob)
		return -ENOMEM;

	data.address = __psp_pa(blob);
	data.len = input.length;

cmd:
	if (sev->state == SEV_STATE_UNINIT) {
		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			goto e_free_blob;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, &data, &argp->error);

	/* If we query the CSR length, FW responded with expected data. */
	input.length = data.len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free_blob;
	}

	if (blob) {
		if (copy_to_user(input_address, blob, input.length))
			ret = -EFAULT;
	}

e_free_blob:
	kfree(blob);
	return ret;
}

void *psp_copy_user_blob(u64 uaddr, u32 len)
{
	if (!uaddr || !len)
		return ERR_PTR(-EINVAL);

	/* verify that blob length does not exceed our limit */
	if (len > SEV_FW_BLOB_MAX_SIZE)
		return ERR_PTR(-EINVAL);

	return memdup_user((void __user *)uaddr, len);
}
EXPORT_SYMBOL_GPL(psp_copy_user_blob);
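
/*
 * psp_copy_user_blob() is exported for other in-kernel SEV users (e.g. KVM's
 * SEV ioctls) that need to pull a bounded, user-supplied blob such as a
 * certificate or session buffer into kernel memory.
 */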

static int sev_get_api_version(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_status status;
	int error = 0, ret;

	ret = sev_platform_status(&status, &error);
	if (ret) {
		dev_err(sev->dev,
			"SEV: failed to get status. Error: %#x\n", error);
		return 1;
	}

	sev->api_major = status.api_major;
	sev->api_minor = status.api_minor;
	sev->build = status.build;
	sev->state = status.state;

	return 0;
}

static int sev_get_firmware(struct device *dev,
			    const struct firmware **firmware)
{
	char fw_name_specific[SEV_FW_NAME_SIZE];
	char fw_name_subset[SEV_FW_NAME_SIZE];

	snprintf(fw_name_specific, sizeof(fw_name_specific),
		 "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
		 boot_cpu_data.x86, boot_cpu_data.x86_model);

	snprintf(fw_name_subset, sizeof(fw_name_subset),
		 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
		 boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);

	/* Check for SEV FW for a particular model.
	 * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
	 *
	 * or
	 *
	 * Check for SEV FW common to a subset of models.
	 * Ex. amd_sev_fam17h_model0xh.sbin for
	 *     Family 17h Model 00h -- Family 17h Model 0Fh
	 *
	 * or
	 *
	 * Fall-back to using generic name: sev.fw
	 */
	if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
	    (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
	    (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
		return 0;

	return -ENOENT;
}
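
/*
 * For example, on family 0x19 model 0x01 the lookup above tries
 * amd/amd_sev_fam19h_model01h.sbin first, then amd/amd_sev_fam19h_model0xh.sbin,
 * and finally the generic amd/sev.fw.
 */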

/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
static int sev_update_firmware(struct device *dev)
{
	struct sev_data_download_firmware *data;
	const struct firmware *firmware;
	int ret, error, order;
	struct page *p;
	u64 data_size;

	if (!sev_version_greater_or_equal(0, 15)) {
		dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n");
		return -1;
	}

	if (sev_get_firmware(dev, &firmware) == -ENOENT) {
		dev_dbg(dev, "No SEV firmware file present\n");
		return -1;
	}

	/*
	 * SEV FW expects the physical address given to it to be 32
	 * byte aligned. Memory allocated has structure placed at the
	 * beginning followed by the firmware being passed to the SEV
	 * FW. Allocate enough memory for data structure + alignment
	 * padding + SEV FW.
	 */
	data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);

	order = get_order(firmware->size + data_size);
	p = alloc_pages(GFP_KERNEL, order);
	if (!p) {
		ret = -1;
		goto fw_err;
	}

	/*
	 * Copy firmware data to a kernel allocated contiguous
	 * memory region.
	 */
	data = page_address(p);
	memcpy(page_address(p) + data_size, firmware->data, firmware->size);

	data->address = __psp_pa(page_address(p) + data_size);
	data->len = firmware->size;

	ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);

	/*
	 * A quirk for fixing the committed TCB version, when upgrading from a
	 * firmware version earlier than 1.50.
	 */
	if (!ret && !sev_version_greater_or_equal(1, 50))
		ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);

	if (ret)
		dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
	else
		dev_info(dev, "SEV firmware update successful\n");

	__free_pages(p, order);

fw_err:
	release_firmware(firmware);

	return ret;
}

static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pek_cert_import input;
	struct sev_data_pek_cert_import data;
	void *pek_blob, *oca_blob;
	int ret;

	if (!writable)
		return -EPERM;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	/* copy PEK certificate blob from userspace */
	pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
	if (IS_ERR(pek_blob))
		return PTR_ERR(pek_blob);

	data.reserved = 0;
	data.pek_cert_address = __psp_pa(pek_blob);
	data.pek_cert_len = input.pek_cert_len;

	/* copy OCA certificate blob from userspace */
	oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
	if (IS_ERR(oca_blob)) {
		ret = PTR_ERR(oca_blob);
		goto e_free_pek;
	}

	data.oca_cert_address = __psp_pa(oca_blob);
	data.oca_cert_len = input.oca_cert_len;

	/* If platform is not in INIT state then transition it to INIT */
	if (sev->state != SEV_STATE_INIT) {
		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			goto e_free_oca;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error);

e_free_oca:
	kfree(oca_blob);
e_free_pek:
	kfree(pek_blob);
	return ret;
}

static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
{
	struct sev_user_data_get_id2 input;
	struct sev_data_get_id data;
	void __user *input_address;
	void *id_blob = NULL;
	int ret;

	/* SEV GET_ID is available from SEV API v0.16 and up */
	if (!sev_version_greater_or_equal(0, 16))
		return -ENOTSUPP;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	input_address = (void __user *)input.address;

	if (input.address && input.length) {
		id_blob = kzalloc(input.length, GFP_KERNEL);
		if (!id_blob)
			return -ENOMEM;

		data.address = __psp_pa(id_blob);
		data.len = input.length;
	} else {
		data.address = 0;
		data.len = 0;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, &data, &argp->error);

	/*
	 * Firmware will return the length of the ID value (either the minimum
	 * required length or the actual length written), return it to the user.
	 */
	input.length = data.len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free;
	}

	if (id_blob) {
		if (copy_to_user(input_address, id_blob, data.len)) {
			ret = -EFAULT;
			goto e_free;
		}
	}

e_free:
	kfree(id_blob);

	return ret;
}
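
/*
 * SEV_GET_ID is the older, fixed-size variant of SEV_GET_ID2: it always hands
 * the firmware a sizeof(struct sev_user_data_get_id) buffer, whereas GET_ID2
 * lets userspace size the buffer or query the required length by passing a
 * zero-length buffer.
 */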
static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
{
	struct sev_data_get_id *data;
	u64 data_size, user_size;
	void *id_blob, *mem;
	int ret;

	/* SEV GET_ID available from SEV API v0.16 and up */
	if (!sev_version_greater_or_equal(0, 16))
		return -ENOTSUPP;

	/* SEV FW expects the buffer it fills with the ID to be
	 * 8-byte aligned. Allocate enough memory to hold the data
	 * structure + alignment padding + memory where SEV FW writes
	 * the ID.
	 */
	data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
	user_size = sizeof(struct sev_user_data_get_id);

	mem = kzalloc(data_size + user_size, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	data = mem;
	id_blob = mem + data_size;

	data->address = __psp_pa(id_blob);
	data->len = user_size;

	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
	if (!ret) {
		if (copy_to_user((void __user *)argp->data, id_blob, data->len))
			ret = -EFAULT;
	}

	kfree(mem);

	return ret;
}

static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pdh_cert_export input;
	void *pdh_blob = NULL, *cert_blob = NULL;
	struct sev_data_pdh_cert_export data;
	void __user *input_cert_chain_address;
	void __user *input_pdh_cert_address;
	int ret;

	/* If platform is not in INIT state then transition it to INIT. */
	if (sev->state != SEV_STATE_INIT) {
		if (!writable)
			return -EPERM;

		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			return ret;
	}

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* Userspace wants to query the certificate length. */
	if (!input.pdh_cert_address ||
	    !input.pdh_cert_len ||
	    !input.cert_chain_address)
		goto cmd;

	input_pdh_cert_address = (void __user *)input.pdh_cert_address;
	input_cert_chain_address = (void __user *)input.cert_chain_address;

	/* Allocate a physically contiguous buffer to store the PDH blob. */
	if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE)
		return -EFAULT;

	/* Allocate a physically contiguous buffer to store the cert chain blob. */
	if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE)
		return -EFAULT;

	pdh_blob = kzalloc(input.pdh_cert_len, GFP_KERNEL);
	if (!pdh_blob)
		return -ENOMEM;

	data.pdh_cert_address = __psp_pa(pdh_blob);
	data.pdh_cert_len = input.pdh_cert_len;

	cert_blob = kzalloc(input.cert_chain_len, GFP_KERNEL);
	if (!cert_blob) {
		ret = -ENOMEM;
		goto e_free_pdh;
	}

	data.cert_chain_address = __psp_pa(cert_blob);
	data.cert_chain_len = input.cert_chain_len;

cmd:
	ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error);

	/* If we query the length, FW responded with expected data. */
	input.cert_chain_len = data.cert_chain_len;
	input.pdh_cert_len = data.pdh_cert_len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free_cert;
	}

	if (pdh_blob) {
		if (copy_to_user(input_pdh_cert_address,
				 pdh_blob, input.pdh_cert_len)) {
			ret = -EFAULT;
			goto e_free_cert;
		}
	}

	if (cert_blob) {
		if (copy_to_user(input_cert_chain_address,
				 cert_blob, input.cert_chain_len))
			ret = -EFAULT;
	}

e_free_cert:
	kfree(cert_blob);
e_free_pdh:
	kfree(pdh_blob);
	return ret;
}

static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sev_issue_cmd input;
	int ret = -EFAULT;
	bool writable = file->f_mode & FMODE_WRITE;

	if (!psp_master || !psp_master->sev_data)
		return -ENODEV;

	if (ioctl != SEV_ISSUE_CMD)
		return -EINVAL;

	if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
		return -EFAULT;

	if (input.cmd > SEV_MAX)
		return -EINVAL;

	mutex_lock(&sev_cmd_mutex);

	switch (input.cmd) {

	case SEV_FACTORY_RESET:
		ret = sev_ioctl_do_reset(&input, writable);
		break;
	case SEV_PLATFORM_STATUS:
		ret = sev_ioctl_do_platform_status(&input);
		break;
	case SEV_PEK_GEN:
		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable);
		break;
	case SEV_PDH_GEN:
		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable);
		break;
	case SEV_PEK_CSR:
		ret = sev_ioctl_do_pek_csr(&input, writable);
		break;
	case SEV_PEK_CERT_IMPORT:
		ret = sev_ioctl_do_pek_import(&input, writable);
		break;
	case SEV_PDH_CERT_EXPORT:
		ret = sev_ioctl_do_pdh_export(&input, writable);
		break;
	case SEV_GET_ID:
		pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
		ret = sev_ioctl_do_get_id(&input);
		break;
	case SEV_GET_ID2:
		ret = sev_ioctl_do_get_id2(&input);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
		ret = -EFAULT;
out:
	mutex_unlock(&sev_cmd_mutex);

	return ret;
}

static const struct file_operations sev_fops = {
	.owner	= THIS_MODULE,
	.unlocked_ioctl = sev_ioctl,
};

int sev_platform_status(struct sev_user_data_status *data, int *error)
{
	return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
}
EXPORT_SYMBOL_GPL(sev_platform_status);

int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
{
	return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_deactivate);

int sev_guest_activate(struct sev_data_activate *data, int *error)
{
	return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_activate);

int sev_guest_decommission(struct sev_data_decommission *data, int *error)
{
	return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_decommission);

int sev_guest_df_flush(int *error)
{
	return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
}
EXPORT_SYMBOL_GPL(sev_guest_df_flush);
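
/*
 * The sev_platform_*() and sev_guest_*() helpers above are the in-kernel
 * interface used by hypervisor code such as KVM to drive the SEV firmware on
 * behalf of encrypted guests.
 */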

static void sev_exit(struct kref *ref)
{
	misc_deregister(&misc_dev->misc);
	kfree(misc_dev);
	misc_dev = NULL;
}

static int sev_misc_init(struct sev_device *sev)
{
	struct device *dev = sev->dev;
	int ret;

	/*
	 * SEV feature support can be detected on multiple devices but the SEV
	 * FW commands must be issued on the master. During probe, we do not
	 * know the master, hence we create /dev/sev on the first device probe.
	 * sev_do_cmd() resolves the master device and issues the firmware
	 * command there.
	 */
	if (!misc_dev) {
		struct miscdevice *misc;

		misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
		if (!misc_dev)
			return -ENOMEM;

		misc = &misc_dev->misc;
		misc->minor = MISC_DYNAMIC_MINOR;
		misc->name = DEVICE_NAME;
		misc->fops = &sev_fops;

		ret = misc_register(misc);
		if (ret)
			return ret;

		kref_init(&misc_dev->refcount);
	} else {
		kref_get(&misc_dev->refcount);
	}

	init_waitqueue_head(&sev->int_queue);
	sev->misc = misc_dev;
	dev_dbg(dev, "registered SEV device\n");

	return 0;
}

int sev_dev_init(struct psp_device *psp)
{
	struct device *dev = psp->dev;
	struct sev_device *sev;
	int ret = -ENOMEM;

	if (!boot_cpu_has(X86_FEATURE_SEV)) {
		dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n");
		return 0;
	}

	sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
	if (!sev)
		goto e_err;

	sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
	if (!sev->cmd_buf)
		goto e_sev;

	psp->sev_data = sev;

	sev->dev = dev;
	sev->psp = psp;

	sev->io_regs = psp->io_regs;

	sev->vdata = (struct sev_vdata *)psp->vdata->sev;
	if (!sev->vdata) {
		ret = -ENODEV;
		dev_err(dev, "sev: missing driver data\n");
		goto e_buf;
	}

	psp_set_sev_irq_handler(psp, sev_irq_handler, sev);

	ret = sev_misc_init(sev);
	if (ret)
		goto e_irq;

	dev_notice(dev, "sev enabled\n");

	return 0;

e_irq:
	psp_clear_sev_irq_handler(psp);
e_buf:
	devm_free_pages(dev, (unsigned long)sev->cmd_buf);
e_sev:
	devm_kfree(dev, sev);
e_err:
	psp->sev_data = NULL;

	dev_notice(dev, "sev initialization failed\n");

	return ret;
}

static void sev_firmware_shutdown(struct sev_device *sev)
{
	sev_platform_shutdown(NULL);

	if (sev_es_tmr) {
		/* The TMR area was encrypted, flush it from the cache */
		wbinvd_on_all_cpus();

		free_pages((unsigned long)sev_es_tmr,
			   get_order(SEV_ES_TMR_SIZE));
		sev_es_tmr = NULL;
	}

	if (sev_init_ex_buffer) {
		free_pages((unsigned long)sev_init_ex_buffer,
			   get_order(NV_LENGTH));
		sev_init_ex_buffer = NULL;
	}
}

void sev_dev_destroy(struct psp_device *psp)
{
	struct sev_device *sev = psp->sev_data;

	if (!sev)
		return;

	sev_firmware_shutdown(sev);

	if (sev->misc)
		kref_put(&misc_dev->refcount, sev_exit);

	psp_clear_sev_irq_handler(psp);
}
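
/*
 * Issue a command on behalf of an external user (e.g. KVM, which receives a
 * /dev/sev file descriptor from userspace); the file is checked to really be
 * /dev/sev before any command is sent.
 */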
int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
				void *data, int *error)
{
	if (!filep || filep->f_op != &sev_fops)
		return -EBADF;

	return sev_do_cmd(cmd, data, error);
}
EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);

void sev_pci_init(void)
{
	struct sev_device *sev = psp_master->sev_data;
	int error, rc;

	if (!sev)
		return;

	psp_timeout = psp_probe_timeout;

	if (sev_get_api_version())
		goto err;

	if (sev_update_firmware(sev->dev) == 0)
		sev_get_api_version();

	/* If an init_ex_path is provided rely on INIT_EX for PSP initialization
	 * instead of INIT.
	 */
	if (init_ex_path) {
		sev_init_ex_buffer = sev_fw_alloc(NV_LENGTH);
		if (!sev_init_ex_buffer) {
			dev_err(sev->dev,
				"SEV: INIT_EX NV memory allocation failed\n");
			goto err;
		}
	}

	/* Obtain the TMR memory area for SEV-ES use */
	sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE);
	if (!sev_es_tmr)
		dev_warn(sev->dev,
			 "SEV: TMR allocation failed, SEV-ES support unavailable\n");

	if (!psp_init_on_probe)
		return;

	/* Initialize the platform */
	rc = sev_platform_init(&error);
	if (rc)
		dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
			error, rc);

	return;

err:
	psp_master->sev_data = NULL;
}

void sev_pci_exit(void)
{
	struct sev_device *sev = psp_master->sev_data;

	if (!sev)
		return;

	sev_firmware_shutdown(sev);
}