1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * AMD Secure Encrypted Virtualization (SEV) interface 4 * 5 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc. 6 * 7 * Author: Brijesh Singh <brijesh.singh@amd.com> 8 */ 9 10 #include <linux/bitfield.h> 11 #include <linux/module.h> 12 #include <linux/kernel.h> 13 #include <linux/kthread.h> 14 #include <linux/sched.h> 15 #include <linux/interrupt.h> 16 #include <linux/spinlock.h> 17 #include <linux/spinlock_types.h> 18 #include <linux/types.h> 19 #include <linux/mutex.h> 20 #include <linux/delay.h> 21 #include <linux/hw_random.h> 22 #include <linux/ccp.h> 23 #include <linux/firmware.h> 24 #include <linux/panic_notifier.h> 25 #include <linux/gfp.h> 26 #include <linux/cpufeature.h> 27 #include <linux/fs.h> 28 #include <linux/fs_struct.h> 29 #include <linux/psp.h> 30 #include <linux/amd-iommu.h> 31 #include <linux/crash_dump.h> 32 33 #include <asm/smp.h> 34 #include <asm/cacheflush.h> 35 #include <asm/e820/types.h> 36 #include <asm/sev.h> 37 #include <asm/msr.h> 38 39 #include "psp-dev.h" 40 #include "sev-dev.h" 41 42 #define DEVICE_NAME "sev" 43 #define SEV_FW_FILE "amd/sev.fw" 44 #define SEV_FW_NAME_SIZE 64 45 46 /* Minimum firmware version required for the SEV-SNP support */ 47 #define SNP_MIN_API_MAJOR 1 48 #define SNP_MIN_API_MINOR 51 49 50 /* 51 * Maximum number of firmware-writable buffers that might be specified 52 * in the parameters of a legacy SEV command buffer. 53 */ 54 #define CMD_BUF_FW_WRITABLE_MAX 2 55 56 /* Leave room in the descriptor array for an end-of-list indicator. */ 57 #define CMD_BUF_DESC_MAX (CMD_BUF_FW_WRITABLE_MAX + 1) 58 59 static DEFINE_MUTEX(sev_cmd_mutex); 60 static struct sev_misc_dev *misc_dev; 61 62 static int psp_cmd_timeout = 100; 63 module_param(psp_cmd_timeout, int, 0644); 64 MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands"); 65 66 static int psp_probe_timeout = 5; 67 module_param(psp_probe_timeout, int, 0644); 68 MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe"); 69 70 static char *init_ex_path; 71 module_param(init_ex_path, charp, 0444); 72 MODULE_PARM_DESC(init_ex_path, " Path for INIT_EX data; if set try INIT_EX"); 73 74 static bool psp_init_on_probe = true; 75 module_param(psp_init_on_probe, bool, 0444); 76 MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it"); 77 78 MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */ 79 MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */ 80 MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */ 81 MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */ 82 83 static bool psp_dead; 84 static int psp_timeout; 85 86 enum snp_hv_fixed_pages_state { 87 ALLOCATED, 88 HV_FIXED, 89 }; 90 91 struct snp_hv_fixed_pages_entry { 92 struct list_head list; 93 struct page *page; 94 unsigned int order; 95 bool free; 96 enum snp_hv_fixed_pages_state page_state; 97 }; 98 99 static LIST_HEAD(snp_hv_fixed_pages); 100 101 /* Trusted Memory Region (TMR): 102 * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator 103 * to allocate the memory, which will return aligned memory for the specified 104 * allocation order. 105 * 106 * When SEV-SNP is enabled the TMR needs to be 2MB aligned and 2MB sized. 107 */ 108 #define SEV_TMR_SIZE (1024 * 1024) 109 #define SNP_TMR_SIZE (2 * 1024 * 1024) 110 111 static void *sev_es_tmr; 112 static size_t sev_es_tmr_size = SEV_TMR_SIZE; 113 114 /* INIT_EX NV Storage: 115 * The NV Storage is a 32Kb area and must be 4Kb page aligned. Use the page 116 * allocator to allocate the memory, which will return aligned memory for the 117 * specified allocation order. 118 */ 119 #define NV_LENGTH (32 * 1024) 120 static void *sev_init_ex_buffer; 121 122 /* 123 * SEV_DATA_RANGE_LIST: 124 * Array containing range of pages that firmware transitions to HV-fixed 125 * page state. 126 */ 127 static struct sev_data_range_list *snp_range_list; 128 129 static void __sev_firmware_shutdown(struct sev_device *sev, bool panic); 130 131 static int snp_shutdown_on_panic(struct notifier_block *nb, 132 unsigned long reason, void *arg); 133 134 static struct notifier_block snp_panic_notifier = { 135 .notifier_call = snp_shutdown_on_panic, 136 }; 137 138 static inline bool sev_version_greater_or_equal(u8 maj, u8 min) 139 { 140 struct sev_device *sev = psp_master->sev_data; 141 142 if (sev->api_major > maj) 143 return true; 144 145 if (sev->api_major == maj && sev->api_minor >= min) 146 return true; 147 148 return false; 149 } 150 151 static void sev_irq_handler(int irq, void *data, unsigned int status) 152 { 153 struct sev_device *sev = data; 154 int reg; 155 156 /* Check if it is command completion: */ 157 if (!(status & SEV_CMD_COMPLETE)) 158 return; 159 160 /* Check if it is SEV command completion: */ 161 reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); 162 if (FIELD_GET(PSP_CMDRESP_RESP, reg)) { 163 sev->int_rcvd = 1; 164 wake_up(&sev->int_queue); 165 } 166 } 167 168 static int sev_wait_cmd_ioc(struct sev_device *sev, 169 unsigned int *reg, unsigned int timeout) 170 { 171 int ret; 172 173 /* 174 * If invoked during panic handling, local interrupts are disabled, 175 * so the PSP command completion interrupt can't be used. Poll for 176 * PSP command completion instead. 177 */ 178 if (irqs_disabled()) { 179 unsigned long timeout_usecs = (timeout * USEC_PER_SEC) / 10; 180 181 /* Poll for SEV command completion: */ 182 while (timeout_usecs--) { 183 *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); 184 if (*reg & PSP_CMDRESP_RESP) 185 return 0; 186 187 udelay(10); 188 } 189 return -ETIMEDOUT; 190 } 191 192 ret = wait_event_timeout(sev->int_queue, 193 sev->int_rcvd, timeout * HZ); 194 if (!ret) 195 return -ETIMEDOUT; 196 197 *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); 198 199 return 0; 200 } 201 202 static int sev_cmd_buffer_len(int cmd) 203 { 204 switch (cmd) { 205 case SEV_CMD_INIT: return sizeof(struct sev_data_init); 206 case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex); 207 case SEV_CMD_SNP_SHUTDOWN_EX: return sizeof(struct sev_data_snp_shutdown_ex); 208 case SEV_CMD_SNP_INIT_EX: return sizeof(struct sev_data_snp_init_ex); 209 case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status); 210 case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr); 211 case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import); 212 case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export); 213 case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start); 214 case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data); 215 case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa); 216 case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish); 217 case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure); 218 case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate); 219 case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate); 220 case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission); 221 case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status); 222 case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg); 223 case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg); 224 case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start); 225 case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data); 226 case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa); 227 case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish); 228 case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start); 229 case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish); 230 case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data); 231 case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa); 232 case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret); 233 case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware); 234 case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id); 235 case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report); 236 case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel); 237 case SEV_CMD_SNP_GCTX_CREATE: return sizeof(struct sev_data_snp_addr); 238 case SEV_CMD_SNP_LAUNCH_START: return sizeof(struct sev_data_snp_launch_start); 239 case SEV_CMD_SNP_LAUNCH_UPDATE: return sizeof(struct sev_data_snp_launch_update); 240 case SEV_CMD_SNP_ACTIVATE: return sizeof(struct sev_data_snp_activate); 241 case SEV_CMD_SNP_DECOMMISSION: return sizeof(struct sev_data_snp_addr); 242 case SEV_CMD_SNP_PAGE_RECLAIM: return sizeof(struct sev_data_snp_page_reclaim); 243 case SEV_CMD_SNP_GUEST_STATUS: return sizeof(struct sev_data_snp_guest_status); 244 case SEV_CMD_SNP_LAUNCH_FINISH: return sizeof(struct sev_data_snp_launch_finish); 245 case SEV_CMD_SNP_DBG_DECRYPT: return sizeof(struct sev_data_snp_dbg); 246 case SEV_CMD_SNP_DBG_ENCRYPT: return sizeof(struct sev_data_snp_dbg); 247 case SEV_CMD_SNP_PAGE_UNSMASH: return sizeof(struct sev_data_snp_page_unsmash); 248 case SEV_CMD_SNP_PLATFORM_STATUS: return sizeof(struct sev_data_snp_addr); 249 case SEV_CMD_SNP_GUEST_REQUEST: return sizeof(struct sev_data_snp_guest_request); 250 case SEV_CMD_SNP_CONFIG: return sizeof(struct sev_user_data_snp_config); 251 case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit); 252 default: return 0; 253 } 254 255 return 0; 256 } 257 258 static struct file *open_file_as_root(const char *filename, int flags, umode_t mode) 259 { 260 struct file *fp; 261 struct path root; 262 struct cred *cred; 263 const struct cred *old_cred; 264 265 task_lock(&init_task); 266 get_fs_root(init_task.fs, &root); 267 task_unlock(&init_task); 268 269 cred = prepare_creds(); 270 if (!cred) 271 return ERR_PTR(-ENOMEM); 272 cred->fsuid = GLOBAL_ROOT_UID; 273 old_cred = override_creds(cred); 274 275 fp = file_open_root(&root, filename, flags, mode); 276 path_put(&root); 277 278 put_cred(revert_creds(old_cred)); 279 280 return fp; 281 } 282 283 static int sev_read_init_ex_file(void) 284 { 285 struct sev_device *sev = psp_master->sev_data; 286 struct file *fp; 287 ssize_t nread; 288 289 lockdep_assert_held(&sev_cmd_mutex); 290 291 if (!sev_init_ex_buffer) 292 return -EOPNOTSUPP; 293 294 fp = open_file_as_root(init_ex_path, O_RDONLY, 0); 295 if (IS_ERR(fp)) { 296 int ret = PTR_ERR(fp); 297 298 if (ret == -ENOENT) { 299 dev_info(sev->dev, 300 "SEV: %s does not exist and will be created later.\n", 301 init_ex_path); 302 ret = 0; 303 } else { 304 dev_err(sev->dev, 305 "SEV: could not open %s for read, error %d\n", 306 init_ex_path, ret); 307 } 308 return ret; 309 } 310 311 nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL); 312 if (nread != NV_LENGTH) { 313 dev_info(sev->dev, 314 "SEV: could not read %u bytes to non volatile memory area, ret %ld\n", 315 NV_LENGTH, nread); 316 } 317 318 dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread); 319 filp_close(fp, NULL); 320 321 return 0; 322 } 323 324 static int sev_write_init_ex_file(void) 325 { 326 struct sev_device *sev = psp_master->sev_data; 327 struct file *fp; 328 loff_t offset = 0; 329 ssize_t nwrite; 330 331 lockdep_assert_held(&sev_cmd_mutex); 332 333 if (!sev_init_ex_buffer) 334 return 0; 335 336 fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600); 337 if (IS_ERR(fp)) { 338 int ret = PTR_ERR(fp); 339 340 dev_err(sev->dev, 341 "SEV: could not open file for write, error %d\n", 342 ret); 343 return ret; 344 } 345 346 nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset); 347 vfs_fsync(fp, 0); 348 filp_close(fp, NULL); 349 350 if (nwrite != NV_LENGTH) { 351 dev_err(sev->dev, 352 "SEV: failed to write %u bytes to non volatile memory area, ret %ld\n", 353 NV_LENGTH, nwrite); 354 return -EIO; 355 } 356 357 dev_dbg(sev->dev, "SEV: write successful to NV file\n"); 358 359 return 0; 360 } 361 362 static int sev_write_init_ex_file_if_required(int cmd_id) 363 { 364 lockdep_assert_held(&sev_cmd_mutex); 365 366 if (!sev_init_ex_buffer) 367 return 0; 368 369 /* 370 * Only a few platform commands modify the SPI/NV area, but none of the 371 * non-platform commands do. Only INIT(_EX), PLATFORM_RESET, PEK_GEN, 372 * PEK_CERT_IMPORT, and PDH_GEN do. 373 */ 374 switch (cmd_id) { 375 case SEV_CMD_FACTORY_RESET: 376 case SEV_CMD_INIT_EX: 377 case SEV_CMD_PDH_GEN: 378 case SEV_CMD_PEK_CERT_IMPORT: 379 case SEV_CMD_PEK_GEN: 380 break; 381 default: 382 return 0; 383 } 384 385 return sev_write_init_ex_file(); 386 } 387 388 /* 389 * snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked() 390 * needs snp_reclaim_pages(), so a forward declaration is needed. 391 */ 392 static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret); 393 394 static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked) 395 { 396 int ret, err, i; 397 398 paddr = __sme_clr(ALIGN_DOWN(paddr, PAGE_SIZE)); 399 400 for (i = 0; i < npages; i++, paddr += PAGE_SIZE) { 401 struct sev_data_snp_page_reclaim data = {0}; 402 403 data.paddr = paddr; 404 405 if (locked) 406 ret = __sev_do_cmd_locked(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err); 407 else 408 ret = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err); 409 410 if (ret) 411 goto cleanup; 412 413 ret = rmp_make_shared(__phys_to_pfn(paddr), PG_LEVEL_4K); 414 if (ret) 415 goto cleanup; 416 } 417 418 return 0; 419 420 cleanup: 421 /* 422 * If there was a failure reclaiming the page then it is no longer safe 423 * to release it back to the system; leak it instead. 424 */ 425 snp_leak_pages(__phys_to_pfn(paddr), npages - i); 426 return ret; 427 } 428 429 static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked) 430 { 431 unsigned long pfn = __sme_clr(paddr) >> PAGE_SHIFT; 432 int rc, i; 433 434 for (i = 0; i < npages; i++, pfn++) { 435 rc = rmp_make_private(pfn, 0, PG_LEVEL_4K, 0, true); 436 if (rc) 437 goto cleanup; 438 } 439 440 return 0; 441 442 cleanup: 443 /* 444 * Try unrolling the firmware state changes by 445 * reclaiming the pages which were already changed to the 446 * firmware state. 447 */ 448 snp_reclaim_pages(paddr, i, locked); 449 450 return rc; 451 } 452 453 static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order, bool locked) 454 { 455 unsigned long npages = 1ul << order, paddr; 456 struct sev_device *sev; 457 struct page *page; 458 459 if (!psp_master || !psp_master->sev_data) 460 return NULL; 461 462 page = alloc_pages(gfp_mask, order); 463 if (!page) 464 return NULL; 465 466 /* If SEV-SNP is initialized then add the page in RMP table. */ 467 sev = psp_master->sev_data; 468 if (!sev->snp_initialized) 469 return page; 470 471 paddr = __pa((unsigned long)page_address(page)); 472 if (rmp_mark_pages_firmware(paddr, npages, locked)) 473 return NULL; 474 475 return page; 476 } 477 478 void *snp_alloc_firmware_page(gfp_t gfp_mask) 479 { 480 struct page *page; 481 482 page = __snp_alloc_firmware_pages(gfp_mask, 0, false); 483 484 return page ? page_address(page) : NULL; 485 } 486 EXPORT_SYMBOL_GPL(snp_alloc_firmware_page); 487 488 static void __snp_free_firmware_pages(struct page *page, int order, bool locked) 489 { 490 struct sev_device *sev = psp_master->sev_data; 491 unsigned long paddr, npages = 1ul << order; 492 493 if (!page) 494 return; 495 496 paddr = __pa((unsigned long)page_address(page)); 497 if (sev->snp_initialized && 498 snp_reclaim_pages(paddr, npages, locked)) 499 return; 500 501 __free_pages(page, order); 502 } 503 504 void snp_free_firmware_page(void *addr) 505 { 506 if (!addr) 507 return; 508 509 __snp_free_firmware_pages(virt_to_page(addr), 0, false); 510 } 511 EXPORT_SYMBOL_GPL(snp_free_firmware_page); 512 513 static void *sev_fw_alloc(unsigned long len) 514 { 515 struct page *page; 516 517 page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len), true); 518 if (!page) 519 return NULL; 520 521 return page_address(page); 522 } 523 524 /** 525 * struct cmd_buf_desc - descriptors for managing legacy SEV command address 526 * parameters corresponding to buffers that may be written to by firmware. 527 * 528 * @paddr_ptr: pointer to the address parameter in the command buffer which may 529 * need to be saved/restored depending on whether a bounce buffer 530 * is used. In the case of a bounce buffer, the command buffer 531 * needs to be updated with the address of the new bounce buffer 532 * snp_map_cmd_buf_desc() has allocated specifically for it. Must 533 * be NULL if this descriptor is only an end-of-list indicator. 534 * 535 * @paddr_orig: storage for the original address parameter, which can be used to 536 * restore the original value in @paddr_ptr in cases where it is 537 * replaced with the address of a bounce buffer. 538 * 539 * @len: length of buffer located at the address originally stored at @paddr_ptr 540 * 541 * @guest_owned: true if the address corresponds to guest-owned pages, in which 542 * case bounce buffers are not needed. 543 */ 544 struct cmd_buf_desc { 545 u64 *paddr_ptr; 546 u64 paddr_orig; 547 u32 len; 548 bool guest_owned; 549 }; 550 551 /* 552 * If a legacy SEV command parameter is a memory address, those pages in 553 * turn need to be transitioned to/from firmware-owned before/after 554 * executing the firmware command. 555 * 556 * Additionally, in cases where those pages are not guest-owned, a bounce 557 * buffer is needed in place of the original memory address parameter. 558 * 559 * A set of descriptors are used to keep track of this handling, and 560 * initialized here based on the specific commands being executed. 561 */ 562 static void snp_populate_cmd_buf_desc_list(int cmd, void *cmd_buf, 563 struct cmd_buf_desc *desc_list) 564 { 565 switch (cmd) { 566 case SEV_CMD_PDH_CERT_EXPORT: { 567 struct sev_data_pdh_cert_export *data = cmd_buf; 568 569 desc_list[0].paddr_ptr = &data->pdh_cert_address; 570 desc_list[0].len = data->pdh_cert_len; 571 desc_list[1].paddr_ptr = &data->cert_chain_address; 572 desc_list[1].len = data->cert_chain_len; 573 break; 574 } 575 case SEV_CMD_GET_ID: { 576 struct sev_data_get_id *data = cmd_buf; 577 578 desc_list[0].paddr_ptr = &data->address; 579 desc_list[0].len = data->len; 580 break; 581 } 582 case SEV_CMD_PEK_CSR: { 583 struct sev_data_pek_csr *data = cmd_buf; 584 585 desc_list[0].paddr_ptr = &data->address; 586 desc_list[0].len = data->len; 587 break; 588 } 589 case SEV_CMD_LAUNCH_UPDATE_DATA: { 590 struct sev_data_launch_update_data *data = cmd_buf; 591 592 desc_list[0].paddr_ptr = &data->address; 593 desc_list[0].len = data->len; 594 desc_list[0].guest_owned = true; 595 break; 596 } 597 case SEV_CMD_LAUNCH_UPDATE_VMSA: { 598 struct sev_data_launch_update_vmsa *data = cmd_buf; 599 600 desc_list[0].paddr_ptr = &data->address; 601 desc_list[0].len = data->len; 602 desc_list[0].guest_owned = true; 603 break; 604 } 605 case SEV_CMD_LAUNCH_MEASURE: { 606 struct sev_data_launch_measure *data = cmd_buf; 607 608 desc_list[0].paddr_ptr = &data->address; 609 desc_list[0].len = data->len; 610 break; 611 } 612 case SEV_CMD_LAUNCH_UPDATE_SECRET: { 613 struct sev_data_launch_secret *data = cmd_buf; 614 615 desc_list[0].paddr_ptr = &data->guest_address; 616 desc_list[0].len = data->guest_len; 617 desc_list[0].guest_owned = true; 618 break; 619 } 620 case SEV_CMD_DBG_DECRYPT: { 621 struct sev_data_dbg *data = cmd_buf; 622 623 desc_list[0].paddr_ptr = &data->dst_addr; 624 desc_list[0].len = data->len; 625 desc_list[0].guest_owned = true; 626 break; 627 } 628 case SEV_CMD_DBG_ENCRYPT: { 629 struct sev_data_dbg *data = cmd_buf; 630 631 desc_list[0].paddr_ptr = &data->dst_addr; 632 desc_list[0].len = data->len; 633 desc_list[0].guest_owned = true; 634 break; 635 } 636 case SEV_CMD_ATTESTATION_REPORT: { 637 struct sev_data_attestation_report *data = cmd_buf; 638 639 desc_list[0].paddr_ptr = &data->address; 640 desc_list[0].len = data->len; 641 break; 642 } 643 case SEV_CMD_SEND_START: { 644 struct sev_data_send_start *data = cmd_buf; 645 646 desc_list[0].paddr_ptr = &data->session_address; 647 desc_list[0].len = data->session_len; 648 break; 649 } 650 case SEV_CMD_SEND_UPDATE_DATA: { 651 struct sev_data_send_update_data *data = cmd_buf; 652 653 desc_list[0].paddr_ptr = &data->hdr_address; 654 desc_list[0].len = data->hdr_len; 655 desc_list[1].paddr_ptr = &data->trans_address; 656 desc_list[1].len = data->trans_len; 657 break; 658 } 659 case SEV_CMD_SEND_UPDATE_VMSA: { 660 struct sev_data_send_update_vmsa *data = cmd_buf; 661 662 desc_list[0].paddr_ptr = &data->hdr_address; 663 desc_list[0].len = data->hdr_len; 664 desc_list[1].paddr_ptr = &data->trans_address; 665 desc_list[1].len = data->trans_len; 666 break; 667 } 668 case SEV_CMD_RECEIVE_UPDATE_DATA: { 669 struct sev_data_receive_update_data *data = cmd_buf; 670 671 desc_list[0].paddr_ptr = &data->guest_address; 672 desc_list[0].len = data->guest_len; 673 desc_list[0].guest_owned = true; 674 break; 675 } 676 case SEV_CMD_RECEIVE_UPDATE_VMSA: { 677 struct sev_data_receive_update_vmsa *data = cmd_buf; 678 679 desc_list[0].paddr_ptr = &data->guest_address; 680 desc_list[0].len = data->guest_len; 681 desc_list[0].guest_owned = true; 682 break; 683 } 684 default: 685 break; 686 } 687 } 688 689 static int snp_map_cmd_buf_desc(struct cmd_buf_desc *desc) 690 { 691 unsigned int npages; 692 693 if (!desc->len) 694 return 0; 695 696 /* Allocate a bounce buffer if this isn't a guest owned page. */ 697 if (!desc->guest_owned) { 698 struct page *page; 699 700 page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(desc->len)); 701 if (!page) { 702 pr_warn("Failed to allocate bounce buffer for SEV legacy command.\n"); 703 return -ENOMEM; 704 } 705 706 desc->paddr_orig = *desc->paddr_ptr; 707 *desc->paddr_ptr = __psp_pa(page_to_virt(page)); 708 } 709 710 npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT; 711 712 /* Transition the buffer to firmware-owned. */ 713 if (rmp_mark_pages_firmware(*desc->paddr_ptr, npages, true)) { 714 pr_warn("Error moving pages to firmware-owned state for SEV legacy command.\n"); 715 return -EFAULT; 716 } 717 718 return 0; 719 } 720 721 static int snp_unmap_cmd_buf_desc(struct cmd_buf_desc *desc) 722 { 723 unsigned int npages; 724 725 if (!desc->len) 726 return 0; 727 728 npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT; 729 730 /* Transition the buffers back to hypervisor-owned. */ 731 if (snp_reclaim_pages(*desc->paddr_ptr, npages, true)) { 732 pr_warn("Failed to reclaim firmware-owned pages while issuing SEV legacy command.\n"); 733 return -EFAULT; 734 } 735 736 /* Copy data from bounce buffer and then free it. */ 737 if (!desc->guest_owned) { 738 void *bounce_buf = __va(__sme_clr(*desc->paddr_ptr)); 739 void *dst_buf = __va(__sme_clr(desc->paddr_orig)); 740 741 memcpy(dst_buf, bounce_buf, desc->len); 742 __free_pages(virt_to_page(bounce_buf), get_order(desc->len)); 743 744 /* Restore the original address in the command buffer. */ 745 *desc->paddr_ptr = desc->paddr_orig; 746 } 747 748 return 0; 749 } 750 751 static int snp_map_cmd_buf_desc_list(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list) 752 { 753 int i; 754 755 snp_populate_cmd_buf_desc_list(cmd, cmd_buf, desc_list); 756 757 for (i = 0; i < CMD_BUF_DESC_MAX; i++) { 758 struct cmd_buf_desc *desc = &desc_list[i]; 759 760 if (!desc->paddr_ptr) 761 break; 762 763 if (snp_map_cmd_buf_desc(desc)) 764 goto err_unmap; 765 } 766 767 return 0; 768 769 err_unmap: 770 for (i--; i >= 0; i--) 771 snp_unmap_cmd_buf_desc(&desc_list[i]); 772 773 return -EFAULT; 774 } 775 776 static int snp_unmap_cmd_buf_desc_list(struct cmd_buf_desc *desc_list) 777 { 778 int i, ret = 0; 779 780 for (i = 0; i < CMD_BUF_DESC_MAX; i++) { 781 struct cmd_buf_desc *desc = &desc_list[i]; 782 783 if (!desc->paddr_ptr) 784 break; 785 786 if (snp_unmap_cmd_buf_desc(&desc_list[i])) 787 ret = -EFAULT; 788 } 789 790 return ret; 791 } 792 793 static bool sev_cmd_buf_writable(int cmd) 794 { 795 switch (cmd) { 796 case SEV_CMD_PLATFORM_STATUS: 797 case SEV_CMD_GUEST_STATUS: 798 case SEV_CMD_LAUNCH_START: 799 case SEV_CMD_RECEIVE_START: 800 case SEV_CMD_LAUNCH_MEASURE: 801 case SEV_CMD_SEND_START: 802 case SEV_CMD_SEND_UPDATE_DATA: 803 case SEV_CMD_SEND_UPDATE_VMSA: 804 case SEV_CMD_PEK_CSR: 805 case SEV_CMD_PDH_CERT_EXPORT: 806 case SEV_CMD_GET_ID: 807 case SEV_CMD_ATTESTATION_REPORT: 808 return true; 809 default: 810 return false; 811 } 812 } 813 814 /* After SNP is INIT'ed, the behavior of legacy SEV commands is changed. */ 815 static bool snp_legacy_handling_needed(int cmd) 816 { 817 struct sev_device *sev = psp_master->sev_data; 818 819 return cmd < SEV_CMD_SNP_INIT && sev->snp_initialized; 820 } 821 822 static int snp_prep_cmd_buf(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list) 823 { 824 if (!snp_legacy_handling_needed(cmd)) 825 return 0; 826 827 if (snp_map_cmd_buf_desc_list(cmd, cmd_buf, desc_list)) 828 return -EFAULT; 829 830 /* 831 * Before command execution, the command buffer needs to be put into 832 * the firmware-owned state. 833 */ 834 if (sev_cmd_buf_writable(cmd)) { 835 if (rmp_mark_pages_firmware(__pa(cmd_buf), 1, true)) 836 return -EFAULT; 837 } 838 839 return 0; 840 } 841 842 static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf) 843 { 844 if (!snp_legacy_handling_needed(cmd)) 845 return 0; 846 847 /* 848 * After command completion, the command buffer needs to be put back 849 * into the hypervisor-owned state. 850 */ 851 if (sev_cmd_buf_writable(cmd)) 852 if (snp_reclaim_pages(__pa(cmd_buf), 1, true)) 853 return -EFAULT; 854 855 return 0; 856 } 857 858 static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) 859 { 860 struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0}; 861 struct psp_device *psp = psp_master; 862 struct sev_device *sev; 863 unsigned int cmdbuff_hi, cmdbuff_lo; 864 unsigned int phys_lsb, phys_msb; 865 unsigned int reg, ret = 0; 866 void *cmd_buf; 867 int buf_len; 868 869 if (!psp || !psp->sev_data) 870 return -ENODEV; 871 872 if (psp_dead) 873 return -EBUSY; 874 875 sev = psp->sev_data; 876 877 buf_len = sev_cmd_buffer_len(cmd); 878 if (WARN_ON_ONCE(!data != !buf_len)) 879 return -EINVAL; 880 881 /* 882 * Copy the incoming data to driver's scratch buffer as __pa() will not 883 * work for some memory, e.g. vmalloc'd addresses, and @data may not be 884 * physically contiguous. 885 */ 886 if (data) { 887 /* 888 * Commands are generally issued one at a time and require the 889 * sev_cmd_mutex, but there could be recursive firmware requests 890 * due to SEV_CMD_SNP_PAGE_RECLAIM needing to be issued while 891 * preparing buffers for another command. This is the only known 892 * case of nesting in the current code, so exactly one 893 * additional command buffer is available for that purpose. 894 */ 895 if (!sev->cmd_buf_active) { 896 cmd_buf = sev->cmd_buf; 897 sev->cmd_buf_active = true; 898 } else if (!sev->cmd_buf_backup_active) { 899 cmd_buf = sev->cmd_buf_backup; 900 sev->cmd_buf_backup_active = true; 901 } else { 902 dev_err(sev->dev, 903 "SEV: too many firmware commands in progress, no command buffers available.\n"); 904 return -EBUSY; 905 } 906 907 memcpy(cmd_buf, data, buf_len); 908 909 /* 910 * The behavior of the SEV-legacy commands is altered when the 911 * SNP firmware is in the INIT state. 912 */ 913 ret = snp_prep_cmd_buf(cmd, cmd_buf, desc_list); 914 if (ret) { 915 dev_err(sev->dev, 916 "SEV: failed to prepare buffer for legacy command 0x%x. Error: %d\n", 917 cmd, ret); 918 return ret; 919 } 920 } else { 921 cmd_buf = sev->cmd_buf; 922 } 923 924 /* Get the physical address of the command buffer */ 925 phys_lsb = data ? lower_32_bits(__psp_pa(cmd_buf)) : 0; 926 phys_msb = data ? upper_32_bits(__psp_pa(cmd_buf)) : 0; 927 928 dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", 929 cmd, phys_msb, phys_lsb, psp_timeout); 930 931 print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, 932 buf_len, false); 933 934 iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); 935 iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); 936 937 sev->int_rcvd = 0; 938 939 reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd); 940 941 /* 942 * If invoked during panic handling, local interrupts are disabled so 943 * the PSP command completion interrupt can't be used. 944 * sev_wait_cmd_ioc() already checks for interrupts disabled and 945 * polls for PSP command completion. Ensure we do not request an 946 * interrupt from the PSP if irqs disabled. 947 */ 948 if (!irqs_disabled()) 949 reg |= SEV_CMDRESP_IOC; 950 951 iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); 952 953 /* wait for command completion */ 954 ret = sev_wait_cmd_ioc(sev, ®, psp_timeout); 955 if (ret) { 956 if (psp_ret) 957 *psp_ret = 0; 958 959 dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); 960 psp_dead = true; 961 962 return ret; 963 } 964 965 psp_timeout = psp_cmd_timeout; 966 967 if (psp_ret) 968 *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); 969 970 if (FIELD_GET(PSP_CMDRESP_STS, reg)) { 971 dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", 972 cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); 973 974 /* 975 * PSP firmware may report additional error information in the 976 * command buffer registers on error. Print contents of command 977 * buffer registers if they changed. 978 */ 979 cmdbuff_hi = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); 980 cmdbuff_lo = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); 981 if (cmdbuff_hi != phys_msb || cmdbuff_lo != phys_lsb) { 982 dev_dbg(sev->dev, "Additional error information reported in cmdbuff:"); 983 dev_dbg(sev->dev, " cmdbuff hi: %#010x\n", cmdbuff_hi); 984 dev_dbg(sev->dev, " cmdbuff lo: %#010x\n", cmdbuff_lo); 985 } 986 ret = -EIO; 987 } else { 988 ret = sev_write_init_ex_file_if_required(cmd); 989 } 990 991 /* 992 * Copy potential output from the PSP back to data. Do this even on 993 * failure in case the caller wants to glean something from the error. 994 */ 995 if (data) { 996 int ret_reclaim; 997 /* 998 * Restore the page state after the command completes. 999 */ 1000 ret_reclaim = snp_reclaim_cmd_buf(cmd, cmd_buf); 1001 if (ret_reclaim) { 1002 dev_err(sev->dev, 1003 "SEV: failed to reclaim buffer for legacy command %#x. Error: %d\n", 1004 cmd, ret_reclaim); 1005 return ret_reclaim; 1006 } 1007 1008 memcpy(data, cmd_buf, buf_len); 1009 1010 if (sev->cmd_buf_backup_active) 1011 sev->cmd_buf_backup_active = false; 1012 else 1013 sev->cmd_buf_active = false; 1014 1015 if (snp_unmap_cmd_buf_desc_list(desc_list)) 1016 return -EFAULT; 1017 } 1018 1019 print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, 1020 buf_len, false); 1021 1022 return ret; 1023 } 1024 1025 int sev_do_cmd(int cmd, void *data, int *psp_ret) 1026 { 1027 int rc; 1028 1029 mutex_lock(&sev_cmd_mutex); 1030 rc = __sev_do_cmd_locked(cmd, data, psp_ret); 1031 mutex_unlock(&sev_cmd_mutex); 1032 1033 return rc; 1034 } 1035 EXPORT_SYMBOL_GPL(sev_do_cmd); 1036 1037 static int __sev_init_locked(int *error) 1038 { 1039 struct sev_data_init data; 1040 1041 memset(&data, 0, sizeof(data)); 1042 if (sev_es_tmr) { 1043 /* 1044 * Do not include the encryption mask on the physical 1045 * address of the TMR (firmware should clear it anyway). 1046 */ 1047 data.tmr_address = __pa(sev_es_tmr); 1048 1049 data.flags |= SEV_INIT_FLAGS_SEV_ES; 1050 data.tmr_len = sev_es_tmr_size; 1051 } 1052 1053 return __sev_do_cmd_locked(SEV_CMD_INIT, &data, error); 1054 } 1055 1056 static int __sev_init_ex_locked(int *error) 1057 { 1058 struct sev_data_init_ex data; 1059 1060 memset(&data, 0, sizeof(data)); 1061 data.length = sizeof(data); 1062 data.nv_address = __psp_pa(sev_init_ex_buffer); 1063 data.nv_len = NV_LENGTH; 1064 1065 if (sev_es_tmr) { 1066 /* 1067 * Do not include the encryption mask on the physical 1068 * address of the TMR (firmware should clear it anyway). 1069 */ 1070 data.tmr_address = __pa(sev_es_tmr); 1071 1072 data.flags |= SEV_INIT_FLAGS_SEV_ES; 1073 data.tmr_len = sev_es_tmr_size; 1074 } 1075 1076 return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error); 1077 } 1078 1079 static inline int __sev_do_init_locked(int *psp_ret) 1080 { 1081 if (sev_init_ex_buffer) 1082 return __sev_init_ex_locked(psp_ret); 1083 else 1084 return __sev_init_locked(psp_ret); 1085 } 1086 1087 static void snp_set_hsave_pa(void *arg) 1088 { 1089 wrmsrq(MSR_VM_HSAVE_PA, 0); 1090 } 1091 1092 /* Hypervisor Fixed pages API interface */ 1093 static void snp_hv_fixed_pages_state_update(struct sev_device *sev, 1094 enum snp_hv_fixed_pages_state page_state) 1095 { 1096 struct snp_hv_fixed_pages_entry *entry; 1097 1098 /* List is protected by sev_cmd_mutex */ 1099 lockdep_assert_held(&sev_cmd_mutex); 1100 1101 if (list_empty(&snp_hv_fixed_pages)) 1102 return; 1103 1104 list_for_each_entry(entry, &snp_hv_fixed_pages, list) 1105 entry->page_state = page_state; 1106 } 1107 1108 /* 1109 * Allocate HV_FIXED pages in 2MB aligned sizes to ensure the whole 1110 * 2MB pages are marked as HV_FIXED. 1111 */ 1112 struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages) 1113 { 1114 struct psp_device *psp_master = psp_get_master_device(); 1115 struct snp_hv_fixed_pages_entry *entry; 1116 struct sev_device *sev; 1117 unsigned int order; 1118 struct page *page; 1119 1120 if (!psp_master || !psp_master->sev_data) 1121 return NULL; 1122 1123 sev = psp_master->sev_data; 1124 1125 order = get_order(PMD_SIZE * num_2mb_pages); 1126 1127 /* 1128 * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list 1129 * also needs to be protected using the same mutex. 1130 */ 1131 guard(mutex)(&sev_cmd_mutex); 1132 1133 /* 1134 * This API uses SNP_INIT_EX to transition allocated pages to HV_Fixed 1135 * page state, fail if SNP is already initialized. 1136 */ 1137 if (sev->snp_initialized) 1138 return NULL; 1139 1140 /* Re-use freed pages that match the request */ 1141 list_for_each_entry(entry, &snp_hv_fixed_pages, list) { 1142 /* Hypervisor fixed page allocator implements exact fit policy */ 1143 if (entry->order == order && entry->free) { 1144 entry->free = false; 1145 memset(page_address(entry->page), 0, 1146 (1 << entry->order) * PAGE_SIZE); 1147 return entry->page; 1148 } 1149 } 1150 1151 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); 1152 if (!page) 1153 return NULL; 1154 1155 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1156 if (!entry) { 1157 __free_pages(page, order); 1158 return NULL; 1159 } 1160 1161 entry->page = page; 1162 entry->order = order; 1163 list_add_tail(&entry->list, &snp_hv_fixed_pages); 1164 1165 return page; 1166 } 1167 1168 void snp_free_hv_fixed_pages(struct page *page) 1169 { 1170 struct psp_device *psp_master = psp_get_master_device(); 1171 struct snp_hv_fixed_pages_entry *entry, *nentry; 1172 1173 if (!psp_master || !psp_master->sev_data) 1174 return; 1175 1176 /* 1177 * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list 1178 * also needs to be protected using the same mutex. 1179 */ 1180 guard(mutex)(&sev_cmd_mutex); 1181 1182 list_for_each_entry_safe(entry, nentry, &snp_hv_fixed_pages, list) { 1183 if (entry->page != page) 1184 continue; 1185 1186 /* 1187 * HV_FIXED page state cannot be changed until reboot 1188 * and they cannot be used by an SNP guest, so they cannot 1189 * be returned back to the page allocator. 1190 * Mark the pages as free internally to allow possible re-use. 1191 */ 1192 if (entry->page_state == HV_FIXED) { 1193 entry->free = true; 1194 } else { 1195 __free_pages(page, entry->order); 1196 list_del(&entry->list); 1197 kfree(entry); 1198 } 1199 return; 1200 } 1201 } 1202 1203 static void snp_add_hv_fixed_pages(struct sev_device *sev, struct sev_data_range_list *range_list) 1204 { 1205 struct snp_hv_fixed_pages_entry *entry; 1206 struct sev_data_range *range; 1207 int num_elements; 1208 1209 lockdep_assert_held(&sev_cmd_mutex); 1210 1211 if (list_empty(&snp_hv_fixed_pages)) 1212 return; 1213 1214 num_elements = list_count_nodes(&snp_hv_fixed_pages) + 1215 range_list->num_elements; 1216 1217 /* 1218 * Ensure the list of HV_FIXED pages that will be passed to firmware 1219 * do not exceed the page-sized argument buffer. 1220 */ 1221 if (num_elements * sizeof(*range) + sizeof(*range_list) > PAGE_SIZE) { 1222 dev_warn(sev->dev, "Additional HV_Fixed pages cannot be accommodated, omitting\n"); 1223 return; 1224 } 1225 1226 range = &range_list->ranges[range_list->num_elements]; 1227 list_for_each_entry(entry, &snp_hv_fixed_pages, list) { 1228 range->base = page_to_pfn(entry->page) << PAGE_SHIFT; 1229 range->page_count = 1 << entry->order; 1230 range++; 1231 } 1232 range_list->num_elements = num_elements; 1233 } 1234 1235 static void snp_leak_hv_fixed_pages(void) 1236 { 1237 struct snp_hv_fixed_pages_entry *entry; 1238 1239 /* List is protected by sev_cmd_mutex */ 1240 lockdep_assert_held(&sev_cmd_mutex); 1241 1242 if (list_empty(&snp_hv_fixed_pages)) 1243 return; 1244 1245 list_for_each_entry(entry, &snp_hv_fixed_pages, list) 1246 if (entry->page_state == HV_FIXED) 1247 __snp_leak_pages(page_to_pfn(entry->page), 1248 1 << entry->order, false); 1249 } 1250 1251 static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg) 1252 { 1253 struct sev_data_range_list *range_list = arg; 1254 struct sev_data_range *range = &range_list->ranges[range_list->num_elements]; 1255 size_t size; 1256 1257 /* 1258 * Ensure the list of HV_FIXED pages that will be passed to firmware 1259 * do not exceed the page-sized argument buffer. 1260 */ 1261 if ((range_list->num_elements * sizeof(struct sev_data_range) + 1262 sizeof(struct sev_data_range_list)) > PAGE_SIZE) 1263 return -E2BIG; 1264 1265 switch (rs->desc) { 1266 case E820_TYPE_RESERVED: 1267 case E820_TYPE_PMEM: 1268 case E820_TYPE_ACPI: 1269 range->base = rs->start & PAGE_MASK; 1270 size = PAGE_ALIGN((rs->end + 1) - rs->start); 1271 range->page_count = size >> PAGE_SHIFT; 1272 range_list->num_elements++; 1273 break; 1274 default: 1275 break; 1276 } 1277 1278 return 0; 1279 } 1280 1281 static int __sev_snp_init_locked(int *error) 1282 { 1283 struct psp_device *psp = psp_master; 1284 struct sev_data_snp_init_ex data; 1285 struct sev_device *sev; 1286 void *arg = &data; 1287 int cmd, rc = 0; 1288 1289 if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP)) 1290 return -ENODEV; 1291 1292 sev = psp->sev_data; 1293 1294 if (sev->snp_initialized) 1295 return 0; 1296 1297 if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) { 1298 dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n", 1299 SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR); 1300 return -EOPNOTSUPP; 1301 } 1302 1303 /* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */ 1304 on_each_cpu(snp_set_hsave_pa, NULL, 1); 1305 1306 /* 1307 * Starting in SNP firmware v1.52, the SNP_INIT_EX command takes a list 1308 * of system physical address ranges to convert into HV-fixed page 1309 * states during the RMP initialization. For instance, the memory that 1310 * UEFI reserves should be included in the that list. This allows system 1311 * components that occasionally write to memory (e.g. logging to UEFI 1312 * reserved regions) to not fail due to RMP initialization and SNP 1313 * enablement. 1314 * 1315 */ 1316 if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) { 1317 /* 1318 * Firmware checks that the pages containing the ranges enumerated 1319 * in the RANGES structure are either in the default page state or in the 1320 * firmware page state. 1321 */ 1322 snp_range_list = kzalloc(PAGE_SIZE, GFP_KERNEL); 1323 if (!snp_range_list) { 1324 dev_err(sev->dev, 1325 "SEV: SNP_INIT_EX range list memory allocation failed\n"); 1326 return -ENOMEM; 1327 } 1328 1329 /* 1330 * Retrieve all reserved memory regions from the e820 memory map 1331 * to be setup as HV-fixed pages. 1332 */ 1333 rc = walk_iomem_res_desc(IORES_DESC_NONE, IORESOURCE_MEM, 0, ~0, 1334 snp_range_list, snp_filter_reserved_mem_regions); 1335 if (rc) { 1336 dev_err(sev->dev, 1337 "SEV: SNP_INIT_EX walk_iomem_res_desc failed rc = %d\n", rc); 1338 return rc; 1339 } 1340 1341 /* 1342 * Add HV_Fixed pages from other PSP sub-devices, such as SFS to the 1343 * HV_Fixed page list. 1344 */ 1345 snp_add_hv_fixed_pages(sev, snp_range_list); 1346 1347 memset(&data, 0, sizeof(data)); 1348 data.init_rmp = 1; 1349 data.list_paddr_en = 1; 1350 data.list_paddr = __psp_pa(snp_range_list); 1351 cmd = SEV_CMD_SNP_INIT_EX; 1352 } else { 1353 cmd = SEV_CMD_SNP_INIT; 1354 arg = NULL; 1355 } 1356 1357 /* 1358 * The following sequence must be issued before launching the first SNP 1359 * guest to ensure all dirty cache lines are flushed, including from 1360 * updates to the RMP table itself via the RMPUPDATE instruction: 1361 * 1362 * - WBINVD on all running CPUs 1363 * - SEV_CMD_SNP_INIT[_EX] firmware command 1364 * - WBINVD on all running CPUs 1365 * - SEV_CMD_SNP_DF_FLUSH firmware command 1366 */ 1367 wbinvd_on_all_cpus(); 1368 1369 rc = __sev_do_cmd_locked(cmd, arg, error); 1370 if (rc) { 1371 dev_err(sev->dev, "SEV-SNP: %s failed rc %d, error %#x\n", 1372 cmd == SEV_CMD_SNP_INIT_EX ? "SNP_INIT_EX" : "SNP_INIT", 1373 rc, *error); 1374 return rc; 1375 } 1376 1377 /* Prepare for first SNP guest launch after INIT. */ 1378 wbinvd_on_all_cpus(); 1379 rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error); 1380 if (rc) { 1381 dev_err(sev->dev, "SEV-SNP: SNP_DF_FLUSH failed rc %d, error %#x\n", 1382 rc, *error); 1383 return rc; 1384 } 1385 1386 snp_hv_fixed_pages_state_update(sev, HV_FIXED); 1387 sev->snp_initialized = true; 1388 dev_dbg(sev->dev, "SEV-SNP firmware initialized\n"); 1389 1390 dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major, 1391 sev->api_minor, sev->build); 1392 1393 atomic_notifier_chain_register(&panic_notifier_list, 1394 &snp_panic_notifier); 1395 1396 sev_es_tmr_size = SNP_TMR_SIZE; 1397 1398 return 0; 1399 } 1400 1401 static void __sev_platform_init_handle_tmr(struct sev_device *sev) 1402 { 1403 if (sev_es_tmr) 1404 return; 1405 1406 /* Obtain the TMR memory area for SEV-ES use */ 1407 sev_es_tmr = sev_fw_alloc(sev_es_tmr_size); 1408 if (sev_es_tmr) { 1409 /* Must flush the cache before giving it to the firmware */ 1410 if (!sev->snp_initialized) 1411 clflush_cache_range(sev_es_tmr, sev_es_tmr_size); 1412 } else { 1413 dev_warn(sev->dev, "SEV: TMR allocation failed, SEV-ES support unavailable\n"); 1414 } 1415 } 1416 1417 /* 1418 * If an init_ex_path is provided allocate a buffer for the file and 1419 * read in the contents. Additionally, if SNP is initialized, convert 1420 * the buffer pages to firmware pages. 1421 */ 1422 static int __sev_platform_init_handle_init_ex_path(struct sev_device *sev) 1423 { 1424 struct page *page; 1425 int rc; 1426 1427 if (!init_ex_path) 1428 return 0; 1429 1430 if (sev_init_ex_buffer) 1431 return 0; 1432 1433 page = alloc_pages(GFP_KERNEL, get_order(NV_LENGTH)); 1434 if (!page) { 1435 dev_err(sev->dev, "SEV: INIT_EX NV memory allocation failed\n"); 1436 return -ENOMEM; 1437 } 1438 1439 sev_init_ex_buffer = page_address(page); 1440 1441 rc = sev_read_init_ex_file(); 1442 if (rc) 1443 return rc; 1444 1445 /* If SEV-SNP is initialized, transition to firmware page. */ 1446 if (sev->snp_initialized) { 1447 unsigned long npages; 1448 1449 npages = 1UL << get_order(NV_LENGTH); 1450 if (rmp_mark_pages_firmware(__pa(sev_init_ex_buffer), npages, false)) { 1451 dev_err(sev->dev, "SEV: INIT_EX NV memory page state change failed.\n"); 1452 return -ENOMEM; 1453 } 1454 } 1455 1456 return 0; 1457 } 1458 1459 static int __sev_platform_init_locked(int *error) 1460 { 1461 int rc, psp_ret, dfflush_error; 1462 struct sev_device *sev; 1463 1464 psp_ret = dfflush_error = SEV_RET_NO_FW_CALL; 1465 1466 if (!psp_master || !psp_master->sev_data) 1467 return -ENODEV; 1468 1469 sev = psp_master->sev_data; 1470 1471 if (sev->state == SEV_STATE_INIT) 1472 return 0; 1473 1474 __sev_platform_init_handle_tmr(sev); 1475 1476 rc = __sev_platform_init_handle_init_ex_path(sev); 1477 if (rc) 1478 return rc; 1479 1480 rc = __sev_do_init_locked(&psp_ret); 1481 if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) { 1482 /* 1483 * Initialization command returned an integrity check failure 1484 * status code, meaning that firmware load and validation of SEV 1485 * related persistent data has failed. Retrying the 1486 * initialization function should succeed by replacing the state 1487 * with a reset state. 1488 */ 1489 dev_err(sev->dev, 1490 "SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state."); 1491 rc = __sev_do_init_locked(&psp_ret); 1492 } 1493 1494 if (error) 1495 *error = psp_ret; 1496 1497 if (rc) { 1498 dev_err(sev->dev, "SEV: %s failed %#x, rc %d\n", 1499 sev_init_ex_buffer ? "INIT_EX" : "INIT", psp_ret, rc); 1500 return rc; 1501 } 1502 1503 sev->state = SEV_STATE_INIT; 1504 1505 /* Prepare for first SEV guest launch after INIT */ 1506 wbinvd_on_all_cpus(); 1507 rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, &dfflush_error); 1508 if (rc) { 1509 dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n", 1510 dfflush_error, rc); 1511 return rc; 1512 } 1513 1514 dev_dbg(sev->dev, "SEV firmware initialized\n"); 1515 1516 dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major, 1517 sev->api_minor, sev->build); 1518 1519 return 0; 1520 } 1521 1522 static int _sev_platform_init_locked(struct sev_platform_init_args *args) 1523 { 1524 struct sev_device *sev; 1525 int rc; 1526 1527 if (!psp_master || !psp_master->sev_data) 1528 return -ENODEV; 1529 1530 /* 1531 * Skip SNP/SEV initialization under a kdump kernel as SEV/SNP 1532 * may already be initialized in the previous kernel. Since no 1533 * SNP/SEV guests are run under a kdump kernel, there is no 1534 * need to initialize SNP or SEV during kdump boot. 1535 */ 1536 if (is_kdump_kernel()) 1537 return 0; 1538 1539 sev = psp_master->sev_data; 1540 1541 if (sev->state == SEV_STATE_INIT) 1542 return 0; 1543 1544 rc = __sev_snp_init_locked(&args->error); 1545 if (rc && rc != -ENODEV) 1546 return rc; 1547 1548 /* Defer legacy SEV/SEV-ES support if allowed by caller/module. */ 1549 if (args->probe && !psp_init_on_probe) 1550 return 0; 1551 1552 return __sev_platform_init_locked(&args->error); 1553 } 1554 1555 int sev_platform_init(struct sev_platform_init_args *args) 1556 { 1557 int rc; 1558 1559 mutex_lock(&sev_cmd_mutex); 1560 rc = _sev_platform_init_locked(args); 1561 mutex_unlock(&sev_cmd_mutex); 1562 1563 return rc; 1564 } 1565 EXPORT_SYMBOL_GPL(sev_platform_init); 1566 1567 static int __sev_platform_shutdown_locked(int *error) 1568 { 1569 struct psp_device *psp = psp_master; 1570 struct sev_device *sev; 1571 int ret; 1572 1573 if (!psp || !psp->sev_data) 1574 return 0; 1575 1576 sev = psp->sev_data; 1577 1578 if (sev->state == SEV_STATE_UNINIT) 1579 return 0; 1580 1581 ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); 1582 if (ret) { 1583 dev_err(sev->dev, "SEV: failed to SHUTDOWN error %#x, rc %d\n", 1584 *error, ret); 1585 return ret; 1586 } 1587 1588 sev->state = SEV_STATE_UNINIT; 1589 dev_dbg(sev->dev, "SEV firmware shutdown\n"); 1590 1591 return ret; 1592 } 1593 1594 static int sev_get_platform_state(int *state, int *error) 1595 { 1596 struct sev_user_data_status data; 1597 int rc; 1598 1599 rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, error); 1600 if (rc) 1601 return rc; 1602 1603 *state = data.state; 1604 return rc; 1605 } 1606 1607 static int sev_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required) 1608 { 1609 struct sev_platform_init_args init_args = {0}; 1610 int rc; 1611 1612 rc = _sev_platform_init_locked(&init_args); 1613 if (rc) { 1614 argp->error = SEV_RET_INVALID_PLATFORM_STATE; 1615 return rc; 1616 } 1617 1618 *shutdown_required = true; 1619 1620 return 0; 1621 } 1622 1623 static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required) 1624 { 1625 int error, rc; 1626 1627 rc = __sev_snp_init_locked(&error); 1628 if (rc) { 1629 argp->error = SEV_RET_INVALID_PLATFORM_STATE; 1630 return rc; 1631 } 1632 1633 *shutdown_required = true; 1634 1635 return 0; 1636 } 1637 1638 static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable) 1639 { 1640 int state, rc; 1641 1642 if (!writable) 1643 return -EPERM; 1644 1645 /* 1646 * The SEV spec requires that FACTORY_RESET must be issued in 1647 * UNINIT state. Before we go further lets check if any guest is 1648 * active. 1649 * 1650 * If FW is in WORKING state then deny the request otherwise issue 1651 * SHUTDOWN command do INIT -> UNINIT before issuing the FACTORY_RESET. 1652 * 1653 */ 1654 rc = sev_get_platform_state(&state, &argp->error); 1655 if (rc) 1656 return rc; 1657 1658 if (state == SEV_STATE_WORKING) 1659 return -EBUSY; 1660 1661 if (state == SEV_STATE_INIT) { 1662 rc = __sev_platform_shutdown_locked(&argp->error); 1663 if (rc) 1664 return rc; 1665 } 1666 1667 return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error); 1668 } 1669 1670 static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) 1671 { 1672 struct sev_user_data_status data; 1673 int ret; 1674 1675 memset(&data, 0, sizeof(data)); 1676 1677 ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error); 1678 if (ret) 1679 return ret; 1680 1681 if (copy_to_user((void __user *)argp->data, &data, sizeof(data))) 1682 ret = -EFAULT; 1683 1684 return ret; 1685 } 1686 1687 static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable) 1688 { 1689 struct sev_device *sev = psp_master->sev_data; 1690 bool shutdown_required = false; 1691 int rc; 1692 1693 if (!writable) 1694 return -EPERM; 1695 1696 if (sev->state == SEV_STATE_UNINIT) { 1697 rc = sev_move_to_init_state(argp, &shutdown_required); 1698 if (rc) 1699 return rc; 1700 } 1701 1702 rc = __sev_do_cmd_locked(cmd, NULL, &argp->error); 1703 1704 if (shutdown_required) 1705 __sev_firmware_shutdown(sev, false); 1706 1707 return rc; 1708 } 1709 1710 static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) 1711 { 1712 struct sev_device *sev = psp_master->sev_data; 1713 struct sev_user_data_pek_csr input; 1714 bool shutdown_required = false; 1715 struct sev_data_pek_csr data; 1716 void __user *input_address; 1717 void *blob = NULL; 1718 int ret; 1719 1720 if (!writable) 1721 return -EPERM; 1722 1723 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 1724 return -EFAULT; 1725 1726 memset(&data, 0, sizeof(data)); 1727 1728 /* userspace wants to query CSR length */ 1729 if (!input.address || !input.length) 1730 goto cmd; 1731 1732 /* allocate a physically contiguous buffer to store the CSR blob */ 1733 input_address = (void __user *)input.address; 1734 if (input.length > SEV_FW_BLOB_MAX_SIZE) 1735 return -EFAULT; 1736 1737 blob = kzalloc(input.length, GFP_KERNEL); 1738 if (!blob) 1739 return -ENOMEM; 1740 1741 data.address = __psp_pa(blob); 1742 data.len = input.length; 1743 1744 cmd: 1745 if (sev->state == SEV_STATE_UNINIT) { 1746 ret = sev_move_to_init_state(argp, &shutdown_required); 1747 if (ret) 1748 goto e_free_blob; 1749 } 1750 1751 ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, &data, &argp->error); 1752 1753 /* If we query the CSR length, FW responded with expected data. */ 1754 input.length = data.len; 1755 1756 if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { 1757 ret = -EFAULT; 1758 goto e_free_blob; 1759 } 1760 1761 if (blob) { 1762 if (copy_to_user(input_address, blob, input.length)) 1763 ret = -EFAULT; 1764 } 1765 1766 e_free_blob: 1767 if (shutdown_required) 1768 __sev_firmware_shutdown(sev, false); 1769 1770 kfree(blob); 1771 return ret; 1772 } 1773 1774 void *psp_copy_user_blob(u64 uaddr, u32 len) 1775 { 1776 if (!uaddr || !len) 1777 return ERR_PTR(-EINVAL); 1778 1779 /* verify that blob length does not exceed our limit */ 1780 if (len > SEV_FW_BLOB_MAX_SIZE) 1781 return ERR_PTR(-EINVAL); 1782 1783 return memdup_user((void __user *)uaddr, len); 1784 } 1785 EXPORT_SYMBOL_GPL(psp_copy_user_blob); 1786 1787 static int sev_get_api_version(void) 1788 { 1789 struct sev_device *sev = psp_master->sev_data; 1790 struct sev_user_data_status status; 1791 int error = 0, ret; 1792 1793 ret = sev_platform_status(&status, &error); 1794 if (ret) { 1795 dev_err(sev->dev, 1796 "SEV: failed to get status. Error: %#x\n", error); 1797 return 1; 1798 } 1799 1800 sev->api_major = status.api_major; 1801 sev->api_minor = status.api_minor; 1802 sev->build = status.build; 1803 sev->state = status.state; 1804 1805 return 0; 1806 } 1807 1808 static int sev_get_firmware(struct device *dev, 1809 const struct firmware **firmware) 1810 { 1811 char fw_name_specific[SEV_FW_NAME_SIZE]; 1812 char fw_name_subset[SEV_FW_NAME_SIZE]; 1813 1814 snprintf(fw_name_specific, sizeof(fw_name_specific), 1815 "amd/amd_sev_fam%.2xh_model%.2xh.sbin", 1816 boot_cpu_data.x86, boot_cpu_data.x86_model); 1817 1818 snprintf(fw_name_subset, sizeof(fw_name_subset), 1819 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin", 1820 boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4); 1821 1822 /* Check for SEV FW for a particular model. 1823 * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h 1824 * 1825 * or 1826 * 1827 * Check for SEV FW common to a subset of models. 1828 * Ex. amd_sev_fam17h_model0xh.sbin for 1829 * Family 17h Model 00h -- Family 17h Model 0Fh 1830 * 1831 * or 1832 * 1833 * Fall-back to using generic name: sev.fw 1834 */ 1835 if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) || 1836 (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) || 1837 (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0)) 1838 return 0; 1839 1840 return -ENOENT; 1841 } 1842 1843 /* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */ 1844 static int sev_update_firmware(struct device *dev) 1845 { 1846 struct sev_data_download_firmware *data; 1847 const struct firmware *firmware; 1848 int ret, error, order; 1849 struct page *p; 1850 u64 data_size; 1851 1852 if (!sev_version_greater_or_equal(0, 15)) { 1853 dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n"); 1854 return -1; 1855 } 1856 1857 if (sev_get_firmware(dev, &firmware) == -ENOENT) { 1858 dev_dbg(dev, "No SEV firmware file present\n"); 1859 return -1; 1860 } 1861 1862 /* 1863 * SEV FW expects the physical address given to it to be 32 1864 * byte aligned. Memory allocated has structure placed at the 1865 * beginning followed by the firmware being passed to the SEV 1866 * FW. Allocate enough memory for data structure + alignment 1867 * padding + SEV FW. 1868 */ 1869 data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); 1870 1871 order = get_order(firmware->size + data_size); 1872 p = alloc_pages(GFP_KERNEL, order); 1873 if (!p) { 1874 ret = -1; 1875 goto fw_err; 1876 } 1877 1878 /* 1879 * Copy firmware data to a kernel allocated contiguous 1880 * memory region. 1881 */ 1882 data = page_address(p); 1883 memcpy(page_address(p) + data_size, firmware->data, firmware->size); 1884 1885 data->address = __psp_pa(page_address(p) + data_size); 1886 data->len = firmware->size; 1887 1888 ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); 1889 1890 /* 1891 * A quirk for fixing the committed TCB version, when upgrading from 1892 * earlier firmware version than 1.50. 1893 */ 1894 if (!ret && !sev_version_greater_or_equal(1, 50)) 1895 ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); 1896 1897 if (ret) 1898 dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); 1899 1900 __free_pages(p, order); 1901 1902 fw_err: 1903 release_firmware(firmware); 1904 1905 return ret; 1906 } 1907 1908 static int __sev_snp_shutdown_locked(int *error, bool panic) 1909 { 1910 struct psp_device *psp = psp_master; 1911 struct sev_device *sev; 1912 struct sev_data_snp_shutdown_ex data; 1913 int ret; 1914 1915 if (!psp || !psp->sev_data) 1916 return 0; 1917 1918 sev = psp->sev_data; 1919 1920 if (!sev->snp_initialized) 1921 return 0; 1922 1923 memset(&data, 0, sizeof(data)); 1924 data.len = sizeof(data); 1925 data.iommu_snp_shutdown = 1; 1926 1927 /* 1928 * If invoked during panic handling, local interrupts are disabled 1929 * and all CPUs are stopped, so wbinvd_on_all_cpus() can't be called. 1930 * In that case, a wbinvd() is done on remote CPUs via the NMI 1931 * callback, so only a local wbinvd() is needed here. 1932 */ 1933 if (!panic) 1934 wbinvd_on_all_cpus(); 1935 else 1936 wbinvd(); 1937 1938 ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error); 1939 /* SHUTDOWN may require DF_FLUSH */ 1940 if (*error == SEV_RET_DFFLUSH_REQUIRED) { 1941 int dfflush_error = SEV_RET_NO_FW_CALL; 1942 1943 ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, &dfflush_error); 1944 if (ret) { 1945 dev_err(sev->dev, "SEV-SNP DF_FLUSH failed, ret = %d, error = %#x\n", 1946 ret, dfflush_error); 1947 return ret; 1948 } 1949 /* reissue the shutdown command */ 1950 ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, 1951 error); 1952 } 1953 if (ret) { 1954 dev_err(sev->dev, "SEV-SNP firmware shutdown failed, rc %d, error %#x\n", 1955 ret, *error); 1956 return ret; 1957 } 1958 1959 /* 1960 * SNP_SHUTDOWN_EX with IOMMU_SNP_SHUTDOWN set to 1 disables SNP 1961 * enforcement by the IOMMU and also transitions all pages 1962 * associated with the IOMMU to the Reclaim state. 1963 * Firmware was transitioning the IOMMU pages to Hypervisor state 1964 * before version 1.53. But, accounting for the number of assigned 1965 * 4kB pages in a 2M page was done incorrectly by not transitioning 1966 * to the Reclaim state. This resulted in RMP #PF when later accessing 1967 * the 2M page containing those pages during kexec boot. Hence, the 1968 * firmware now transitions these pages to Reclaim state and hypervisor 1969 * needs to transition these pages to shared state. SNP Firmware 1970 * version 1.53 and above are needed for kexec boot. 1971 */ 1972 ret = amd_iommu_snp_disable(); 1973 if (ret) { 1974 dev_err(sev->dev, "SNP IOMMU shutdown failed\n"); 1975 return ret; 1976 } 1977 1978 snp_leak_hv_fixed_pages(); 1979 sev->snp_initialized = false; 1980 dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n"); 1981 1982 /* 1983 * __sev_snp_shutdown_locked() deadlocks when it tries to unregister 1984 * itself during panic as the panic notifier is called with RCU read 1985 * lock held and notifier unregistration does RCU synchronization. 1986 */ 1987 if (!panic) 1988 atomic_notifier_chain_unregister(&panic_notifier_list, 1989 &snp_panic_notifier); 1990 1991 /* Reset TMR size back to default */ 1992 sev_es_tmr_size = SEV_TMR_SIZE; 1993 1994 return ret; 1995 } 1996 1997 static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) 1998 { 1999 struct sev_device *sev = psp_master->sev_data; 2000 struct sev_user_data_pek_cert_import input; 2001 struct sev_data_pek_cert_import data; 2002 bool shutdown_required = false; 2003 void *pek_blob, *oca_blob; 2004 int ret; 2005 2006 if (!writable) 2007 return -EPERM; 2008 2009 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 2010 return -EFAULT; 2011 2012 /* copy PEK certificate blobs from userspace */ 2013 pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len); 2014 if (IS_ERR(pek_blob)) 2015 return PTR_ERR(pek_blob); 2016 2017 data.reserved = 0; 2018 data.pek_cert_address = __psp_pa(pek_blob); 2019 data.pek_cert_len = input.pek_cert_len; 2020 2021 /* copy PEK certificate blobs from userspace */ 2022 oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len); 2023 if (IS_ERR(oca_blob)) { 2024 ret = PTR_ERR(oca_blob); 2025 goto e_free_pek; 2026 } 2027 2028 data.oca_cert_address = __psp_pa(oca_blob); 2029 data.oca_cert_len = input.oca_cert_len; 2030 2031 /* If platform is not in INIT state then transition it to INIT */ 2032 if (sev->state != SEV_STATE_INIT) { 2033 ret = sev_move_to_init_state(argp, &shutdown_required); 2034 if (ret) 2035 goto e_free_oca; 2036 } 2037 2038 ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error); 2039 2040 e_free_oca: 2041 if (shutdown_required) 2042 __sev_firmware_shutdown(sev, false); 2043 2044 kfree(oca_blob); 2045 e_free_pek: 2046 kfree(pek_blob); 2047 return ret; 2048 } 2049 2050 static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) 2051 { 2052 struct sev_user_data_get_id2 input; 2053 struct sev_data_get_id data; 2054 void __user *input_address; 2055 void *id_blob = NULL; 2056 int ret; 2057 2058 /* SEV GET_ID is available from SEV API v0.16 and up */ 2059 if (!sev_version_greater_or_equal(0, 16)) 2060 return -ENOTSUPP; 2061 2062 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 2063 return -EFAULT; 2064 2065 input_address = (void __user *)input.address; 2066 2067 if (input.address && input.length) { 2068 /* 2069 * The length of the ID shouldn't be assumed by software since 2070 * it may change in the future. The allocation size is limited 2071 * to 1 << (PAGE_SHIFT + MAX_PAGE_ORDER) by the page allocator. 2072 * If the allocation fails, simply return ENOMEM rather than 2073 * warning in the kernel log. 2074 */ 2075 id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN); 2076 if (!id_blob) 2077 return -ENOMEM; 2078 2079 data.address = __psp_pa(id_blob); 2080 data.len = input.length; 2081 } else { 2082 data.address = 0; 2083 data.len = 0; 2084 } 2085 2086 ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, &data, &argp->error); 2087 2088 /* 2089 * Firmware will return the length of the ID value (either the minimum 2090 * required length or the actual length written), return it to the user. 2091 */ 2092 input.length = data.len; 2093 2094 if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { 2095 ret = -EFAULT; 2096 goto e_free; 2097 } 2098 2099 if (id_blob) { 2100 if (copy_to_user(input_address, id_blob, data.len)) { 2101 ret = -EFAULT; 2102 goto e_free; 2103 } 2104 } 2105 2106 e_free: 2107 kfree(id_blob); 2108 2109 return ret; 2110 } 2111 2112 static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp) 2113 { 2114 struct sev_data_get_id *data; 2115 u64 data_size, user_size; 2116 void *id_blob, *mem; 2117 int ret; 2118 2119 /* SEV GET_ID available from SEV API v0.16 and up */ 2120 if (!sev_version_greater_or_equal(0, 16)) 2121 return -ENOTSUPP; 2122 2123 /* SEV FW expects the buffer it fills with the ID to be 2124 * 8-byte aligned. Memory allocated should be enough to 2125 * hold data structure + alignment padding + memory 2126 * where SEV FW writes the ID. 2127 */ 2128 data_size = ALIGN(sizeof(struct sev_data_get_id), 8); 2129 user_size = sizeof(struct sev_user_data_get_id); 2130 2131 mem = kzalloc(data_size + user_size, GFP_KERNEL); 2132 if (!mem) 2133 return -ENOMEM; 2134 2135 data = mem; 2136 id_blob = mem + data_size; 2137 2138 data->address = __psp_pa(id_blob); 2139 data->len = user_size; 2140 2141 ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error); 2142 if (!ret) { 2143 if (copy_to_user((void __user *)argp->data, id_blob, data->len)) 2144 ret = -EFAULT; 2145 } 2146 2147 kfree(mem); 2148 2149 return ret; 2150 } 2151 2152 static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) 2153 { 2154 struct sev_device *sev = psp_master->sev_data; 2155 struct sev_user_data_pdh_cert_export input; 2156 void *pdh_blob = NULL, *cert_blob = NULL; 2157 struct sev_data_pdh_cert_export data; 2158 void __user *input_cert_chain_address; 2159 void __user *input_pdh_cert_address; 2160 bool shutdown_required = false; 2161 int ret; 2162 2163 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 2164 return -EFAULT; 2165 2166 memset(&data, 0, sizeof(data)); 2167 2168 input_pdh_cert_address = (void __user *)input.pdh_cert_address; 2169 input_cert_chain_address = (void __user *)input.cert_chain_address; 2170 2171 /* Userspace wants to query the certificate length. */ 2172 if (!input.pdh_cert_address || 2173 !input.pdh_cert_len || 2174 !input.cert_chain_address) 2175 goto cmd; 2176 2177 /* Allocate a physically contiguous buffer to store the PDH blob. */ 2178 if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) 2179 return -EFAULT; 2180 2181 /* Allocate a physically contiguous buffer to store the cert chain blob. */ 2182 if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) 2183 return -EFAULT; 2184 2185 pdh_blob = kzalloc(input.pdh_cert_len, GFP_KERNEL); 2186 if (!pdh_blob) 2187 return -ENOMEM; 2188 2189 data.pdh_cert_address = __psp_pa(pdh_blob); 2190 data.pdh_cert_len = input.pdh_cert_len; 2191 2192 cert_blob = kzalloc(input.cert_chain_len, GFP_KERNEL); 2193 if (!cert_blob) { 2194 ret = -ENOMEM; 2195 goto e_free_pdh; 2196 } 2197 2198 data.cert_chain_address = __psp_pa(cert_blob); 2199 data.cert_chain_len = input.cert_chain_len; 2200 2201 cmd: 2202 /* If platform is not in INIT state then transition it to INIT. */ 2203 if (sev->state != SEV_STATE_INIT) { 2204 if (!writable) { 2205 ret = -EPERM; 2206 goto e_free_cert; 2207 } 2208 ret = sev_move_to_init_state(argp, &shutdown_required); 2209 if (ret) 2210 goto e_free_cert; 2211 } 2212 2213 ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error); 2214 2215 /* If we query the length, FW responded with expected data. */ 2216 input.cert_chain_len = data.cert_chain_len; 2217 input.pdh_cert_len = data.pdh_cert_len; 2218 2219 if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { 2220 ret = -EFAULT; 2221 goto e_free_cert; 2222 } 2223 2224 if (pdh_blob) { 2225 if (copy_to_user(input_pdh_cert_address, 2226 pdh_blob, input.pdh_cert_len)) { 2227 ret = -EFAULT; 2228 goto e_free_cert; 2229 } 2230 } 2231 2232 if (cert_blob) { 2233 if (copy_to_user(input_cert_chain_address, 2234 cert_blob, input.cert_chain_len)) 2235 ret = -EFAULT; 2236 } 2237 2238 e_free_cert: 2239 if (shutdown_required) 2240 __sev_firmware_shutdown(sev, false); 2241 2242 kfree(cert_blob); 2243 e_free_pdh: 2244 kfree(pdh_blob); 2245 return ret; 2246 } 2247 2248 static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp) 2249 { 2250 struct sev_device *sev = psp_master->sev_data; 2251 bool shutdown_required = false; 2252 struct sev_data_snp_addr buf; 2253 struct page *status_page; 2254 int ret, error; 2255 void *data; 2256 2257 if (!argp->data) 2258 return -EINVAL; 2259 2260 status_page = alloc_page(GFP_KERNEL_ACCOUNT); 2261 if (!status_page) 2262 return -ENOMEM; 2263 2264 data = page_address(status_page); 2265 2266 if (!sev->snp_initialized) { 2267 ret = snp_move_to_init_state(argp, &shutdown_required); 2268 if (ret) 2269 goto cleanup; 2270 } 2271 2272 /* 2273 * Firmware expects status page to be in firmware-owned state, otherwise 2274 * it will report firmware error code INVALID_PAGE_STATE (0x1A). 2275 */ 2276 if (rmp_mark_pages_firmware(__pa(data), 1, true)) { 2277 ret = -EFAULT; 2278 goto cleanup; 2279 } 2280 2281 buf.address = __psp_pa(data); 2282 ret = __sev_do_cmd_locked(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &argp->error); 2283 2284 /* 2285 * Status page will be transitioned to Reclaim state upon success, or 2286 * left in Firmware state in failure. Use snp_reclaim_pages() to 2287 * transition either case back to Hypervisor-owned state. 2288 */ 2289 if (snp_reclaim_pages(__pa(data), 1, true)) 2290 return -EFAULT; 2291 2292 if (ret) 2293 goto cleanup; 2294 2295 if (copy_to_user((void __user *)argp->data, data, 2296 sizeof(struct sev_user_data_snp_status))) 2297 ret = -EFAULT; 2298 2299 cleanup: 2300 if (shutdown_required) 2301 __sev_snp_shutdown_locked(&error, false); 2302 2303 __free_pages(status_page, 0); 2304 return ret; 2305 } 2306 2307 static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp) 2308 { 2309 struct sev_device *sev = psp_master->sev_data; 2310 struct sev_data_snp_commit buf; 2311 bool shutdown_required = false; 2312 int ret, error; 2313 2314 if (!sev->snp_initialized) { 2315 ret = snp_move_to_init_state(argp, &shutdown_required); 2316 if (ret) 2317 return ret; 2318 } 2319 2320 buf.len = sizeof(buf); 2321 2322 ret = __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error); 2323 2324 if (shutdown_required) 2325 __sev_snp_shutdown_locked(&error, false); 2326 2327 return ret; 2328 } 2329 2330 static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable) 2331 { 2332 struct sev_device *sev = psp_master->sev_data; 2333 struct sev_user_data_snp_config config; 2334 bool shutdown_required = false; 2335 int ret, error; 2336 2337 if (!argp->data) 2338 return -EINVAL; 2339 2340 if (!writable) 2341 return -EPERM; 2342 2343 if (copy_from_user(&config, (void __user *)argp->data, sizeof(config))) 2344 return -EFAULT; 2345 2346 if (!sev->snp_initialized) { 2347 ret = snp_move_to_init_state(argp, &shutdown_required); 2348 if (ret) 2349 return ret; 2350 } 2351 2352 ret = __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error); 2353 2354 if (shutdown_required) 2355 __sev_snp_shutdown_locked(&error, false); 2356 2357 return ret; 2358 } 2359 2360 static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable) 2361 { 2362 struct sev_device *sev = psp_master->sev_data; 2363 struct sev_user_data_snp_vlek_load input; 2364 bool shutdown_required = false; 2365 int ret, error; 2366 void *blob; 2367 2368 if (!argp->data) 2369 return -EINVAL; 2370 2371 if (!writable) 2372 return -EPERM; 2373 2374 if (copy_from_user(&input, u64_to_user_ptr(argp->data), sizeof(input))) 2375 return -EFAULT; 2376 2377 if (input.len != sizeof(input) || input.vlek_wrapped_version != 0) 2378 return -EINVAL; 2379 2380 blob = psp_copy_user_blob(input.vlek_wrapped_address, 2381 sizeof(struct sev_user_data_snp_wrapped_vlek_hashstick)); 2382 if (IS_ERR(blob)) 2383 return PTR_ERR(blob); 2384 2385 input.vlek_wrapped_address = __psp_pa(blob); 2386 2387 if (!sev->snp_initialized) { 2388 ret = snp_move_to_init_state(argp, &shutdown_required); 2389 if (ret) 2390 goto cleanup; 2391 } 2392 2393 ret = __sev_do_cmd_locked(SEV_CMD_SNP_VLEK_LOAD, &input, &argp->error); 2394 2395 if (shutdown_required) 2396 __sev_snp_shutdown_locked(&error, false); 2397 2398 cleanup: 2399 kfree(blob); 2400 2401 return ret; 2402 } 2403 2404 static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) 2405 { 2406 void __user *argp = (void __user *)arg; 2407 struct sev_issue_cmd input; 2408 int ret = -EFAULT; 2409 bool writable = file->f_mode & FMODE_WRITE; 2410 2411 if (!psp_master || !psp_master->sev_data) 2412 return -ENODEV; 2413 2414 if (ioctl != SEV_ISSUE_CMD) 2415 return -EINVAL; 2416 2417 if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) 2418 return -EFAULT; 2419 2420 if (input.cmd > SEV_MAX) 2421 return -EINVAL; 2422 2423 mutex_lock(&sev_cmd_mutex); 2424 2425 switch (input.cmd) { 2426 2427 case SEV_FACTORY_RESET: 2428 ret = sev_ioctl_do_reset(&input, writable); 2429 break; 2430 case SEV_PLATFORM_STATUS: 2431 ret = sev_ioctl_do_platform_status(&input); 2432 break; 2433 case SEV_PEK_GEN: 2434 ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable); 2435 break; 2436 case SEV_PDH_GEN: 2437 ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable); 2438 break; 2439 case SEV_PEK_CSR: 2440 ret = sev_ioctl_do_pek_csr(&input, writable); 2441 break; 2442 case SEV_PEK_CERT_IMPORT: 2443 ret = sev_ioctl_do_pek_import(&input, writable); 2444 break; 2445 case SEV_PDH_CERT_EXPORT: 2446 ret = sev_ioctl_do_pdh_export(&input, writable); 2447 break; 2448 case SEV_GET_ID: 2449 pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n"); 2450 ret = sev_ioctl_do_get_id(&input); 2451 break; 2452 case SEV_GET_ID2: 2453 ret = sev_ioctl_do_get_id2(&input); 2454 break; 2455 case SNP_PLATFORM_STATUS: 2456 ret = sev_ioctl_do_snp_platform_status(&input); 2457 break; 2458 case SNP_COMMIT: 2459 ret = sev_ioctl_do_snp_commit(&input); 2460 break; 2461 case SNP_SET_CONFIG: 2462 ret = sev_ioctl_do_snp_set_config(&input, writable); 2463 break; 2464 case SNP_VLEK_LOAD: 2465 ret = sev_ioctl_do_snp_vlek_load(&input, writable); 2466 break; 2467 default: 2468 ret = -EINVAL; 2469 goto out; 2470 } 2471 2472 if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) 2473 ret = -EFAULT; 2474 out: 2475 mutex_unlock(&sev_cmd_mutex); 2476 2477 return ret; 2478 } 2479 2480 static const struct file_operations sev_fops = { 2481 .owner = THIS_MODULE, 2482 .unlocked_ioctl = sev_ioctl, 2483 }; 2484 2485 int sev_platform_status(struct sev_user_data_status *data, int *error) 2486 { 2487 return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error); 2488 } 2489 EXPORT_SYMBOL_GPL(sev_platform_status); 2490 2491 int sev_guest_deactivate(struct sev_data_deactivate *data, int *error) 2492 { 2493 return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error); 2494 } 2495 EXPORT_SYMBOL_GPL(sev_guest_deactivate); 2496 2497 int sev_guest_activate(struct sev_data_activate *data, int *error) 2498 { 2499 return sev_do_cmd(SEV_CMD_ACTIVATE, data, error); 2500 } 2501 EXPORT_SYMBOL_GPL(sev_guest_activate); 2502 2503 int sev_guest_decommission(struct sev_data_decommission *data, int *error) 2504 { 2505 return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error); 2506 } 2507 EXPORT_SYMBOL_GPL(sev_guest_decommission); 2508 2509 int sev_guest_df_flush(int *error) 2510 { 2511 return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error); 2512 } 2513 EXPORT_SYMBOL_GPL(sev_guest_df_flush); 2514 2515 static void sev_exit(struct kref *ref) 2516 { 2517 misc_deregister(&misc_dev->misc); 2518 kfree(misc_dev); 2519 misc_dev = NULL; 2520 } 2521 2522 static int sev_misc_init(struct sev_device *sev) 2523 { 2524 struct device *dev = sev->dev; 2525 int ret; 2526 2527 /* 2528 * SEV feature support can be detected on multiple devices but the SEV 2529 * FW commands must be issued on the master. During probe, we do not 2530 * know the master hence we create /dev/sev on the first device probe. 2531 * sev_do_cmd() finds the right master device to which to issue the 2532 * command to the firmware. 2533 */ 2534 if (!misc_dev) { 2535 struct miscdevice *misc; 2536 2537 misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL); 2538 if (!misc_dev) 2539 return -ENOMEM; 2540 2541 misc = &misc_dev->misc; 2542 misc->minor = MISC_DYNAMIC_MINOR; 2543 misc->name = DEVICE_NAME; 2544 misc->fops = &sev_fops; 2545 2546 ret = misc_register(misc); 2547 if (ret) 2548 return ret; 2549 2550 kref_init(&misc_dev->refcount); 2551 } else { 2552 kref_get(&misc_dev->refcount); 2553 } 2554 2555 init_waitqueue_head(&sev->int_queue); 2556 sev->misc = misc_dev; 2557 dev_dbg(dev, "registered SEV device\n"); 2558 2559 return 0; 2560 } 2561 2562 int sev_dev_init(struct psp_device *psp) 2563 { 2564 struct device *dev = psp->dev; 2565 struct sev_device *sev; 2566 int ret = -ENOMEM; 2567 2568 if (!boot_cpu_has(X86_FEATURE_SEV)) { 2569 dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n"); 2570 return 0; 2571 } 2572 2573 sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL); 2574 if (!sev) 2575 goto e_err; 2576 2577 sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 1); 2578 if (!sev->cmd_buf) 2579 goto e_sev; 2580 2581 sev->cmd_buf_backup = (uint8_t *)sev->cmd_buf + PAGE_SIZE; 2582 2583 psp->sev_data = sev; 2584 2585 sev->dev = dev; 2586 sev->psp = psp; 2587 2588 sev->io_regs = psp->io_regs; 2589 2590 sev->vdata = (struct sev_vdata *)psp->vdata->sev; 2591 if (!sev->vdata) { 2592 ret = -ENODEV; 2593 dev_err(dev, "sev: missing driver data\n"); 2594 goto e_buf; 2595 } 2596 2597 psp_set_sev_irq_handler(psp, sev_irq_handler, sev); 2598 2599 ret = sev_misc_init(sev); 2600 if (ret) 2601 goto e_irq; 2602 2603 dev_notice(dev, "sev enabled\n"); 2604 2605 return 0; 2606 2607 e_irq: 2608 psp_clear_sev_irq_handler(psp); 2609 e_buf: 2610 devm_free_pages(dev, (unsigned long)sev->cmd_buf); 2611 e_sev: 2612 devm_kfree(dev, sev); 2613 e_err: 2614 psp->sev_data = NULL; 2615 2616 dev_notice(dev, "sev initialization failed\n"); 2617 2618 return ret; 2619 } 2620 2621 static void __sev_firmware_shutdown(struct sev_device *sev, bool panic) 2622 { 2623 int error; 2624 2625 __sev_platform_shutdown_locked(&error); 2626 2627 if (sev_es_tmr) { 2628 /* 2629 * The TMR area was encrypted, flush it from the cache. 2630 * 2631 * If invoked during panic handling, local interrupts are 2632 * disabled and all CPUs are stopped, so wbinvd_on_all_cpus() 2633 * can't be used. In that case, wbinvd() is done on remote CPUs 2634 * via the NMI callback, and done for this CPU later during 2635 * SNP shutdown, so wbinvd_on_all_cpus() can be skipped. 2636 */ 2637 if (!panic) 2638 wbinvd_on_all_cpus(); 2639 2640 __snp_free_firmware_pages(virt_to_page(sev_es_tmr), 2641 get_order(sev_es_tmr_size), 2642 true); 2643 sev_es_tmr = NULL; 2644 } 2645 2646 if (sev_init_ex_buffer) { 2647 __snp_free_firmware_pages(virt_to_page(sev_init_ex_buffer), 2648 get_order(NV_LENGTH), 2649 true); 2650 sev_init_ex_buffer = NULL; 2651 } 2652 2653 if (snp_range_list) { 2654 kfree(snp_range_list); 2655 snp_range_list = NULL; 2656 } 2657 2658 __sev_snp_shutdown_locked(&error, panic); 2659 } 2660 2661 static void sev_firmware_shutdown(struct sev_device *sev) 2662 { 2663 mutex_lock(&sev_cmd_mutex); 2664 __sev_firmware_shutdown(sev, false); 2665 mutex_unlock(&sev_cmd_mutex); 2666 } 2667 2668 void sev_platform_shutdown(void) 2669 { 2670 if (!psp_master || !psp_master->sev_data) 2671 return; 2672 2673 sev_firmware_shutdown(psp_master->sev_data); 2674 } 2675 EXPORT_SYMBOL_GPL(sev_platform_shutdown); 2676 2677 void sev_dev_destroy(struct psp_device *psp) 2678 { 2679 struct sev_device *sev = psp->sev_data; 2680 2681 if (!sev) 2682 return; 2683 2684 sev_firmware_shutdown(sev); 2685 2686 if (sev->misc) 2687 kref_put(&misc_dev->refcount, sev_exit); 2688 2689 psp_clear_sev_irq_handler(psp); 2690 } 2691 2692 static int snp_shutdown_on_panic(struct notifier_block *nb, 2693 unsigned long reason, void *arg) 2694 { 2695 struct sev_device *sev = psp_master->sev_data; 2696 2697 /* 2698 * If sev_cmd_mutex is already acquired, then it's likely 2699 * another PSP command is in flight and issuing a shutdown 2700 * would fail in unexpected ways. Rather than create even 2701 * more confusion during a panic, just bail out here. 2702 */ 2703 if (mutex_is_locked(&sev_cmd_mutex)) 2704 return NOTIFY_DONE; 2705 2706 __sev_firmware_shutdown(sev, true); 2707 2708 return NOTIFY_DONE; 2709 } 2710 2711 int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, 2712 void *data, int *error) 2713 { 2714 if (!filep || filep->f_op != &sev_fops) 2715 return -EBADF; 2716 2717 return sev_do_cmd(cmd, data, error); 2718 } 2719 EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); 2720 2721 void sev_pci_init(void) 2722 { 2723 struct sev_device *sev = psp_master->sev_data; 2724 u8 api_major, api_minor, build; 2725 2726 if (!sev) 2727 return; 2728 2729 psp_timeout = psp_probe_timeout; 2730 2731 if (sev_get_api_version()) 2732 goto err; 2733 2734 api_major = sev->api_major; 2735 api_minor = sev->api_minor; 2736 build = sev->build; 2737 2738 if (sev_update_firmware(sev->dev) == 0) 2739 sev_get_api_version(); 2740 2741 if (api_major != sev->api_major || api_minor != sev->api_minor || 2742 build != sev->build) 2743 dev_info(sev->dev, "SEV firmware updated from %d.%d.%d to %d.%d.%d\n", 2744 api_major, api_minor, build, 2745 sev->api_major, sev->api_minor, sev->build); 2746 2747 return; 2748 2749 err: 2750 sev_dev_destroy(psp_master); 2751 2752 psp_master->sev_data = NULL; 2753 } 2754 2755 void sev_pci_exit(void) 2756 { 2757 struct sev_device *sev = psp_master->sev_data; 2758 2759 if (!sev) 2760 return; 2761 2762 sev_firmware_shutdown(sev); 2763 } 2764