1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * AMD Secure Encrypted Virtualization (SEV) interface 4 * 5 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc. 6 * 7 * Author: Brijesh Singh <brijesh.singh@amd.com> 8 */ 9 10 #include <linux/bitfield.h> 11 #include <linux/module.h> 12 #include <linux/kernel.h> 13 #include <linux/kthread.h> 14 #include <linux/sched.h> 15 #include <linux/interrupt.h> 16 #include <linux/spinlock.h> 17 #include <linux/spinlock_types.h> 18 #include <linux/types.h> 19 #include <linux/mutex.h> 20 #include <linux/delay.h> 21 #include <linux/hw_random.h> 22 #include <linux/ccp.h> 23 #include <linux/firmware.h> 24 #include <linux/panic_notifier.h> 25 #include <linux/gfp.h> 26 #include <linux/cpufeature.h> 27 #include <linux/fs.h> 28 #include <linux/fs_struct.h> 29 #include <linux/psp.h> 30 #include <linux/amd-iommu.h> 31 32 #include <asm/smp.h> 33 #include <asm/cacheflush.h> 34 #include <asm/e820/types.h> 35 #include <asm/sev.h> 36 #include <asm/msr.h> 37 38 #include "psp-dev.h" 39 #include "sev-dev.h" 40 41 #define DEVICE_NAME "sev" 42 #define SEV_FW_FILE "amd/sev.fw" 43 #define SEV_FW_NAME_SIZE 64 44 45 /* Minimum firmware version required for the SEV-SNP support */ 46 #define SNP_MIN_API_MAJOR 1 47 #define SNP_MIN_API_MINOR 51 48 49 /* 50 * Maximum number of firmware-writable buffers that might be specified 51 * in the parameters of a legacy SEV command buffer. 52 */ 53 #define CMD_BUF_FW_WRITABLE_MAX 2 54 55 /* Leave room in the descriptor array for an end-of-list indicator. */ 56 #define CMD_BUF_DESC_MAX (CMD_BUF_FW_WRITABLE_MAX + 1) 57 58 static DEFINE_MUTEX(sev_cmd_mutex); 59 static struct sev_misc_dev *misc_dev; 60 61 static int psp_cmd_timeout = 100; 62 module_param(psp_cmd_timeout, int, 0644); 63 MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands"); 64 65 static int psp_probe_timeout = 5; 66 module_param(psp_probe_timeout, int, 0644); 67 MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe"); 68 69 static char *init_ex_path; 70 module_param(init_ex_path, charp, 0444); 71 MODULE_PARM_DESC(init_ex_path, " Path for INIT_EX data; if set try INIT_EX"); 72 73 static bool psp_init_on_probe = true; 74 module_param(psp_init_on_probe, bool, 0444); 75 MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it"); 76 77 MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */ 78 MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */ 79 MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */ 80 MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */ 81 82 static bool psp_dead; 83 static int psp_timeout; 84 85 /* Trusted Memory Region (TMR): 86 * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator 87 * to allocate the memory, which will return aligned memory for the specified 88 * allocation order. 89 * 90 * When SEV-SNP is enabled the TMR needs to be 2MB aligned and 2MB sized. 91 */ 92 #define SEV_TMR_SIZE (1024 * 1024) 93 #define SNP_TMR_SIZE (2 * 1024 * 1024) 94 95 static void *sev_es_tmr; 96 static size_t sev_es_tmr_size = SEV_TMR_SIZE; 97 98 /* INIT_EX NV Storage: 99 * The NV Storage is a 32Kb area and must be 4Kb page aligned. Use the page 100 * allocator to allocate the memory, which will return aligned memory for the 101 * specified allocation order. 102 */ 103 #define NV_LENGTH (32 * 1024) 104 static void *sev_init_ex_buffer; 105 106 /* 107 * SEV_DATA_RANGE_LIST: 108 * Array containing range of pages that firmware transitions to HV-fixed 109 * page state. 110 */ 111 static struct sev_data_range_list *snp_range_list; 112 113 static void __sev_firmware_shutdown(struct sev_device *sev, bool panic); 114 115 static int snp_shutdown_on_panic(struct notifier_block *nb, 116 unsigned long reason, void *arg); 117 118 static struct notifier_block snp_panic_notifier = { 119 .notifier_call = snp_shutdown_on_panic, 120 }; 121 122 static inline bool sev_version_greater_or_equal(u8 maj, u8 min) 123 { 124 struct sev_device *sev = psp_master->sev_data; 125 126 if (sev->api_major > maj) 127 return true; 128 129 if (sev->api_major == maj && sev->api_minor >= min) 130 return true; 131 132 return false; 133 } 134 135 static void sev_irq_handler(int irq, void *data, unsigned int status) 136 { 137 struct sev_device *sev = data; 138 int reg; 139 140 /* Check if it is command completion: */ 141 if (!(status & SEV_CMD_COMPLETE)) 142 return; 143 144 /* Check if it is SEV command completion: */ 145 reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); 146 if (FIELD_GET(PSP_CMDRESP_RESP, reg)) { 147 sev->int_rcvd = 1; 148 wake_up(&sev->int_queue); 149 } 150 } 151 152 static int sev_wait_cmd_ioc(struct sev_device *sev, 153 unsigned int *reg, unsigned int timeout) 154 { 155 int ret; 156 157 /* 158 * If invoked during panic handling, local interrupts are disabled, 159 * so the PSP command completion interrupt can't be used. Poll for 160 * PSP command completion instead. 161 */ 162 if (irqs_disabled()) { 163 unsigned long timeout_usecs = (timeout * USEC_PER_SEC) / 10; 164 165 /* Poll for SEV command completion: */ 166 while (timeout_usecs--) { 167 *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); 168 if (*reg & PSP_CMDRESP_RESP) 169 return 0; 170 171 udelay(10); 172 } 173 return -ETIMEDOUT; 174 } 175 176 ret = wait_event_timeout(sev->int_queue, 177 sev->int_rcvd, timeout * HZ); 178 if (!ret) 179 return -ETIMEDOUT; 180 181 *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); 182 183 return 0; 184 } 185 186 static int sev_cmd_buffer_len(int cmd) 187 { 188 switch (cmd) { 189 case SEV_CMD_INIT: return sizeof(struct sev_data_init); 190 case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex); 191 case SEV_CMD_SNP_SHUTDOWN_EX: return sizeof(struct sev_data_snp_shutdown_ex); 192 case SEV_CMD_SNP_INIT_EX: return sizeof(struct sev_data_snp_init_ex); 193 case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status); 194 case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr); 195 case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import); 196 case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export); 197 case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start); 198 case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data); 199 case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa); 200 case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish); 201 case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure); 202 case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate); 203 case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate); 204 case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission); 205 case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status); 206 case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg); 207 case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg); 208 case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start); 209 case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data); 210 case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa); 211 case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish); 212 case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start); 213 case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish); 214 case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data); 215 case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa); 216 case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret); 217 case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware); 218 case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id); 219 case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report); 220 case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel); 221 case SEV_CMD_SNP_GCTX_CREATE: return sizeof(struct sev_data_snp_addr); 222 case SEV_CMD_SNP_LAUNCH_START: return sizeof(struct sev_data_snp_launch_start); 223 case SEV_CMD_SNP_LAUNCH_UPDATE: return sizeof(struct sev_data_snp_launch_update); 224 case SEV_CMD_SNP_ACTIVATE: return sizeof(struct sev_data_snp_activate); 225 case SEV_CMD_SNP_DECOMMISSION: return sizeof(struct sev_data_snp_addr); 226 case SEV_CMD_SNP_PAGE_RECLAIM: return sizeof(struct sev_data_snp_page_reclaim); 227 case SEV_CMD_SNP_GUEST_STATUS: return sizeof(struct sev_data_snp_guest_status); 228 case SEV_CMD_SNP_LAUNCH_FINISH: return sizeof(struct sev_data_snp_launch_finish); 229 case SEV_CMD_SNP_DBG_DECRYPT: return sizeof(struct sev_data_snp_dbg); 230 case SEV_CMD_SNP_DBG_ENCRYPT: return sizeof(struct sev_data_snp_dbg); 231 case SEV_CMD_SNP_PAGE_UNSMASH: return sizeof(struct sev_data_snp_page_unsmash); 232 case SEV_CMD_SNP_PLATFORM_STATUS: return sizeof(struct sev_data_snp_addr); 233 case SEV_CMD_SNP_GUEST_REQUEST: return sizeof(struct sev_data_snp_guest_request); 234 case SEV_CMD_SNP_CONFIG: return sizeof(struct sev_user_data_snp_config); 235 case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit); 236 case SEV_CMD_SNP_FEATURE_INFO: return sizeof(struct sev_data_snp_feature_info); 237 case SEV_CMD_SNP_VLEK_LOAD: return sizeof(struct sev_user_data_snp_vlek_load); 238 default: return 0; 239 } 240 241 return 0; 242 } 243 244 static struct file *open_file_as_root(const char *filename, int flags, umode_t mode) 245 { 246 struct file *fp; 247 struct path root; 248 struct cred *cred; 249 const struct cred *old_cred; 250 251 task_lock(&init_task); 252 get_fs_root(init_task.fs, &root); 253 task_unlock(&init_task); 254 255 cred = prepare_creds(); 256 if (!cred) 257 return ERR_PTR(-ENOMEM); 258 cred->fsuid = GLOBAL_ROOT_UID; 259 old_cred = override_creds(cred); 260 261 fp = file_open_root(&root, filename, flags, mode); 262 path_put(&root); 263 264 put_cred(revert_creds(old_cred)); 265 266 return fp; 267 } 268 269 static int sev_read_init_ex_file(void) 270 { 271 struct sev_device *sev = psp_master->sev_data; 272 struct file *fp; 273 ssize_t nread; 274 275 lockdep_assert_held(&sev_cmd_mutex); 276 277 if (!sev_init_ex_buffer) 278 return -EOPNOTSUPP; 279 280 fp = open_file_as_root(init_ex_path, O_RDONLY, 0); 281 if (IS_ERR(fp)) { 282 int ret = PTR_ERR(fp); 283 284 if (ret == -ENOENT) { 285 dev_info(sev->dev, 286 "SEV: %s does not exist and will be created later.\n", 287 init_ex_path); 288 ret = 0; 289 } else { 290 dev_err(sev->dev, 291 "SEV: could not open %s for read, error %d\n", 292 init_ex_path, ret); 293 } 294 return ret; 295 } 296 297 nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL); 298 if (nread != NV_LENGTH) { 299 dev_info(sev->dev, 300 "SEV: could not read %u bytes to non volatile memory area, ret %ld\n", 301 NV_LENGTH, nread); 302 } 303 304 dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread); 305 filp_close(fp, NULL); 306 307 return 0; 308 } 309 310 static int sev_write_init_ex_file(void) 311 { 312 struct sev_device *sev = psp_master->sev_data; 313 struct file *fp; 314 loff_t offset = 0; 315 ssize_t nwrite; 316 317 lockdep_assert_held(&sev_cmd_mutex); 318 319 if (!sev_init_ex_buffer) 320 return 0; 321 322 fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600); 323 if (IS_ERR(fp)) { 324 int ret = PTR_ERR(fp); 325 326 dev_err(sev->dev, 327 "SEV: could not open file for write, error %d\n", 328 ret); 329 return ret; 330 } 331 332 nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset); 333 vfs_fsync(fp, 0); 334 filp_close(fp, NULL); 335 336 if (nwrite != NV_LENGTH) { 337 dev_err(sev->dev, 338 "SEV: failed to write %u bytes to non volatile memory area, ret %ld\n", 339 NV_LENGTH, nwrite); 340 return -EIO; 341 } 342 343 dev_dbg(sev->dev, "SEV: write successful to NV file\n"); 344 345 return 0; 346 } 347 348 static int sev_write_init_ex_file_if_required(int cmd_id) 349 { 350 lockdep_assert_held(&sev_cmd_mutex); 351 352 if (!sev_init_ex_buffer) 353 return 0; 354 355 /* 356 * Only a few platform commands modify the SPI/NV area, but none of the 357 * non-platform commands do. Only INIT(_EX), PLATFORM_RESET, PEK_GEN, 358 * PEK_CERT_IMPORT, and PDH_GEN do. 359 */ 360 switch (cmd_id) { 361 case SEV_CMD_FACTORY_RESET: 362 case SEV_CMD_INIT_EX: 363 case SEV_CMD_PDH_GEN: 364 case SEV_CMD_PEK_CERT_IMPORT: 365 case SEV_CMD_PEK_GEN: 366 break; 367 default: 368 return 0; 369 } 370 371 return sev_write_init_ex_file(); 372 } 373 374 /* 375 * snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked() 376 * needs snp_reclaim_pages(), so a forward declaration is needed. 377 */ 378 static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret); 379 380 static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked) 381 { 382 int ret, err, i; 383 384 paddr = __sme_clr(ALIGN_DOWN(paddr, PAGE_SIZE)); 385 386 for (i = 0; i < npages; i++, paddr += PAGE_SIZE) { 387 struct sev_data_snp_page_reclaim data = {0}; 388 389 data.paddr = paddr; 390 391 if (locked) 392 ret = __sev_do_cmd_locked(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err); 393 else 394 ret = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err); 395 396 if (ret) 397 goto cleanup; 398 399 ret = rmp_make_shared(__phys_to_pfn(paddr), PG_LEVEL_4K); 400 if (ret) 401 goto cleanup; 402 } 403 404 return 0; 405 406 cleanup: 407 /* 408 * If there was a failure reclaiming the page then it is no longer safe 409 * to release it back to the system; leak it instead. 410 */ 411 snp_leak_pages(__phys_to_pfn(paddr), npages - i); 412 return ret; 413 } 414 415 static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked) 416 { 417 unsigned long pfn = __sme_clr(paddr) >> PAGE_SHIFT; 418 int rc, i; 419 420 for (i = 0; i < npages; i++, pfn++) { 421 rc = rmp_make_private(pfn, 0, PG_LEVEL_4K, 0, true); 422 if (rc) 423 goto cleanup; 424 } 425 426 return 0; 427 428 cleanup: 429 /* 430 * Try unrolling the firmware state changes by 431 * reclaiming the pages which were already changed to the 432 * firmware state. 433 */ 434 snp_reclaim_pages(paddr, i, locked); 435 436 return rc; 437 } 438 439 static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order, bool locked) 440 { 441 unsigned long npages = 1ul << order, paddr; 442 struct sev_device *sev; 443 struct page *page; 444 445 if (!psp_master || !psp_master->sev_data) 446 return NULL; 447 448 page = alloc_pages(gfp_mask, order); 449 if (!page) 450 return NULL; 451 452 /* If SEV-SNP is initialized then add the page in RMP table. */ 453 sev = psp_master->sev_data; 454 if (!sev->snp_initialized) 455 return page; 456 457 paddr = __pa((unsigned long)page_address(page)); 458 if (rmp_mark_pages_firmware(paddr, npages, locked)) 459 return NULL; 460 461 return page; 462 } 463 464 void *snp_alloc_firmware_page(gfp_t gfp_mask) 465 { 466 struct page *page; 467 468 page = __snp_alloc_firmware_pages(gfp_mask, 0, false); 469 470 return page ? page_address(page) : NULL; 471 } 472 EXPORT_SYMBOL_GPL(snp_alloc_firmware_page); 473 474 static void __snp_free_firmware_pages(struct page *page, int order, bool locked) 475 { 476 struct sev_device *sev = psp_master->sev_data; 477 unsigned long paddr, npages = 1ul << order; 478 479 if (!page) 480 return; 481 482 paddr = __pa((unsigned long)page_address(page)); 483 if (sev->snp_initialized && 484 snp_reclaim_pages(paddr, npages, locked)) 485 return; 486 487 __free_pages(page, order); 488 } 489 490 void snp_free_firmware_page(void *addr) 491 { 492 if (!addr) 493 return; 494 495 __snp_free_firmware_pages(virt_to_page(addr), 0, false); 496 } 497 EXPORT_SYMBOL_GPL(snp_free_firmware_page); 498 499 static void *sev_fw_alloc(unsigned long len) 500 { 501 struct page *page; 502 503 page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len), true); 504 if (!page) 505 return NULL; 506 507 return page_address(page); 508 } 509 510 /** 511 * struct cmd_buf_desc - descriptors for managing legacy SEV command address 512 * parameters corresponding to buffers that may be written to by firmware. 513 * 514 * @paddr_ptr: pointer to the address parameter in the command buffer which may 515 * need to be saved/restored depending on whether a bounce buffer 516 * is used. In the case of a bounce buffer, the command buffer 517 * needs to be updated with the address of the new bounce buffer 518 * snp_map_cmd_buf_desc() has allocated specifically for it. Must 519 * be NULL if this descriptor is only an end-of-list indicator. 520 * 521 * @paddr_orig: storage for the original address parameter, which can be used to 522 * restore the original value in @paddr_ptr in cases where it is 523 * replaced with the address of a bounce buffer. 524 * 525 * @len: length of buffer located at the address originally stored at @paddr_ptr 526 * 527 * @guest_owned: true if the address corresponds to guest-owned pages, in which 528 * case bounce buffers are not needed. 529 */ 530 struct cmd_buf_desc { 531 u64 *paddr_ptr; 532 u64 paddr_orig; 533 u32 len; 534 bool guest_owned; 535 }; 536 537 /* 538 * If a legacy SEV command parameter is a memory address, those pages in 539 * turn need to be transitioned to/from firmware-owned before/after 540 * executing the firmware command. 541 * 542 * Additionally, in cases where those pages are not guest-owned, a bounce 543 * buffer is needed in place of the original memory address parameter. 544 * 545 * A set of descriptors are used to keep track of this handling, and 546 * initialized here based on the specific commands being executed. 547 */ 548 static void snp_populate_cmd_buf_desc_list(int cmd, void *cmd_buf, 549 struct cmd_buf_desc *desc_list) 550 { 551 switch (cmd) { 552 case SEV_CMD_PDH_CERT_EXPORT: { 553 struct sev_data_pdh_cert_export *data = cmd_buf; 554 555 desc_list[0].paddr_ptr = &data->pdh_cert_address; 556 desc_list[0].len = data->pdh_cert_len; 557 desc_list[1].paddr_ptr = &data->cert_chain_address; 558 desc_list[1].len = data->cert_chain_len; 559 break; 560 } 561 case SEV_CMD_GET_ID: { 562 struct sev_data_get_id *data = cmd_buf; 563 564 desc_list[0].paddr_ptr = &data->address; 565 desc_list[0].len = data->len; 566 break; 567 } 568 case SEV_CMD_PEK_CSR: { 569 struct sev_data_pek_csr *data = cmd_buf; 570 571 desc_list[0].paddr_ptr = &data->address; 572 desc_list[0].len = data->len; 573 break; 574 } 575 case SEV_CMD_LAUNCH_UPDATE_DATA: { 576 struct sev_data_launch_update_data *data = cmd_buf; 577 578 desc_list[0].paddr_ptr = &data->address; 579 desc_list[0].len = data->len; 580 desc_list[0].guest_owned = true; 581 break; 582 } 583 case SEV_CMD_LAUNCH_UPDATE_VMSA: { 584 struct sev_data_launch_update_vmsa *data = cmd_buf; 585 586 desc_list[0].paddr_ptr = &data->address; 587 desc_list[0].len = data->len; 588 desc_list[0].guest_owned = true; 589 break; 590 } 591 case SEV_CMD_LAUNCH_MEASURE: { 592 struct sev_data_launch_measure *data = cmd_buf; 593 594 desc_list[0].paddr_ptr = &data->address; 595 desc_list[0].len = data->len; 596 break; 597 } 598 case SEV_CMD_LAUNCH_UPDATE_SECRET: { 599 struct sev_data_launch_secret *data = cmd_buf; 600 601 desc_list[0].paddr_ptr = &data->guest_address; 602 desc_list[0].len = data->guest_len; 603 desc_list[0].guest_owned = true; 604 break; 605 } 606 case SEV_CMD_DBG_DECRYPT: { 607 struct sev_data_dbg *data = cmd_buf; 608 609 desc_list[0].paddr_ptr = &data->dst_addr; 610 desc_list[0].len = data->len; 611 desc_list[0].guest_owned = true; 612 break; 613 } 614 case SEV_CMD_DBG_ENCRYPT: { 615 struct sev_data_dbg *data = cmd_buf; 616 617 desc_list[0].paddr_ptr = &data->dst_addr; 618 desc_list[0].len = data->len; 619 desc_list[0].guest_owned = true; 620 break; 621 } 622 case SEV_CMD_ATTESTATION_REPORT: { 623 struct sev_data_attestation_report *data = cmd_buf; 624 625 desc_list[0].paddr_ptr = &data->address; 626 desc_list[0].len = data->len; 627 break; 628 } 629 case SEV_CMD_SEND_START: { 630 struct sev_data_send_start *data = cmd_buf; 631 632 desc_list[0].paddr_ptr = &data->session_address; 633 desc_list[0].len = data->session_len; 634 break; 635 } 636 case SEV_CMD_SEND_UPDATE_DATA: { 637 struct sev_data_send_update_data *data = cmd_buf; 638 639 desc_list[0].paddr_ptr = &data->hdr_address; 640 desc_list[0].len = data->hdr_len; 641 desc_list[1].paddr_ptr = &data->trans_address; 642 desc_list[1].len = data->trans_len; 643 break; 644 } 645 case SEV_CMD_SEND_UPDATE_VMSA: { 646 struct sev_data_send_update_vmsa *data = cmd_buf; 647 648 desc_list[0].paddr_ptr = &data->hdr_address; 649 desc_list[0].len = data->hdr_len; 650 desc_list[1].paddr_ptr = &data->trans_address; 651 desc_list[1].len = data->trans_len; 652 break; 653 } 654 case SEV_CMD_RECEIVE_UPDATE_DATA: { 655 struct sev_data_receive_update_data *data = cmd_buf; 656 657 desc_list[0].paddr_ptr = &data->guest_address; 658 desc_list[0].len = data->guest_len; 659 desc_list[0].guest_owned = true; 660 break; 661 } 662 case SEV_CMD_RECEIVE_UPDATE_VMSA: { 663 struct sev_data_receive_update_vmsa *data = cmd_buf; 664 665 desc_list[0].paddr_ptr = &data->guest_address; 666 desc_list[0].len = data->guest_len; 667 desc_list[0].guest_owned = true; 668 break; 669 } 670 default: 671 break; 672 } 673 } 674 675 static int snp_map_cmd_buf_desc(struct cmd_buf_desc *desc) 676 { 677 unsigned int npages; 678 679 if (!desc->len) 680 return 0; 681 682 /* Allocate a bounce buffer if this isn't a guest owned page. */ 683 if (!desc->guest_owned) { 684 struct page *page; 685 686 page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(desc->len)); 687 if (!page) { 688 pr_warn("Failed to allocate bounce buffer for SEV legacy command.\n"); 689 return -ENOMEM; 690 } 691 692 desc->paddr_orig = *desc->paddr_ptr; 693 *desc->paddr_ptr = __psp_pa(page_to_virt(page)); 694 } 695 696 npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT; 697 698 /* Transition the buffer to firmware-owned. */ 699 if (rmp_mark_pages_firmware(*desc->paddr_ptr, npages, true)) { 700 pr_warn("Error moving pages to firmware-owned state for SEV legacy command.\n"); 701 return -EFAULT; 702 } 703 704 return 0; 705 } 706 707 static int snp_unmap_cmd_buf_desc(struct cmd_buf_desc *desc) 708 { 709 unsigned int npages; 710 711 if (!desc->len) 712 return 0; 713 714 npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT; 715 716 /* Transition the buffers back to hypervisor-owned. */ 717 if (snp_reclaim_pages(*desc->paddr_ptr, npages, true)) { 718 pr_warn("Failed to reclaim firmware-owned pages while issuing SEV legacy command.\n"); 719 return -EFAULT; 720 } 721 722 /* Copy data from bounce buffer and then free it. */ 723 if (!desc->guest_owned) { 724 void *bounce_buf = __va(__sme_clr(*desc->paddr_ptr)); 725 void *dst_buf = __va(__sme_clr(desc->paddr_orig)); 726 727 memcpy(dst_buf, bounce_buf, desc->len); 728 __free_pages(virt_to_page(bounce_buf), get_order(desc->len)); 729 730 /* Restore the original address in the command buffer. */ 731 *desc->paddr_ptr = desc->paddr_orig; 732 } 733 734 return 0; 735 } 736 737 static int snp_map_cmd_buf_desc_list(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list) 738 { 739 int i; 740 741 snp_populate_cmd_buf_desc_list(cmd, cmd_buf, desc_list); 742 743 for (i = 0; i < CMD_BUF_DESC_MAX; i++) { 744 struct cmd_buf_desc *desc = &desc_list[i]; 745 746 if (!desc->paddr_ptr) 747 break; 748 749 if (snp_map_cmd_buf_desc(desc)) 750 goto err_unmap; 751 } 752 753 return 0; 754 755 err_unmap: 756 for (i--; i >= 0; i--) 757 snp_unmap_cmd_buf_desc(&desc_list[i]); 758 759 return -EFAULT; 760 } 761 762 static int snp_unmap_cmd_buf_desc_list(struct cmd_buf_desc *desc_list) 763 { 764 int i, ret = 0; 765 766 for (i = 0; i < CMD_BUF_DESC_MAX; i++) { 767 struct cmd_buf_desc *desc = &desc_list[i]; 768 769 if (!desc->paddr_ptr) 770 break; 771 772 if (snp_unmap_cmd_buf_desc(&desc_list[i])) 773 ret = -EFAULT; 774 } 775 776 return ret; 777 } 778 779 static bool sev_cmd_buf_writable(int cmd) 780 { 781 switch (cmd) { 782 case SEV_CMD_PLATFORM_STATUS: 783 case SEV_CMD_GUEST_STATUS: 784 case SEV_CMD_LAUNCH_START: 785 case SEV_CMD_RECEIVE_START: 786 case SEV_CMD_LAUNCH_MEASURE: 787 case SEV_CMD_SEND_START: 788 case SEV_CMD_SEND_UPDATE_DATA: 789 case SEV_CMD_SEND_UPDATE_VMSA: 790 case SEV_CMD_PEK_CSR: 791 case SEV_CMD_PDH_CERT_EXPORT: 792 case SEV_CMD_GET_ID: 793 case SEV_CMD_ATTESTATION_REPORT: 794 return true; 795 default: 796 return false; 797 } 798 } 799 800 /* After SNP is INIT'ed, the behavior of legacy SEV commands is changed. */ 801 static bool snp_legacy_handling_needed(int cmd) 802 { 803 struct sev_device *sev = psp_master->sev_data; 804 805 return cmd < SEV_CMD_SNP_INIT && sev->snp_initialized; 806 } 807 808 static int snp_prep_cmd_buf(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list) 809 { 810 if (!snp_legacy_handling_needed(cmd)) 811 return 0; 812 813 if (snp_map_cmd_buf_desc_list(cmd, cmd_buf, desc_list)) 814 return -EFAULT; 815 816 /* 817 * Before command execution, the command buffer needs to be put into 818 * the firmware-owned state. 819 */ 820 if (sev_cmd_buf_writable(cmd)) { 821 if (rmp_mark_pages_firmware(__pa(cmd_buf), 1, true)) 822 return -EFAULT; 823 } 824 825 return 0; 826 } 827 828 static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf) 829 { 830 if (!snp_legacy_handling_needed(cmd)) 831 return 0; 832 833 /* 834 * After command completion, the command buffer needs to be put back 835 * into the hypervisor-owned state. 836 */ 837 if (sev_cmd_buf_writable(cmd)) 838 if (snp_reclaim_pages(__pa(cmd_buf), 1, true)) 839 return -EFAULT; 840 841 return 0; 842 } 843 844 static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) 845 { 846 struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0}; 847 struct psp_device *psp = psp_master; 848 struct sev_device *sev; 849 unsigned int cmdbuff_hi, cmdbuff_lo; 850 unsigned int phys_lsb, phys_msb; 851 unsigned int reg, ret = 0; 852 void *cmd_buf; 853 int buf_len; 854 855 if (!psp || !psp->sev_data) 856 return -ENODEV; 857 858 if (psp_dead) 859 return -EBUSY; 860 861 sev = psp->sev_data; 862 863 buf_len = sev_cmd_buffer_len(cmd); 864 if (WARN_ON_ONCE(!data != !buf_len)) 865 return -EINVAL; 866 867 /* 868 * Copy the incoming data to driver's scratch buffer as __pa() will not 869 * work for some memory, e.g. vmalloc'd addresses, and @data may not be 870 * physically contiguous. 871 */ 872 if (data) { 873 /* 874 * Commands are generally issued one at a time and require the 875 * sev_cmd_mutex, but there could be recursive firmware requests 876 * due to SEV_CMD_SNP_PAGE_RECLAIM needing to be issued while 877 * preparing buffers for another command. This is the only known 878 * case of nesting in the current code, so exactly one 879 * additional command buffer is available for that purpose. 880 */ 881 if (!sev->cmd_buf_active) { 882 cmd_buf = sev->cmd_buf; 883 sev->cmd_buf_active = true; 884 } else if (!sev->cmd_buf_backup_active) { 885 cmd_buf = sev->cmd_buf_backup; 886 sev->cmd_buf_backup_active = true; 887 } else { 888 dev_err(sev->dev, 889 "SEV: too many firmware commands in progress, no command buffers available.\n"); 890 return -EBUSY; 891 } 892 893 memcpy(cmd_buf, data, buf_len); 894 895 /* 896 * The behavior of the SEV-legacy commands is altered when the 897 * SNP firmware is in the INIT state. 898 */ 899 ret = snp_prep_cmd_buf(cmd, cmd_buf, desc_list); 900 if (ret) { 901 dev_err(sev->dev, 902 "SEV: failed to prepare buffer for legacy command 0x%x. Error: %d\n", 903 cmd, ret); 904 return ret; 905 } 906 } else { 907 cmd_buf = sev->cmd_buf; 908 } 909 910 /* Get the physical address of the command buffer */ 911 phys_lsb = data ? lower_32_bits(__psp_pa(cmd_buf)) : 0; 912 phys_msb = data ? upper_32_bits(__psp_pa(cmd_buf)) : 0; 913 914 dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", 915 cmd, phys_msb, phys_lsb, psp_timeout); 916 917 print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, 918 buf_len, false); 919 920 iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); 921 iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); 922 923 sev->int_rcvd = 0; 924 925 reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd); 926 927 /* 928 * If invoked during panic handling, local interrupts are disabled so 929 * the PSP command completion interrupt can't be used. 930 * sev_wait_cmd_ioc() already checks for interrupts disabled and 931 * polls for PSP command completion. Ensure we do not request an 932 * interrupt from the PSP if irqs disabled. 933 */ 934 if (!irqs_disabled()) 935 reg |= SEV_CMDRESP_IOC; 936 937 iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); 938 939 /* wait for command completion */ 940 ret = sev_wait_cmd_ioc(sev, ®, psp_timeout); 941 if (ret) { 942 if (psp_ret) 943 *psp_ret = 0; 944 945 dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); 946 psp_dead = true; 947 948 return ret; 949 } 950 951 psp_timeout = psp_cmd_timeout; 952 953 if (psp_ret) 954 *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); 955 956 if (FIELD_GET(PSP_CMDRESP_STS, reg)) { 957 dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", 958 cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); 959 960 /* 961 * PSP firmware may report additional error information in the 962 * command buffer registers on error. Print contents of command 963 * buffer registers if they changed. 964 */ 965 cmdbuff_hi = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); 966 cmdbuff_lo = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); 967 if (cmdbuff_hi != phys_msb || cmdbuff_lo != phys_lsb) { 968 dev_dbg(sev->dev, "Additional error information reported in cmdbuff:"); 969 dev_dbg(sev->dev, " cmdbuff hi: %#010x\n", cmdbuff_hi); 970 dev_dbg(sev->dev, " cmdbuff lo: %#010x\n", cmdbuff_lo); 971 } 972 ret = -EIO; 973 } else { 974 ret = sev_write_init_ex_file_if_required(cmd); 975 } 976 977 /* 978 * Copy potential output from the PSP back to data. Do this even on 979 * failure in case the caller wants to glean something from the error. 980 */ 981 if (data) { 982 int ret_reclaim; 983 /* 984 * Restore the page state after the command completes. 985 */ 986 ret_reclaim = snp_reclaim_cmd_buf(cmd, cmd_buf); 987 if (ret_reclaim) { 988 dev_err(sev->dev, 989 "SEV: failed to reclaim buffer for legacy command %#x. Error: %d\n", 990 cmd, ret_reclaim); 991 return ret_reclaim; 992 } 993 994 memcpy(data, cmd_buf, buf_len); 995 996 if (sev->cmd_buf_backup_active) 997 sev->cmd_buf_backup_active = false; 998 else 999 sev->cmd_buf_active = false; 1000 1001 if (snp_unmap_cmd_buf_desc_list(desc_list)) 1002 return -EFAULT; 1003 } 1004 1005 print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, 1006 buf_len, false); 1007 1008 return ret; 1009 } 1010 1011 int sev_do_cmd(int cmd, void *data, int *psp_ret) 1012 { 1013 int rc; 1014 1015 mutex_lock(&sev_cmd_mutex); 1016 rc = __sev_do_cmd_locked(cmd, data, psp_ret); 1017 mutex_unlock(&sev_cmd_mutex); 1018 1019 return rc; 1020 } 1021 EXPORT_SYMBOL_GPL(sev_do_cmd); 1022 1023 static int __sev_init_locked(int *error) 1024 { 1025 struct sev_data_init data; 1026 1027 memset(&data, 0, sizeof(data)); 1028 if (sev_es_tmr) { 1029 /* 1030 * Do not include the encryption mask on the physical 1031 * address of the TMR (firmware should clear it anyway). 1032 */ 1033 data.tmr_address = __pa(sev_es_tmr); 1034 1035 data.flags |= SEV_INIT_FLAGS_SEV_ES; 1036 data.tmr_len = sev_es_tmr_size; 1037 } 1038 1039 return __sev_do_cmd_locked(SEV_CMD_INIT, &data, error); 1040 } 1041 1042 static int __sev_init_ex_locked(int *error) 1043 { 1044 struct sev_data_init_ex data; 1045 1046 memset(&data, 0, sizeof(data)); 1047 data.length = sizeof(data); 1048 data.nv_address = __psp_pa(sev_init_ex_buffer); 1049 data.nv_len = NV_LENGTH; 1050 1051 if (sev_es_tmr) { 1052 /* 1053 * Do not include the encryption mask on the physical 1054 * address of the TMR (firmware should clear it anyway). 1055 */ 1056 data.tmr_address = __pa(sev_es_tmr); 1057 1058 data.flags |= SEV_INIT_FLAGS_SEV_ES; 1059 data.tmr_len = sev_es_tmr_size; 1060 } 1061 1062 return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error); 1063 } 1064 1065 static inline int __sev_do_init_locked(int *psp_ret) 1066 { 1067 if (sev_init_ex_buffer) 1068 return __sev_init_ex_locked(psp_ret); 1069 else 1070 return __sev_init_locked(psp_ret); 1071 } 1072 1073 static void snp_set_hsave_pa(void *arg) 1074 { 1075 wrmsrq(MSR_VM_HSAVE_PA, 0); 1076 } 1077 1078 bool sev_is_snp_ciphertext_hiding_supported(void) 1079 { 1080 struct psp_device *psp = psp_master; 1081 struct sev_device *sev; 1082 1083 if (!psp || !psp->sev_data) 1084 return false; 1085 1086 sev = psp->sev_data; 1087 1088 /* 1089 * Feature information indicates if CipherTextHiding feature is 1090 * supported by the SEV firmware and additionally platform status 1091 * indicates if CipherTextHiding feature is enabled in the 1092 * Platform BIOS. 1093 */ 1094 return ((sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED) && 1095 sev->snp_plat_status.ciphertext_hiding_cap); 1096 } 1097 EXPORT_SYMBOL_GPL(sev_is_snp_ciphertext_hiding_supported); 1098 1099 static int snp_get_platform_data(struct sev_device *sev, int *error) 1100 { 1101 struct sev_data_snp_feature_info snp_feat_info; 1102 struct snp_feature_info *feat_info; 1103 struct sev_data_snp_addr buf; 1104 struct page *page; 1105 int rc; 1106 1107 /* 1108 * This function is expected to be called before SNP is 1109 * initialized. 1110 */ 1111 if (sev->snp_initialized) 1112 return -EINVAL; 1113 1114 buf.address = __psp_pa(&sev->snp_plat_status); 1115 rc = sev_do_cmd(SEV_CMD_SNP_PLATFORM_STATUS, &buf, error); 1116 if (rc) { 1117 dev_err(sev->dev, "SNP PLATFORM_STATUS command failed, ret = %d, error = %#x\n", 1118 rc, *error); 1119 return rc; 1120 } 1121 1122 sev->api_major = sev->snp_plat_status.api_major; 1123 sev->api_minor = sev->snp_plat_status.api_minor; 1124 sev->build = sev->snp_plat_status.build_id; 1125 1126 /* 1127 * Do feature discovery of the currently loaded firmware, 1128 * and cache feature information from CPUID 0x8000_0024, 1129 * sub-function 0. 1130 */ 1131 if (!sev->snp_plat_status.feature_info) 1132 return 0; 1133 1134 /* 1135 * Use dynamically allocated structure for the SNP_FEATURE_INFO 1136 * command to ensure structure is 8-byte aligned, and does not 1137 * cross a page boundary. 1138 */ 1139 page = alloc_page(GFP_KERNEL); 1140 if (!page) 1141 return -ENOMEM; 1142 1143 feat_info = page_address(page); 1144 snp_feat_info.length = sizeof(snp_feat_info); 1145 snp_feat_info.ecx_in = 0; 1146 snp_feat_info.feature_info_paddr = __psp_pa(feat_info); 1147 1148 rc = sev_do_cmd(SEV_CMD_SNP_FEATURE_INFO, &snp_feat_info, error); 1149 if (!rc) 1150 sev->snp_feat_info_0 = *feat_info; 1151 else 1152 dev_err(sev->dev, "SNP FEATURE_INFO command failed, ret = %d, error = %#x\n", 1153 rc, *error); 1154 1155 __free_page(page); 1156 1157 return rc; 1158 } 1159 1160 static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg) 1161 { 1162 struct sev_data_range_list *range_list = arg; 1163 struct sev_data_range *range = &range_list->ranges[range_list->num_elements]; 1164 size_t size; 1165 1166 /* 1167 * Ensure the list of HV_FIXED pages that will be passed to firmware 1168 * do not exceed the page-sized argument buffer. 1169 */ 1170 if ((range_list->num_elements * sizeof(struct sev_data_range) + 1171 sizeof(struct sev_data_range_list)) > PAGE_SIZE) 1172 return -E2BIG; 1173 1174 switch (rs->desc) { 1175 case E820_TYPE_RESERVED: 1176 case E820_TYPE_PMEM: 1177 case E820_TYPE_ACPI: 1178 range->base = rs->start & PAGE_MASK; 1179 size = PAGE_ALIGN((rs->end + 1) - rs->start); 1180 range->page_count = size >> PAGE_SHIFT; 1181 range_list->num_elements++; 1182 break; 1183 default: 1184 break; 1185 } 1186 1187 return 0; 1188 } 1189 1190 static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid) 1191 { 1192 struct psp_device *psp = psp_master; 1193 struct sev_data_snp_init_ex data; 1194 struct sev_device *sev; 1195 void *arg = &data; 1196 int cmd, rc = 0; 1197 1198 if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP)) 1199 return -ENODEV; 1200 1201 sev = psp->sev_data; 1202 1203 if (sev->snp_initialized) 1204 return 0; 1205 1206 if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) { 1207 dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n", 1208 SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR); 1209 return -EOPNOTSUPP; 1210 } 1211 1212 /* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */ 1213 on_each_cpu(snp_set_hsave_pa, NULL, 1); 1214 1215 /* 1216 * Starting in SNP firmware v1.52, the SNP_INIT_EX command takes a list 1217 * of system physical address ranges to convert into HV-fixed page 1218 * states during the RMP initialization. For instance, the memory that 1219 * UEFI reserves should be included in the that list. This allows system 1220 * components that occasionally write to memory (e.g. logging to UEFI 1221 * reserved regions) to not fail due to RMP initialization and SNP 1222 * enablement. 1223 * 1224 */ 1225 if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) { 1226 /* 1227 * Firmware checks that the pages containing the ranges enumerated 1228 * in the RANGES structure are either in the default page state or in the 1229 * firmware page state. 1230 */ 1231 snp_range_list = kzalloc(PAGE_SIZE, GFP_KERNEL); 1232 if (!snp_range_list) { 1233 dev_err(sev->dev, 1234 "SEV: SNP_INIT_EX range list memory allocation failed\n"); 1235 return -ENOMEM; 1236 } 1237 1238 /* 1239 * Retrieve all reserved memory regions from the e820 memory map 1240 * to be setup as HV-fixed pages. 1241 */ 1242 rc = walk_iomem_res_desc(IORES_DESC_NONE, IORESOURCE_MEM, 0, ~0, 1243 snp_range_list, snp_filter_reserved_mem_regions); 1244 if (rc) { 1245 dev_err(sev->dev, 1246 "SEV: SNP_INIT_EX walk_iomem_res_desc failed rc = %d\n", rc); 1247 return rc; 1248 } 1249 1250 memset(&data, 0, sizeof(data)); 1251 1252 if (max_snp_asid) { 1253 data.ciphertext_hiding_en = 1; 1254 data.max_snp_asid = max_snp_asid; 1255 } 1256 1257 data.init_rmp = 1; 1258 data.list_paddr_en = 1; 1259 data.list_paddr = __psp_pa(snp_range_list); 1260 cmd = SEV_CMD_SNP_INIT_EX; 1261 } else { 1262 cmd = SEV_CMD_SNP_INIT; 1263 arg = NULL; 1264 } 1265 1266 /* 1267 * The following sequence must be issued before launching the first SNP 1268 * guest to ensure all dirty cache lines are flushed, including from 1269 * updates to the RMP table itself via the RMPUPDATE instruction: 1270 * 1271 * - WBINVD on all running CPUs 1272 * - SEV_CMD_SNP_INIT[_EX] firmware command 1273 * - WBINVD on all running CPUs 1274 * - SEV_CMD_SNP_DF_FLUSH firmware command 1275 */ 1276 wbinvd_on_all_cpus(); 1277 1278 rc = __sev_do_cmd_locked(cmd, arg, error); 1279 if (rc) { 1280 dev_err(sev->dev, "SEV-SNP: %s failed rc %d, error %#x\n", 1281 cmd == SEV_CMD_SNP_INIT_EX ? "SNP_INIT_EX" : "SNP_INIT", 1282 rc, *error); 1283 return rc; 1284 } 1285 1286 /* Prepare for first SNP guest launch after INIT. */ 1287 wbinvd_on_all_cpus(); 1288 rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error); 1289 if (rc) { 1290 dev_err(sev->dev, "SEV-SNP: SNP_DF_FLUSH failed rc %d, error %#x\n", 1291 rc, *error); 1292 return rc; 1293 } 1294 1295 sev->snp_initialized = true; 1296 dev_dbg(sev->dev, "SEV-SNP firmware initialized\n"); 1297 1298 dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major, 1299 sev->api_minor, sev->build); 1300 1301 atomic_notifier_chain_register(&panic_notifier_list, 1302 &snp_panic_notifier); 1303 1304 sev_es_tmr_size = SNP_TMR_SIZE; 1305 1306 return 0; 1307 } 1308 1309 static void __sev_platform_init_handle_tmr(struct sev_device *sev) 1310 { 1311 if (sev_es_tmr) 1312 return; 1313 1314 /* Obtain the TMR memory area for SEV-ES use */ 1315 sev_es_tmr = sev_fw_alloc(sev_es_tmr_size); 1316 if (sev_es_tmr) { 1317 /* Must flush the cache before giving it to the firmware */ 1318 if (!sev->snp_initialized) 1319 clflush_cache_range(sev_es_tmr, sev_es_tmr_size); 1320 } else { 1321 dev_warn(sev->dev, "SEV: TMR allocation failed, SEV-ES support unavailable\n"); 1322 } 1323 } 1324 1325 /* 1326 * If an init_ex_path is provided allocate a buffer for the file and 1327 * read in the contents. Additionally, if SNP is initialized, convert 1328 * the buffer pages to firmware pages. 1329 */ 1330 static int __sev_platform_init_handle_init_ex_path(struct sev_device *sev) 1331 { 1332 struct page *page; 1333 int rc; 1334 1335 if (!init_ex_path) 1336 return 0; 1337 1338 if (sev_init_ex_buffer) 1339 return 0; 1340 1341 page = alloc_pages(GFP_KERNEL, get_order(NV_LENGTH)); 1342 if (!page) { 1343 dev_err(sev->dev, "SEV: INIT_EX NV memory allocation failed\n"); 1344 return -ENOMEM; 1345 } 1346 1347 sev_init_ex_buffer = page_address(page); 1348 1349 rc = sev_read_init_ex_file(); 1350 if (rc) 1351 return rc; 1352 1353 /* If SEV-SNP is initialized, transition to firmware page. */ 1354 if (sev->snp_initialized) { 1355 unsigned long npages; 1356 1357 npages = 1UL << get_order(NV_LENGTH); 1358 if (rmp_mark_pages_firmware(__pa(sev_init_ex_buffer), npages, false)) { 1359 dev_err(sev->dev, "SEV: INIT_EX NV memory page state change failed.\n"); 1360 return -ENOMEM; 1361 } 1362 } 1363 1364 return 0; 1365 } 1366 1367 static int __sev_platform_init_locked(int *error) 1368 { 1369 int rc, psp_ret, dfflush_error; 1370 struct sev_device *sev; 1371 1372 psp_ret = dfflush_error = SEV_RET_NO_FW_CALL; 1373 1374 if (!psp_master || !psp_master->sev_data) 1375 return -ENODEV; 1376 1377 sev = psp_master->sev_data; 1378 1379 if (sev->sev_plat_status.state == SEV_STATE_INIT) 1380 return 0; 1381 1382 __sev_platform_init_handle_tmr(sev); 1383 1384 rc = __sev_platform_init_handle_init_ex_path(sev); 1385 if (rc) 1386 return rc; 1387 1388 rc = __sev_do_init_locked(&psp_ret); 1389 if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) { 1390 /* 1391 * Initialization command returned an integrity check failure 1392 * status code, meaning that firmware load and validation of SEV 1393 * related persistent data has failed. Retrying the 1394 * initialization function should succeed by replacing the state 1395 * with a reset state. 1396 */ 1397 dev_err(sev->dev, 1398 "SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state."); 1399 rc = __sev_do_init_locked(&psp_ret); 1400 } 1401 1402 if (error) 1403 *error = psp_ret; 1404 1405 if (rc) { 1406 dev_err(sev->dev, "SEV: %s failed %#x, rc %d\n", 1407 sev_init_ex_buffer ? "INIT_EX" : "INIT", psp_ret, rc); 1408 return rc; 1409 } 1410 1411 sev->sev_plat_status.state = SEV_STATE_INIT; 1412 1413 /* Prepare for first SEV guest launch after INIT */ 1414 wbinvd_on_all_cpus(); 1415 rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, &dfflush_error); 1416 if (rc) { 1417 dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n", 1418 dfflush_error, rc); 1419 return rc; 1420 } 1421 1422 dev_dbg(sev->dev, "SEV firmware initialized\n"); 1423 1424 dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major, 1425 sev->api_minor, sev->build); 1426 1427 return 0; 1428 } 1429 1430 static int _sev_platform_init_locked(struct sev_platform_init_args *args) 1431 { 1432 struct sev_device *sev; 1433 int rc; 1434 1435 if (!psp_master || !psp_master->sev_data) 1436 return -ENODEV; 1437 1438 sev = psp_master->sev_data; 1439 1440 if (sev->sev_plat_status.state == SEV_STATE_INIT) 1441 return 0; 1442 1443 rc = __sev_snp_init_locked(&args->error, args->max_snp_asid); 1444 if (rc && rc != -ENODEV) 1445 return rc; 1446 1447 /* Defer legacy SEV/SEV-ES support if allowed by caller/module. */ 1448 if (args->probe && !psp_init_on_probe) 1449 return 0; 1450 1451 return __sev_platform_init_locked(&args->error); 1452 } 1453 1454 int sev_platform_init(struct sev_platform_init_args *args) 1455 { 1456 int rc; 1457 1458 mutex_lock(&sev_cmd_mutex); 1459 rc = _sev_platform_init_locked(args); 1460 mutex_unlock(&sev_cmd_mutex); 1461 1462 return rc; 1463 } 1464 EXPORT_SYMBOL_GPL(sev_platform_init); 1465 1466 static int __sev_platform_shutdown_locked(int *error) 1467 { 1468 struct psp_device *psp = psp_master; 1469 struct sev_device *sev; 1470 int ret; 1471 1472 if (!psp || !psp->sev_data) 1473 return 0; 1474 1475 sev = psp->sev_data; 1476 1477 if (sev->sev_plat_status.state == SEV_STATE_UNINIT) 1478 return 0; 1479 1480 ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); 1481 if (ret) { 1482 dev_err(sev->dev, "SEV: failed to SHUTDOWN error %#x, rc %d\n", 1483 *error, ret); 1484 return ret; 1485 } 1486 1487 sev->sev_plat_status.state = SEV_STATE_UNINIT; 1488 dev_dbg(sev->dev, "SEV firmware shutdown\n"); 1489 1490 return ret; 1491 } 1492 1493 static int sev_get_platform_state(int *state, int *error) 1494 { 1495 struct sev_user_data_status data; 1496 int rc; 1497 1498 rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, error); 1499 if (rc) 1500 return rc; 1501 1502 *state = data.state; 1503 return rc; 1504 } 1505 1506 static int sev_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required) 1507 { 1508 struct sev_platform_init_args init_args = {0}; 1509 int rc; 1510 1511 rc = _sev_platform_init_locked(&init_args); 1512 if (rc) { 1513 argp->error = SEV_RET_INVALID_PLATFORM_STATE; 1514 return rc; 1515 } 1516 1517 *shutdown_required = true; 1518 1519 return 0; 1520 } 1521 1522 static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required) 1523 { 1524 int error, rc; 1525 1526 rc = __sev_snp_init_locked(&error, 0); 1527 if (rc) { 1528 argp->error = SEV_RET_INVALID_PLATFORM_STATE; 1529 return rc; 1530 } 1531 1532 *shutdown_required = true; 1533 1534 return 0; 1535 } 1536 1537 static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable) 1538 { 1539 int state, rc; 1540 1541 if (!writable) 1542 return -EPERM; 1543 1544 /* 1545 * The SEV spec requires that FACTORY_RESET must be issued in 1546 * UNINIT state. Before we go further lets check if any guest is 1547 * active. 1548 * 1549 * If FW is in WORKING state then deny the request otherwise issue 1550 * SHUTDOWN command do INIT -> UNINIT before issuing the FACTORY_RESET. 1551 * 1552 */ 1553 rc = sev_get_platform_state(&state, &argp->error); 1554 if (rc) 1555 return rc; 1556 1557 if (state == SEV_STATE_WORKING) 1558 return -EBUSY; 1559 1560 if (state == SEV_STATE_INIT) { 1561 rc = __sev_platform_shutdown_locked(&argp->error); 1562 if (rc) 1563 return rc; 1564 } 1565 1566 return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error); 1567 } 1568 1569 static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) 1570 { 1571 struct sev_user_data_status data; 1572 int ret; 1573 1574 memset(&data, 0, sizeof(data)); 1575 1576 ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error); 1577 if (ret) 1578 return ret; 1579 1580 if (copy_to_user((void __user *)argp->data, &data, sizeof(data))) 1581 ret = -EFAULT; 1582 1583 return ret; 1584 } 1585 1586 static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable) 1587 { 1588 struct sev_device *sev = psp_master->sev_data; 1589 bool shutdown_required = false; 1590 int rc; 1591 1592 if (!writable) 1593 return -EPERM; 1594 1595 if (sev->sev_plat_status.state == SEV_STATE_UNINIT) { 1596 rc = sev_move_to_init_state(argp, &shutdown_required); 1597 if (rc) 1598 return rc; 1599 } 1600 1601 rc = __sev_do_cmd_locked(cmd, NULL, &argp->error); 1602 1603 if (shutdown_required) 1604 __sev_firmware_shutdown(sev, false); 1605 1606 return rc; 1607 } 1608 1609 static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) 1610 { 1611 struct sev_device *sev = psp_master->sev_data; 1612 struct sev_user_data_pek_csr input; 1613 bool shutdown_required = false; 1614 struct sev_data_pek_csr data; 1615 void __user *input_address; 1616 void *blob = NULL; 1617 int ret; 1618 1619 if (!writable) 1620 return -EPERM; 1621 1622 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 1623 return -EFAULT; 1624 1625 memset(&data, 0, sizeof(data)); 1626 1627 /* userspace wants to query CSR length */ 1628 if (!input.address || !input.length) 1629 goto cmd; 1630 1631 /* allocate a physically contiguous buffer to store the CSR blob */ 1632 input_address = (void __user *)input.address; 1633 if (input.length > SEV_FW_BLOB_MAX_SIZE) 1634 return -EFAULT; 1635 1636 blob = kzalloc(input.length, GFP_KERNEL); 1637 if (!blob) 1638 return -ENOMEM; 1639 1640 data.address = __psp_pa(blob); 1641 data.len = input.length; 1642 1643 cmd: 1644 if (sev->sev_plat_status.state == SEV_STATE_UNINIT) { 1645 ret = sev_move_to_init_state(argp, &shutdown_required); 1646 if (ret) 1647 goto e_free_blob; 1648 } 1649 1650 ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, &data, &argp->error); 1651 1652 /* If we query the CSR length, FW responded with expected data. */ 1653 input.length = data.len; 1654 1655 if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { 1656 ret = -EFAULT; 1657 goto e_free_blob; 1658 } 1659 1660 if (blob) { 1661 if (copy_to_user(input_address, blob, input.length)) 1662 ret = -EFAULT; 1663 } 1664 1665 e_free_blob: 1666 if (shutdown_required) 1667 __sev_firmware_shutdown(sev, false); 1668 1669 kfree(blob); 1670 return ret; 1671 } 1672 1673 void *psp_copy_user_blob(u64 uaddr, u32 len) 1674 { 1675 if (!uaddr || !len) 1676 return ERR_PTR(-EINVAL); 1677 1678 /* verify that blob length does not exceed our limit */ 1679 if (len > SEV_FW_BLOB_MAX_SIZE) 1680 return ERR_PTR(-EINVAL); 1681 1682 return memdup_user((void __user *)uaddr, len); 1683 } 1684 EXPORT_SYMBOL_GPL(psp_copy_user_blob); 1685 1686 static int sev_get_api_version(void) 1687 { 1688 struct sev_device *sev = psp_master->sev_data; 1689 struct sev_user_data_status status; 1690 int error = 0, ret; 1691 1692 /* 1693 * Cache SNP platform status and SNP feature information 1694 * if SNP is available. 1695 */ 1696 if (cc_platform_has(CC_ATTR_HOST_SEV_SNP)) { 1697 ret = snp_get_platform_data(sev, &error); 1698 if (ret) 1699 return 1; 1700 } 1701 1702 ret = sev_platform_status(&status, &error); 1703 if (ret) { 1704 dev_err(sev->dev, 1705 "SEV: failed to get status. Error: %#x\n", error); 1706 return 1; 1707 } 1708 1709 /* Cache SEV platform status */ 1710 sev->sev_plat_status = status; 1711 1712 sev->api_major = status.api_major; 1713 sev->api_minor = status.api_minor; 1714 sev->build = status.build; 1715 1716 return 0; 1717 } 1718 1719 static int sev_get_firmware(struct device *dev, 1720 const struct firmware **firmware) 1721 { 1722 char fw_name_specific[SEV_FW_NAME_SIZE]; 1723 char fw_name_subset[SEV_FW_NAME_SIZE]; 1724 1725 snprintf(fw_name_specific, sizeof(fw_name_specific), 1726 "amd/amd_sev_fam%.2xh_model%.2xh.sbin", 1727 boot_cpu_data.x86, boot_cpu_data.x86_model); 1728 1729 snprintf(fw_name_subset, sizeof(fw_name_subset), 1730 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin", 1731 boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4); 1732 1733 /* Check for SEV FW for a particular model. 1734 * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h 1735 * 1736 * or 1737 * 1738 * Check for SEV FW common to a subset of models. 1739 * Ex. amd_sev_fam17h_model0xh.sbin for 1740 * Family 17h Model 00h -- Family 17h Model 0Fh 1741 * 1742 * or 1743 * 1744 * Fall-back to using generic name: sev.fw 1745 */ 1746 if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) || 1747 (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) || 1748 (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0)) 1749 return 0; 1750 1751 return -ENOENT; 1752 } 1753 1754 /* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */ 1755 static int sev_update_firmware(struct device *dev) 1756 { 1757 struct sev_data_download_firmware *data; 1758 const struct firmware *firmware; 1759 int ret, error, order; 1760 struct page *p; 1761 u64 data_size; 1762 1763 if (!sev_version_greater_or_equal(0, 15)) { 1764 dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n"); 1765 return -1; 1766 } 1767 1768 if (sev_get_firmware(dev, &firmware) == -ENOENT) { 1769 dev_dbg(dev, "No SEV firmware file present\n"); 1770 return -1; 1771 } 1772 1773 /* 1774 * SEV FW expects the physical address given to it to be 32 1775 * byte aligned. Memory allocated has structure placed at the 1776 * beginning followed by the firmware being passed to the SEV 1777 * FW. Allocate enough memory for data structure + alignment 1778 * padding + SEV FW. 1779 */ 1780 data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); 1781 1782 order = get_order(firmware->size + data_size); 1783 p = alloc_pages(GFP_KERNEL, order); 1784 if (!p) { 1785 ret = -1; 1786 goto fw_err; 1787 } 1788 1789 /* 1790 * Copy firmware data to a kernel allocated contiguous 1791 * memory region. 1792 */ 1793 data = page_address(p); 1794 memcpy(page_address(p) + data_size, firmware->data, firmware->size); 1795 1796 data->address = __psp_pa(page_address(p) + data_size); 1797 data->len = firmware->size; 1798 1799 ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); 1800 1801 /* 1802 * A quirk for fixing the committed TCB version, when upgrading from 1803 * earlier firmware version than 1.50. 1804 */ 1805 if (!ret && !sev_version_greater_or_equal(1, 50)) 1806 ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); 1807 1808 if (ret) 1809 dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); 1810 1811 __free_pages(p, order); 1812 1813 fw_err: 1814 release_firmware(firmware); 1815 1816 return ret; 1817 } 1818 1819 static int __sev_snp_shutdown_locked(int *error, bool panic) 1820 { 1821 struct psp_device *psp = psp_master; 1822 struct sev_device *sev; 1823 struct sev_data_snp_shutdown_ex data; 1824 int ret; 1825 1826 if (!psp || !psp->sev_data) 1827 return 0; 1828 1829 sev = psp->sev_data; 1830 1831 if (!sev->snp_initialized) 1832 return 0; 1833 1834 memset(&data, 0, sizeof(data)); 1835 data.len = sizeof(data); 1836 data.iommu_snp_shutdown = 1; 1837 1838 /* 1839 * If invoked during panic handling, local interrupts are disabled 1840 * and all CPUs are stopped, so wbinvd_on_all_cpus() can't be called. 1841 * In that case, a wbinvd() is done on remote CPUs via the NMI 1842 * callback, so only a local wbinvd() is needed here. 1843 */ 1844 if (!panic) 1845 wbinvd_on_all_cpus(); 1846 else 1847 wbinvd(); 1848 1849 ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error); 1850 /* SHUTDOWN may require DF_FLUSH */ 1851 if (*error == SEV_RET_DFFLUSH_REQUIRED) { 1852 int dfflush_error = SEV_RET_NO_FW_CALL; 1853 1854 ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, &dfflush_error); 1855 if (ret) { 1856 dev_err(sev->dev, "SEV-SNP DF_FLUSH failed, ret = %d, error = %#x\n", 1857 ret, dfflush_error); 1858 return ret; 1859 } 1860 /* reissue the shutdown command */ 1861 ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, 1862 error); 1863 } 1864 if (ret) { 1865 dev_err(sev->dev, "SEV-SNP firmware shutdown failed, rc %d, error %#x\n", 1866 ret, *error); 1867 return ret; 1868 } 1869 1870 /* 1871 * SNP_SHUTDOWN_EX with IOMMU_SNP_SHUTDOWN set to 1 disables SNP 1872 * enforcement by the IOMMU and also transitions all pages 1873 * associated with the IOMMU to the Reclaim state. 1874 * Firmware was transitioning the IOMMU pages to Hypervisor state 1875 * before version 1.53. But, accounting for the number of assigned 1876 * 4kB pages in a 2M page was done incorrectly by not transitioning 1877 * to the Reclaim state. This resulted in RMP #PF when later accessing 1878 * the 2M page containing those pages during kexec boot. Hence, the 1879 * firmware now transitions these pages to Reclaim state and hypervisor 1880 * needs to transition these pages to shared state. SNP Firmware 1881 * version 1.53 and above are needed for kexec boot. 1882 */ 1883 ret = amd_iommu_snp_disable(); 1884 if (ret) { 1885 dev_err(sev->dev, "SNP IOMMU shutdown failed\n"); 1886 return ret; 1887 } 1888 1889 sev->snp_initialized = false; 1890 dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n"); 1891 1892 /* 1893 * __sev_snp_shutdown_locked() deadlocks when it tries to unregister 1894 * itself during panic as the panic notifier is called with RCU read 1895 * lock held and notifier unregistration does RCU synchronization. 1896 */ 1897 if (!panic) 1898 atomic_notifier_chain_unregister(&panic_notifier_list, 1899 &snp_panic_notifier); 1900 1901 /* Reset TMR size back to default */ 1902 sev_es_tmr_size = SEV_TMR_SIZE; 1903 1904 return ret; 1905 } 1906 1907 static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) 1908 { 1909 struct sev_device *sev = psp_master->sev_data; 1910 struct sev_user_data_pek_cert_import input; 1911 struct sev_data_pek_cert_import data; 1912 bool shutdown_required = false; 1913 void *pek_blob, *oca_blob; 1914 int ret; 1915 1916 if (!writable) 1917 return -EPERM; 1918 1919 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 1920 return -EFAULT; 1921 1922 /* copy PEK certificate blobs from userspace */ 1923 pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len); 1924 if (IS_ERR(pek_blob)) 1925 return PTR_ERR(pek_blob); 1926 1927 data.reserved = 0; 1928 data.pek_cert_address = __psp_pa(pek_blob); 1929 data.pek_cert_len = input.pek_cert_len; 1930 1931 /* copy PEK certificate blobs from userspace */ 1932 oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len); 1933 if (IS_ERR(oca_blob)) { 1934 ret = PTR_ERR(oca_blob); 1935 goto e_free_pek; 1936 } 1937 1938 data.oca_cert_address = __psp_pa(oca_blob); 1939 data.oca_cert_len = input.oca_cert_len; 1940 1941 /* If platform is not in INIT state then transition it to INIT */ 1942 if (sev->sev_plat_status.state != SEV_STATE_INIT) { 1943 ret = sev_move_to_init_state(argp, &shutdown_required); 1944 if (ret) 1945 goto e_free_oca; 1946 } 1947 1948 ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error); 1949 1950 e_free_oca: 1951 if (shutdown_required) 1952 __sev_firmware_shutdown(sev, false); 1953 1954 kfree(oca_blob); 1955 e_free_pek: 1956 kfree(pek_blob); 1957 return ret; 1958 } 1959 1960 static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) 1961 { 1962 struct sev_user_data_get_id2 input; 1963 struct sev_data_get_id data; 1964 void __user *input_address; 1965 void *id_blob = NULL; 1966 int ret; 1967 1968 /* SEV GET_ID is available from SEV API v0.16 and up */ 1969 if (!sev_version_greater_or_equal(0, 16)) 1970 return -ENOTSUPP; 1971 1972 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 1973 return -EFAULT; 1974 1975 input_address = (void __user *)input.address; 1976 1977 if (input.address && input.length) { 1978 /* 1979 * The length of the ID shouldn't be assumed by software since 1980 * it may change in the future. The allocation size is limited 1981 * to 1 << (PAGE_SHIFT + MAX_PAGE_ORDER) by the page allocator. 1982 * If the allocation fails, simply return ENOMEM rather than 1983 * warning in the kernel log. 1984 */ 1985 id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN); 1986 if (!id_blob) 1987 return -ENOMEM; 1988 1989 data.address = __psp_pa(id_blob); 1990 data.len = input.length; 1991 } else { 1992 data.address = 0; 1993 data.len = 0; 1994 } 1995 1996 ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, &data, &argp->error); 1997 1998 /* 1999 * Firmware will return the length of the ID value (either the minimum 2000 * required length or the actual length written), return it to the user. 2001 */ 2002 input.length = data.len; 2003 2004 if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { 2005 ret = -EFAULT; 2006 goto e_free; 2007 } 2008 2009 if (id_blob) { 2010 if (copy_to_user(input_address, id_blob, data.len)) { 2011 ret = -EFAULT; 2012 goto e_free; 2013 } 2014 } 2015 2016 e_free: 2017 kfree(id_blob); 2018 2019 return ret; 2020 } 2021 2022 static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp) 2023 { 2024 struct sev_data_get_id *data; 2025 u64 data_size, user_size; 2026 void *id_blob, *mem; 2027 int ret; 2028 2029 /* SEV GET_ID available from SEV API v0.16 and up */ 2030 if (!sev_version_greater_or_equal(0, 16)) 2031 return -ENOTSUPP; 2032 2033 /* SEV FW expects the buffer it fills with the ID to be 2034 * 8-byte aligned. Memory allocated should be enough to 2035 * hold data structure + alignment padding + memory 2036 * where SEV FW writes the ID. 2037 */ 2038 data_size = ALIGN(sizeof(struct sev_data_get_id), 8); 2039 user_size = sizeof(struct sev_user_data_get_id); 2040 2041 mem = kzalloc(data_size + user_size, GFP_KERNEL); 2042 if (!mem) 2043 return -ENOMEM; 2044 2045 data = mem; 2046 id_blob = mem + data_size; 2047 2048 data->address = __psp_pa(id_blob); 2049 data->len = user_size; 2050 2051 ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error); 2052 if (!ret) { 2053 if (copy_to_user((void __user *)argp->data, id_blob, data->len)) 2054 ret = -EFAULT; 2055 } 2056 2057 kfree(mem); 2058 2059 return ret; 2060 } 2061 2062 static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) 2063 { 2064 struct sev_device *sev = psp_master->sev_data; 2065 struct sev_user_data_pdh_cert_export input; 2066 void *pdh_blob = NULL, *cert_blob = NULL; 2067 struct sev_data_pdh_cert_export data; 2068 void __user *input_cert_chain_address; 2069 void __user *input_pdh_cert_address; 2070 bool shutdown_required = false; 2071 int ret; 2072 2073 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 2074 return -EFAULT; 2075 2076 memset(&data, 0, sizeof(data)); 2077 2078 input_pdh_cert_address = (void __user *)input.pdh_cert_address; 2079 input_cert_chain_address = (void __user *)input.cert_chain_address; 2080 2081 /* Userspace wants to query the certificate length. */ 2082 if (!input.pdh_cert_address || 2083 !input.pdh_cert_len || 2084 !input.cert_chain_address) 2085 goto cmd; 2086 2087 /* Allocate a physically contiguous buffer to store the PDH blob. */ 2088 if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) 2089 return -EFAULT; 2090 2091 /* Allocate a physically contiguous buffer to store the cert chain blob. */ 2092 if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) 2093 return -EFAULT; 2094 2095 pdh_blob = kzalloc(input.pdh_cert_len, GFP_KERNEL); 2096 if (!pdh_blob) 2097 return -ENOMEM; 2098 2099 data.pdh_cert_address = __psp_pa(pdh_blob); 2100 data.pdh_cert_len = input.pdh_cert_len; 2101 2102 cert_blob = kzalloc(input.cert_chain_len, GFP_KERNEL); 2103 if (!cert_blob) { 2104 ret = -ENOMEM; 2105 goto e_free_pdh; 2106 } 2107 2108 data.cert_chain_address = __psp_pa(cert_blob); 2109 data.cert_chain_len = input.cert_chain_len; 2110 2111 cmd: 2112 /* If platform is not in INIT state then transition it to INIT. */ 2113 if (sev->sev_plat_status.state != SEV_STATE_INIT) { 2114 if (!writable) { 2115 ret = -EPERM; 2116 goto e_free_cert; 2117 } 2118 ret = sev_move_to_init_state(argp, &shutdown_required); 2119 if (ret) 2120 goto e_free_cert; 2121 } 2122 2123 ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error); 2124 2125 /* If we query the length, FW responded with expected data. */ 2126 input.cert_chain_len = data.cert_chain_len; 2127 input.pdh_cert_len = data.pdh_cert_len; 2128 2129 if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { 2130 ret = -EFAULT; 2131 goto e_free_cert; 2132 } 2133 2134 if (pdh_blob) { 2135 if (copy_to_user(input_pdh_cert_address, 2136 pdh_blob, input.pdh_cert_len)) { 2137 ret = -EFAULT; 2138 goto e_free_cert; 2139 } 2140 } 2141 2142 if (cert_blob) { 2143 if (copy_to_user(input_cert_chain_address, 2144 cert_blob, input.cert_chain_len)) 2145 ret = -EFAULT; 2146 } 2147 2148 e_free_cert: 2149 if (shutdown_required) 2150 __sev_firmware_shutdown(sev, false); 2151 2152 kfree(cert_blob); 2153 e_free_pdh: 2154 kfree(pdh_blob); 2155 return ret; 2156 } 2157 2158 static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp) 2159 { 2160 struct sev_device *sev = psp_master->sev_data; 2161 bool shutdown_required = false; 2162 struct sev_data_snp_addr buf; 2163 struct page *status_page; 2164 int ret, error; 2165 void *data; 2166 2167 if (!argp->data) 2168 return -EINVAL; 2169 2170 status_page = alloc_page(GFP_KERNEL_ACCOUNT); 2171 if (!status_page) 2172 return -ENOMEM; 2173 2174 data = page_address(status_page); 2175 2176 if (!sev->snp_initialized) { 2177 ret = snp_move_to_init_state(argp, &shutdown_required); 2178 if (ret) 2179 goto cleanup; 2180 } 2181 2182 /* 2183 * Firmware expects status page to be in firmware-owned state, otherwise 2184 * it will report firmware error code INVALID_PAGE_STATE (0x1A). 2185 */ 2186 if (rmp_mark_pages_firmware(__pa(data), 1, true)) { 2187 ret = -EFAULT; 2188 goto cleanup; 2189 } 2190 2191 buf.address = __psp_pa(data); 2192 ret = __sev_do_cmd_locked(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &argp->error); 2193 2194 /* 2195 * Status page will be transitioned to Reclaim state upon success, or 2196 * left in Firmware state in failure. Use snp_reclaim_pages() to 2197 * transition either case back to Hypervisor-owned state. 2198 */ 2199 if (snp_reclaim_pages(__pa(data), 1, true)) 2200 return -EFAULT; 2201 2202 if (ret) 2203 goto cleanup; 2204 2205 if (copy_to_user((void __user *)argp->data, data, 2206 sizeof(struct sev_user_data_snp_status))) 2207 ret = -EFAULT; 2208 2209 cleanup: 2210 if (shutdown_required) 2211 __sev_snp_shutdown_locked(&error, false); 2212 2213 __free_pages(status_page, 0); 2214 return ret; 2215 } 2216 2217 static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp) 2218 { 2219 struct sev_device *sev = psp_master->sev_data; 2220 struct sev_data_snp_commit buf; 2221 bool shutdown_required = false; 2222 int ret, error; 2223 2224 if (!sev->snp_initialized) { 2225 ret = snp_move_to_init_state(argp, &shutdown_required); 2226 if (ret) 2227 return ret; 2228 } 2229 2230 buf.len = sizeof(buf); 2231 2232 ret = __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error); 2233 2234 if (shutdown_required) 2235 __sev_snp_shutdown_locked(&error, false); 2236 2237 return ret; 2238 } 2239 2240 static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable) 2241 { 2242 struct sev_device *sev = psp_master->sev_data; 2243 struct sev_user_data_snp_config config; 2244 bool shutdown_required = false; 2245 int ret, error; 2246 2247 if (!argp->data) 2248 return -EINVAL; 2249 2250 if (!writable) 2251 return -EPERM; 2252 2253 if (copy_from_user(&config, (void __user *)argp->data, sizeof(config))) 2254 return -EFAULT; 2255 2256 if (!sev->snp_initialized) { 2257 ret = snp_move_to_init_state(argp, &shutdown_required); 2258 if (ret) 2259 return ret; 2260 } 2261 2262 ret = __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error); 2263 2264 if (shutdown_required) 2265 __sev_snp_shutdown_locked(&error, false); 2266 2267 return ret; 2268 } 2269 2270 static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable) 2271 { 2272 struct sev_device *sev = psp_master->sev_data; 2273 struct sev_user_data_snp_vlek_load input; 2274 bool shutdown_required = false; 2275 int ret, error; 2276 void *blob; 2277 2278 if (!argp->data) 2279 return -EINVAL; 2280 2281 if (!writable) 2282 return -EPERM; 2283 2284 if (copy_from_user(&input, u64_to_user_ptr(argp->data), sizeof(input))) 2285 return -EFAULT; 2286 2287 if (input.len != sizeof(input) || input.vlek_wrapped_version != 0) 2288 return -EINVAL; 2289 2290 blob = psp_copy_user_blob(input.vlek_wrapped_address, 2291 sizeof(struct sev_user_data_snp_wrapped_vlek_hashstick)); 2292 if (IS_ERR(blob)) 2293 return PTR_ERR(blob); 2294 2295 input.vlek_wrapped_address = __psp_pa(blob); 2296 2297 if (!sev->snp_initialized) { 2298 ret = snp_move_to_init_state(argp, &shutdown_required); 2299 if (ret) 2300 goto cleanup; 2301 } 2302 2303 ret = __sev_do_cmd_locked(SEV_CMD_SNP_VLEK_LOAD, &input, &argp->error); 2304 2305 if (shutdown_required) 2306 __sev_snp_shutdown_locked(&error, false); 2307 2308 cleanup: 2309 kfree(blob); 2310 2311 return ret; 2312 } 2313 2314 static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) 2315 { 2316 void __user *argp = (void __user *)arg; 2317 struct sev_issue_cmd input; 2318 int ret = -EFAULT; 2319 bool writable = file->f_mode & FMODE_WRITE; 2320 2321 if (!psp_master || !psp_master->sev_data) 2322 return -ENODEV; 2323 2324 if (ioctl != SEV_ISSUE_CMD) 2325 return -EINVAL; 2326 2327 if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) 2328 return -EFAULT; 2329 2330 if (input.cmd > SEV_MAX) 2331 return -EINVAL; 2332 2333 mutex_lock(&sev_cmd_mutex); 2334 2335 switch (input.cmd) { 2336 2337 case SEV_FACTORY_RESET: 2338 ret = sev_ioctl_do_reset(&input, writable); 2339 break; 2340 case SEV_PLATFORM_STATUS: 2341 ret = sev_ioctl_do_platform_status(&input); 2342 break; 2343 case SEV_PEK_GEN: 2344 ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable); 2345 break; 2346 case SEV_PDH_GEN: 2347 ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable); 2348 break; 2349 case SEV_PEK_CSR: 2350 ret = sev_ioctl_do_pek_csr(&input, writable); 2351 break; 2352 case SEV_PEK_CERT_IMPORT: 2353 ret = sev_ioctl_do_pek_import(&input, writable); 2354 break; 2355 case SEV_PDH_CERT_EXPORT: 2356 ret = sev_ioctl_do_pdh_export(&input, writable); 2357 break; 2358 case SEV_GET_ID: 2359 pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n"); 2360 ret = sev_ioctl_do_get_id(&input); 2361 break; 2362 case SEV_GET_ID2: 2363 ret = sev_ioctl_do_get_id2(&input); 2364 break; 2365 case SNP_PLATFORM_STATUS: 2366 ret = sev_ioctl_do_snp_platform_status(&input); 2367 break; 2368 case SNP_COMMIT: 2369 ret = sev_ioctl_do_snp_commit(&input); 2370 break; 2371 case SNP_SET_CONFIG: 2372 ret = sev_ioctl_do_snp_set_config(&input, writable); 2373 break; 2374 case SNP_VLEK_LOAD: 2375 ret = sev_ioctl_do_snp_vlek_load(&input, writable); 2376 break; 2377 default: 2378 ret = -EINVAL; 2379 goto out; 2380 } 2381 2382 if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) 2383 ret = -EFAULT; 2384 out: 2385 mutex_unlock(&sev_cmd_mutex); 2386 2387 return ret; 2388 } 2389 2390 static const struct file_operations sev_fops = { 2391 .owner = THIS_MODULE, 2392 .unlocked_ioctl = sev_ioctl, 2393 }; 2394 2395 int sev_platform_status(struct sev_user_data_status *data, int *error) 2396 { 2397 return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error); 2398 } 2399 EXPORT_SYMBOL_GPL(sev_platform_status); 2400 2401 int sev_guest_deactivate(struct sev_data_deactivate *data, int *error) 2402 { 2403 return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error); 2404 } 2405 EXPORT_SYMBOL_GPL(sev_guest_deactivate); 2406 2407 int sev_guest_activate(struct sev_data_activate *data, int *error) 2408 { 2409 return sev_do_cmd(SEV_CMD_ACTIVATE, data, error); 2410 } 2411 EXPORT_SYMBOL_GPL(sev_guest_activate); 2412 2413 int sev_guest_decommission(struct sev_data_decommission *data, int *error) 2414 { 2415 return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error); 2416 } 2417 EXPORT_SYMBOL_GPL(sev_guest_decommission); 2418 2419 int sev_guest_df_flush(int *error) 2420 { 2421 return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error); 2422 } 2423 EXPORT_SYMBOL_GPL(sev_guest_df_flush); 2424 2425 static void sev_exit(struct kref *ref) 2426 { 2427 misc_deregister(&misc_dev->misc); 2428 kfree(misc_dev); 2429 misc_dev = NULL; 2430 } 2431 2432 static int sev_misc_init(struct sev_device *sev) 2433 { 2434 struct device *dev = sev->dev; 2435 int ret; 2436 2437 /* 2438 * SEV feature support can be detected on multiple devices but the SEV 2439 * FW commands must be issued on the master. During probe, we do not 2440 * know the master hence we create /dev/sev on the first device probe. 2441 * sev_do_cmd() finds the right master device to which to issue the 2442 * command to the firmware. 2443 */ 2444 if (!misc_dev) { 2445 struct miscdevice *misc; 2446 2447 misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL); 2448 if (!misc_dev) 2449 return -ENOMEM; 2450 2451 misc = &misc_dev->misc; 2452 misc->minor = MISC_DYNAMIC_MINOR; 2453 misc->name = DEVICE_NAME; 2454 misc->fops = &sev_fops; 2455 2456 ret = misc_register(misc); 2457 if (ret) 2458 return ret; 2459 2460 kref_init(&misc_dev->refcount); 2461 } else { 2462 kref_get(&misc_dev->refcount); 2463 } 2464 2465 init_waitqueue_head(&sev->int_queue); 2466 sev->misc = misc_dev; 2467 dev_dbg(dev, "registered SEV device\n"); 2468 2469 return 0; 2470 } 2471 2472 int sev_dev_init(struct psp_device *psp) 2473 { 2474 struct device *dev = psp->dev; 2475 struct sev_device *sev; 2476 int ret = -ENOMEM; 2477 2478 if (!boot_cpu_has(X86_FEATURE_SEV)) { 2479 dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n"); 2480 return 0; 2481 } 2482 2483 sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL); 2484 if (!sev) 2485 goto e_err; 2486 2487 sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 1); 2488 if (!sev->cmd_buf) 2489 goto e_sev; 2490 2491 sev->cmd_buf_backup = (uint8_t *)sev->cmd_buf + PAGE_SIZE; 2492 2493 psp->sev_data = sev; 2494 2495 sev->dev = dev; 2496 sev->psp = psp; 2497 2498 sev->io_regs = psp->io_regs; 2499 2500 sev->vdata = (struct sev_vdata *)psp->vdata->sev; 2501 if (!sev->vdata) { 2502 ret = -ENODEV; 2503 dev_err(dev, "sev: missing driver data\n"); 2504 goto e_buf; 2505 } 2506 2507 psp_set_sev_irq_handler(psp, sev_irq_handler, sev); 2508 2509 ret = sev_misc_init(sev); 2510 if (ret) 2511 goto e_irq; 2512 2513 dev_notice(dev, "sev enabled\n"); 2514 2515 return 0; 2516 2517 e_irq: 2518 psp_clear_sev_irq_handler(psp); 2519 e_buf: 2520 devm_free_pages(dev, (unsigned long)sev->cmd_buf); 2521 e_sev: 2522 devm_kfree(dev, sev); 2523 e_err: 2524 psp->sev_data = NULL; 2525 2526 dev_notice(dev, "sev initialization failed\n"); 2527 2528 return ret; 2529 } 2530 2531 static void __sev_firmware_shutdown(struct sev_device *sev, bool panic) 2532 { 2533 int error; 2534 2535 __sev_platform_shutdown_locked(NULL); 2536 2537 if (sev_es_tmr) { 2538 /* 2539 * The TMR area was encrypted, flush it from the cache. 2540 * 2541 * If invoked during panic handling, local interrupts are 2542 * disabled and all CPUs are stopped, so wbinvd_on_all_cpus() 2543 * can't be used. In that case, wbinvd() is done on remote CPUs 2544 * via the NMI callback, and done for this CPU later during 2545 * SNP shutdown, so wbinvd_on_all_cpus() can be skipped. 2546 */ 2547 if (!panic) 2548 wbinvd_on_all_cpus(); 2549 2550 __snp_free_firmware_pages(virt_to_page(sev_es_tmr), 2551 get_order(sev_es_tmr_size), 2552 true); 2553 sev_es_tmr = NULL; 2554 } 2555 2556 if (sev_init_ex_buffer) { 2557 __snp_free_firmware_pages(virt_to_page(sev_init_ex_buffer), 2558 get_order(NV_LENGTH), 2559 true); 2560 sev_init_ex_buffer = NULL; 2561 } 2562 2563 if (snp_range_list) { 2564 kfree(snp_range_list); 2565 snp_range_list = NULL; 2566 } 2567 2568 __sev_snp_shutdown_locked(&error, panic); 2569 } 2570 2571 static void sev_firmware_shutdown(struct sev_device *sev) 2572 { 2573 mutex_lock(&sev_cmd_mutex); 2574 __sev_firmware_shutdown(sev, false); 2575 mutex_unlock(&sev_cmd_mutex); 2576 } 2577 2578 void sev_platform_shutdown(void) 2579 { 2580 if (!psp_master || !psp_master->sev_data) 2581 return; 2582 2583 sev_firmware_shutdown(psp_master->sev_data); 2584 } 2585 EXPORT_SYMBOL_GPL(sev_platform_shutdown); 2586 2587 void sev_dev_destroy(struct psp_device *psp) 2588 { 2589 struct sev_device *sev = psp->sev_data; 2590 2591 if (!sev) 2592 return; 2593 2594 sev_firmware_shutdown(sev); 2595 2596 if (sev->misc) 2597 kref_put(&misc_dev->refcount, sev_exit); 2598 2599 psp_clear_sev_irq_handler(psp); 2600 } 2601 2602 static int snp_shutdown_on_panic(struct notifier_block *nb, 2603 unsigned long reason, void *arg) 2604 { 2605 struct sev_device *sev = psp_master->sev_data; 2606 2607 /* 2608 * If sev_cmd_mutex is already acquired, then it's likely 2609 * another PSP command is in flight and issuing a shutdown 2610 * would fail in unexpected ways. Rather than create even 2611 * more confusion during a panic, just bail out here. 2612 */ 2613 if (mutex_is_locked(&sev_cmd_mutex)) 2614 return NOTIFY_DONE; 2615 2616 __sev_firmware_shutdown(sev, true); 2617 2618 return NOTIFY_DONE; 2619 } 2620 2621 int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, 2622 void *data, int *error) 2623 { 2624 if (!filep || filep->f_op != &sev_fops) 2625 return -EBADF; 2626 2627 return sev_do_cmd(cmd, data, error); 2628 } 2629 EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); 2630 2631 void sev_pci_init(void) 2632 { 2633 struct sev_device *sev = psp_master->sev_data; 2634 u8 api_major, api_minor, build; 2635 2636 if (!sev) 2637 return; 2638 2639 psp_timeout = psp_probe_timeout; 2640 2641 if (sev_get_api_version()) 2642 goto err; 2643 2644 api_major = sev->api_major; 2645 api_minor = sev->api_minor; 2646 build = sev->build; 2647 2648 if (sev_update_firmware(sev->dev) == 0) 2649 sev_get_api_version(); 2650 2651 if (api_major != sev->api_major || api_minor != sev->api_minor || 2652 build != sev->build) 2653 dev_info(sev->dev, "SEV firmware updated from %d.%d.%d to %d.%d.%d\n", 2654 api_major, api_minor, build, 2655 sev->api_major, sev->api_minor, sev->build); 2656 2657 return; 2658 2659 err: 2660 sev_dev_destroy(psp_master); 2661 2662 psp_master->sev_data = NULL; 2663 } 2664 2665 void sev_pci_exit(void) 2666 { 2667 struct sev_device *sev = psp_master->sev_data; 2668 2669 if (!sev) 2670 return; 2671 2672 sev_firmware_shutdown(sev); 2673 } 2674