// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Encrypted Virtualization (SEV) guest driver interface
 *
 * Copyright (C) 2021-2024 Advanced Micro Devices, Inc.
 *
 * Author: Brijesh Singh <brijesh.singh@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/set_memory.h>
#include <linux/fs.h>
#include <linux/tsm.h>
#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/psp-sev.h>
#include <linux/sockptr.h>
#include <linux/cleanup.h>
#include <linux/uuid.h>
#include <linux/configfs.h>
#include <uapi/linux/sev-guest.h>
#include <uapi/linux/psp-sev.h>

#include <asm/svm.h>
#include <asm/sev.h>

#define DEVICE_NAME	"sev-guest"
#define AAD_LEN		48
#define MSG_HDR_VER	1

#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
#define SNP_REQ_RETRY_DELAY		(2*HZ)

#define SVSM_MAX_RETRIES		3

struct snp_guest_crypto {
	struct crypto_aead *tfm;
	u8 *iv, *authtag;
	int iv_len, a_len;
};

struct snp_guest_dev {
	struct device *dev;
	struct miscdevice misc;

	void *certs_data;
	struct snp_guest_crypto *crypto;
	/* request and response are in unencrypted memory */
	struct snp_guest_msg *request, *response;

	/*
	 * Avoid information leakage by double-buffering shared messages
	 * in fields that are in regular encrypted memory.
	 */
	struct snp_guest_msg secret_request, secret_response;

	struct snp_secrets_page *secrets;
	struct snp_req_data input;
	union {
		struct snp_report_req report;
		struct snp_derived_key_req derived_key;
		struct snp_ext_report_req ext_report;
	} req;
	u32 *os_area_msg_seqno;
	u8 *vmpck;
};

/*
 * The VMPCK ID represents the key used by the SNP guest to communicate with the
 * SEV firmware in the AMD Secure Processor (ASP, aka PSP). By default, the key
 * used will be the key associated with the VMPL at which the guest is running.
 * Should the default key be wiped (see snp_disable_vmpck()), this parameter
 * allows for using one of the remaining VMPCKs.
 */
static int vmpck_id = -1;
module_param(vmpck_id, int, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");

/* Mutex to serialize the shared buffer access and command handling. */
static DEFINE_MUTEX(snp_cmd_mutex);

static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
{
	char zero_key[VMPCK_KEY_LEN] = {0};

	if (snp_dev->vmpck)
		return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN);

	return true;
}

/*
 * If an error is received from the host or AMD Secure Processor (ASP) there
 * are two options. Either retry the exact same encrypted request or discontinue
 * using the VMPCK.
 *
 * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
 * encrypt the requests. The IV for this scheme is the sequence number. GCM
 * cannot tolerate IV reuse.
 *
 * The ASP FW v1.51 only increments the sequence numbers on a successful
 * guest<->ASP back and forth and only accepts messages at its exact sequence
 * number.
 *
 * So if the sequence number were to be reused the encryption scheme is
 * vulnerable. If the sequence number were incremented for a fresh IV the ASP
 * will reject the request.
 */
static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
{
	dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n",
		  vmpck_id);
	memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
	snp_dev->vmpck = NULL;
}

static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
{
	u64 count;

	lockdep_assert_held(&snp_cmd_mutex);

	/* Read the current message sequence counter from the secrets page */
	count = *snp_dev->os_area_msg_seqno;

	return count + 1;
}

/* Return a non-zero sequence number on success */
static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
{
	u64 count = __snp_get_msg_seqno(snp_dev);

	/*
	 * The message sequence counter for the SNP guest request is a 64-bit
	 * value, but version 2 of the GHCB specification defines 32-bit storage
	 * for it. If the counter exceeds the 32-bit range, return zero.
	 * The caller should check the return value; even if the caller fails
	 * to check it and uses zero anyway, the firmware treats zero as an
	 * invalid sequence number and will fail the message request.
	 */
	if (count >= UINT_MAX) {
		dev_err(snp_dev->dev, "request message sequence counter overflow\n");
		return 0;
	}

	return count;
}

static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev)
{
	/*
	 * The counter is also incremented by the PSP, so increment it by 2
	 * and save it in the secrets page.
	 */
	*snp_dev->os_area_msg_seqno += 2;
}
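
/*
 * Worked example of the numbering above (illustrative): with the OS-area
 * counter starting at 0, the first guest request uses seqno 1, the PSP
 * replies with seqno 2, and snp_inc_msg_seqno() advances the counter to 2
 * so that the next request uses seqno 3. Guest requests therefore always
 * carry odd sequence numbers and responses the following even ones, which
 * is what verify_and_dec_payload() verifies via req_hdr->msg_seqno + 1.
 */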

static inline struct snp_guest_dev *to_snp_dev(struct file *file)
{
	struct miscdevice *dev = file->private_data;

	return container_of(dev, struct snp_guest_dev, misc);
}

static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen)
{
	struct snp_guest_crypto *crypto;

	crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT);
	if (!crypto)
		return NULL;

	crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(crypto->tfm))
		goto e_free;

	if (crypto_aead_setkey(crypto->tfm, key, keylen))
		goto e_free_crypto;

	crypto->iv_len = crypto_aead_ivsize(crypto->tfm);
	crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT);
	if (!crypto->iv)
		goto e_free_crypto;

	if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) {
		if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) {
			dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN);
			goto e_free_iv;
		}
	}

	crypto->a_len = crypto_aead_authsize(crypto->tfm);
	crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT);
	if (!crypto->authtag)
		goto e_free_iv;

	return crypto;

e_free_iv:
	kfree(crypto->iv);
e_free_crypto:
	crypto_free_aead(crypto->tfm);
e_free:
	kfree(crypto);

	return NULL;
}

static void deinit_crypto(struct snp_guest_crypto *crypto)
{
	crypto_free_aead(crypto->tfm);
	kfree(crypto->iv);
	kfree(crypto->authtag);
	kfree(crypto);
}

static int enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg,
			   u8 *src_buf, u8 *dst_buf, size_t len, bool enc)
{
	struct snp_guest_msg_hdr *hdr = &msg->hdr;
	struct scatterlist src[3], dst[3];
	DECLARE_CRYPTO_WAIT(wait);
	struct aead_request *req;
	int ret;

	req = aead_request_alloc(crypto->tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/*
	 * AEAD memory operations:
	 * +------ AAD -------+------- DATA -----+---- AUTHTAG----+
	 * |  msg header      |  plaintext       |  hdr->authtag  |
	 * | bytes 30h - 5Fh  |    or            |                |
	 * |                  |   cipher         |                |
	 * +------------------+------------------+----------------+
	 */
	sg_init_table(src, 3);
	sg_set_buf(&src[0], &hdr->algo, AAD_LEN);
	sg_set_buf(&src[1], src_buf, hdr->msg_sz);
	sg_set_buf(&src[2], hdr->authtag, crypto->a_len);

	sg_init_table(dst, 3);
	sg_set_buf(&dst[0], &hdr->algo, AAD_LEN);
	sg_set_buf(&dst[1], dst_buf, hdr->msg_sz);
	sg_set_buf(&dst[2], hdr->authtag, crypto->a_len);

	aead_request_set_ad(req, AAD_LEN);
	aead_request_set_tfm(req, crypto->tfm);
	aead_request_set_callback(req, 0, crypto_req_done, &wait);

	aead_request_set_crypt(req, src, dst, len, crypto->iv);
	ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait);

	aead_request_free(req);
	return ret;
}

static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
			 void *plaintext, size_t len)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_guest_msg_hdr *hdr = &msg->hdr;

	memset(crypto->iv, 0, crypto->iv_len);
	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));

	return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true);
}

static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
		       void *plaintext, size_t len)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_guest_msg_hdr *hdr = &msg->hdr;

	/* Build IV with response buffer sequence number */
	memset(crypto->iv, 0, crypto->iv_len);
	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));

	return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false);
}

static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_guest_msg *resp = &snp_dev->secret_response;
	struct snp_guest_msg *req = &snp_dev->secret_request;
	struct snp_guest_msg_hdr *req_hdr = &req->hdr;
	struct snp_guest_msg_hdr *resp_hdr = &resp->hdr;

	dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n",
		resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz);

	/* Copy response from shared memory to encrypted memory. */
	memcpy(resp, snp_dev->response, sizeof(*resp));

	/* Verify that the sequence counter is incremented by 1 */
	if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1)))
		return -EBADMSG;

	/* Verify response message type and version number. */
	if (resp_hdr->msg_type != (req_hdr->msg_type + 1) ||
	    resp_hdr->msg_version != req_hdr->msg_version)
		return -EBADMSG;

	/*
	 * If the message size is greater than our buffer length then return
	 * an error.
	 */
	if (unlikely((resp_hdr->msg_sz + crypto->a_len) > sz))
		return -EBADMSG;

	/* Decrypt the payload */
	return dec_payload(snp_dev, resp, payload, resp_hdr->msg_sz + crypto->a_len);
}

static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type,
		       void *payload, size_t sz)
{
	struct snp_guest_msg *req = &snp_dev->secret_request;
	struct snp_guest_msg_hdr *hdr = &req->hdr;

	memset(req, 0, sizeof(*req));

	hdr->algo = SNP_AEAD_AES_256_GCM;
	hdr->hdr_version = MSG_HDR_VER;
	hdr->hdr_sz = sizeof(*hdr);
	hdr->msg_type = type;
	hdr->msg_version = version;
	hdr->msg_seqno = seqno;
	hdr->msg_vmpck = vmpck_id;
	hdr->msg_sz = sz;

	/* Verify the sequence number is non-zero */
	if (!hdr->msg_seqno)
		return -ENOSR;

	dev_dbg(snp_dev->dev, "request [seqno %lld type %d version %d sz %d]\n",
		hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);

	return __enc_payload(snp_dev, req, payload, sz);
}

static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
				  struct snp_guest_request_ioctl *rio)
{
	unsigned long req_start = jiffies;
	unsigned int override_npages = 0;
	u64 override_err = 0;
	int rc;

retry_request:
	/*
	 * Call firmware to process the request. In this function the encrypted
	 * message enters shared memory with the host. So after this call the
	 * sequence number must be incremented or the VMPCK must be deleted to
	 * prevent reuse of the IV.
	 */
	rc = snp_issue_guest_request(exit_code, &snp_dev->input, rio);
	switch (rc) {
	case -ENOSPC:
		/*
		 * If the extended guest request fails due to having too
		 * small of a certificate data buffer, retry the same
		 * guest request without the extended data request in
		 * order to increment the sequence number and thus avoid
		 * IV reuse.
		 */
		override_npages = snp_dev->input.data_npages;
		exit_code = SVM_VMGEXIT_GUEST_REQUEST;

		/*
		 * Override the error to inform callers the given extended
		 * request buffer size was too small and give the caller the
		 * required buffer size.
		 */
		override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);

		/*
		 * If this call to the firmware succeeds, the sequence number can
		 * be incremented allowing for continued use of the VMPCK. If
		 * there is an error reflected in the return value, this value
		 * is checked further down and the result will be the deletion
		 * of the VMPCK and the error code being propagated back to the
		 * user as an ioctl() return code.
		 */
		goto retry_request;

	/*
	 * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
	 * throttled. Retry in the driver to avoid returning and reusing the
	 * message sequence number on a different message.
	 */
	case -EAGAIN:
		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
			rc = -ETIMEDOUT;
			break;
		}
		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
		goto retry_request;
	}

	/*
	 * Increment the message sequence number. There is no harm in doing
	 * this now because decryption uses the value stored in the response
	 * structure and any failure will wipe the VMPCK, preventing further
	 * use anyway.
	 */
	snp_inc_msg_seqno(snp_dev);

	if (override_err) {
		rio->exitinfo2 = override_err;

		/*
		 * If an extended guest request was issued and the supplied certificate
		 * buffer was not large enough, a standard guest request was issued to
		 * prevent IV reuse. If the standard request was successful, return -EIO
		 * back to the caller as would have originally been returned.
		 */
		if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			rc = -EIO;
	}

	if (override_npages)
		snp_dev->input.data_npages = override_npages;

	return rc;
}

static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
				struct snp_guest_request_ioctl *rio, u8 type,
				void *req_buf, size_t req_sz, void *resp_buf,
				u32 resp_sz)
{
	u64 seqno;
	int rc;

	/* Get the message sequence number and verify that it is non-zero */
	seqno = snp_get_msg_seqno(snp_dev);
	if (!seqno)
		return -EIO;

	/* Clear shared memory's response for the host to populate. */
	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));

	/* Encrypt the userspace provided payload in snp_dev->secret_request. */
	rc = enc_payload(snp_dev, seqno, rio->msg_version, type, req_buf, req_sz);
	if (rc)
		return rc;

	/*
	 * Write the fully encrypted request to the shared unencrypted
	 * request page.
	 */
	memcpy(snp_dev->request, &snp_dev->secret_request,
	       sizeof(snp_dev->secret_request));

	rc = __handle_guest_request(snp_dev, exit_code, rio);
	if (rc) {
		if (rc == -EIO &&
		    rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			return rc;

		dev_alert(snp_dev->dev,
			  "Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
			  rc, rio->exitinfo2);

		snp_disable_vmpck(snp_dev);
		return rc;
	}

	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
	if (rc) {
		dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
		snp_disable_vmpck(snp_dev);
		return rc;
	}

	return 0;
}
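
/*
 * Summary of the request path implemented above (descriptive only): the
 * payload is AES-GCM encrypted into the private secret_request buffer,
 * copied to the shared (unencrypted) request page, handed to the firmware
 * via a VMGEXIT, and the response is copied back from the shared page into
 * private memory before being authenticated and decrypted. Any failure
 * after the message has reached shared memory wipes the VMPCK rather than
 * risking IV reuse.
 */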

struct snp_req_resp {
	sockptr_t req_data;
	sockptr_t resp_data;
};

static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_report_req *req = &snp_dev->req.report;
	struct snp_report_resp *resp;
	int rc, resp_len;

	lockdep_assert_held(&snp_cmd_mutex);

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
		return -EFAULT;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp->data) + crypto->a_len;
	resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!resp)
		return -ENOMEM;

	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
				  SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data,
				  resp_len);
	if (rc)
		goto e_free;

	if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
		rc = -EFAULT;

e_free:
	kfree(resp);
	return rc;
}

static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_derived_key_req *req = &snp_dev->req.derived_key;
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_derived_key_resp resp = {0};
	int rc, resp_len;
	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
	u8 buf[64 + 16];

	lockdep_assert_held(&snp_cmd_mutex);

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp.data) + crypto->a_len;
	if (sizeof(buf) < resp_len)
		return -ENOMEM;

	if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
		return -EFAULT;

	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
				  SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len);
	if (rc)
		return rc;

	memcpy(resp.data, buf, sizeof(resp.data));
	if (copy_to_user((void __user *)arg->resp_data, &resp, sizeof(resp)))
		rc = -EFAULT;

	/* The response buffer contains the sensitive data, explicitly clear it. */
	memzero_explicit(buf, sizeof(buf));
	memzero_explicit(&resp, sizeof(resp));
	return rc;
}

static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg,
			  struct snp_req_resp *io)
{
	struct snp_ext_report_req *req = &snp_dev->req.ext_report;
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_report_resp *resp;
	int ret, npages = 0, resp_len;
	sockptr_t certs_address;

	lockdep_assert_held(&snp_cmd_mutex);

	if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
		return -EINVAL;

	if (copy_from_sockptr(req, io->req_data, sizeof(*req)))
		return -EFAULT;

	/* caller does not want certificate data */
	if (!req->certs_len || !req->certs_address)
		goto cmd;

	if (req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
	    !IS_ALIGNED(req->certs_len, PAGE_SIZE))
		return -EINVAL;

	if (sockptr_is_kernel(io->resp_data)) {
		certs_address = KERNEL_SOCKPTR((void *)req->certs_address);
	} else {
		certs_address = USER_SOCKPTR((void __user *)req->certs_address);
		if (!access_ok(certs_address.user, req->certs_len))
			return -EFAULT;
	}

	/*
	 * Initialize the intermediate buffer with all zeros. This buffer
	 * is used in the guest request message to get the certs blob from
	 * the host. If host does not supply any certs in it, then copy
	 * zeros to indicate that certificate data was not provided.
	 */
	memset(snp_dev->certs_data, 0, req->certs_len);
	npages = req->certs_len >> PAGE_SHIFT;
cmd:
	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp->data) + crypto->a_len;
	resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!resp)
		return -ENOMEM;

	snp_dev->input.data_npages = npages;
	ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
				   SNP_MSG_REPORT_REQ, &req->data,
				   sizeof(req->data), resp->data, resp_len);

	/* If certs length is invalid then copy the returned length */
	if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
		req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;

		if (copy_to_sockptr(io->req_data, req, sizeof(*req)))
			ret = -EFAULT;
	}

	if (ret)
		goto e_free;

	if (npages && copy_to_sockptr(certs_address, snp_dev->certs_data, req->certs_len)) {
		ret = -EFAULT;
		goto e_free;
	}

	if (copy_to_sockptr(io->resp_data, resp, sizeof(*resp)))
		ret = -EFAULT;

e_free:
	kfree(resp);
	return ret;
}

static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	struct snp_guest_dev *snp_dev = to_snp_dev(file);
	void __user *argp = (void __user *)arg;
	struct snp_guest_request_ioctl input;
	struct snp_req_resp io;
	int ret = -ENOTTY;

	if (copy_from_user(&input, argp, sizeof(input)))
		return -EFAULT;

	input.exitinfo2 = 0xff;

	/* Message version must be non-zero */
	if (!input.msg_version)
		return -EINVAL;

	mutex_lock(&snp_cmd_mutex);

	/* Check if the VMPCK is not empty */
	if (is_vmpck_empty(snp_dev)) {
		dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
		mutex_unlock(&snp_cmd_mutex);
		return -ENOTTY;
	}

	switch (ioctl) {
	case SNP_GET_REPORT:
		ret = get_report(snp_dev, &input);
		break;
	case SNP_GET_DERIVED_KEY:
		ret = get_derived_key(snp_dev, &input);
		break;
	case SNP_GET_EXT_REPORT:
		/*
		 * As get_ext_report() may be called from the ioctl() path and a
		 * kernel internal path (configfs-tsm), decorate the passed
		 * buffers as user pointers.
		 */
		io.req_data = USER_SOCKPTR((void __user *)input.req_data);
		io.resp_data = USER_SOCKPTR((void __user *)input.resp_data);
		ret = get_ext_report(snp_dev, &input, &io);
		break;
	default:
		break;
	}

	mutex_unlock(&snp_cmd_mutex);

	if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
		return -EFAULT;

	return ret;
}
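
/*
 * Illustrative userspace usage of the ioctl interface above (not part of
 * the driver; error handling trimmed). The structures and the
 * SNP_GET_REPORT ioctl come from the <uapi/linux/sev-guest.h> header
 * included at the top of this file:
 *
 *	struct snp_report_req req = {};
 *	struct snp_report_resp resp = {};
 *	struct snp_guest_request_ioctl input = {
 *		.msg_version = 1,
 *		.req_data = (__u64)&req,
 *		.resp_data = (__u64)&resp,
 *	};
 *	int fd = open("/dev/sev-guest", O_RDWR);
 *
 *	// req.user_data carries a caller-chosen 64-byte nonce
 *	if (ioctl(fd, SNP_GET_REPORT, &input) < 0)
 *		fprintf(stderr, "exitinfo2: 0x%llx\n", input.exitinfo2);
 */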

static void free_shared_pages(void *buf, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	int ret;

	if (!buf)
		return;

	ret = set_memory_encrypted((unsigned long)buf, npages);
	if (ret) {
		WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
		return;
	}

	__free_pages(virt_to_page(buf), get_order(sz));
}

static void *alloc_shared_pages(struct device *dev, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	struct page *page;
	int ret;

	page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
	if (!page)
		return NULL;

	ret = set_memory_decrypted((unsigned long)page_address(page), npages);
	if (ret) {
		dev_err(dev, "failed to mark page shared, ret=%d\n", ret);
		__free_pages(page, get_order(sz));
		return NULL;
	}

	return page_address(page);
}

static const struct file_operations snp_guest_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = snp_guest_ioctl,
};

static u8 *get_vmpck(int id, struct snp_secrets_page *secrets, u32 **seqno)
{
	u8 *key = NULL;

	switch (id) {
	case 0:
		*seqno = &secrets->os_area.msg_seqno_0;
		key = secrets->vmpck0;
		break;
	case 1:
		*seqno = &secrets->os_area.msg_seqno_1;
		key = secrets->vmpck1;
		break;
	case 2:
		*seqno = &secrets->os_area.msg_seqno_2;
		key = secrets->vmpck2;
		break;
	case 3:
		*seqno = &secrets->os_area.msg_seqno_3;
		key = secrets->vmpck3;
		break;
	default:
		break;
	}

	return key;
}

struct snp_msg_report_resp_hdr {
	u32 status;
	u32 report_size;
	u8 rsvd[24];
};

struct snp_msg_cert_entry {
	guid_t guid;
	u32 offset;
	u32 length;
};

static int sev_svsm_report_new(struct tsm_report *report, void *data)
{
	unsigned int rep_len, man_len, certs_len;
	struct tsm_desc *desc = &report->desc;
	struct svsm_attest_call ac = {};
	unsigned int retry_count;
	void *rep, *man, *certs;
	struct svsm_call call;
	unsigned int size;
	bool try_again;
	void *buffer;
	u64 call_id;
	int ret;

	/*
	 * Allocate pages for the request:
	 * - Report blob (4K)
	 * - Manifest blob (4K)
	 * - Certificate blob (16K)
	 *
	 * Above addresses must be 4K aligned
	 */
	rep_len = SZ_4K;
	man_len = SZ_4K;
	certs_len = SEV_FW_BLOB_MAX_SIZE;

	guard(mutex)(&snp_cmd_mutex);

	if (guid_is_null(&desc->service_guid)) {
		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES);
	} else {
		export_guid(ac.service_guid, &desc->service_guid);
		ac.service_manifest_ver = desc->service_manifest_version;

		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SINGLE_SERVICE);
	}

	retry_count = 0;

retry:
	memset(&call, 0, sizeof(call));

	size = rep_len + man_len + certs_len;
	buffer = alloc_pages_exact(size, __GFP_ZERO);
	if (!buffer)
		return -ENOMEM;

	rep = buffer;
	ac.report_buf.pa = __pa(rep);
	ac.report_buf.len = rep_len;

	man = rep + rep_len;
	ac.manifest_buf.pa = __pa(man);
	ac.manifest_buf.len = man_len;

	certs = man + man_len;
	ac.certificates_buf.pa = __pa(certs);
	ac.certificates_buf.len = certs_len;

	ac.nonce.pa = __pa(desc->inblob);
	ac.nonce.len = desc->inblob_len;

	ret = snp_issue_svsm_attest_req(call_id, &call, &ac);
	if (ret) {
		free_pages_exact(buffer, size);

		switch (call.rax_out) {
		case SVSM_ERR_INVALID_PARAMETER:
			try_again = false;

			if (ac.report_buf.len > rep_len) {
				rep_len = PAGE_ALIGN(ac.report_buf.len);
				try_again = true;
			}

			if (ac.manifest_buf.len > man_len) {
				man_len = PAGE_ALIGN(ac.manifest_buf.len);
				try_again = true;
			}

			if (ac.certificates_buf.len > certs_len) {
				certs_len = PAGE_ALIGN(ac.certificates_buf.len);
				try_again = true;
			}

			/* If one of the buffers wasn't large enough, retry the request */
			if (try_again && retry_count < SVSM_MAX_RETRIES) {
				retry_count++;
				goto retry;
			}

			return -EINVAL;
		default:
			pr_err_ratelimited("SVSM attestation request failed (%d / 0x%llx)\n",
					   ret, call.rax_out);
			return -EINVAL;
		}
	}

	/*
	 * Allocate all the blob memory buffers at once so that the cleanup is
	 * done for errors that occur after the first allocation (i.e. before
	 * using no_free_ptr()).
	 */
	rep_len = ac.report_buf.len;
	void *rbuf __free(kvfree) = kvzalloc(rep_len, GFP_KERNEL);

	man_len = ac.manifest_buf.len;
	void *mbuf __free(kvfree) = kvzalloc(man_len, GFP_KERNEL);

	certs_len = ac.certificates_buf.len;
	void *cbuf __free(kvfree) = certs_len ? kvzalloc(certs_len, GFP_KERNEL) : NULL;

	if (!rbuf || !mbuf || (certs_len && !cbuf)) {
		free_pages_exact(buffer, size);
		return -ENOMEM;
	}

	memcpy(rbuf, rep, rep_len);
	report->outblob = no_free_ptr(rbuf);
	report->outblob_len = rep_len;

	memcpy(mbuf, man, man_len);
	report->manifestblob = no_free_ptr(mbuf);
	report->manifestblob_len = man_len;

	if (certs_len) {
		memcpy(cbuf, certs, certs_len);
		report->auxblob = no_free_ptr(cbuf);
		report->auxblob_len = certs_len;
	}

	free_pages_exact(buffer, size);

	return 0;
}

static int sev_report_new(struct tsm_report *report, void *data)
{
	struct snp_msg_cert_entry *cert_table;
	struct tsm_desc *desc = &report->desc;
	struct snp_guest_dev *snp_dev = data;
	struct snp_msg_report_resp_hdr hdr;
	const u32 report_size = SZ_4K;
	const u32 ext_size = SEV_FW_BLOB_MAX_SIZE;
	u32 certs_size, i, size = report_size + ext_size;
	int ret;

	if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE)
		return -EINVAL;

	if (desc->service_provider) {
		if (strcmp(desc->service_provider, "svsm"))
			return -EINVAL;

		return sev_svsm_report_new(report, data);
	}

	void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	guard(mutex)(&snp_cmd_mutex);

	/* Check if the VMPCK is not empty */
	if (is_vmpck_empty(snp_dev)) {
		dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
		return -ENOTTY;
	}

	cert_table = buf + report_size;
	struct snp_ext_report_req ext_req = {
		.data = { .vmpl = desc->privlevel },
		.certs_address = (__u64)cert_table,
		.certs_len = ext_size,
	};
	memcpy(&ext_req.data.user_data, desc->inblob, desc->inblob_len);

	struct snp_guest_request_ioctl input = {
		.msg_version = 1,
		.req_data = (__u64)&ext_req,
		.resp_data = (__u64)buf,
		.exitinfo2 = 0xff,
	};
	struct snp_req_resp io = {
		.req_data = KERNEL_SOCKPTR(&ext_req),
		.resp_data = KERNEL_SOCKPTR(buf),
	};

	ret = get_ext_report(snp_dev, &input, &io);
	if (ret)
		return ret;

	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.status == SEV_RET_INVALID_PARAM)
		return -EINVAL;
	if (hdr.status == SEV_RET_INVALID_KEY)
		return -EINVAL;
	if (hdr.status)
		return -ENXIO;
	if ((hdr.report_size + sizeof(hdr)) > report_size)
		return -ENOMEM;

	void *rbuf __free(kvfree) = kvzalloc(hdr.report_size, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	memcpy(rbuf, buf + sizeof(hdr), hdr.report_size);
	report->outblob = no_free_ptr(rbuf);
	report->outblob_len = hdr.report_size;

	certs_size = 0;
	for (i = 0; i < ext_size / sizeof(struct snp_msg_cert_entry); i++) {
		struct snp_msg_cert_entry *ent = &cert_table[i];

		if (guid_is_null(&ent->guid) && !ent->offset && !ent->length)
			break;
		certs_size = max(certs_size, ent->offset + ent->length);
	}

	/* Suspicious that the response populated entries without populating size */
	if (!certs_size && i)
		dev_warn_ratelimited(snp_dev->dev, "certificate slots conveyed without size\n");

	/* No certs to report */
	if (!certs_size)
		return 0;

	/* Suspicious that the certificate blob size contract was violated */
	if (certs_size > ext_size) {
		dev_warn_ratelimited(snp_dev->dev, "certificate data truncated\n");
		certs_size = ext_size;
	}

	void *cbuf __free(kvfree) = kvzalloc(certs_size, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	memcpy(cbuf, cert_table, certs_size);
	report->auxblob = no_free_ptr(cbuf);
	report->auxblob_len = certs_size;

	return 0;
}

static bool sev_report_attr_visible(int n)
{
	switch (n) {
	case TSM_REPORT_GENERATION:
	case TSM_REPORT_PROVIDER:
	case TSM_REPORT_PRIVLEVEL:
	case TSM_REPORT_PRIVLEVEL_FLOOR:
		return true;
	case TSM_REPORT_SERVICE_PROVIDER:
	case TSM_REPORT_SERVICE_GUID:
	case TSM_REPORT_SERVICE_MANIFEST_VER:
		return snp_vmpl;
	}

	return false;
}

static bool sev_report_bin_attr_visible(int n)
{
	switch (n) {
	case TSM_REPORT_INBLOB:
	case TSM_REPORT_OUTBLOB:
	case TSM_REPORT_AUXBLOB:
		return true;
	case TSM_REPORT_MANIFESTBLOB:
		return snp_vmpl;
	}

	return false;
}

static struct tsm_ops sev_tsm_ops = {
	.name = KBUILD_MODNAME,
	.report_new = sev_report_new,
	.report_attr_visible = sev_report_attr_visible,
	.report_bin_attr_visible = sev_report_bin_attr_visible,
};

static void unregister_sev_tsm(void *data)
{
	tsm_unregister(&sev_tsm_ops);
}

static int __init sev_guest_probe(struct platform_device *pdev)
{
	struct sev_guest_platform_data *data;
	struct snp_secrets_page *secrets;
	struct device *dev = &pdev->dev;
	struct snp_guest_dev *snp_dev;
	struct miscdevice *misc;
	void __iomem *mapping;
	int ret;

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return -ENODEV;

	if (!dev->platform_data)
		return -ENODEV;

	data = (struct sev_guest_platform_data *)dev->platform_data;
	mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
	if (!mapping)
		return -ENODEV;

	secrets = (__force void *)mapping;

	ret = -ENOMEM;
	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
	if (!snp_dev)
		goto e_unmap;

	/* Adjust the default VMPCK key based on the executing VMPL level */
	if (vmpck_id == -1)
		vmpck_id = snp_vmpl;

	ret = -EINVAL;
	snp_dev->vmpck = get_vmpck(vmpck_id, secrets, &snp_dev->os_area_msg_seqno);
	if (!snp_dev->vmpck) {
		dev_err(dev, "invalid vmpck id %d\n", vmpck_id);
		goto e_unmap;
	}

	/* Verify that VMPCK is not zero. */
	if (is_vmpck_empty(snp_dev)) {
		dev_err(dev, "vmpck id %d is null\n", vmpck_id);
		goto e_unmap;
	}

	platform_set_drvdata(pdev, snp_dev);
	snp_dev->dev = dev;
	snp_dev->secrets = secrets;

	/* Allocate the shared page used for the request and response message. */
	snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
	if (!snp_dev->request)
		goto e_unmap;

	snp_dev->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
	if (!snp_dev->response)
		goto e_free_request;

	snp_dev->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE);
	if (!snp_dev->certs_data)
		goto e_free_response;

	ret = -EIO;
	snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN);
	if (!snp_dev->crypto)
		goto e_free_cert_data;

	misc = &snp_dev->misc;
	misc->minor = MISC_DYNAMIC_MINOR;
	misc->name = DEVICE_NAME;
	misc->fops = &snp_guest_fops;

	/* Initialize the input addresses for the guest request */
	snp_dev->input.req_gpa = __pa(snp_dev->request);
	snp_dev->input.resp_gpa = __pa(snp_dev->response);
	snp_dev->input.data_gpa = __pa(snp_dev->certs_data);

	/* Set the privlevel_floor attribute based on the vmpck_id */
	sev_tsm_ops.privlevel_floor = vmpck_id;

	ret = tsm_register(&sev_tsm_ops, snp_dev);
	if (ret)
		goto e_free_cert_data;

	ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL);
	if (ret)
		goto e_free_cert_data;

	ret = misc_register(misc);
	if (ret)
		goto e_free_cert_data;

	dev_info(dev, "Initialized SEV guest driver (using vmpck_id %d)\n", vmpck_id);
	return 0;

e_free_cert_data:
	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
e_free_response:
	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
e_free_request:
	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
e_unmap:
	iounmap(mapping);
	return ret;
}

static void __exit sev_guest_remove(struct platform_device *pdev)
{
	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);

	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
	deinit_crypto(snp_dev->crypto);
	misc_deregister(&snp_dev->misc);
}

/*
 * This driver is meant to be a common SEV guest interface driver and to
 * support any SEV guest API. As such, even though it has been introduced
 * with the SEV-SNP support, it is named "sev-guest".
 *
 * sev_guest_remove() lives in .exit.text. For drivers registered via
 * module_platform_driver_probe() this is ok because they cannot get unbound
 * at runtime. So mark the driver struct with __refdata to prevent modpost
 * triggering a section mismatch warning.
 */
static struct platform_driver sev_guest_driver __refdata = {
	.remove_new = __exit_p(sev_guest_remove),
	.driver = {
		.name = "sev-guest",
	},
};

module_platform_driver_probe(sev_guest_driver, sev_guest_probe);

MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD SEV Guest Driver");
MODULE_ALIAS("platform:sev-guest");