1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * AMD Secure Encrypted Virtualization (SEV) guest driver interface
4 *
5 * Copyright (C) 2021-2024 Advanced Micro Devices, Inc.
6 *
7 * Author: Brijesh Singh <brijesh.singh@amd.com>
8 */
9
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/mutex.h>
14 #include <linux/io.h>
15 #include <linux/platform_device.h>
16 #include <linux/miscdevice.h>
17 #include <linux/set_memory.h>
18 #include <linux/fs.h>
19 #include <linux/tsm.h>
20 #include <crypto/gcm.h>
21 #include <linux/psp-sev.h>
22 #include <linux/sockptr.h>
23 #include <linux/cleanup.h>
24 #include <linux/uuid.h>
25 #include <linux/configfs.h>
26 #include <uapi/linux/sev-guest.h>
27 #include <uapi/linux/psp-sev.h>
28
29 #include <asm/svm.h>
30 #include <asm/sev.h>
31
/* Name of the misc char device registered under /dev. */
#define DEVICE_NAME	"sev-guest"

/*
 * Total wall-clock budget for retrying a host-throttled guest request, and
 * the delay between attempts (see __handle_guest_request()).
 */
#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
#define SNP_REQ_RETRY_DELAY		(2*HZ)

/* Max attempts to regrow undersized SVSM attestation buffers (sev_svsm_report_new()). */
#define SVSM_MAX_RETRIES		3
38
struct snp_guest_dev {
	struct device *dev;
	struct miscdevice misc;		/* the /dev/sev-guest char device */

	struct snp_msg_desc *msg_desc;	/* guest<->PSP messaging state */

	/*
	 * Shared scratch area for the inbound ioctl request payload, sized by
	 * the largest request type. Guest requests themselves are serialized
	 * by snp_cmd_mutex inside snp_send_guest_request(), but note that the
	 * copy_from_user() into this union happens before that lock is taken
	 * -- NOTE(review): confirm concurrent ioctls cannot race here.
	 */
	union {
		struct snp_report_req report;
		struct snp_derived_key_req derived_key;
		struct snp_ext_report_req ext_report;
	} req;
};
51
52 /*
53 * The VMPCK ID represents the key used by the SNP guest to communicate with the
54 * SEV firmware in the AMD Secure Processor (ASP, aka PSP). By default, the key
55 * used will be the key associated with the VMPL at which the guest is running.
56 * Should the default key be wiped (see snp_disable_vmpck()), this parameter
57 * allows for using one of the remaining VMPCKs.
58 */
/* -1 means "use the VMPL the guest runs at"; resolved in sev_guest_probe(). */
static int vmpck_id = -1;
module_param(vmpck_id, int, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");

/* Mutex to serialize the shared buffer access and command handling. */
static DEFINE_MUTEX(snp_cmd_mutex);
65
is_vmpck_empty(struct snp_msg_desc * mdesc)66 static bool is_vmpck_empty(struct snp_msg_desc *mdesc)
67 {
68 char zero_key[VMPCK_KEY_LEN] = {0};
69
70 if (mdesc->vmpck)
71 return !memcmp(mdesc->vmpck, zero_key, VMPCK_KEY_LEN);
72
73 return true;
74 }
75
76 /*
77 * If an error is received from the host or AMD Secure Processor (ASP) there
78 * are two options. Either retry the exact same encrypted request or discontinue
79 * using the VMPCK.
80 *
81 * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
82 * encrypt the requests. The IV for this scheme is the sequence number. GCM
83 * cannot tolerate IV reuse.
84 *
85 * The ASP FW v1.51 only increments the sequence numbers on a successful
86 * guest<->ASP back and forth and only accepts messages at its exact sequence
87 * number.
88 *
89 * So if the sequence number were to be reused the encryption scheme is
90 * vulnerable. If the sequence number were incremented for a fresh IV the ASP
91 * will reject the request.
92 */
/*
 * Wipe the VMPCK and mark it unusable. Invoked when a request/response
 * exchange can no longer be trusted (see the IV-reuse discussion above):
 * continuing to use this key could reuse an AES-GCM IV.
 */
static void snp_disable_vmpck(struct snp_msg_desc *mdesc)
{
	pr_alert("Disabling VMPCK%d communication key to prevent IV reuse.\n",
		 vmpck_id);
	/* memzero_explicit() so the wipe cannot be optimized away. */
	memzero_explicit(mdesc->vmpck, VMPCK_KEY_LEN);
	mdesc->vmpck = NULL;
}
100
__snp_get_msg_seqno(struct snp_msg_desc * mdesc)101 static inline u64 __snp_get_msg_seqno(struct snp_msg_desc *mdesc)
102 {
103 u64 count;
104
105 lockdep_assert_held(&snp_cmd_mutex);
106
107 /* Read the current message sequence counter from secrets pages */
108 count = *mdesc->os_area_msg_seqno;
109
110 return count + 1;
111 }
112
113 /* Return a non-zero on success */
snp_get_msg_seqno(struct snp_msg_desc * mdesc)114 static u64 snp_get_msg_seqno(struct snp_msg_desc *mdesc)
115 {
116 u64 count = __snp_get_msg_seqno(mdesc);
117
118 /*
119 * The message sequence counter for the SNP guest request is a 64-bit
120 * value but the version 2 of GHCB specification defines a 32-bit storage
121 * for it. If the counter exceeds the 32-bit value then return zero.
122 * The caller should check the return value, but if the caller happens to
123 * not check the value and use it, then the firmware treats zero as an
124 * invalid number and will fail the message request.
125 */
126 if (count >= UINT_MAX) {
127 pr_err("request message sequence counter overflow\n");
128 return 0;
129 }
130
131 return count;
132 }
133
/* Advance the stored sequence counter after a completed exchange. */
static void snp_inc_msg_seqno(struct snp_msg_desc *mdesc)
{
	/*
	 * The counter is also incremented by the PSP, so increment it by 2
	 * and save in secrets page.
	 */
	*mdesc->os_area_msg_seqno += 2;
}
142
to_snp_dev(struct file * file)143 static inline struct snp_guest_dev *to_snp_dev(struct file *file)
144 {
145 struct miscdevice *dev = file->private_data;
146
147 return container_of(dev, struct snp_guest_dev, misc);
148 }
149
/*
 * Allocate and key an AES-GCM context for VMPCK-based messaging.
 *
 * Returns the ready-to-use context, or NULL on allocation or key-expansion
 * failure. The caller owns the context and frees it with kfree().
 */
static struct aesgcm_ctx *snp_init_crypto(u8 *key, size_t keylen)
{
	struct aesgcm_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);

	if (!ctx)
		return NULL;

	if (!aesgcm_expandkey(ctx, key, keylen, AUTHTAG_LEN))
		return ctx;

	pr_err("Crypto context initialization failed\n");
	kfree(ctx);
	return NULL;
}
166
verify_and_dec_payload(struct snp_msg_desc * mdesc,struct snp_guest_req * req)167 static int verify_and_dec_payload(struct snp_msg_desc *mdesc, struct snp_guest_req *req)
168 {
169 struct snp_guest_msg *resp_msg = &mdesc->secret_response;
170 struct snp_guest_msg *req_msg = &mdesc->secret_request;
171 struct snp_guest_msg_hdr *req_msg_hdr = &req_msg->hdr;
172 struct snp_guest_msg_hdr *resp_msg_hdr = &resp_msg->hdr;
173 struct aesgcm_ctx *ctx = mdesc->ctx;
174 u8 iv[GCM_AES_IV_SIZE] = {};
175
176 pr_debug("response [seqno %lld type %d version %d sz %d]\n",
177 resp_msg_hdr->msg_seqno, resp_msg_hdr->msg_type, resp_msg_hdr->msg_version,
178 resp_msg_hdr->msg_sz);
179
180 /* Copy response from shared memory to encrypted memory. */
181 memcpy(resp_msg, mdesc->response, sizeof(*resp_msg));
182
183 /* Verify that the sequence counter is incremented by 1 */
184 if (unlikely(resp_msg_hdr->msg_seqno != (req_msg_hdr->msg_seqno + 1)))
185 return -EBADMSG;
186
187 /* Verify response message type and version number. */
188 if (resp_msg_hdr->msg_type != (req_msg_hdr->msg_type + 1) ||
189 resp_msg_hdr->msg_version != req_msg_hdr->msg_version)
190 return -EBADMSG;
191
192 /*
193 * If the message size is greater than our buffer length then return
194 * an error.
195 */
196 if (unlikely((resp_msg_hdr->msg_sz + ctx->authsize) > req->resp_sz))
197 return -EBADMSG;
198
199 /* Decrypt the payload */
200 memcpy(iv, &resp_msg_hdr->msg_seqno, min(sizeof(iv), sizeof(resp_msg_hdr->msg_seqno)));
201 if (!aesgcm_decrypt(ctx, req->resp_buf, resp_msg->payload, resp_msg_hdr->msg_sz,
202 &resp_msg_hdr->algo, AAD_LEN, iv, resp_msg_hdr->authtag))
203 return -EBADMSG;
204
205 return 0;
206 }
207
/*
 * Build the encrypted request in the private buffer mdesc->secret_request:
 * fill in the message header, then AES-GCM encrypt the caller's payload
 * using part of the header (from ->algo, AAD_LEN bytes) as AAD and the
 * sequence number as IV.
 *
 * Returns 0 on success, -ENOSR if the sequence number is zero (counter
 * exhausted/overflowed), -EBADMSG if payload + authtag would overflow the
 * message payload buffer.
 */
static int enc_payload(struct snp_msg_desc *mdesc, u64 seqno, struct snp_guest_req *req)
{
	struct snp_guest_msg *msg = &mdesc->secret_request;
	struct snp_guest_msg_hdr *hdr = &msg->hdr;
	struct aesgcm_ctx *ctx = mdesc->ctx;
	u8 iv[GCM_AES_IV_SIZE] = {};

	memset(msg, 0, sizeof(*msg));

	hdr->algo = SNP_AEAD_AES_256_GCM;
	hdr->hdr_version = MSG_HDR_VER;
	hdr->hdr_sz = sizeof(*hdr);
	hdr->msg_type = req->msg_type;
	hdr->msg_version = req->msg_version;
	hdr->msg_seqno = seqno;
	hdr->msg_vmpck = req->vmpck_id;
	hdr->msg_sz = req->req_sz;

	/* Verify the sequence number is non-zero */
	if (!hdr->msg_seqno)
		return -ENOSR;

	pr_debug("request [seqno %lld type %d version %d sz %d]\n",
		 hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);

	if (WARN_ON((req->req_sz + ctx->authsize) > sizeof(msg->payload)))
		return -EBADMSG;

	/* IV is the 64-bit sequence number zero-padded to the 12-byte GCM IV. */
	memcpy(iv, &hdr->msg_seqno, min(sizeof(iv), sizeof(hdr->msg_seqno)));
	aesgcm_encrypt(ctx, msg->payload, req->req_buf, req->req_sz, &hdr->algo,
		       AAD_LEN, iv, hdr->authtag);

	return 0;
}
242
/*
 * Issue the already-encrypted request to the firmware, retrying as needed.
 *
 * Both retry cases re-send the *same* encrypted message so that the
 * AES-GCM IV (the sequence number) is never reused with different data:
 *
 *  - -ENOSPC: the extended request's certificate buffer was too small.
 *    The request is reissued as a plain guest request so the sequence
 *    number still advances; the caller is informed via the overridden
 *    INVALID_LEN error plus the required page count.
 *
 *  - -EAGAIN: the host throttled the request; retry for up to
 *    SNP_REQ_MAX_RETRY_DURATION, then give up with -ETIMEDOUT.
 *
 * On return the stored message sequence number has been advanced.
 */
static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
				  struct snp_guest_request_ioctl *rio)
{
	unsigned long req_start = jiffies;
	unsigned int override_npages = 0;
	u64 override_err = 0;
	int rc;

retry_request:
	/*
	 * Call firmware to process the request. In this function the encrypted
	 * message enters shared memory with the host. So after this call the
	 * sequence number must be incremented or the VMPCK must be deleted to
	 * prevent reuse of the IV.
	 */
	rc = snp_issue_guest_request(req, &mdesc->input, rio);
	switch (rc) {
	case -ENOSPC:
		/*
		 * If the extended guest request fails due to having too
		 * small of a certificate data buffer, retry the same
		 * guest request without the extended data request in
		 * order to increment the sequence number and thus avoid
		 * IV reuse.
		 */
		override_npages = mdesc->input.data_npages;
		req->exit_code = SVM_VMGEXIT_GUEST_REQUEST;

		/*
		 * Override the error to inform callers the given extended
		 * request buffer size was too small and give the caller the
		 * required buffer size.
		 */
		override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);

		/*
		 * If this call to the firmware succeeds, the sequence number can
		 * be incremented allowing for continued use of the VMPCK. If
		 * there is an error reflected in the return value, this value
		 * is checked further down and the result will be the deletion
		 * of the VMPCK and the error code being propagated back to the
		 * user as an ioctl() return code.
		 */
		goto retry_request;

	/*
	 * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
	 * throttled. Retry in the driver to avoid returning and reusing the
	 * message sequence number on a different message.
	 */
	case -EAGAIN:
		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
			rc = -ETIMEDOUT;
			break;
		}
		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
		goto retry_request;
	}

	/*
	 * Increment the message sequence number. There is no harm in doing
	 * this now because decryption uses the value stored in the response
	 * structure and any failure will wipe the VMPCK, preventing further
	 * use anyway.
	 */
	snp_inc_msg_seqno(mdesc);

	if (override_err) {
		rio->exitinfo2 = override_err;

		/*
		 * If an extended guest request was issued and the supplied certificate
		 * buffer was not large enough, a standard guest request was issued to
		 * prevent IV reuse. If the standard request was successful, return -EIO
		 * back to the caller as would have originally been returned.
		 */
		if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			rc = -EIO;
	}

	/* Report the required certificate buffer size back to the caller. */
	if (override_npages)
		mdesc->input.data_npages = override_npages;

	return rc;
}
328
/*
 * Serialized entry point for one guest message exchange: check the VMPCK,
 * obtain the next sequence number, encrypt the request, hand it to the
 * firmware, then verify and decrypt the response.
 *
 * On any failure other than the expected INVALID_LEN certificate-length
 * probe, the VMPCK is wiped to prevent AES-GCM IV reuse (see the comment
 * block above snp_disable_vmpck()).
 *
 * Returns 0 on success or a negative errno; firmware/VMM error detail is
 * reported via rio->exitinfo2.
 */
static int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
				  struct snp_guest_request_ioctl *rio)
{
	u64 seqno;
	int rc;

	/* Serializes the shared buffers and the sequence counter. */
	guard(mutex)(&snp_cmd_mutex);

	/* Check if the VMPCK is not empty */
	if (is_vmpck_empty(mdesc)) {
		pr_err_ratelimited("VMPCK is disabled\n");
		return -ENOTTY;
	}

	/* Get message sequence and verify that its a non-zero */
	seqno = snp_get_msg_seqno(mdesc);
	if (!seqno)
		return -EIO;

	/* Clear shared memory's response for the host to populate. */
	memset(mdesc->response, 0, sizeof(struct snp_guest_msg));

	/* Encrypt the userspace provided payload in mdesc->secret_request. */
	rc = enc_payload(mdesc, seqno, req);
	if (rc)
		return rc;

	/*
	 * Write the fully encrypted request to the shared unencrypted
	 * request page.
	 */
	memcpy(mdesc->request, &mdesc->secret_request,
	       sizeof(mdesc->secret_request));

	rc = __handle_guest_request(mdesc, req, rio);
	if (rc) {
		/*
		 * INVALID_LEN is the benign "certificate buffer too small"
		 * probe result: the sequence number already advanced safely,
		 * so the VMPCK stays usable.
		 */
		if (rc == -EIO &&
		    rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			return rc;

		pr_alert("Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
			 rc, rio->exitinfo2);

		snp_disable_vmpck(mdesc);
		return rc;
	}

	rc = verify_and_dec_payload(mdesc, req);
	if (rc) {
		pr_alert("Detected unexpected decode failure from ASP. rc: %d\n", rc);
		snp_disable_vmpck(mdesc);
		return rc;
	}

	return 0;
}
385
/*
 * Request/response buffer pair for get_ext_report(). sockptr_t allows the
 * same code path to serve both the ioctl (user pointers) and the
 * configfs-tsm (kernel pointers) callers.
 */
struct snp_req_resp {
	sockptr_t req_data;
	sockptr_t resp_data;
};
390
/*
 * SNP_GET_REPORT ioctl: fetch an attestation report from the PSP.
 *
 * Copies the user's request into the per-device scratch area, sends an
 * SNP_MSG_REPORT_REQ guest message and copies the response structure back
 * to userspace. Returns 0 on success or a negative errno.
 */
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_report_req *report_req = &snp_dev->req.report;
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_report_resp *report_resp;
	struct snp_guest_req req = {};
	int rc, resp_len;

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req)))
		return -EFAULT;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!report_resp)
		return -ENOMEM;

	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_REPORT_REQ;
	req.vmpck_id = vmpck_id;
	req.req_buf = report_req;
	req.req_sz = sizeof(*report_req);
	req.resp_buf = report_resp->data;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;

	rc = snp_send_guest_request(mdesc, &req, arg);
	if (rc)
		goto e_free;

	/* Only sizeof(*report_resp) goes back; the authtag scratch tail does not. */
	if (copy_to_user((void __user *)arg->resp_data, report_resp, sizeof(*report_resp)))
		rc = -EFAULT;

e_free:
	kfree(report_resp);
	return rc;
}
435
/*
 * SNP_GET_DERIVED_KEY ioctl: ask the PSP to derive a key for the guest.
 *
 * Uses a fixed-size stack buffer for the intermediate (decrypted) response
 * and explicitly zeroizes both it and the response struct afterwards,
 * since the derived key is sensitive material.
 */
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_derived_key_req *derived_key_req = &snp_dev->req.derived_key;
	struct snp_derived_key_resp derived_key_resp = {0};
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_guest_req req = {};
	int rc, resp_len;
	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
	u8 buf[64 + 16];

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize;
	if (sizeof(buf) < resp_len)
		return -ENOMEM;

	if (copy_from_user(derived_key_req, (void __user *)arg->req_data,
			   sizeof(*derived_key_req)))
		return -EFAULT;

	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_KEY_REQ;
	req.vmpck_id = vmpck_id;
	req.req_buf = derived_key_req;
	req.req_sz = sizeof(*derived_key_req);
	req.resp_buf = buf;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;

	rc = snp_send_guest_request(mdesc, &req, arg);
	if (rc)
		return rc;

	memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data));
	if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp,
			 sizeof(derived_key_resp)))
		rc = -EFAULT;

	/* The response buffer contains the sensitive data, explicitly clear it. */
	memzero_explicit(buf, sizeof(buf));
	memzero_explicit(&derived_key_resp, sizeof(derived_key_resp));
	return rc;
}
485
/*
 * SNP_GET_EXT_REPORT: attestation report plus the host-provided certificate
 * blob needed to verify it.
 *
 * Serves two callers: the ioctl path (user-space sockptrs) and the
 * configfs-tsm path via sev_report_new() (kernel sockptrs) -- the sockptr
 * kind of io->resp_data decides how certs_address is interpreted.
 *
 * If the supplied certificate buffer is too small, the required length is
 * written back through io->req_data and -EIO/-EFAULT is returned with
 * vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN.
 */
static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg,
			  struct snp_req_resp *io)

{
	struct snp_ext_report_req *report_req = &snp_dev->req.ext_report;
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_report_resp *report_resp;
	struct snp_guest_req req = {};
	int ret, npages = 0, resp_len;
	sockptr_t certs_address;

	if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
		return -EINVAL;

	if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req)))
		return -EFAULT;

	/* caller does not want certificate data */
	if (!report_req->certs_len || !report_req->certs_address)
		goto cmd;

	if (report_req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
	    !IS_ALIGNED(report_req->certs_len, PAGE_SIZE))
		return -EINVAL;

	if (sockptr_is_kernel(io->resp_data)) {
		certs_address = KERNEL_SOCKPTR((void *)report_req->certs_address);
	} else {
		certs_address = USER_SOCKPTR((void __user *)report_req->certs_address);
		if (!access_ok(certs_address.user, report_req->certs_len))
			return -EFAULT;
	}

	/*
	 * Initialize the intermediate buffer with all zeros. This buffer
	 * is used in the guest request message to get the certs blob from
	 * the host. If host does not supply any certs in it, then copy
	 * zeros to indicate that certificate data was not provided.
	 */
	memset(mdesc->certs_data, 0, report_req->certs_len);
	npages = report_req->certs_len >> PAGE_SHIFT;
cmd:
	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!report_resp)
		return -ENOMEM;

	mdesc->input.data_npages = npages;

	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_REPORT_REQ;
	req.vmpck_id = vmpck_id;
	req.req_buf = &report_req->data;
	req.req_sz = sizeof(report_req->data);
	req.resp_buf = report_resp->data;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_EXT_GUEST_REQUEST;

	ret = snp_send_guest_request(mdesc, &req, arg);

	/* If certs length is invalid then copy the returned length */
	if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
		report_req->certs_len = mdesc->input.data_npages << PAGE_SHIFT;

		if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req)))
			ret = -EFAULT;
	}

	if (ret)
		goto e_free;

	/* certs_address is only initialized when npages != 0, hence the guard. */
	if (npages && copy_to_sockptr(certs_address, mdesc->certs_data, report_req->certs_len)) {
		ret = -EFAULT;
		goto e_free;
	}

	if (copy_to_sockptr(io->resp_data, report_resp, sizeof(*report_resp)))
		ret = -EFAULT;

e_free:
	kfree(report_resp);
	return ret;
}
574
/*
 * Dispatch the SNP guest ioctls. input.exitinfo2 is copied back to
 * userspace whenever it is non-zero so callers can see the firmware/VMM
 * error detail; it is primed with 0xff before any firmware call is made
 * (NOTE(review): presumably the "no firmware call" sentinel -- confirm
 * against the SEV-SNP ABI).
 */
static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	struct snp_guest_dev *snp_dev = to_snp_dev(file);
	void __user *argp = (void __user *)arg;
	struct snp_guest_request_ioctl input;
	struct snp_req_resp io;
	int ret = -ENOTTY;

	if (copy_from_user(&input, argp, sizeof(input)))
		return -EFAULT;

	input.exitinfo2 = 0xff;

	/* Message version must be non-zero */
	if (!input.msg_version)
		return -EINVAL;

	switch (ioctl) {
	case SNP_GET_REPORT:
		ret = get_report(snp_dev, &input);
		break;
	case SNP_GET_DERIVED_KEY:
		ret = get_derived_key(snp_dev, &input);
		break;
	case SNP_GET_EXT_REPORT:
		/*
		 * As get_ext_report() may be called from the ioctl() path and a
		 * kernel internal path (configfs-tsm), decorate the passed
		 * buffers as user pointers.
		 */
		io.req_data = USER_SOCKPTR((void __user *)input.req_data);
		io.resp_data = USER_SOCKPTR((void __user *)input.resp_data);
		ret = get_ext_report(snp_dev, &input, &io);
		break;
	default:
		break;
	}

	if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
		return -EFAULT;

	return ret;
}
618
/*
 * Release pages previously shared with the host via alloc_shared_pages().
 *
 * The pages must be converted back to encrypted before being returned to
 * the page allocator; if that conversion fails they are deliberately
 * leaked rather than handed back while still shared.
 */
static void free_shared_pages(void *buf, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	int rc;

	if (!buf)
		return;

	rc = set_memory_encrypted((unsigned long)buf, npages);
	if (!rc) {
		__free_pages(virt_to_page(buf), get_order(sz));
		return;
	}

	WARN_ONCE(rc, "failed to restore encryption mask (leak it)\n");
}
635
/*
 * Allocate pages and convert them to decrypted (shared-with-host) state.
 *
 * Returns the kernel virtual address of the shared region, or NULL on
 * allocation or conversion failure. Pair with free_shared_pages().
 */
static void *alloc_shared_pages(struct device *dev, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	unsigned int order = get_order(sz);
	struct page *page;
	void *addr;
	int rc;

	page = alloc_pages(GFP_KERNEL_ACCOUNT, order);
	if (!page)
		return NULL;

	addr = page_address(page);
	rc = set_memory_decrypted((unsigned long)addr, npages);
	if (rc) {
		dev_err(dev, "failed to mark page shared, ret=%d\n", rc);
		__free_pages(page, order);
		return NULL;
	}

	return addr;
}
655
/* No read/write; all interaction with /dev/sev-guest is via ioctl(). */
static const struct file_operations snp_guest_fops = {
	.owner	= THIS_MODULE,
	.unlocked_ioctl = snp_guest_ioctl,
};
660
get_vmpck(int id,struct snp_secrets_page * secrets,u32 ** seqno)661 static u8 *get_vmpck(int id, struct snp_secrets_page *secrets, u32 **seqno)
662 {
663 u8 *key = NULL;
664
665 switch (id) {
666 case 0:
667 *seqno = &secrets->os_area.msg_seqno_0;
668 key = secrets->vmpck0;
669 break;
670 case 1:
671 *seqno = &secrets->os_area.msg_seqno_1;
672 key = secrets->vmpck1;
673 break;
674 case 2:
675 *seqno = &secrets->os_area.msg_seqno_2;
676 key = secrets->vmpck2;
677 break;
678 case 3:
679 *seqno = &secrets->os_area.msg_seqno_3;
680 key = secrets->vmpck3;
681 break;
682 default:
683 break;
684 }
685
686 return key;
687 }
688
/* Header the firmware prepends to an attestation report response payload. */
struct snp_msg_report_resp_hdr {
	u32 status;		/* firmware status; compared against SEV_RET_* codes */
	u32 report_size;	/* size in bytes of the report that follows */
	u8 rsvd[24];
};

/* One entry of the GUID-keyed certificate table returned by the host. */
struct snp_msg_cert_entry {
	guid_t guid;		/* all-zero GUID with zero offset/length terminates the table */
	u32 offset;		/* byte offset of this cert blob within the table buffer */
	u32 length;		/* byte length of this cert blob */
};
700
/*
 * Generate an attestation report through the SVSM (used when the guest
 * runs at VMPL > 0 under a Secure VM Service Module).
 *
 * Allocates contiguous 4K-aligned scratch for the report, manifest and
 * certificate blobs and issues SVSM_ATTEST_SERVICES (or
 * SVSM_ATTEST_SINGLE_SERVICE when a service GUID was configured). If the
 * SVSM reports any buffer too small, the lengths are grown and the request
 * retried up to SVSM_MAX_RETRIES times. On success the blobs are copied
 * into kvzalloc'ed buffers whose ownership transfers to @report.
 *
 * Returns 0 on success, -ENOMEM or -EINVAL on failure.
 */
static int sev_svsm_report_new(struct tsm_report *report, void *data)
{
	unsigned int rep_len, man_len, certs_len;
	struct tsm_desc *desc = &report->desc;
	struct svsm_attest_call ac = {};
	unsigned int retry_count;
	void *rep, *man, *certs;
	struct svsm_call call;
	unsigned int size;
	bool try_again;
	void *buffer;
	u64 call_id;
	int ret;

	/*
	 * Allocate pages for the request:
	 * - Report blob (4K)
	 * - Manifest blob (4K)
	 * - Certificate blob (16K)
	 *
	 * Above addresses must be 4K aligned
	 */
	rep_len = SZ_4K;
	man_len = SZ_4K;
	certs_len = SEV_FW_BLOB_MAX_SIZE;

	if (guid_is_null(&desc->service_guid)) {
		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES);
	} else {
		export_guid(ac.service_guid, &desc->service_guid);
		ac.service_manifest_ver = desc->service_manifest_version;

		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SINGLE_SERVICE);
	}

	retry_count = 0;

retry:
	memset(&call, 0, sizeof(call));

	size = rep_len + man_len + certs_len;
	buffer = alloc_pages_exact(size, __GFP_ZERO);
	if (!buffer)
		return -ENOMEM;

	/* Carve the single allocation into the three physically-described buffers. */
	rep = buffer;
	ac.report_buf.pa = __pa(rep);
	ac.report_buf.len = rep_len;

	man = rep + rep_len;
	ac.manifest_buf.pa = __pa(man);
	ac.manifest_buf.len = man_len;

	certs = man + man_len;
	ac.certificates_buf.pa = __pa(certs);
	ac.certificates_buf.len = certs_len;

	ac.nonce.pa = __pa(desc->inblob);
	ac.nonce.len = desc->inblob_len;

	ret = snp_issue_svsm_attest_req(call_id, &call, &ac);
	if (ret) {
		free_pages_exact(buffer, size);

		switch (call.rax_out) {
		case SVSM_ERR_INVALID_PARAMETER:
			try_again = false;

			/* The SVSM wrote back the required lengths; grow and retry. */
			if (ac.report_buf.len > rep_len) {
				rep_len = PAGE_ALIGN(ac.report_buf.len);
				try_again = true;
			}

			if (ac.manifest_buf.len > man_len) {
				man_len = PAGE_ALIGN(ac.manifest_buf.len);
				try_again = true;
			}

			if (ac.certificates_buf.len > certs_len) {
				certs_len = PAGE_ALIGN(ac.certificates_buf.len);
				try_again = true;
			}

			/* If one of the buffers wasn't large enough, retry the request */
			if (try_again && retry_count < SVSM_MAX_RETRIES) {
				retry_count++;
				goto retry;
			}

			return -EINVAL;
		default:
			pr_err_ratelimited("SVSM attestation request failed (%d / 0x%llx)\n",
					   ret, call.rax_out);
			return -EINVAL;
		}
	}

	/*
	 * Allocate all the blob memory buffers at once so that the cleanup is
	 * done for errors that occur after the first allocation (i.e. before
	 * using no_free_ptr()).
	 */
	rep_len = ac.report_buf.len;
	void *rbuf __free(kvfree) = kvzalloc(rep_len, GFP_KERNEL);

	man_len = ac.manifest_buf.len;
	void *mbuf __free(kvfree) = kvzalloc(man_len, GFP_KERNEL);

	certs_len = ac.certificates_buf.len;
	void *cbuf __free(kvfree) = certs_len ? kvzalloc(certs_len, GFP_KERNEL) : NULL;

	if (!rbuf || !mbuf || (certs_len && !cbuf)) {
		free_pages_exact(buffer, size);
		return -ENOMEM;
	}

	/* no_free_ptr() transfers ownership of each blob to @report. */
	memcpy(rbuf, rep, rep_len);
	report->outblob = no_free_ptr(rbuf);
	report->outblob_len = rep_len;

	memcpy(mbuf, man, man_len);
	report->manifestblob = no_free_ptr(mbuf);
	report->manifestblob_len = man_len;

	if (certs_len) {
		memcpy(cbuf, certs, certs_len);
		report->auxblob = no_free_ptr(cbuf);
		report->auxblob_len = certs_len;
	}

	free_pages_exact(buffer, size);

	return 0;
}
835
/*
 * configfs-tsm report_new hook: generate an SNP attestation report.
 *
 * Routes to sev_svsm_report_new() when desc->service_provider == "svsm".
 * Otherwise issues an extended report request entirely with kernel
 * buffers (KERNEL_SOCKPTR) and splits the result into report->outblob
 * and, when the host supplied certificates, report->auxblob.
 */
static int sev_report_new(struct tsm_report *report, void *data)
{
	struct snp_msg_cert_entry *cert_table;
	struct tsm_desc *desc = &report->desc;
	struct snp_guest_dev *snp_dev = data;
	struct snp_msg_report_resp_hdr hdr;
	const u32 report_size = SZ_4K;
	const u32 ext_size = SEV_FW_BLOB_MAX_SIZE;
	u32 certs_size, i, size = report_size + ext_size;
	int ret;

	if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE)
		return -EINVAL;

	if (desc->service_provider) {
		if (strcmp(desc->service_provider, "svsm"))
			return -EINVAL;

		return sev_svsm_report_new(report, data);
	}

	/* One buffer: report response first, certificate table after it. */
	void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cert_table = buf + report_size;
	struct snp_ext_report_req ext_req = {
		.data = { .vmpl = desc->privlevel },
		.certs_address = (__u64)cert_table,
		.certs_len = ext_size,
	};
	memcpy(&ext_req.data.user_data, desc->inblob, desc->inblob_len);

	struct snp_guest_request_ioctl input = {
		.msg_version = 1,
		.req_data = (__u64)&ext_req,
		.resp_data = (__u64)buf,
		.exitinfo2 = 0xff,
	};
	struct snp_req_resp io = {
		.req_data = KERNEL_SOCKPTR(&ext_req),
		.resp_data = KERNEL_SOCKPTR(buf),
	};

	ret = get_ext_report(snp_dev, &input, &io);
	if (ret)
		return ret;

	/* Validate the firmware response header before trusting report_size. */
	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.status == SEV_RET_INVALID_PARAM)
		return -EINVAL;
	if (hdr.status == SEV_RET_INVALID_KEY)
		return -EINVAL;
	if (hdr.status)
		return -ENXIO;
	if ((hdr.report_size + sizeof(hdr)) > report_size)
		return -ENOMEM;

	void *rbuf __free(kvfree) = kvzalloc(hdr.report_size, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	memcpy(rbuf, buf + sizeof(hdr), hdr.report_size);
	report->outblob = no_free_ptr(rbuf);
	report->outblob_len = hdr.report_size;

	/* Walk the GUID table (terminated by an all-zero entry) to size the blob. */
	certs_size = 0;
	for (i = 0; i < ext_size / sizeof(struct snp_msg_cert_entry); i++) {
		struct snp_msg_cert_entry *ent = &cert_table[i];

		if (guid_is_null(&ent->guid) && !ent->offset && !ent->length)
			break;
		certs_size = max(certs_size, ent->offset + ent->length);
	}

	/* Suspicious that the response populated entries without populating size */
	if (!certs_size && i)
		dev_warn_ratelimited(snp_dev->dev, "certificate slots conveyed without size\n");

	/* No certs to report */
	if (!certs_size)
		return 0;

	/* Suspicious that the certificate blob size contract was violated */
	if (certs_size > ext_size) {
		dev_warn_ratelimited(snp_dev->dev, "certificate data truncated\n");
		certs_size = ext_size;
	}

	void *cbuf __free(kvfree) = kvzalloc(certs_size, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	memcpy(cbuf, cert_table, certs_size);
	report->auxblob = no_free_ptr(cbuf);
	report->auxblob_len = certs_size;

	return 0;
}
936
sev_report_attr_visible(int n)937 static bool sev_report_attr_visible(int n)
938 {
939 switch (n) {
940 case TSM_REPORT_GENERATION:
941 case TSM_REPORT_PROVIDER:
942 case TSM_REPORT_PRIVLEVEL:
943 case TSM_REPORT_PRIVLEVEL_FLOOR:
944 return true;
945 case TSM_REPORT_SERVICE_PROVIDER:
946 case TSM_REPORT_SERVICE_GUID:
947 case TSM_REPORT_SERVICE_MANIFEST_VER:
948 return snp_vmpl;
949 }
950
951 return false;
952 }
953
sev_report_bin_attr_visible(int n)954 static bool sev_report_bin_attr_visible(int n)
955 {
956 switch (n) {
957 case TSM_REPORT_INBLOB:
958 case TSM_REPORT_OUTBLOB:
959 case TSM_REPORT_AUXBLOB:
960 return true;
961 case TSM_REPORT_MANIFESTBLOB:
962 return snp_vmpl;
963 }
964
965 return false;
966 }
967
/* Not const: .privlevel_floor is filled in at probe time from vmpck_id. */
static struct tsm_ops sev_tsm_ops = {
	.name = KBUILD_MODNAME,
	.report_new = sev_report_new,
	.report_attr_visible = sev_report_attr_visible,
	.report_bin_attr_visible = sev_report_bin_attr_visible,
};
974
/* devm action: tear down the TSM report interface on device removal. */
static void unregister_sev_tsm(void *data)
{
	tsm_unregister(&sev_tsm_ops);
}
979
/*
 * Probe: map the SNP secrets page, resolve the VMPCK, allocate the
 * host-shared request/response/certificate pages, set up the AES-GCM
 * context, register the TSM report interface and the /dev/sev-guest misc
 * device.
 *
 * Error handling is a goto-cleanup ladder in reverse acquisition order;
 * snp_dev/mdesc themselves are devm-managed.
 */
static int __init sev_guest_probe(struct platform_device *pdev)
{
	struct sev_guest_platform_data *data;
	struct snp_secrets_page *secrets;
	struct device *dev = &pdev->dev;
	struct snp_guest_dev *snp_dev;
	struct snp_msg_desc *mdesc;
	struct miscdevice *misc;
	void __iomem *mapping;
	int ret;

	/* A guest message must fit in the single shared page. */
	BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE);

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return -ENODEV;

	if (!dev->platform_data)
		return -ENODEV;

	data = (struct sev_guest_platform_data *)dev->platform_data;
	mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
	if (!mapping)
		return -ENODEV;

	secrets = (__force void *)mapping;

	ret = -ENOMEM;
	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
	if (!snp_dev)
		goto e_unmap;

	mdesc = devm_kzalloc(&pdev->dev, sizeof(struct snp_msg_desc), GFP_KERNEL);
	if (!mdesc)
		goto e_unmap;

	/* Adjust the default VMPCK key based on the executing VMPL level */
	if (vmpck_id == -1)
		vmpck_id = snp_vmpl;

	ret = -EINVAL;
	mdesc->vmpck = get_vmpck(vmpck_id, secrets, &mdesc->os_area_msg_seqno);
	if (!mdesc->vmpck) {
		dev_err(dev, "Invalid VMPCK%d communication key\n", vmpck_id);
		goto e_unmap;
	}

	/* Verify that VMPCK is not zero. */
	if (is_vmpck_empty(mdesc)) {
		dev_err(dev, "Empty VMPCK%d communication key\n", vmpck_id);
		goto e_unmap;
	}

	platform_set_drvdata(pdev, snp_dev);
	snp_dev->dev = dev;
	mdesc->secrets = secrets;

	/* Allocate the shared page used for the request and response message. */
	mdesc->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
	if (!mdesc->request)
		goto e_unmap;

	mdesc->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
	if (!mdesc->response)
		goto e_free_request;

	mdesc->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE);
	if (!mdesc->certs_data)
		goto e_free_response;

	ret = -EIO;
	mdesc->ctx = snp_init_crypto(mdesc->vmpck, VMPCK_KEY_LEN);
	if (!mdesc->ctx)
		goto e_free_cert_data;

	misc = &snp_dev->misc;
	misc->minor = MISC_DYNAMIC_MINOR;
	misc->name = DEVICE_NAME;
	misc->fops = &snp_guest_fops;

	/* Initialize the input addresses for guest request */
	mdesc->input.req_gpa = __pa(mdesc->request);
	mdesc->input.resp_gpa = __pa(mdesc->response);
	mdesc->input.data_gpa = __pa(mdesc->certs_data);

	/* Set the privlevel_floor attribute based on the vmpck_id */
	sev_tsm_ops.privlevel_floor = vmpck_id;

	ret = tsm_register(&sev_tsm_ops, snp_dev);
	if (ret)
		goto e_free_cert_data;

	/* devm action handles tsm_unregister() on all later failures/teardown. */
	ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL);
	if (ret)
		goto e_free_cert_data;

	ret = misc_register(misc);
	if (ret)
		goto e_free_ctx;

	snp_dev->msg_desc = mdesc;
	dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n", vmpck_id);
	return 0;

e_free_ctx:
	kfree(mdesc->ctx);
e_free_cert_data:
	free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE);
e_free_response:
	free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
e_free_request:
	free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
e_unmap:
	iounmap(mapping);
	return ret;
}
1095
/*
 * Teardown: release the shared pages, crypto context and misc device
 * acquired in probe (devm handles snp_dev/mdesc and tsm_unregister()).
 *
 * NOTE(review): the secrets-page mapping created with ioremap_encrypted()
 * in probe is not unmapped here -- verify whether that is an intentional
 * leak-for-lifetime or a missing iounmap().
 */
static void __exit sev_guest_remove(struct platform_device *pdev)
{
	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;

	free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE);
	free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
	free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
	kfree(mdesc->ctx);
	misc_deregister(&snp_dev->misc);
}
1107
1108 /*
1109 * This driver is meant to be a common SEV guest interface driver and to
1110 * support any SEV guest API. As such, even though it has been introduced
1111 * with the SEV-SNP support, it is named "sev-guest".
1112 *
1113 * sev_guest_remove() lives in .exit.text. For drivers registered via
1114 * module_platform_driver_probe() this is ok because they cannot get unbound
1115 * at runtime. So mark the driver struct with __refdata to prevent modpost
1116 * triggering a section mismatch warning.
1117 */
/* No .probe member: it is supplied to module_platform_driver_probe() below. */
static struct platform_driver sev_guest_driver __refdata = {
	.remove = __exit_p(sev_guest_remove),
	.driver = {
		.name = "sev-guest",
	},
};
1124
1125 module_platform_driver_probe(sev_guest_driver, sev_guest_probe);
1126
1127 MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
1128 MODULE_LICENSE("GPL");
1129 MODULE_VERSION("1.0.0");
1130 MODULE_DESCRIPTION("AMD SEV Guest Driver");
1131 MODULE_ALIAS("platform:sev-guest");
1132