xref: /linux/drivers/virt/coco/sev-guest/sev-guest.c (revision eb65f96cb332d577b490ab9c9f5f8de8c0316076)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AMD Secure Encrypted Virtualization (SEV) guest driver interface
4  *
5  * Copyright (C) 2021-2024 Advanced Micro Devices, Inc.
6  *
7  * Author: Brijesh Singh <brijesh.singh@amd.com>
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/mutex.h>
14 #include <linux/io.h>
15 #include <linux/platform_device.h>
16 #include <linux/miscdevice.h>
17 #include <linux/set_memory.h>
18 #include <linux/fs.h>
19 #include <linux/tsm.h>
20 #include <crypto/aead.h>
21 #include <linux/scatterlist.h>
22 #include <linux/psp-sev.h>
23 #include <linux/sockptr.h>
24 #include <linux/cleanup.h>
25 #include <linux/uuid.h>
26 #include <uapi/linux/sev-guest.h>
27 #include <uapi/linux/psp-sev.h>
28 
29 #include <asm/svm.h>
30 #include <asm/sev.h>
31 
32 #include "sev-guest.h"
33 
#define DEVICE_NAME	"sev-guest"
/* AAD covers the message header from the algo field (byte 0x30) onward. */
#define AAD_LEN		48
#define MSG_HDR_VER	1

/* Give up on a throttled (-EAGAIN) request after 60s, retrying every 2s. */
#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
#define SNP_REQ_RETRY_DELAY		(2*HZ)
40 
/* AES-GCM transform state used to encrypt/decrypt SNP guest messages. */
struct snp_guest_crypto {
	struct crypto_aead *tfm;	/* "gcm(aes)" AEAD keyed with the VMPCK */
	u8 *iv, *authtag;		/* scratch buffers, sized per the fields below */
	int iv_len, a_len;		/* IV and authentication-tag lengths of tfm */
};
46 
/* Per-device state for the SEV-SNP guest request interface. */
struct snp_guest_dev {
	struct device *dev;
	struct miscdevice misc;

	/* Shared (decrypted) buffer for the optional certificate blob. */
	void *certs_data;
	struct snp_guest_crypto *crypto;
	/* request and response are in unencrypted memory */
	struct snp_guest_msg *request, *response;

	/*
	 * Avoid information leakage by double-buffering shared messages
	 * in fields that are in regular encrypted memory.
	 */
	struct snp_guest_msg secret_request, secret_response;

	struct snp_secrets_page *secrets;
	/* GPAs of the shared buffers, handed to the firmware per request. */
	struct snp_req_data input;
	/* Scratch space for the per-ioctl request copied in from userspace. */
	union {
		struct snp_report_req report;
		struct snp_derived_key_req derived_key;
		struct snp_ext_report_req ext_report;
	} req;
	/* Points into the secrets page at the counter for the chosen VMPCK. */
	u32 *os_area_msg_seqno;
	u8 *vmpck;
};
72 
/*
 * The VMPCK ID represents the key used by the SNP guest to communicate with the
 * SEV firmware in the AMD Secure Processor (ASP, aka PSP). By default, the key
 * used will be the key associated with the VMPL at which the guest is running.
 * Should the default key be wiped (see snp_disable_vmpck()), this parameter
 * allows for using one of the remaining VMPCKs.
 */
static int vmpck_id = -1;
module_param(vmpck_id, int, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");

/* Mutex to serialize the shared buffer access and command handling. */
static DEFINE_MUTEX(snp_cmd_mutex);
86 
87 static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
88 {
89 	char zero_key[VMPCK_KEY_LEN] = {0};
90 
91 	if (snp_dev->vmpck)
92 		return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN);
93 
94 	return true;
95 }
96 
/*
 * If an error is received from the host or AMD Secure Processor (ASP) there
 * are two options. Either retry the exact same encrypted request or discontinue
 * using the VMPCK.
 *
 * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
 * encrypt the requests. The IV for this scheme is the sequence number. GCM
 * cannot tolerate IV reuse.
 *
 * The ASP FW v1.51 only increments the sequence numbers on a successful
 * guest<->ASP back and forth and only accepts messages at its exact sequence
 * number.
 *
 * So if the sequence number were to be reused the encryption scheme is
 * vulnerable. If the sequence number were incremented for a fresh IV the ASP
 * will reject the request.
 */
static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
{
	dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n",
		  vmpck_id);
	/* Wipe the key material so it cannot be recovered, then mark it unusable. */
	memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
	snp_dev->vmpck = NULL;
}
121 
122 static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
123 {
124 	u64 count;
125 
126 	lockdep_assert_held(&snp_cmd_mutex);
127 
128 	/* Read the current message sequence counter from secrets pages */
129 	count = *snp_dev->os_area_msg_seqno;
130 
131 	return count + 1;
132 }
133 
/* Return a non-zero on success */
static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
{
	u64 count = __snp_get_msg_seqno(snp_dev);

	/*
	 * The message sequence counter for the SNP guest request is a  64-bit
	 * value but the version 2 of GHCB specification defines a 32-bit storage
	 * for it. If the counter exceeds the 32-bit value then return zero.
	 * The caller should check the return value, but if the caller happens to
	 * not check the value and use it, then the firmware treats zero as an
	 * invalid number and will fail the  message request.
	 */
	if (count >= UINT_MAX) {
		dev_err(snp_dev->dev, "request message sequence counter overflow\n");
		return 0;
	}

	return count;
}
154 
/* Advance the stored sequence counter after a completed request. */
static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev)
{
	/*
	 * The counter is also incremented by the PSP, so increment it by 2
	 * and save in secrets page.
	 */
	*snp_dev->os_area_msg_seqno += 2;
}
163 
/* Recover the driver state from an open file on the misc device. */
static inline struct snp_guest_dev *to_snp_dev(struct file *file)
{
	struct miscdevice *dev = file->private_data;

	return container_of(dev, struct snp_guest_dev, misc);
}
170 
/*
 * Allocate an AES-GCM crypto context keyed with the VMPCK, plus IV and
 * authtag scratch buffers sized for the transform. Returns NULL on any
 * failure; partially-built state is unwound via the goto chain below.
 */
static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen)
{
	struct snp_guest_crypto *crypto;

	crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT);
	if (!crypto)
		return NULL;

	crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(crypto->tfm))
		goto e_free;

	if (crypto_aead_setkey(crypto->tfm, key, keylen))
		goto e_free_crypto;

	crypto->iv_len = crypto_aead_ivsize(crypto->tfm);
	crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT);
	if (!crypto->iv)
		goto e_free_crypto;

	/* Cap the tag length so it fits the message header's authtag field. */
	if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) {
		if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) {
			dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN);
			goto e_free_iv;
		}
	}

	crypto->a_len = crypto_aead_authsize(crypto->tfm);
	crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT);
	if (!crypto->authtag)
		goto e_free_iv;

	return crypto;

e_free_iv:
	kfree(crypto->iv);
e_free_crypto:
	crypto_free_aead(crypto->tfm);
e_free:
	kfree(crypto);

	return NULL;
}
214 
/* Tear down a context built by init_crypto(). */
static void deinit_crypto(struct snp_guest_crypto *crypto)
{
	crypto_free_aead(crypto->tfm);
	kfree(crypto->iv);
	kfree(crypto->authtag);
	kfree(crypto);
}
222 
/*
 * Run a single synchronous AES-GCM encrypt (enc=true) or decrypt (enc=false)
 * over @len bytes between @src_buf and @dst_buf, with the message header used
 * as AAD and hdr->authtag holding the tag. The IV must already be set up by
 * the caller in crypto->iv. Returns 0 or a negative crypto-layer error.
 */
static int enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg,
			   u8 *src_buf, u8 *dst_buf, size_t len, bool enc)
{
	struct snp_guest_msg_hdr *hdr = &msg->hdr;
	struct scatterlist src[3], dst[3];
	DECLARE_CRYPTO_WAIT(wait);
	struct aead_request *req;
	int ret;

	req = aead_request_alloc(crypto->tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/*
	 * AEAD memory operations:
	 * +------ AAD -------+------- DATA -----+---- AUTHTAG----+
	 * |  msg header      |  plaintext       |  hdr->authtag  |
	 * | bytes 30h - 5Fh  |    or            |                |
	 * |                  |   cipher         |                |
	 * +------------------+------------------+----------------+
	 */
	sg_init_table(src, 3);
	sg_set_buf(&src[0], &hdr->algo, AAD_LEN);
	sg_set_buf(&src[1], src_buf, hdr->msg_sz);
	sg_set_buf(&src[2], hdr->authtag, crypto->a_len);

	sg_init_table(dst, 3);
	sg_set_buf(&dst[0], &hdr->algo, AAD_LEN);
	sg_set_buf(&dst[1], dst_buf, hdr->msg_sz);
	sg_set_buf(&dst[2], hdr->authtag, crypto->a_len);

	aead_request_set_ad(req, AAD_LEN);
	aead_request_set_tfm(req, crypto->tfm);
	aead_request_set_callback(req, 0, crypto_req_done, &wait);

	/* Wait synchronously for completion even if the driver is async. */
	aead_request_set_crypt(req, src, dst, len, crypto->iv);
	ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait);

	aead_request_free(req);
	return ret;
}
264 
/*
 * Encrypt @plaintext into msg->payload, using the message's sequence number
 * as the (zero-padded) GCM IV — which is why a sequence number may never be
 * reused with the same VMPCK.
 */
static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
			 void *plaintext, size_t len)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_guest_msg_hdr *hdr = &msg->hdr;

	memset(crypto->iv, 0, crypto->iv_len);
	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));

	return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true);
}
276 
/*
 * Decrypt msg->payload into @plaintext, rebuilding the IV from the response
 * message's own sequence number.
 */
static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
		       void *plaintext, size_t len)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_guest_msg_hdr *hdr = &msg->hdr;

	/* Build IV with response buffer sequence number */
	memset(crypto->iv, 0, crypto->iv_len);
	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));

	return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false);
}
289 
290 static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz)
291 {
292 	struct snp_guest_crypto *crypto = snp_dev->crypto;
293 	struct snp_guest_msg *resp = &snp_dev->secret_response;
294 	struct snp_guest_msg *req = &snp_dev->secret_request;
295 	struct snp_guest_msg_hdr *req_hdr = &req->hdr;
296 	struct snp_guest_msg_hdr *resp_hdr = &resp->hdr;
297 
298 	dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n",
299 		resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz);
300 
301 	/* Copy response from shared memory to encrypted memory. */
302 	memcpy(resp, snp_dev->response, sizeof(*resp));
303 
304 	/* Verify that the sequence counter is incremented by 1 */
305 	if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1)))
306 		return -EBADMSG;
307 
308 	/* Verify response message type and version number. */
309 	if (resp_hdr->msg_type != (req_hdr->msg_type + 1) ||
310 	    resp_hdr->msg_version != req_hdr->msg_version)
311 		return -EBADMSG;
312 
313 	/*
314 	 * If the message size is greater than our buffer length then return
315 	 * an error.
316 	 */
317 	if (unlikely((resp_hdr->msg_sz + crypto->a_len) > sz))
318 		return -EBADMSG;
319 
320 	/* Decrypt the payload */
321 	return dec_payload(snp_dev, resp, payload, resp_hdr->msg_sz + crypto->a_len);
322 }
323 
/*
 * Build the request message header in snp_dev->secret_request and encrypt
 * the caller's payload into it. Returns -ENOSR for a zero (overflowed)
 * sequence number, otherwise the result of the encryption.
 */
static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type,
			void *payload, size_t sz)
{
	struct snp_guest_msg *req = &snp_dev->secret_request;
	struct snp_guest_msg_hdr *hdr = &req->hdr;

	memset(req, 0, sizeof(*req));

	hdr->algo = SNP_AEAD_AES_256_GCM;
	hdr->hdr_version = MSG_HDR_VER;
	hdr->hdr_sz = sizeof(*hdr);
	hdr->msg_type = type;
	hdr->msg_version = version;
	hdr->msg_seqno = seqno;
	hdr->msg_vmpck = vmpck_id;
	hdr->msg_sz = sz;

	/* Verify the sequence number is non-zero */
	if (!hdr->msg_seqno)
		return -ENOSR;

	dev_dbg(snp_dev->dev, "request [seqno %lld type %d version %d sz %d]\n",
		hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);

	return __enc_payload(snp_dev, req, payload, sz);
}
350 
/*
 * Issue the already-encrypted request to the firmware, handling the two
 * retryable outcomes (certificate buffer too small, host throttling) in a
 * way that never reuses a sequence number / IV. On return the sequence
 * counter has been advanced; rio->exitinfo2 carries firmware/VMM status.
 */
static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
				  struct snp_guest_request_ioctl *rio)
{
	unsigned long req_start = jiffies;
	unsigned int override_npages = 0;
	u64 override_err = 0;
	int rc;

retry_request:
	/*
	 * Call firmware to process the request. In this function the encrypted
	 * message enters shared memory with the host. So after this call the
	 * sequence number must be incremented or the VMPCK must be deleted to
	 * prevent reuse of the IV.
	 */
	rc = snp_issue_guest_request(exit_code, &snp_dev->input, rio);
	switch (rc) {
	case -ENOSPC:
		/*
		 * If the extended guest request fails due to having too
		 * small of a certificate data buffer, retry the same
		 * guest request without the extended data request in
		 * order to increment the sequence number and thus avoid
		 * IV reuse.
		 */
		override_npages = snp_dev->input.data_npages;
		exit_code	= SVM_VMGEXIT_GUEST_REQUEST;

		/*
		 * Override the error to inform callers the given extended
		 * request buffer size was too small and give the caller the
		 * required buffer size.
		 */
		override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);

		/*
		 * If this call to the firmware succeeds, the sequence number can
		 * be incremented allowing for continued use of the VMPCK. If
		 * there is an error reflected in the return value, this value
		 * is checked further down and the result will be the deletion
		 * of the VMPCK and the error code being propagated back to the
		 * user as an ioctl() return code.
		 */
		goto retry_request;

	/*
	 * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
	 * throttled. Retry in the driver to avoid returning and reusing the
	 * message sequence number on a different message.
	 */
	case -EAGAIN:
		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
			rc = -ETIMEDOUT;
			break;
		}
		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
		goto retry_request;
	}

	/*
	 * Increment the message sequence number. There is no harm in doing
	 * this now because decryption uses the value stored in the response
	 * structure and any failure will wipe the VMPCK, preventing further
	 * use anyway.
	 */
	snp_inc_msg_seqno(snp_dev);

	if (override_err) {
		rio->exitinfo2 = override_err;

		/*
		 * If an extended guest request was issued and the supplied certificate
		 * buffer was not large enough, a standard guest request was issued to
		 * prevent IV reuse. If the standard request was successful, return -EIO
		 * back to the caller as would have originally been returned.
		 */
		if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			rc = -EIO;
	}

	/* Restore the caller's certificate page count clobbered by the retry. */
	if (override_npages)
		snp_dev->input.data_npages = override_npages;

	return rc;
}
436 
/*
 * Full request cycle: encrypt @req_buf into the shared request page, call the
 * firmware, then verify and decrypt the response into @resp_buf. On any
 * post-issue failure (other than the expected INVALID_LEN case) the VMPCK is
 * wiped to prevent IV reuse. Caller must hold snp_cmd_mutex.
 */
static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
				struct snp_guest_request_ioctl *rio, u8 type,
				void *req_buf, size_t req_sz, void *resp_buf,
				u32 resp_sz)
{
	u64 seqno;
	int rc;

	/* Get message sequence and verify that its a non-zero */
	seqno = snp_get_msg_seqno(snp_dev);
	if (!seqno)
		return -EIO;

	/* Clear shared memory's response for the host to populate. */
	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));

	/* Encrypt the userspace provided payload in snp_dev->secret_request. */
	rc = enc_payload(snp_dev, seqno, rio->msg_version, type, req_buf, req_sz);
	if (rc)
		return rc;

	/*
	 * Write the fully encrypted request to the shared unencrypted
	 * request page.
	 */
	memcpy(snp_dev->request, &snp_dev->secret_request,
	       sizeof(snp_dev->secret_request));

	rc = __handle_guest_request(snp_dev, exit_code, rio);
	if (rc) {
		/* Expected "buffer too small" outcome: VMPCK is still usable. */
		if (rc == -EIO &&
		    rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			return rc;

		dev_alert(snp_dev->dev,
			  "Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
			  rc, rio->exitinfo2);

		snp_disable_vmpck(snp_dev);
		return rc;
	}

	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
	if (rc) {
		dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
		snp_disable_vmpck(snp_dev);
		return rc;
	}

	return 0;
}
488 
/*
 * Request/response buffer pair for get_ext_report(); sockptr_t lets the same
 * path serve both userspace (ioctl) and kernel (configfs-tsm) callers.
 */
struct snp_req_resp {
	sockptr_t req_data;
	sockptr_t resp_data;
};
493 
/* SNP_GET_REPORT ioctl: fetch an attestation report from the firmware. */
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_report_req *req = &snp_dev->req.report;
	struct snp_report_resp *resp;
	int rc, resp_len;

	lockdep_assert_held(&snp_cmd_mutex);

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
		return -EFAULT;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp->data) + crypto->a_len;
	resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!resp)
		return -ENOMEM;

	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
				  SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data,
				  resp_len);
	if (rc)
		goto e_free;

	/* Copy only the structure (without the trailing authtag) back out. */
	if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
		rc = -EFAULT;

e_free:
	kfree(resp);
	return rc;
}
532 
/* SNP_GET_DERIVED_KEY ioctl: ask the firmware to derive a key for the guest. */
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_derived_key_req *req = &snp_dev->req.derived_key;
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_derived_key_resp resp = {0};
	int rc, resp_len;
	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
	u8 buf[64 + 16];

	lockdep_assert_held(&snp_cmd_mutex);

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp.data) + crypto->a_len;
	if (sizeof(buf) < resp_len)
		return -ENOMEM;

	if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
		return -EFAULT;

	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
				  SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len);
	if (rc)
		return rc;

	memcpy(resp.data, buf, sizeof(resp.data));
	if (copy_to_user((void __user *)arg->resp_data, &resp, sizeof(resp)))
		rc = -EFAULT;

	/* The response buffer contains the sensitive data, explicitly clear it. */
	memzero_explicit(buf, sizeof(buf));
	memzero_explicit(&resp, sizeof(resp));
	return rc;
}
573 
/*
 * SNP_GET_EXT_REPORT: fetch an attestation report plus the host-provided
 * certificate blob. Serves both userspace and kernel (configfs-tsm) callers
 * via sockptr_t. On SNP_GUEST_VMM_ERR_INVALID_LEN the required certs_len is
 * written back to the caller's request structure.
 */
static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg,
			  struct snp_req_resp *io)

{
	struct snp_ext_report_req *req = &snp_dev->req.ext_report;
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_report_resp *resp;
	int ret, npages = 0, resp_len;
	sockptr_t certs_address;

	lockdep_assert_held(&snp_cmd_mutex);

	if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
		return -EINVAL;

	if (copy_from_sockptr(req, io->req_data, sizeof(*req)))
		return -EFAULT;

	/* caller does not want certificate data */
	if (!req->certs_len || !req->certs_address)
		goto cmd;

	if (req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
	    !IS_ALIGNED(req->certs_len, PAGE_SIZE))
		return -EINVAL;

	/* The certs destination lives in the same address space as the caller. */
	if (sockptr_is_kernel(io->resp_data)) {
		certs_address = KERNEL_SOCKPTR((void *)req->certs_address);
	} else {
		certs_address = USER_SOCKPTR((void __user *)req->certs_address);
		if (!access_ok(certs_address.user, req->certs_len))
			return -EFAULT;
	}

	/*
	 * Initialize the intermediate buffer with all zeros. This buffer
	 * is used in the guest request message to get the certs blob from
	 * the host. If host does not supply any certs in it, then copy
	 * zeros to indicate that certificate data was not provided.
	 */
	memset(snp_dev->certs_data, 0, req->certs_len);
	npages = req->certs_len >> PAGE_SHIFT;
cmd:
	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp->data) + crypto->a_len;
	resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!resp)
		return -ENOMEM;

	snp_dev->input.data_npages = npages;
	ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
				   SNP_MSG_REPORT_REQ, &req->data,
				   sizeof(req->data), resp->data, resp_len);

	/* If certs length is invalid then copy the returned length */
	if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
		req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;

		if (copy_to_sockptr(io->req_data, req, sizeof(*req)))
			ret = -EFAULT;
	}

	if (ret)
		goto e_free;

	/* npages is only non-zero when certs_address was validated above. */
	if (npages && copy_to_sockptr(certs_address, snp_dev->certs_data, req->certs_len)) {
		ret = -EFAULT;
		goto e_free;
	}

	if (copy_to_sockptr(io->resp_data, resp, sizeof(*resp)))
		ret = -EFAULT;

e_free:
	kfree(resp);
	return ret;
}
655 
/*
 * ioctl entry point. Serializes all commands under snp_cmd_mutex, refuses to
 * run once the VMPCK has been wiped, and copies the (possibly updated)
 * request structure back to userspace so exitinfo2 reaches the caller.
 */
static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	struct snp_guest_dev *snp_dev = to_snp_dev(file);
	void __user *argp = (void __user *)arg;
	struct snp_guest_request_ioctl input;
	struct snp_req_resp io;
	int ret = -ENOTTY;

	if (copy_from_user(&input, argp, sizeof(input)))
		return -EFAULT;

	/* Sentinel: non-zero so a short-circuited command is still copied back. */
	input.exitinfo2 = 0xff;

	/* Message version must be non-zero */
	if (!input.msg_version)
		return -EINVAL;

	mutex_lock(&snp_cmd_mutex);

	/* Check if the VMPCK is not empty */
	if (is_vmpck_empty(snp_dev)) {
		dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
		mutex_unlock(&snp_cmd_mutex);
		return -ENOTTY;
	}

	switch (ioctl) {
	case SNP_GET_REPORT:
		ret = get_report(snp_dev, &input);
		break;
	case SNP_GET_DERIVED_KEY:
		ret = get_derived_key(snp_dev, &input);
		break;
	case SNP_GET_EXT_REPORT:
		/*
		 * As get_ext_report() may be called from the ioctl() path and a
		 * kernel internal path (configfs-tsm), decorate the passed
		 * buffers as user pointers.
		 */
		io.req_data = USER_SOCKPTR((void __user *)input.req_data);
		io.resp_data = USER_SOCKPTR((void __user *)input.resp_data);
		ret = get_ext_report(snp_dev, &input, &io);
		break;
	default:
		break;
	}

	mutex_unlock(&snp_cmd_mutex);

	if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
		return -EFAULT;

	return ret;
}
710 
/*
 * Re-encrypt and free pages obtained from alloc_shared_pages(). If the
 * encryption attribute cannot be restored, the pages are deliberately leaked
 * rather than returned to the allocator while still shared with the host.
 */
static void free_shared_pages(void *buf, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	int ret;

	if (!buf)
		return;

	ret = set_memory_encrypted((unsigned long)buf, npages);
	if (ret) {
		WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
		return;
	}

	__free_pages(virt_to_page(buf), get_order(sz));
}
727 
/*
 * Allocate @sz bytes of page memory and mark it decrypted (shared with the
 * host) for guest<->host message exchange. Returns NULL on failure.
 */
static void *alloc_shared_pages(struct device *dev, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	struct page *page;
	int ret;

	page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
	if (!page)
		return NULL;

	ret = set_memory_decrypted((unsigned long)page_address(page), npages);
	if (ret) {
		dev_err(dev, "failed to mark page shared, ret=%d\n", ret);
		__free_pages(page, get_order(sz));
		return NULL;
	}

	return page_address(page);
}
747 
/* File operations for /dev/sev-guest; all commands go through the ioctl. */
static const struct file_operations snp_guest_fops = {
	.owner	= THIS_MODULE,
	.unlocked_ioctl = snp_guest_ioctl,
};
752 
753 static u8 *get_vmpck(int id, struct snp_secrets_page *secrets, u32 **seqno)
754 {
755 	u8 *key = NULL;
756 
757 	switch (id) {
758 	case 0:
759 		*seqno = &secrets->os_area.msg_seqno_0;
760 		key = secrets->vmpck0;
761 		break;
762 	case 1:
763 		*seqno = &secrets->os_area.msg_seqno_1;
764 		key = secrets->vmpck1;
765 		break;
766 	case 2:
767 		*seqno = &secrets->os_area.msg_seqno_2;
768 		key = secrets->vmpck2;
769 		break;
770 	case 3:
771 		*seqno = &secrets->os_area.msg_seqno_3;
772 		key = secrets->vmpck3;
773 		break;
774 	default:
775 		break;
776 	}
777 
778 	return key;
779 }
780 
/* Header prepended by firmware to the MSG_REPORT response payload. */
struct snp_msg_report_resp_hdr {
	u32 status;		/* SEV_RET_* firmware status code */
	u32 report_size;	/* size of the report that follows */
	u8 rsvd[24];
};

/* One entry of the GUID-keyed certificate table returned by the host. */
struct snp_msg_cert_entry {
	guid_t guid;
	u32 offset;		/* byte offset of the cert within the blob */
	u32 length;		/* byte length of the cert */
};
792 
/*
 * tsm_ops.report_new callback (configfs-tsm). Runs an extended report
 * request through the same path as the ioctl, then splits the result into
 * report->outblob (the report) and report->auxblob (the certificate table).
 * Local buffers use __free(kvfree) scoped cleanup from <linux/cleanup.h>.
 */
static int sev_report_new(struct tsm_report *report, void *data)
{
	struct snp_msg_cert_entry *cert_table;
	struct tsm_desc *desc = &report->desc;
	struct snp_guest_dev *snp_dev = data;
	struct snp_msg_report_resp_hdr hdr;
	const u32 report_size = SZ_4K;
	const u32 ext_size = SEV_FW_BLOB_MAX_SIZE;
	u32 certs_size, i, size = report_size + ext_size;
	int ret;

	if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE)
		return -EINVAL;

	void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	guard(mutex)(&snp_cmd_mutex);

	/* Check if the VMPCK is not empty */
	if (is_vmpck_empty(snp_dev)) {
		dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
		return -ENOTTY;
	}

	/* The report occupies the first 4K of buf; certs land after it. */
	cert_table = buf + report_size;
	struct snp_ext_report_req ext_req = {
		.data = { .vmpl = desc->privlevel },
		.certs_address = (__u64)cert_table,
		.certs_len = ext_size,
	};
	memcpy(&ext_req.data.user_data, desc->inblob, desc->inblob_len);

	struct snp_guest_request_ioctl input = {
		.msg_version = 1,
		.req_data = (__u64)&ext_req,
		.resp_data = (__u64)buf,
		.exitinfo2 = 0xff,
	};
	/* Kernel pointers, so get_ext_report() skips the user-copy paths. */
	struct snp_req_resp io = {
		.req_data = KERNEL_SOCKPTR(&ext_req),
		.resp_data = KERNEL_SOCKPTR(buf),
	};

	ret = get_ext_report(snp_dev, &input, &io);
	if (ret)
		return ret;

	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.status == SEV_RET_INVALID_PARAM)
		return -EINVAL;
	if (hdr.status == SEV_RET_INVALID_KEY)
		return -EINVAL;
	if (hdr.status)
		return -ENXIO;
	if ((hdr.report_size + sizeof(hdr)) > report_size)
		return -ENOMEM;

	void *rbuf __free(kvfree) = kvzalloc(hdr.report_size, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	memcpy(rbuf, buf + sizeof(hdr), hdr.report_size);
	report->outblob = no_free_ptr(rbuf);
	report->outblob_len = hdr.report_size;

	/* Walk the cert table (terminated by an all-zero entry) to size the blob. */
	certs_size = 0;
	for (i = 0; i < ext_size / sizeof(struct snp_msg_cert_entry); i++) {
		struct snp_msg_cert_entry *ent = &cert_table[i];

		if (guid_is_null(&ent->guid) && !ent->offset && !ent->length)
			break;
		certs_size = max(certs_size, ent->offset + ent->length);
	}

	/* Suspicious that the response populated entries without populating size */
	if (!certs_size && i)
		dev_warn_ratelimited(snp_dev->dev, "certificate slots conveyed without size\n");

	/* No certs to report */
	if (!certs_size)
		return 0;

	/* Suspicious that the certificate blob size contract was violated
	 */
	if (certs_size > ext_size) {
		dev_warn_ratelimited(snp_dev->dev, "certificate data truncated\n");
		certs_size = ext_size;
	}

	void *cbuf __free(kvfree) = kvzalloc(certs_size, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	memcpy(cbuf, cert_table, certs_size);
	report->auxblob = no_free_ptr(cbuf);
	report->auxblob_len = certs_size;

	return 0;
}
894 
/* TSM provider hooks exposed to the configfs-tsm report interface. */
static const struct tsm_ops sev_tsm_ops = {
	.name = KBUILD_MODNAME,
	.report_new = sev_report_new,
};

/* devm action: undo tsm_register() on probe failure or device removal. */
static void unregister_sev_tsm(void *data)
{
	tsm_unregister(&sev_tsm_ops);
}
904 
905 static int __init sev_guest_probe(struct platform_device *pdev)
906 {
907 	struct sev_guest_platform_data *data;
908 	struct snp_secrets_page *secrets;
909 	struct device *dev = &pdev->dev;
910 	struct snp_guest_dev *snp_dev;
911 	struct miscdevice *misc;
912 	void __iomem *mapping;
913 	int ret;
914 
915 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
916 		return -ENODEV;
917 
918 	if (!dev->platform_data)
919 		return -ENODEV;
920 
921 	data = (struct sev_guest_platform_data *)dev->platform_data;
922 	mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
923 	if (!mapping)
924 		return -ENODEV;
925 
926 	secrets = (__force void *)mapping;
927 
928 	ret = -ENOMEM;
929 	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
930 	if (!snp_dev)
931 		goto e_unmap;
932 
933 	/* Adjust the default VMPCK key based on the executing VMPL level */
934 	if (vmpck_id == -1)
935 		vmpck_id = snp_vmpl;
936 
937 	ret = -EINVAL;
938 	snp_dev->vmpck = get_vmpck(vmpck_id, secrets, &snp_dev->os_area_msg_seqno);
939 	if (!snp_dev->vmpck) {
940 		dev_err(dev, "invalid vmpck id %d\n", vmpck_id);
941 		goto e_unmap;
942 	}
943 
944 	/* Verify that VMPCK is not zero. */
945 	if (is_vmpck_empty(snp_dev)) {
946 		dev_err(dev, "vmpck id %d is null\n", vmpck_id);
947 		goto e_unmap;
948 	}
949 
950 	platform_set_drvdata(pdev, snp_dev);
951 	snp_dev->dev = dev;
952 	snp_dev->secrets = secrets;
953 
954 	/* Allocate the shared page used for the request and response message. */
955 	snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
956 	if (!snp_dev->request)
957 		goto e_unmap;
958 
959 	snp_dev->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
960 	if (!snp_dev->response)
961 		goto e_free_request;
962 
963 	snp_dev->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE);
964 	if (!snp_dev->certs_data)
965 		goto e_free_response;
966 
967 	ret = -EIO;
968 	snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN);
969 	if (!snp_dev->crypto)
970 		goto e_free_cert_data;
971 
972 	misc = &snp_dev->misc;
973 	misc->minor = MISC_DYNAMIC_MINOR;
974 	misc->name = DEVICE_NAME;
975 	misc->fops = &snp_guest_fops;
976 
977 	/* initial the input address for guest request */
978 	snp_dev->input.req_gpa = __pa(snp_dev->request);
979 	snp_dev->input.resp_gpa = __pa(snp_dev->response);
980 	snp_dev->input.data_gpa = __pa(snp_dev->certs_data);
981 
982 	ret = tsm_register(&sev_tsm_ops, snp_dev, &tsm_report_extra_type);
983 	if (ret)
984 		goto e_free_cert_data;
985 
986 	ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL);
987 	if (ret)
988 		goto e_free_cert_data;
989 
990 	ret =  misc_register(misc);
991 	if (ret)
992 		goto e_free_cert_data;
993 
994 	dev_info(dev, "Initialized SEV guest driver (using vmpck_id %d)\n", vmpck_id);
995 	return 0;
996 
997 e_free_cert_data:
998 	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
999 e_free_response:
1000 	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
1001 e_free_request:
1002 	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
1003 e_unmap:
1004 	iounmap(mapping);
1005 	return ret;
1006 }
1007 
/*
 * Device removal: free the shared buffers and crypto state, then remove the
 * misc device. The TSM provider is unregistered separately by the devm
 * action installed in probe.
 *
 * NOTE(review): the shared pages and crypto context are torn down before
 * misc_deregister(); presumably no ioctl can be in flight at remove time —
 * confirm against the platform-device teardown ordering.
 */
static void __exit sev_guest_remove(struct platform_device *pdev)
{
	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);

	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
	deinit_crypto(snp_dev->crypto);
	misc_deregister(&snp_dev->misc);
}
1018 
/*
 * This driver is meant to be a common SEV guest interface driver and to
 * support any SEV guest API. As such, even though it has been introduced
 * with the SEV-SNP support, it is named "sev-guest".
 */
static struct platform_driver sev_guest_driver = {
	.remove_new	= __exit_p(sev_guest_remove),
	.driver		= {
		.name = "sev-guest",
	},
};
1030 
/* Probe is __init; module_platform_driver_probe() binds it non-hotpluggably. */
module_platform_driver_probe(sev_guest_driver, sev_guest_probe);

MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD SEV Guest Driver");
MODULE_ALIAS("platform:sev-guest");
1038