1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AMD Secure Encrypted Virtualization (SEV) guest driver interface
4  *
5  * Copyright (C) 2021-2024 Advanced Micro Devices, Inc.
6  *
7  * Author: Brijesh Singh <brijesh.singh@amd.com>
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/mutex.h>
14 #include <linux/io.h>
15 #include <linux/platform_device.h>
16 #include <linux/miscdevice.h>
17 #include <linux/set_memory.h>
18 #include <linux/fs.h>
19 #include <linux/tsm.h>
20 #include <crypto/aead.h>
21 #include <linux/scatterlist.h>
22 #include <linux/psp-sev.h>
23 #include <linux/sockptr.h>
24 #include <linux/cleanup.h>
25 #include <linux/uuid.h>
26 #include <linux/configfs.h>
27 #include <uapi/linux/sev-guest.h>
28 #include <uapi/linux/psp-sev.h>
29 
30 #include <asm/svm.h>
31 #include <asm/sev.h>
32 
33 #define DEVICE_NAME	"sev-guest"
34 #define AAD_LEN		48
35 #define MSG_HDR_VER	1
36 
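/*
 * A host-throttled request (SNP_GUEST_VMM_ERR_BUSY) is retried every
 * SNP_REQ_RETRY_DELAY for at most SNP_REQ_MAX_RETRY_DURATION before
 * __handle_guest_request() gives up with -ETIMEDOUT.
 */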
37 #define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
38 #define SNP_REQ_RETRY_DELAY		(2*HZ)
39 
40 #define SVSM_MAX_RETRIES		3
41 
42 struct snp_guest_crypto {
43 	struct crypto_aead *tfm;
44 	u8 *iv, *authtag;
45 	int iv_len, a_len;
46 };
47 
48 struct snp_guest_dev {
49 	struct device *dev;
50 	struct miscdevice misc;
51 
52 	void *certs_data;
53 	struct snp_guest_crypto *crypto;
54 	/* request and response are in unencrypted memory */
55 	struct snp_guest_msg *request, *response;
56 
57 	/*
58 	 * Avoid information leakage by double-buffering shared messages
59 	 * in fields that are in regular encrypted memory.
60 	 */
61 	struct snp_guest_msg secret_request, secret_response;
62 
63 	struct snp_secrets_page *secrets;
64 	struct snp_req_data input;
65 	union {
66 		struct snp_report_req report;
67 		struct snp_derived_key_req derived_key;
68 		struct snp_ext_report_req ext_report;
69 	} req;
70 	u32 *os_area_msg_seqno;
71 	u8 *vmpck;
72 };
73 
74 /*
75  * The VMPCK ID represents the key used by the SNP guest to communicate with the
76  * SEV firmware in the AMD Secure Processor (ASP, aka PSP). By default, the key
77  * used will be the key associated with the VMPL at which the guest is running.
78  * Should the default key be wiped (see snp_disable_vmpck()), this parameter
79  * allows for using one of the remaining VMPCKs.
80  */
81 static int vmpck_id = -1;
82 module_param(vmpck_id, int, 0444);
83 MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");
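/*
 * Example (illustrative, relying only on standard module_param handling): a
 * guest that should use VMPCK1 rather than its default key can boot with
 * "sev-guest.vmpck_id=1" on the kernel command line or load the module with
 * "vmpck_id=1"; the value is exposed read-only under
 * /sys/module/sev_guest/parameters/vmpck_id.
 */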
84 
85 /* Mutex to serialize the shared buffer access and command handling. */
86 static DEFINE_MUTEX(snp_cmd_mutex);
87 
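/*
 * Report whether the selected VMPCK is unusable: either it was never provided
 * or it has been wiped (all zeroes) by snp_disable_vmpck().
 */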
88 static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
89 {
90 	char zero_key[VMPCK_KEY_LEN] = {0};
91 
92 	if (snp_dev->vmpck)
93 		return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN);
94 
95 	return true;
96 }
97 
98 /*
99  * If an error is received from the host or AMD Secure Processor (ASP) there
100  * are two options. Either retry the exact same encrypted request or discontinue
101  * using the VMPCK.
102  *
103  * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
104  * encrypt the requests. The IV for this scheme is the sequence number. GCM
105  * cannot tolerate IV reuse.
106  *
107  * The ASP FW v1.51 only increments the sequence numbers on a successful
108  * guest<->ASP back and forth and only accepts messages at its exact sequence
109  * number.
110  *
111  * So if the sequence number were to be reused the encryption scheme is
112  * vulnerable. If the sequence number were incremented for a fresh IV the ASP
113  * will reject the request.
114  */
115 static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
116 {
117 	dev_alert(snp_dev->dev, "Disabling VMPCK%d communication key to prevent IV reuse.\n",
118 		  vmpck_id);
119 	memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
120 	snp_dev->vmpck = NULL;
121 }
122 
123 static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
124 {
125 	u64 count;
126 
127 	lockdep_assert_held(&snp_cmd_mutex);
128 
129 	/* Read the current message sequence counter from secrets pages */
130 	count = *snp_dev->os_area_msg_seqno;
131 
132 	return count + 1;
133 }
134 
135 /* Return a non-zero message sequence number on success, zero on overflow */
136 static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
137 {
138 	u64 count = __snp_get_msg_seqno(snp_dev);
139 
140 	/*
141 	 * The message sequence counter for the SNP guest request is a 64-bit
142 	 * value, but version 2 of the GHCB specification defines a 32-bit storage
143 	 * for it. If the counter exceeds the 32-bit value then return zero.
144 	 * The caller should check the return value, but if the caller happens to
145 	 * not check the value and use it, then the firmware treats zero as an
146 	 * invalid number and will fail the message request.
147 	 */
148 	if (count >= UINT_MAX) {
149 		dev_err(snp_dev->dev, "request message sequence counter overflow\n");
150 		return 0;
151 	}
152 
153 	return count;
154 }
155 
156 static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev)
157 {
158 	/*
159 	 * The counter is also incremented by the PSP, so increment it by 2
160 	 * and save it in the secrets page.
161 	 */
162 	*snp_dev->os_area_msg_seqno += 2;
163 }
164 
165 static inline struct snp_guest_dev *to_snp_dev(struct file *file)
166 {
167 	struct miscdevice *dev = file->private_data;
168 
169 	return container_of(dev, struct snp_guest_dev, misc);
170 }
171 
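/*
 * Allocate an AES-GCM transform keyed with the VMPCK, along with scratch
 * buffers for the IV and authentication tag used by enc_dec_message().
 */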
172 static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen)
173 {
174 	struct snp_guest_crypto *crypto;
175 
176 	crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT);
177 	if (!crypto)
178 		return NULL;
179 
180 	crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
181 	if (IS_ERR(crypto->tfm))
182 		goto e_free;
183 
184 	if (crypto_aead_setkey(crypto->tfm, key, keylen))
185 		goto e_free_crypto;
186 
187 	crypto->iv_len = crypto_aead_ivsize(crypto->tfm);
188 	crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT);
189 	if (!crypto->iv)
190 		goto e_free_crypto;
191 
192 	if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) {
193 		if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) {
194 			dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN);
195 			goto e_free_iv;
196 		}
197 	}
198 
199 	crypto->a_len = crypto_aead_authsize(crypto->tfm);
200 	crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT);
201 	if (!crypto->authtag)
202 		goto e_free_iv;
203 
204 	return crypto;
205 
206 e_free_iv:
207 	kfree(crypto->iv);
208 e_free_crypto:
209 	crypto_free_aead(crypto->tfm);
210 e_free:
211 	kfree(crypto);
212 
213 	return NULL;
214 }
215 
216 static void deinit_crypto(struct snp_guest_crypto *crypto)
217 {
218 	crypto_free_aead(crypto->tfm);
219 	kfree(crypto->iv);
220 	kfree(crypto->authtag);
221 	kfree(crypto);
222 }
223 
224 static int enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg,
225 			   u8 *src_buf, u8 *dst_buf, size_t len, bool enc)
226 {
227 	struct snp_guest_msg_hdr *hdr = &msg->hdr;
228 	struct scatterlist src[3], dst[3];
229 	DECLARE_CRYPTO_WAIT(wait);
230 	struct aead_request *req;
231 	int ret;
232 
233 	req = aead_request_alloc(crypto->tfm, GFP_KERNEL);
234 	if (!req)
235 		return -ENOMEM;
236 
237 	/*
238 	 * AEAD memory operations:
239 	 * +------ AAD -------+------- DATA -----+---- AUTHTAG----+
240 	 * |  msg header      |  plaintext       |  hdr->authtag  |
241 	 * | bytes 30h - 5Fh  |    or            |                |
242 	 * |                  |   cipher         |                |
243 	 * +------------------+------------------+----------------+
244 	 */
245 	sg_init_table(src, 3);
246 	sg_set_buf(&src[0], &hdr->algo, AAD_LEN);
247 	sg_set_buf(&src[1], src_buf, hdr->msg_sz);
248 	sg_set_buf(&src[2], hdr->authtag, crypto->a_len);
249 
250 	sg_init_table(dst, 3);
251 	sg_set_buf(&dst[0], &hdr->algo, AAD_LEN);
252 	sg_set_buf(&dst[1], dst_buf, hdr->msg_sz);
253 	sg_set_buf(&dst[2], hdr->authtag, crypto->a_len);
254 
255 	aead_request_set_ad(req, AAD_LEN);
256 	aead_request_set_tfm(req, crypto->tfm);
257 	aead_request_set_callback(req, 0, crypto_req_done, &wait);
258 
259 	aead_request_set_crypt(req, src, dst, len, crypto->iv);
260 	ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait);
261 
262 	aead_request_free(req);
263 	return ret;
264 }
265 
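/*
 * For both directions the AES-GCM IV is the 64-bit message sequence number
 * from the message header, zero-padded to the transform's IV size; see the
 * IV-reuse discussion above snp_disable_vmpck().
 */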
266 static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
267 			 void *plaintext, size_t len)
268 {
269 	struct snp_guest_crypto *crypto = snp_dev->crypto;
270 	struct snp_guest_msg_hdr *hdr = &msg->hdr;
271 
272 	memset(crypto->iv, 0, crypto->iv_len);
273 	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));
274 
275 	return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true);
276 }
277 
278 static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
279 		       void *plaintext, size_t len)
280 {
281 	struct snp_guest_crypto *crypto = snp_dev->crypto;
282 	struct snp_guest_msg_hdr *hdr = &msg->hdr;
283 
284 	/* Build IV with response buffer sequence number */
285 	memset(crypto->iv, 0, crypto->iv_len);
286 	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));
287 
288 	return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false);
289 }
290 
291 static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz)
292 {
293 	struct snp_guest_crypto *crypto = snp_dev->crypto;
294 	struct snp_guest_msg *resp_msg = &snp_dev->secret_response;
295 	struct snp_guest_msg *req_msg = &snp_dev->secret_request;
296 	struct snp_guest_msg_hdr *req_msg_hdr = &req_msg->hdr;
297 	struct snp_guest_msg_hdr *resp_msg_hdr = &resp_msg->hdr;
298 
299 	pr_debug("response [seqno %lld type %d version %d sz %d]\n",
300 		 resp_msg_hdr->msg_seqno, resp_msg_hdr->msg_type, resp_msg_hdr->msg_version,
301 		 resp_msg_hdr->msg_sz);
302 
303 	/* Copy response from shared memory to encrypted memory. */
304 	memcpy(resp_msg, snp_dev->response, sizeof(*resp_msg));
305 
306 	/* Verify that the sequence counter is incremented by 1 */
307 	if (unlikely(resp_msg_hdr->msg_seqno != (req_msg_hdr->msg_seqno + 1)))
308 		return -EBADMSG;
309 
310 	/* Verify response message type and version number. */
311 	if (resp_msg_hdr->msg_type != (req_msg_hdr->msg_type + 1) ||
312 	    resp_msg_hdr->msg_version != req_msg_hdr->msg_version)
313 		return -EBADMSG;
314 
315 	/*
316 	 * If the message size is greater than our buffer length then return
317 	 * an error.
318 	 */
319 	if (unlikely((resp_msg_hdr->msg_sz + crypto->a_len) > sz))
320 		return -EBADMSG;
321 
322 	/* Decrypt the payload */
323 	return dec_payload(snp_dev, resp_msg, payload, resp_msg_hdr->msg_sz + crypto->a_len);
324 }
325 
enc_payload(struct snp_guest_dev * snp_dev,u64 seqno,int version,u8 type,void * payload,size_t sz)326 static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type,
327 			void *payload, size_t sz)
328 {
329 	struct snp_guest_msg *msg = &snp_dev->secret_request;
330 	struct snp_guest_msg_hdr *hdr = &msg->hdr;
331 
332 	memset(msg, 0, sizeof(*msg));
333 
334 	hdr->algo = SNP_AEAD_AES_256_GCM;
335 	hdr->hdr_version = MSG_HDR_VER;
336 	hdr->hdr_sz = sizeof(*hdr);
337 	hdr->msg_type = type;
338 	hdr->msg_version = version;
339 	hdr->msg_seqno = seqno;
340 	hdr->msg_vmpck = vmpck_id;
341 	hdr->msg_sz = sz;
342 
343 	/* Verify the sequence number is non-zero */
344 	if (!hdr->msg_seqno)
345 		return -ENOSR;
346 
347 	pr_debug("request [seqno %lld type %d version %d sz %d]\n",
348 		 hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);
349 
350 	return __enc_payload(snp_dev, msg, payload, sz);
351 }
352 
353 static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
354 				  struct snp_guest_request_ioctl *rio)
355 {
356 	unsigned long req_start = jiffies;
357 	unsigned int override_npages = 0;
358 	u64 override_err = 0;
359 	int rc;
360 
361 retry_request:
362 	/*
363 	 * Call firmware to process the request. In this function the encrypted
364 	 * message enters shared memory with the host. So after this call the
365 	 * sequence number must be incremented or the VMPCK must be deleted to
366 	 * prevent reuse of the IV.
367 	 */
368 	rc = snp_issue_guest_request(exit_code, &snp_dev->input, rio);
369 	switch (rc) {
370 	case -ENOSPC:
371 		/*
372 		 * If the extended guest request fails due to having too
373 		 * small of a certificate data buffer, retry the same
374 		 * guest request without the extended data request in
375 		 * order to increment the sequence number and thus avoid
376 		 * IV reuse.
377 		 */
378 		override_npages = snp_dev->input.data_npages;
379 		exit_code	= SVM_VMGEXIT_GUEST_REQUEST;
380 
381 		/*
382 		 * Override the error to inform callers the given extended
383 		 * request buffer size was too small and give the caller the
384 		 * required buffer size.
385 		 */
386 		override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);
387 
388 		/*
389 		 * If this call to the firmware succeeds, the sequence number can
390 		 * be incremented allowing for continued use of the VMPCK. If
391 		 * there is an error reflected in the return value, this value
392 		 * is checked further down and the result will be the deletion
393 		 * of the VMPCK and the error code being propagated back to the
394 		 * user as an ioctl() return code.
395 		 */
396 		goto retry_request;
397 
398 	/*
399 	 * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
400 	 * throttled. Retry in the driver to avoid returning and reusing the
401 	 * message sequence number on a different message.
402 	 */
403 	case -EAGAIN:
404 		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
405 			rc = -ETIMEDOUT;
406 			break;
407 		}
408 		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
409 		goto retry_request;
410 	}
411 
412 	/*
413 	 * Increment the message sequence number. There is no harm in doing
414 	 * this now because decryption uses the value stored in the response
415 	 * structure and any failure will wipe the VMPCK, preventing further
416 	 * use anyway.
417 	 */
418 	snp_inc_msg_seqno(snp_dev);
419 
420 	if (override_err) {
421 		rio->exitinfo2 = override_err;
422 
423 		/*
424 		 * If an extended guest request was issued and the supplied certificate
425 		 * buffer was not large enough, a standard guest request was issued to
426 		 * prevent IV reuse. If the standard request was successful, return -EIO
427 		 * back to the caller as would have originally been returned.
428 		 */
429 		if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
430 			rc = -EIO;
431 	}
432 
433 	if (override_npages)
434 		snp_dev->input.data_npages = override_npages;
435 
436 	return rc;
437 }
438 
439 static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
440 				struct snp_guest_request_ioctl *rio, u8 type,
441 				void *req_buf, size_t req_sz, void *resp_buf,
442 				u32 resp_sz)
443 {
444 	u64 seqno;
445 	int rc;
446 
447 	/* Get the message sequence number and verify that it is non-zero */
448 	seqno = snp_get_msg_seqno(snp_dev);
449 	if (!seqno)
450 		return -EIO;
451 
452 	/* Clear shared memory's response for the host to populate. */
453 	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
454 
455 	/* Encrypt the userspace provided payload in snp_dev->secret_request. */
456 	rc = enc_payload(snp_dev, seqno, rio->msg_version, type, req_buf, req_sz);
457 	if (rc)
458 		return rc;
459 
460 	/*
461 	 * Write the fully encrypted request to the shared unencrypted
462 	 * request page.
463 	 */
464 	memcpy(snp_dev->request, &snp_dev->secret_request,
465 	       sizeof(snp_dev->secret_request));
466 
467 	rc = __handle_guest_request(snp_dev, exit_code, rio);
468 	if (rc) {
469 		if (rc == -EIO &&
470 		    rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
471 			return rc;
472 
473 		dev_alert(snp_dev->dev,
474 			  "Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
475 			  rc, rio->exitinfo2);
476 
477 		snp_disable_vmpck(snp_dev);
478 		return rc;
479 	}
480 
481 	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
482 	if (rc) {
483 		dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
484 		snp_disable_vmpck(snp_dev);
485 		return rc;
486 	}
487 
488 	return 0;
489 }
490 
491 struct snp_req_resp {
492 	sockptr_t req_data;
493 	sockptr_t resp_data;
494 };
495 
496 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
497 {
498 	struct snp_guest_crypto *crypto = snp_dev->crypto;
499 	struct snp_report_req *report_req = &snp_dev->req.report;
500 	struct snp_report_resp *report_resp;
501 	int rc, resp_len;
502 
503 	lockdep_assert_held(&snp_cmd_mutex);
504 
505 	if (!arg->req_data || !arg->resp_data)
506 		return -EINVAL;
507 
508 	if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req)))
509 		return -EFAULT;
510 
511 	/*
512 	 * The intermediate response buffer is used while decrypting the
513 	 * response payload. Make sure that it has enough space to cover the
514 	 * authtag.
515 	 */
516 	resp_len = sizeof(report_resp->data) + crypto->a_len;
517 	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
518 	if (!report_resp)
519 		return -ENOMEM;
520 
521 	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg, SNP_MSG_REPORT_REQ,
522 				  report_req, sizeof(*report_req), report_resp->data, resp_len);
523 	if (rc)
524 		goto e_free;
525 
526 	if (copy_to_user((void __user *)arg->resp_data, report_resp, sizeof(*report_resp)))
527 		rc = -EFAULT;
528 
529 e_free:
530 	kfree(report_resp);
531 	return rc;
532 }
533 
534 static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
535 {
536 	struct snp_derived_key_req *derived_key_req = &snp_dev->req.derived_key;
537 	struct snp_guest_crypto *crypto = snp_dev->crypto;
538 	struct snp_derived_key_resp derived_key_resp = {0};
539 	int rc, resp_len;
540 	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
541 	u8 buf[64 + 16];
542 
543 	lockdep_assert_held(&snp_cmd_mutex);
544 
545 	if (!arg->req_data || !arg->resp_data)
546 		return -EINVAL;
547 
548 	/*
549 	 * The intermediate response buffer is used while decrypting the
550 	 * response payload. Make sure that it has enough space to cover the
551 	 * authtag.
552 	 */
553 	resp_len = sizeof(derived_key_resp.data) + crypto->a_len;
554 	if (sizeof(buf) < resp_len)
555 		return -ENOMEM;
556 
557 	if (copy_from_user(derived_key_req, (void __user *)arg->req_data,
558 			   sizeof(*derived_key_req)))
559 		return -EFAULT;
560 
561 	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg, SNP_MSG_KEY_REQ,
562 				  derived_key_req, sizeof(*derived_key_req), buf, resp_len);
563 	if (rc)
564 		return rc;
565 
566 	memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data));
567 	if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp,
568 			 sizeof(derived_key_resp)))
569 		rc = -EFAULT;
570 
571 	/* The response buffer contains the sensitive data, explicitly clear it. */
572 	memzero_explicit(buf, sizeof(buf));
573 	memzero_explicit(&derived_key_resp, sizeof(derived_key_resp));
574 	return rc;
575 }
576 
577 static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg,
578 			  struct snp_req_resp *io)
579 
580 {
581 	struct snp_ext_report_req *report_req = &snp_dev->req.ext_report;
582 	struct snp_guest_crypto *crypto = snp_dev->crypto;
583 	struct snp_report_resp *report_resp;
584 	int ret, npages = 0, resp_len;
585 	sockptr_t certs_address;
586 
587 	lockdep_assert_held(&snp_cmd_mutex);
588 
589 	if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
590 		return -EINVAL;
591 
592 	if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req)))
593 		return -EFAULT;
594 
595 	/* caller does not want certificate data */
596 	if (!report_req->certs_len || !report_req->certs_address)
597 		goto cmd;
598 
599 	if (report_req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
600 	    !IS_ALIGNED(report_req->certs_len, PAGE_SIZE))
601 		return -EINVAL;
602 
603 	if (sockptr_is_kernel(io->resp_data)) {
604 		certs_address = KERNEL_SOCKPTR((void *)report_req->certs_address);
605 	} else {
606 		certs_address = USER_SOCKPTR((void __user *)report_req->certs_address);
607 		if (!access_ok(certs_address.user, report_req->certs_len))
608 			return -EFAULT;
609 	}
610 
611 	/*
612 	 * Initialize the intermediate buffer with all zeros. This buffer
613 	 * is used in the guest request message to get the certs blob from
614 	 * the host. If the host does not supply any certs in it, the zeros are
615 	 * copied back to indicate that certificate data was not provided.
616 	 */
617 	memset(snp_dev->certs_data, 0, report_req->certs_len);
618 	npages = report_req->certs_len >> PAGE_SHIFT;
619 cmd:
620 	/*
621 	 * The intermediate response buffer is used while decrypting the
622 	 * response payload. Make sure that it has enough space to cover the
623 	 * authtag.
624 	 */
625 	resp_len = sizeof(report_resp->data) + crypto->a_len;
626 	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
627 	if (!report_resp)
628 		return -ENOMEM;
629 
630 	snp_dev->input.data_npages = npages;
631 	ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg, SNP_MSG_REPORT_REQ,
632 				   &report_req->data, sizeof(report_req->data),
633 				   report_resp->data, resp_len);
634 
635 	/* If certs length is invalid then copy the returned length */
636 	if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
637 		report_req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
638 
639 		if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req)))
640 			ret = -EFAULT;
641 	}
642 
643 	if (ret)
644 		goto e_free;
645 
646 	if (npages && copy_to_sockptr(certs_address, snp_dev->certs_data, report_req->certs_len)) {
647 		ret = -EFAULT;
648 		goto e_free;
649 	}
650 
651 	if (copy_to_sockptr(io->resp_data, report_resp, sizeof(*report_resp)))
652 		ret = -EFAULT;
653 
654 e_free:
655 	kfree(report_resp);
656 	return ret;
657 }
658 
659 static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
660 {
661 	struct snp_guest_dev *snp_dev = to_snp_dev(file);
662 	void __user *argp = (void __user *)arg;
663 	struct snp_guest_request_ioctl input;
664 	struct snp_req_resp io;
665 	int ret = -ENOTTY;
666 
667 	if (copy_from_user(&input, argp, sizeof(input)))
668 		return -EFAULT;
669 
670 	input.exitinfo2 = 0xff;
671 
672 	/* Message version must be non-zero */
673 	if (!input.msg_version)
674 		return -EINVAL;
675 
676 	mutex_lock(&snp_cmd_mutex);
677 
678 	/* Check if the VMPCK is not empty */
679 	if (is_vmpck_empty(snp_dev)) {
680 		dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
681 		mutex_unlock(&snp_cmd_mutex);
682 		return -ENOTTY;
683 	}
684 
685 	switch (ioctl) {
686 	case SNP_GET_REPORT:
687 		ret = get_report(snp_dev, &input);
688 		break;
689 	case SNP_GET_DERIVED_KEY:
690 		ret = get_derived_key(snp_dev, &input);
691 		break;
692 	case SNP_GET_EXT_REPORT:
693 		/*
694 		 * As get_ext_report() may be called from the ioctl() path and a
695 		 * kernel internal path (configfs-tsm), decorate the passed
696 		 * buffers as user pointers.
697 		 */
698 		io.req_data = USER_SOCKPTR((void __user *)input.req_data);
699 		io.resp_data = USER_SOCKPTR((void __user *)input.resp_data);
700 		ret = get_ext_report(snp_dev, &input, &io);
701 		break;
702 	default:
703 		break;
704 	}
705 
706 	mutex_unlock(&snp_cmd_mutex);
707 
708 	if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
709 		return -EFAULT;
710 
711 	return ret;
712 }
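
/*
 * Minimal userspace sketch of the SNP_GET_REPORT path (illustrative only; the
 * structures and ioctl number come from <linux/sev-guest.h>, error handling
 * omitted):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/sev-guest.h>
 *
 *	struct snp_report_req req = {};   // user_data[] carries the caller's nonce
 *	struct snp_report_resp resp = {};
 *	struct snp_guest_request_ioctl io = {
 *		.msg_version = 1,
 *		.req_data = (__u64)&req,
 *		.resp_data = (__u64)&resp,
 *	};
 *
 *	int fd = open("/dev/sev-guest", O_RDWR);
 *	int rc = ioctl(fd, SNP_GET_REPORT, &io);
 *	// On success, resp.data holds the attestation report returned by the PSP;
 *	// on failure, io.exitinfo2 carries the firmware/VMM error code.
 */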
713 
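/*
 * Restore the encryption (private) attribute on the buffer before freeing it;
 * if that fails, leak the pages rather than hand host-shared memory back to
 * the page allocator.
 */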
714 static void free_shared_pages(void *buf, size_t sz)
715 {
716 	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
717 	int ret;
718 
719 	if (!buf)
720 		return;
721 
722 	ret = set_memory_encrypted((unsigned long)buf, npages);
723 	if (ret) {
724 		WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
725 		return;
726 	}
727 
728 	__free_pages(virt_to_page(buf), get_order(sz));
729 }
730 
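/*
 * Allocate pages and mark them shared (decrypted) so that the hypervisor can
 * access the request, response and certificate-data buffers.
 */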
731 static void *alloc_shared_pages(struct device *dev, size_t sz)
732 {
733 	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
734 	struct page *page;
735 	int ret;
736 
737 	page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
738 	if (!page)
739 		return NULL;
740 
741 	ret = set_memory_decrypted((unsigned long)page_address(page), npages);
742 	if (ret) {
743 		dev_err(dev, "failed to mark page shared, ret=%d\n", ret);
744 		__free_pages(page, get_order(sz));
745 		return NULL;
746 	}
747 
748 	return page_address(page);
749 }
750 
751 static const struct file_operations snp_guest_fops = {
752 	.owner	= THIS_MODULE,
753 	.unlocked_ioctl = snp_guest_ioctl,
754 };
755 
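/*
 * Return a pointer to the requested VMPCK within the secrets page and, via
 * @seqno, its per-key message sequence counter; NULL for an invalid key ID.
 */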
756 static u8 *get_vmpck(int id, struct snp_secrets_page *secrets, u32 **seqno)
757 {
758 	u8 *key = NULL;
759 
760 	switch (id) {
761 	case 0:
762 		*seqno = &secrets->os_area.msg_seqno_0;
763 		key = secrets->vmpck0;
764 		break;
765 	case 1:
766 		*seqno = &secrets->os_area.msg_seqno_1;
767 		key = secrets->vmpck1;
768 		break;
769 	case 2:
770 		*seqno = &secrets->os_area.msg_seqno_2;
771 		key = secrets->vmpck2;
772 		break;
773 	case 3:
774 		*seqno = &secrets->os_area.msg_seqno_3;
775 		key = secrets->vmpck3;
776 		break;
777 	default:
778 		break;
779 	}
780 
781 	return key;
782 }
783 
784 struct snp_msg_report_resp_hdr {
785 	u32 status;
786 	u32 report_size;
787 	u8 rsvd[24];
788 };
789 
790 struct snp_msg_cert_entry {
791 	guid_t guid;
792 	u32 offset;
793 	u32 length;
794 };
795 
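/*
 * configfs-tsm report generation when running under an SVSM: the attestation
 * request is proxied through the SVSM (snp_issue_svsm_attest_req()) rather
 * than sent directly to the PSP, retrying with larger buffers if the SVSM
 * reports them as too small.
 */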
796 static int sev_svsm_report_new(struct tsm_report *report, void *data)
797 {
798 	unsigned int rep_len, man_len, certs_len;
799 	struct tsm_desc *desc = &report->desc;
800 	struct svsm_attest_call ac = {};
801 	unsigned int retry_count;
802 	void *rep, *man, *certs;
803 	struct svsm_call call;
804 	unsigned int size;
805 	bool try_again;
806 	void *buffer;
807 	u64 call_id;
808 	int ret;
809 
810 	/*
811 	 * Allocate pages for the request:
812 	 * - Report blob (4K)
813 	 * - Manifest blob (4K)
814 	 * - Certificate blob (16K)
815 	 *
816 	 * Above addresses must be 4K aligned
817 	 */
818 	rep_len = SZ_4K;
819 	man_len = SZ_4K;
820 	certs_len = SEV_FW_BLOB_MAX_SIZE;
821 
822 	guard(mutex)(&snp_cmd_mutex);
823 
824 	if (guid_is_null(&desc->service_guid)) {
825 		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES);
826 	} else {
827 		export_guid(ac.service_guid, &desc->service_guid);
828 		ac.service_manifest_ver = desc->service_manifest_version;
829 
830 		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SINGLE_SERVICE);
831 	}
832 
833 	retry_count = 0;
834 
835 retry:
836 	memset(&call, 0, sizeof(call));
837 
838 	size = rep_len + man_len + certs_len;
839 	buffer = alloc_pages_exact(size, __GFP_ZERO);
840 	if (!buffer)
841 		return -ENOMEM;
842 
843 	rep = buffer;
844 	ac.report_buf.pa = __pa(rep);
845 	ac.report_buf.len = rep_len;
846 
847 	man = rep + rep_len;
848 	ac.manifest_buf.pa = __pa(man);
849 	ac.manifest_buf.len = man_len;
850 
851 	certs = man + man_len;
852 	ac.certificates_buf.pa = __pa(certs);
853 	ac.certificates_buf.len = certs_len;
854 
855 	ac.nonce.pa = __pa(desc->inblob);
856 	ac.nonce.len = desc->inblob_len;
857 
858 	ret = snp_issue_svsm_attest_req(call_id, &call, &ac);
859 	if (ret) {
860 		free_pages_exact(buffer, size);
861 
862 		switch (call.rax_out) {
863 		case SVSM_ERR_INVALID_PARAMETER:
864 			try_again = false;
865 
866 			if (ac.report_buf.len > rep_len) {
867 				rep_len = PAGE_ALIGN(ac.report_buf.len);
868 				try_again = true;
869 			}
870 
871 			if (ac.manifest_buf.len > man_len) {
872 				man_len = PAGE_ALIGN(ac.manifest_buf.len);
873 				try_again = true;
874 			}
875 
876 			if (ac.certificates_buf.len > certs_len) {
877 				certs_len = PAGE_ALIGN(ac.certificates_buf.len);
878 				try_again = true;
879 			}
880 
881 			/* If one of the buffers wasn't large enough, retry the request */
882 			if (try_again && retry_count < SVSM_MAX_RETRIES) {
883 				retry_count++;
884 				goto retry;
885 			}
886 
887 			return -EINVAL;
888 		default:
889 			pr_err_ratelimited("SVSM attestation request failed (%d / 0x%llx)\n",
890 					   ret, call.rax_out);
891 			return -EINVAL;
892 		}
893 	}
894 
895 	/*
896 	 * Allocate all the blob memory buffers at once so that the cleanup is
897 	 * done for errors that occur after the first allocation (i.e. before
898 	 * using no_free_ptr()).
899 	 */
900 	rep_len = ac.report_buf.len;
901 	void *rbuf __free(kvfree) = kvzalloc(rep_len, GFP_KERNEL);
902 
903 	man_len = ac.manifest_buf.len;
904 	void *mbuf __free(kvfree) = kvzalloc(man_len, GFP_KERNEL);
905 
906 	certs_len = ac.certificates_buf.len;
907 	void *cbuf __free(kvfree) = certs_len ? kvzalloc(certs_len, GFP_KERNEL) : NULL;
908 
909 	if (!rbuf || !mbuf || (certs_len && !cbuf)) {
910 		free_pages_exact(buffer, size);
911 		return -ENOMEM;
912 	}
913 
914 	memcpy(rbuf, rep, rep_len);
915 	report->outblob = no_free_ptr(rbuf);
916 	report->outblob_len = rep_len;
917 
918 	memcpy(mbuf, man, man_len);
919 	report->manifestblob = no_free_ptr(mbuf);
920 	report->manifestblob_len = man_len;
921 
922 	if (certs_len) {
923 		memcpy(cbuf, certs, certs_len);
924 		report->auxblob = no_free_ptr(cbuf);
925 		report->auxblob_len = certs_len;
926 	}
927 
928 	free_pages_exact(buffer, size);
929 
930 	return 0;
931 }
932 
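/*
 * configfs-tsm report_new() callback: delegate to the SVSM path when a
 * service provider is requested, otherwise issue an extended guest request
 * with kernel (sockptr) buffers and split the response into outblob (the
 * attestation report) and auxblob (the certificate table).
 */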
933 static int sev_report_new(struct tsm_report *report, void *data)
934 {
935 	struct snp_msg_cert_entry *cert_table;
936 	struct tsm_desc *desc = &report->desc;
937 	struct snp_guest_dev *snp_dev = data;
938 	struct snp_msg_report_resp_hdr hdr;
939 	const u32 report_size = SZ_4K;
940 	const u32 ext_size = SEV_FW_BLOB_MAX_SIZE;
941 	u32 certs_size, i, size = report_size + ext_size;
942 	int ret;
943 
944 	if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE)
945 		return -EINVAL;
946 
947 	if (desc->service_provider) {
948 		if (strcmp(desc->service_provider, "svsm"))
949 			return -EINVAL;
950 
951 		return sev_svsm_report_new(report, data);
952 	}
953 
954 	void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL);
955 	if (!buf)
956 		return -ENOMEM;
957 
958 	guard(mutex)(&snp_cmd_mutex);
959 
960 	/* Check if the VMPCK is not empty */
961 	if (is_vmpck_empty(snp_dev)) {
962 		dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
963 		return -ENOTTY;
964 	}
965 
966 	cert_table = buf + report_size;
967 	struct snp_ext_report_req ext_req = {
968 		.data = { .vmpl = desc->privlevel },
969 		.certs_address = (__u64)cert_table,
970 		.certs_len = ext_size,
971 	};
972 	memcpy(&ext_req.data.user_data, desc->inblob, desc->inblob_len);
973 
974 	struct snp_guest_request_ioctl input = {
975 		.msg_version = 1,
976 		.req_data = (__u64)&ext_req,
977 		.resp_data = (__u64)buf,
978 		.exitinfo2 = 0xff,
979 	};
980 	struct snp_req_resp io = {
981 		.req_data = KERNEL_SOCKPTR(&ext_req),
982 		.resp_data = KERNEL_SOCKPTR(buf),
983 	};
984 
985 	ret = get_ext_report(snp_dev, &input, &io);
986 	if (ret)
987 		return ret;
988 
989 	memcpy(&hdr, buf, sizeof(hdr));
990 	if (hdr.status == SEV_RET_INVALID_PARAM)
991 		return -EINVAL;
992 	if (hdr.status == SEV_RET_INVALID_KEY)
993 		return -EINVAL;
994 	if (hdr.status)
995 		return -ENXIO;
996 	if ((hdr.report_size + sizeof(hdr)) > report_size)
997 		return -ENOMEM;
998 
999 	void *rbuf __free(kvfree) = kvzalloc(hdr.report_size, GFP_KERNEL);
1000 	if (!rbuf)
1001 		return -ENOMEM;
1002 
1003 	memcpy(rbuf, buf + sizeof(hdr), hdr.report_size);
1004 	report->outblob = no_free_ptr(rbuf);
1005 	report->outblob_len = hdr.report_size;
1006 
1007 	certs_size = 0;
1008 	for (i = 0; i < ext_size / sizeof(struct snp_msg_cert_entry); i++) {
1009 		struct snp_msg_cert_entry *ent = &cert_table[i];
1010 
1011 		if (guid_is_null(&ent->guid) && !ent->offset && !ent->length)
1012 			break;
1013 		certs_size = max(certs_size, ent->offset + ent->length);
1014 	}
1015 
1016 	/* Suspicious that the response populated entries without populating size */
1017 	if (!certs_size && i)
1018 		dev_warn_ratelimited(snp_dev->dev, "certificate slots conveyed without size\n");
1019 
1020 	/* No certs to report */
1021 	if (!certs_size)
1022 		return 0;
1023 
1024 	/* Suspicious that the certificate blob size contract was violated
1025 	 */
1026 	if (certs_size > ext_size) {
1027 		dev_warn_ratelimited(snp_dev->dev, "certificate data truncated\n");
1028 		certs_size = ext_size;
1029 	}
1030 
1031 	void *cbuf __free(kvfree) = kvzalloc(certs_size, GFP_KERNEL);
1032 	if (!cbuf)
1033 		return -ENOMEM;
1034 
1035 	memcpy(cbuf, cert_table, certs_size);
1036 	report->auxblob = no_free_ptr(cbuf);
1037 	report->auxblob_len = certs_size;
1038 
1039 	return 0;
1040 }
1041 
1042 static bool sev_report_attr_visible(int n)
1043 {
1044 	switch (n) {
1045 	case TSM_REPORT_GENERATION:
1046 	case TSM_REPORT_PROVIDER:
1047 	case TSM_REPORT_PRIVLEVEL:
1048 	case TSM_REPORT_PRIVLEVEL_FLOOR:
1049 		return true;
1050 	case TSM_REPORT_SERVICE_PROVIDER:
1051 	case TSM_REPORT_SERVICE_GUID:
1052 	case TSM_REPORT_SERVICE_MANIFEST_VER:
1053 		return snp_vmpl;
1054 	}
1055 
1056 	return false;
1057 }
1058 
1059 static bool sev_report_bin_attr_visible(int n)
1060 {
1061 	switch (n) {
1062 	case TSM_REPORT_INBLOB:
1063 	case TSM_REPORT_OUTBLOB:
1064 	case TSM_REPORT_AUXBLOB:
1065 		return true;
1066 	case TSM_REPORT_MANIFESTBLOB:
1067 		return snp_vmpl;
1068 	}
1069 
1070 	return false;
1071 }
1072 
1073 static struct tsm_ops sev_tsm_ops = {
1074 	.name = KBUILD_MODNAME,
1075 	.report_new = sev_report_new,
1076 	.report_attr_visible = sev_report_attr_visible,
1077 	.report_bin_attr_visible = sev_report_bin_attr_visible,
1078 };
1079 
1080 static void unregister_sev_tsm(void *data)
1081 {
1082 	tsm_unregister(&sev_tsm_ops);
1083 }
1084 
1085 static int __init sev_guest_probe(struct platform_device *pdev)
1086 {
1087 	struct sev_guest_platform_data *data;
1088 	struct snp_secrets_page *secrets;
1089 	struct device *dev = &pdev->dev;
1090 	struct snp_guest_dev *snp_dev;
1091 	struct miscdevice *misc;
1092 	void __iomem *mapping;
1093 	int ret;
1094 
1095 	BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE);
1096 
1097 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1098 		return -ENODEV;
1099 
1100 	if (!dev->platform_data)
1101 		return -ENODEV;
1102 
1103 	data = (struct sev_guest_platform_data *)dev->platform_data;
1104 	mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
1105 	if (!mapping)
1106 		return -ENODEV;
1107 
1108 	secrets = (__force void *)mapping;
1109 
1110 	ret = -ENOMEM;
1111 	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
1112 	if (!snp_dev)
1113 		goto e_unmap;
1114 
1115 	/* Adjust the default VMPCK key based on the executing VMPL level */
1116 	if (vmpck_id == -1)
1117 		vmpck_id = snp_vmpl;
1118 
1119 	ret = -EINVAL;
1120 	snp_dev->vmpck = get_vmpck(vmpck_id, secrets, &snp_dev->os_area_msg_seqno);
1121 	if (!snp_dev->vmpck) {
1122 		dev_err(dev, "Invalid VMPCK%d communication key\n", vmpck_id);
1123 		goto e_unmap;
1124 	}
1125 
1126 	/* Verify that VMPCK is not zero. */
1127 	if (is_vmpck_empty(snp_dev)) {
1128 		dev_err(dev, "Empty VMPCK%d communication key\n", vmpck_id);
1129 		goto e_unmap;
1130 	}
1131 
1132 	platform_set_drvdata(pdev, snp_dev);
1133 	snp_dev->dev = dev;
1134 	snp_dev->secrets = secrets;
1135 
1136 	/* Allocate the shared page used for the request and response message. */
1137 	snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
1138 	if (!snp_dev->request)
1139 		goto e_unmap;
1140 
1141 	snp_dev->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
1142 	if (!snp_dev->response)
1143 		goto e_free_request;
1144 
1145 	snp_dev->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE);
1146 	if (!snp_dev->certs_data)
1147 		goto e_free_response;
1148 
1149 	ret = -EIO;
1150 	snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN);
1151 	if (!snp_dev->crypto)
1152 		goto e_free_cert_data;
1153 
1154 	misc = &snp_dev->misc;
1155 	misc->minor = MISC_DYNAMIC_MINOR;
1156 	misc->name = DEVICE_NAME;
1157 	misc->fops = &snp_guest_fops;
1158 
1159 	/* Initialize the input addresses for the guest request */
1160 	snp_dev->input.req_gpa = __pa(snp_dev->request);
1161 	snp_dev->input.resp_gpa = __pa(snp_dev->response);
1162 	snp_dev->input.data_gpa = __pa(snp_dev->certs_data);
1163 
1164 	/* Set the privlevel_floor attribute based on the vmpck_id */
1165 	sev_tsm_ops.privlevel_floor = vmpck_id;
1166 
1167 	ret = tsm_register(&sev_tsm_ops, snp_dev);
1168 	if (ret)
1169 		goto e_free_cert_data;
1170 
1171 	ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL);
1172 	if (ret)
1173 		goto e_free_cert_data;
1174 
1175 	ret = misc_register(misc);
1176 	if (ret)
1177 		goto e_free_cert_data;
1178 
1179 	dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n", vmpck_id);
1180 	return 0;
1181 
1182 e_free_cert_data:
1183 	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
1184 e_free_response:
1185 	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
1186 e_free_request:
1187 	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
1188 e_unmap:
1189 	iounmap(mapping);
1190 	return ret;
1191 }
1192 
1193 static void __exit sev_guest_remove(struct platform_device *pdev)
1194 {
1195 	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
1196 
1197 	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
1198 	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
1199 	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
1200 	deinit_crypto(snp_dev->crypto);
1201 	misc_deregister(&snp_dev->misc);
1202 }
1203 
1204 /*
1205  * This driver is meant to be a common SEV guest interface driver and to
1206  * support any SEV guest API. As such, even though it has been introduced
1207  * with the SEV-SNP support, it is named "sev-guest".
1208  *
1209  * sev_guest_remove() lives in .exit.text. For drivers registered via
1210  * module_platform_driver_probe() this is ok because they cannot get unbound
1211  * at runtime. So mark the driver struct with __refdata to prevent modpost
1212  * triggering a section mismatch warning.
1213  */
1214 static struct platform_driver sev_guest_driver __refdata = {
1215 	.remove_new	= __exit_p(sev_guest_remove),
1216 	.driver		= {
1217 		.name = "sev-guest",
1218 	},
1219 };
1220 
1221 module_platform_driver_probe(sev_guest_driver, sev_guest_probe);
1222 
1223 MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
1224 MODULE_LICENSE("GPL");
1225 MODULE_VERSION("1.0.0");
1226 MODULE_DESCRIPTION("AMD SEV Guest Driver");
1227 MODULE_ALIAS("platform:sev-guest");
1228