// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Encrypted Virtualization (SEV) guest driver interface
 *
 * Copyright (C) 2021-2024 Advanced Micro Devices, Inc.
 *
 * Author: Brijesh Singh <brijesh.singh@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/set_memory.h>
#include <linux/fs.h>
#include <linux/tsm.h>
#include <crypto/gcm.h>
#include <linux/psp-sev.h>
#include <linux/sockptr.h>
#include <linux/cleanup.h>
#include <linux/uuid.h>
#include <linux/configfs.h>
#include <uapi/linux/sev-guest.h>
#include <uapi/linux/psp-sev.h>

#include <asm/svm.h>
#include <asm/sev.h>

#define DEVICE_NAME	"sev-guest"

#define SVSM_MAX_RETRIES		3

struct snp_guest_dev {
	struct device *dev;
	struct miscdevice misc;

	struct snp_msg_desc *msg_desc;
};

/*
 * The VMPCK ID represents the key used by the SNP guest to communicate with the
 * SEV firmware in the AMD Secure Processor (ASP, aka PSP). By default, the key
 * used will be the key associated with the VMPL at which the guest is running.
 * Should the default key be wiped (see snp_disable_vmpck()), this parameter
 * allows for using one of the remaining VMPCKs.
 */
static int vmpck_id = -1;
module_param(vmpck_id, int, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");
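/* e.g. "modprobe sev-guest vmpck_id=2" selects VMPCK2 rather than the default key. */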

static inline struct snp_guest_dev *to_snp_dev(struct file *file)
{
	struct miscdevice *dev = file->private_data;

	return container_of(dev, struct snp_guest_dev, misc);
}

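/*
 * Request/response buffers wrapped as sockptr_t so that get_ext_report() can
 * serve both the ioctl() path (user pointers) and the configfs-tsm path
 * (kernel pointers).
 */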
struct snp_req_resp {
	sockptr_t req_data;
	sockptr_t resp_data;
};

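/*
 * Handle SNP_GET_REPORT: copy the snp_report_req from userspace, send an
 * encrypted SNP_MSG_REPORT_REQ to the PSP and copy the attestation report
 * back to the caller.
 */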
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_report_req *report_req __free(kfree) = NULL;
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_report_resp *report_resp;
	struct snp_guest_req req = {};
	int rc, resp_len;

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
	if (!report_req)
		return -ENOMEM;

	if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req)))
		return -EFAULT;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!report_resp)
		return -ENOMEM;

	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_REPORT_REQ;
	req.vmpck_id = mdesc->vmpck_id;
	req.req_buf = report_req;
	req.req_sz = sizeof(*report_req);
	req.resp_buf = report_resp->data;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;

	rc = snp_send_guest_request(mdesc, &req, arg);
	if (rc)
		goto e_free;

	if (copy_to_user((void __user *)arg->resp_data, report_resp, sizeof(*report_resp)))
		rc = -EFAULT;

e_free:
	kfree(report_resp);
	return rc;
}

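/*
 * Handle SNP_GET_DERIVED_KEY: send an SNP_MSG_KEY_REQ to the PSP and return
 * the derived key to the caller. The intermediate response lives on the
 * stack and is explicitly cleared since it holds sensitive key material.
 */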
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_derived_key_req *derived_key_req __free(kfree) = NULL;
	struct snp_derived_key_resp derived_key_resp = {0};
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_guest_req req = {};
	int rc, resp_len;
	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
	u8 buf[64 + 16];

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize;
	if (sizeof(buf) < resp_len)
		return -ENOMEM;

	derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT);
	if (!derived_key_req)
		return -ENOMEM;

	if (copy_from_user(derived_key_req, (void __user *)arg->req_data,
			   sizeof(*derived_key_req)))
		return -EFAULT;

	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_KEY_REQ;
	req.vmpck_id = mdesc->vmpck_id;
	req.req_buf = derived_key_req;
	req.req_sz = sizeof(*derived_key_req);
	req.resp_buf = buf;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;

	rc = snp_send_guest_request(mdesc, &req, arg);
	if (rc)
		return rc;

	memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data));
	if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp,
			 sizeof(derived_key_resp)))
		rc = -EFAULT;

	/* The response buffer contains the sensitive data, explicitly clear it. */
	memzero_explicit(buf, sizeof(buf));
	memzero_explicit(&derived_key_resp, sizeof(derived_key_resp));
	return rc;
}

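/*
 * Handle SNP_GET_EXT_REPORT: like get_report(), but additionally retrieve
 * the certificate blob supplied by the host. The certificate pages are
 * shared with the hypervisor (set_memory_decrypted()) for the duration of
 * the request and restored to private afterwards.
 */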
static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg,
			  struct snp_req_resp *io)
{
	struct snp_ext_report_req *report_req __free(kfree) = NULL;
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_report_resp *report_resp;
	struct snp_guest_req req = {};
	int ret, npages = 0, resp_len;
	sockptr_t certs_address;
	struct page *page;

	if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
		return -EINVAL;

	report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
	if (!report_req)
		return -ENOMEM;

	if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req)))
		return -EFAULT;

	/* caller does not want certificate data */
	if (!report_req->certs_len || !report_req->certs_address)
		goto cmd;

	if (report_req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
	    !IS_ALIGNED(report_req->certs_len, PAGE_SIZE))
		return -EINVAL;

	if (sockptr_is_kernel(io->resp_data)) {
		certs_address = KERNEL_SOCKPTR((void *)report_req->certs_address);
	} else {
		certs_address = USER_SOCKPTR((void __user *)report_req->certs_address);
		if (!access_ok(certs_address.user, report_req->certs_len))
			return -EFAULT;
	}

	/*
	 * Initialize the intermediate buffer with all zeros. This buffer
	 * is used in the guest request message to get the certs blob from
	 * the host. If the host does not supply any certs in it, then the
	 * zeros will indicate that certificate data was not provided.
	 */
	npages = report_req->certs_len >> PAGE_SHIFT;
	page = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
			   get_order(report_req->certs_len));
	if (!page)
		return -ENOMEM;

	req.certs_data = page_address(page);
	ret = set_memory_decrypted((unsigned long)req.certs_data, npages);
	if (ret) {
		pr_err("failed to mark page shared, ret=%d\n", ret);
		__free_pages(page, get_order(report_req->certs_len));
		return -EFAULT;
	}

cmd:
	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!report_resp) {
		ret = -ENOMEM;
		goto e_free_data;
	}

	req.input.data_npages = npages;

	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_REPORT_REQ;
	req.vmpck_id = mdesc->vmpck_id;
	req.req_buf = &report_req->data;
	req.req_sz = sizeof(report_req->data);
	req.resp_buf = report_resp->data;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_EXT_GUEST_REQUEST;

	ret = snp_send_guest_request(mdesc, &req, arg);

	/* If certs length is invalid then copy the returned length */
	if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
		report_req->certs_len = req.input.data_npages << PAGE_SHIFT;

		if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req)))
			ret = -EFAULT;
	}

	if (ret)
		goto e_free;

	if (npages && copy_to_sockptr(certs_address, req.certs_data, report_req->certs_len)) {
		ret = -EFAULT;
		goto e_free;
	}

	if (copy_to_sockptr(io->resp_data, report_resp, sizeof(*report_resp)))
		ret = -EFAULT;

e_free:
	kfree(report_resp);
e_free_data:
	if (npages) {
		if (set_memory_encrypted((unsigned long)req.certs_data, npages))
			WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
		else
			__free_pages(page, get_order(report_req->certs_len));
	}
	return ret;
}

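/*
 * Top-level ioctl handler for /dev/sev-guest. exitinfo2 is seeded with a
 * non-zero sentinel and copied back to userspace so that callers can inspect
 * the firmware/VMM error code even when the command fails.
 */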
static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	struct snp_guest_dev *snp_dev = to_snp_dev(file);
	void __user *argp = (void __user *)arg;
	struct snp_guest_request_ioctl input;
	struct snp_req_resp io;
	int ret = -ENOTTY;

	if (copy_from_user(&input, argp, sizeof(input)))
		return -EFAULT;

	input.exitinfo2 = 0xff;

	/* Message version must be non-zero */
	if (!input.msg_version)
		return -EINVAL;

	switch (ioctl) {
	case SNP_GET_REPORT:
		ret = get_report(snp_dev, &input);
		break;
	case SNP_GET_DERIVED_KEY:
		ret = get_derived_key(snp_dev, &input);
		break;
	case SNP_GET_EXT_REPORT:
		/*
		 * As get_ext_report() may be called from the ioctl() path and a
		 * kernel internal path (configfs-tsm), decorate the passed
		 * buffers as user pointers.
		 */
		io.req_data = USER_SOCKPTR((void __user *)input.req_data);
		io.resp_data = USER_SOCKPTR((void __user *)input.resp_data);
		ret = get_ext_report(snp_dev, &input, &io);
		break;
	default:
		break;
	}

	if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
		return -EFAULT;

	return ret;
}

static const struct file_operations snp_guest_fops = {
	.owner	= THIS_MODULE,
	.unlocked_ioctl = snp_guest_ioctl,
};

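/* Header that precedes the report payload in the MSG_REPORT_RSP message. */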
struct snp_msg_report_resp_hdr {
	u32 status;
	u32 report_size;
	u8 rsvd[24];
};

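/* One entry of the GUID-keyed certificate table returned in the certs blob. */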
struct snp_msg_cert_entry {
	guid_t guid;
	u32 offset;
	u32 length;
};

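/*
 * configfs-tsm report generation when running under an SVSM: forward the
 * attestation request to the SVSM, which returns the report, a service
 * manifest and (optionally) a certificate blob. Undersized buffers are
 * grown and the request retried up to SVSM_MAX_RETRIES times.
 */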
static int sev_svsm_report_new(struct tsm_report *report, void *data)
{
	unsigned int rep_len, man_len, certs_len;
	struct tsm_desc *desc = &report->desc;
	struct svsm_attest_call ac = {};
	unsigned int retry_count;
	void *rep, *man, *certs;
	struct svsm_call call;
	unsigned int size;
	bool try_again;
	void *buffer;
	u64 call_id;
	int ret;

	/*
	 * Allocate pages for the request:
	 * - Report blob (4K)
	 * - Manifest blob (4K)
	 * - Certificate blob (16K)
	 *
	 * Above addresses must be 4K aligned
	 */
	rep_len = SZ_4K;
	man_len = SZ_4K;
	certs_len = SEV_FW_BLOB_MAX_SIZE;

	if (guid_is_null(&desc->service_guid)) {
		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES);
	} else {
		export_guid(ac.service_guid, &desc->service_guid);
		ac.service_manifest_ver = desc->service_manifest_version;

		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SINGLE_SERVICE);
	}

	retry_count = 0;

retry:
	memset(&call, 0, sizeof(call));

	size = rep_len + man_len + certs_len;
	buffer = alloc_pages_exact(size, __GFP_ZERO);
	if (!buffer)
		return -ENOMEM;

	rep = buffer;
	ac.report_buf.pa = __pa(rep);
	ac.report_buf.len = rep_len;

	man = rep + rep_len;
	ac.manifest_buf.pa = __pa(man);
	ac.manifest_buf.len = man_len;

	certs = man + man_len;
	ac.certificates_buf.pa = __pa(certs);
	ac.certificates_buf.len = certs_len;

	ac.nonce.pa = __pa(desc->inblob);
	ac.nonce.len = desc->inblob_len;

	ret = snp_issue_svsm_attest_req(call_id, &call, &ac);
	if (ret) {
		free_pages_exact(buffer, size);

		switch (call.rax_out) {
		case SVSM_ERR_INVALID_PARAMETER:
			try_again = false;

			if (ac.report_buf.len > rep_len) {
				rep_len = PAGE_ALIGN(ac.report_buf.len);
				try_again = true;
			}

			if (ac.manifest_buf.len > man_len) {
				man_len = PAGE_ALIGN(ac.manifest_buf.len);
				try_again = true;
			}

			if (ac.certificates_buf.len > certs_len) {
				certs_len = PAGE_ALIGN(ac.certificates_buf.len);
				try_again = true;
			}

			/* If one of the buffers wasn't large enough, retry the request */
			if (try_again && retry_count < SVSM_MAX_RETRIES) {
				retry_count++;
				goto retry;
			}

			return -EINVAL;
		default:
			pr_err_ratelimited("SVSM attestation request failed (%d / 0x%llx)\n",
					   ret, call.rax_out);
			return -EINVAL;
		}
	}

	/*
	 * Allocate all the blob memory buffers at once so that the cleanup is
	 * done for errors that occur after the first allocation (i.e. before
	 * using no_free_ptr()).
	 */
	rep_len = ac.report_buf.len;
	void *rbuf __free(kvfree) = kvzalloc(rep_len, GFP_KERNEL);

	man_len = ac.manifest_buf.len;
	void *mbuf __free(kvfree) = kvzalloc(man_len, GFP_KERNEL);

	certs_len = ac.certificates_buf.len;
	void *cbuf __free(kvfree) = certs_len ? kvzalloc(certs_len, GFP_KERNEL) : NULL;

	if (!rbuf || !mbuf || (certs_len && !cbuf)) {
		free_pages_exact(buffer, size);
		return -ENOMEM;
	}

	memcpy(rbuf, rep, rep_len);
	report->outblob = no_free_ptr(rbuf);
	report->outblob_len = rep_len;

	memcpy(mbuf, man, man_len);
	report->manifestblob = no_free_ptr(mbuf);
	report->manifestblob_len = man_len;

	if (certs_len) {
		memcpy(cbuf, certs, certs_len);
		report->auxblob = no_free_ptr(cbuf);
		report->auxblob_len = certs_len;
	}

	free_pages_exact(buffer, size);

	return 0;
}

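/*
 * configfs-tsm report generation: dispatch to the SVSM path when a service
 * provider is requested, otherwise build an extended report request with
 * kernel buffers, reuse get_ext_report() and split the result into the
 * report (outblob) and the certificate table (auxblob).
 */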
static int sev_report_new(struct tsm_report *report, void *data)
{
	struct snp_msg_cert_entry *cert_table;
	struct tsm_desc *desc = &report->desc;
	struct snp_guest_dev *snp_dev = data;
	struct snp_msg_report_resp_hdr hdr;
	const u32 report_size = SZ_4K;
	const u32 ext_size = SEV_FW_BLOB_MAX_SIZE;
	u32 certs_size, i, size = report_size + ext_size;
	int ret;

	if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE)
		return -EINVAL;

	if (desc->service_provider) {
		if (strcmp(desc->service_provider, "svsm"))
			return -EINVAL;

		return sev_svsm_report_new(report, data);
	}

	void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cert_table = buf + report_size;
	struct snp_ext_report_req ext_req = {
		.data = { .vmpl = desc->privlevel },
		.certs_address = (__u64)cert_table,
		.certs_len = ext_size,
	};
	memcpy(&ext_req.data.user_data, desc->inblob, desc->inblob_len);

	struct snp_guest_request_ioctl input = {
		.msg_version = 1,
		.req_data = (__u64)&ext_req,
		.resp_data = (__u64)buf,
		.exitinfo2 = 0xff,
	};
	struct snp_req_resp io = {
		.req_data = KERNEL_SOCKPTR(&ext_req),
		.resp_data = KERNEL_SOCKPTR(buf),
	};

	ret = get_ext_report(snp_dev, &input, &io);
	if (ret)
		return ret;

	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.status == SEV_RET_INVALID_PARAM)
		return -EINVAL;
	if (hdr.status == SEV_RET_INVALID_KEY)
		return -EINVAL;
	if (hdr.status)
		return -ENXIO;
	if ((hdr.report_size + sizeof(hdr)) > report_size)
		return -ENOMEM;

	void *rbuf __free(kvfree) = kvzalloc(hdr.report_size, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	memcpy(rbuf, buf + sizeof(hdr), hdr.report_size);
	report->outblob = no_free_ptr(rbuf);
	report->outblob_len = hdr.report_size;

	certs_size = 0;
	for (i = 0; i < ext_size / sizeof(struct snp_msg_cert_entry); i++) {
		struct snp_msg_cert_entry *ent = &cert_table[i];

		if (guid_is_null(&ent->guid) && !ent->offset && !ent->length)
			break;
		certs_size = max(certs_size, ent->offset + ent->length);
	}

	/* Suspicious that the response populated entries without populating size */
	if (!certs_size && i)
		dev_warn_ratelimited(snp_dev->dev, "certificate slots conveyed without size\n");

	/* No certs to report */
	if (!certs_size)
		return 0;

	/* Suspicious that the certificate blob size contract was violated */
	if (certs_size > ext_size) {
		dev_warn_ratelimited(snp_dev->dev, "certificate data truncated\n");
		certs_size = ext_size;
	}

	void *cbuf __free(kvfree) = kvzalloc(certs_size, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	memcpy(cbuf, cert_table, certs_size);
	report->auxblob = no_free_ptr(cbuf);
	report->auxblob_len = certs_size;

	return 0;
}

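/*
 * The service_* attributes and the manifest blob are only exposed when the
 * guest runs under an SVSM (snp_vmpl != 0).
 */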
static bool sev_report_attr_visible(int n)
{
	switch (n) {
	case TSM_REPORT_GENERATION:
	case TSM_REPORT_PROVIDER:
	case TSM_REPORT_PRIVLEVEL:
	case TSM_REPORT_PRIVLEVEL_FLOOR:
		return true;
	case TSM_REPORT_SERVICE_PROVIDER:
	case TSM_REPORT_SERVICE_GUID:
	case TSM_REPORT_SERVICE_MANIFEST_VER:
		return snp_vmpl;
	}

	return false;
}

static bool sev_report_bin_attr_visible(int n)
{
	switch (n) {
	case TSM_REPORT_INBLOB:
	case TSM_REPORT_OUTBLOB:
	case TSM_REPORT_AUXBLOB:
		return true;
	case TSM_REPORT_MANIFESTBLOB:
		return snp_vmpl;
	}

	return false;
}

static struct tsm_ops sev_tsm_ops = {
	.name = KBUILD_MODNAME,
	.report_new = sev_report_new,
	.report_attr_visible = sev_report_attr_visible,
	.report_bin_attr_visible = sev_report_bin_attr_visible,
};

static void unregister_sev_tsm(void *data)
{
	tsm_unregister(&sev_tsm_ops);
}

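/*
 * Probe: allocate and initialize the SNP message context, register the
 * configfs-tsm provider and create the /dev/sev-guest misc device.
 */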
static int __init sev_guest_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct snp_guest_dev *snp_dev;
	struct snp_msg_desc *mdesc;
	struct miscdevice *misc;
	int ret;

	BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE);

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return -ENODEV;

	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
	if (!snp_dev)
		return -ENOMEM;

	mdesc = snp_msg_alloc();
	if (IS_ERR_OR_NULL(mdesc))
		return -ENOMEM;

	ret = snp_msg_init(mdesc, vmpck_id);
	if (ret)
		goto e_msg_init;

	platform_set_drvdata(pdev, snp_dev);
	snp_dev->dev = dev;

	misc = &snp_dev->misc;
	misc->minor = MISC_DYNAMIC_MINOR;
	misc->name = DEVICE_NAME;
	misc->fops = &snp_guest_fops;

	/* Set the privlevel_floor attribute based on the vmpck_id */
	sev_tsm_ops.privlevel_floor = mdesc->vmpck_id;

	ret = tsm_register(&sev_tsm_ops, snp_dev);
	if (ret)
		goto e_msg_init;

	ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL);
	if (ret)
		goto e_msg_init;

	ret = misc_register(misc);
	if (ret)
		goto e_msg_init;

	snp_dev->msg_desc = mdesc;
	dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n",
		 mdesc->vmpck_id);
	return 0;

e_msg_init:
	snp_msg_free(mdesc);

	return ret;
}

static void __exit sev_guest_remove(struct platform_device *pdev)
{
	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);

	snp_msg_free(snp_dev->msg_desc);
	misc_deregister(&snp_dev->misc);
}

/*
 * This driver is meant to be a common SEV guest interface driver and to
 * support any SEV guest API. As such, even though it has been introduced
 * with the SEV-SNP support, it is named "sev-guest".
 *
 * sev_guest_remove() lives in .exit.text. For drivers registered via
 * module_platform_driver_probe() this is ok because they cannot get unbound
 * at runtime. So mark the driver struct with __refdata to prevent modpost
 * triggering a section mismatch warning.
 */
static struct platform_driver sev_guest_driver __refdata = {
	.remove		= __exit_p(sev_guest_remove),
	.driver		= {
		.name = "sev-guest",
	},
};

module_platform_driver_probe(sev_guest_driver, sev_guest_probe);

MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD SEV Guest Driver");
MODULE_ALIAS("platform:sev-guest");