xref: /linux/drivers/virt/coco/sev-guest/sev-guest.c (revision ae5ec8adb8ec9c2aa916f853737c101faa87e5ba)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AMD Secure Encrypted Virtualization (SEV) guest driver interface
4  *
5  * Copyright (C) 2021-2024 Advanced Micro Devices, Inc.
6  *
7  * Author: Brijesh Singh <brijesh.singh@amd.com>
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/mutex.h>
14 #include <linux/io.h>
15 #include <linux/platform_device.h>
16 #include <linux/miscdevice.h>
17 #include <linux/set_memory.h>
18 #include <linux/fs.h>
19 #include <linux/tsm.h>
20 #include <crypto/gcm.h>
21 #include <linux/psp-sev.h>
22 #include <linux/sockptr.h>
23 #include <linux/cleanup.h>
24 #include <linux/uuid.h>
25 #include <linux/configfs.h>
26 #include <linux/mm.h>
27 #include <uapi/linux/sev-guest.h>
28 #include <uapi/linux/psp-sev.h>
29 
30 #include <asm/svm.h>
31 #include <asm/sev.h>
32 
#define DEVICE_NAME	"sev-guest"

/* Max number of resize-and-retry attempts for an SVSM attestation request. */
#define SVSM_MAX_RETRIES		3
36 
/*
 * Per-device state: the backing struct device, the registered misc char
 * device (/dev/sev-guest), and the SNP message descriptor used for all
 * encrypted guest requests to the PSP.
 */
struct snp_guest_dev {
	struct device *dev;
	struct miscdevice misc;

	struct snp_msg_desc *msg_desc;
};
43 
/*
 * The VMPCK ID represents the key used by the SNP guest to communicate with the
 * SEV firmware in the AMD Secure Processor (ASP, aka PSP). By default, the key
 * used will be the key associated with the VMPL at which the guest is running.
 * Should the default key be wiped (see snp_disable_vmpck()), this parameter
 * allows for using one of the remaining VMPCKs.
 */
static int vmpck_id = -1;
module_param(vmpck_id, int, 0444);	/* read-only: fixed for the module's lifetime */
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");
54 
to_snp_dev(struct file * file)55 static inline struct snp_guest_dev *to_snp_dev(struct file *file)
56 {
57 	struct miscdevice *dev = file->private_data;
58 
59 	return container_of(dev, struct snp_guest_dev, misc);
60 }
61 
/*
 * Request/response buffer pair for get_ext_report(). sockptr_t lets the
 * same code serve both user pointers (ioctl path) and kernel pointers
 * (configfs-tsm path via sev_report_new()).
 */
struct snp_req_resp {
	sockptr_t req_data;
	sockptr_t resp_data;
};
66 
/*
 * SNP_GET_REPORT handler: forward an attestation report request to the PSP
 * and copy the response back to user space.
 *
 * Returns 0 on success or a negative errno.
 */
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	/* Scope-based cleanup: freed automatically on every return path. */
	struct snp_report_req *report_req __free(kfree) = NULL;
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_report_resp *report_resp;
	struct snp_guest_req req = {};
	int rc, resp_len;

	/* Both user buffers must be supplied. */
	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
	if (!report_req)
		return -ENOMEM;

	if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req)))
		return -EFAULT;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!report_resp)
		return -ENOMEM;

	/* Assemble the encrypted guest message for the PSP. */
	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_REPORT_REQ;
	req.vmpck_id = mdesc->vmpck_id;
	req.req_buf = report_req;
	req.req_sz = sizeof(*report_req);
	req.resp_buf = report_resp->data;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;

	rc = snp_send_guest_request(mdesc, &req, arg);
	if (rc)
		goto e_free;

	/* Copy out only the fixed-size response struct, not the trailing authtag. */
	if (copy_to_user((void __user *)arg->resp_data, report_resp, sizeof(*report_resp)))
		rc = -EFAULT;

e_free:
	kfree(report_resp);
	return rc;
}
115 
/*
 * SNP_GET_DERIVED_KEY handler: request a derived key from the PSP and copy
 * it back to user space. Intermediate copies of the sensitive response are
 * explicitly zeroized before returning on the normal path.
 */
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	/* Scope-based cleanup: freed automatically on every return path. */
	struct snp_derived_key_req *derived_key_req __free(kfree) = NULL;
	struct snp_derived_key_resp derived_key_resp = {0};
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_guest_req req = {};
	int rc, resp_len;
	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
	u8 buf[64 + 16];

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize;
	if (sizeof(buf) < resp_len)
		return -ENOMEM;

	derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT);
	if (!derived_key_req)
		return -ENOMEM;

	if (copy_from_user(derived_key_req, (void __user *)arg->req_data,
			   sizeof(*derived_key_req)))
		return -EFAULT;

	/* Assemble the encrypted guest message for the PSP. */
	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_KEY_REQ;
	req.vmpck_id = mdesc->vmpck_id;
	req.req_buf = derived_key_req;
	req.req_sz = sizeof(*derived_key_req);
	req.resp_buf = buf;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;

	/*
	 * NOTE(review): the early return below skips the memzero_explicit()
	 * calls at the bottom; whether buf can hold partially-decrypted key
	 * material on failure depends on snp_send_guest_request() internals —
	 * worth confirming.
	 */
	rc = snp_send_guest_request(mdesc, &req, arg);
	if (rc)
		return rc;

	/* Copy out only the key data, not the trailing authtag bytes. */
	memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data));
	if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp,
			 sizeof(derived_key_resp)))
		rc = -EFAULT;

	/* The response buffer contains the sensitive data, explicitly clear it. */
	memzero_explicit(buf, sizeof(buf));
	memzero_explicit(&derived_key_resp, sizeof(derived_key_resp));
	return rc;
}
169 
get_ext_report(struct snp_guest_dev * snp_dev,struct snp_guest_request_ioctl * arg,struct snp_req_resp * io)170 static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg,
171 			  struct snp_req_resp *io)
172 
173 {
174 	struct snp_ext_report_req *report_req __free(kfree) = NULL;
175 	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
176 	struct snp_report_resp *report_resp;
177 	struct snp_guest_req req = {};
178 	int ret, npages = 0, resp_len;
179 	sockptr_t certs_address;
180 	struct page *page;
181 
182 	if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
183 		return -EINVAL;
184 
185 	report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
186 	if (!report_req)
187 		return -ENOMEM;
188 
189 	if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req)))
190 		return -EFAULT;
191 
192 	/* caller does not want certificate data */
193 	if (!report_req->certs_len || !report_req->certs_address)
194 		goto cmd;
195 
196 	if (report_req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
197 	    !IS_ALIGNED(report_req->certs_len, PAGE_SIZE))
198 		return -EINVAL;
199 
200 	if (sockptr_is_kernel(io->resp_data)) {
201 		certs_address = KERNEL_SOCKPTR((void *)report_req->certs_address);
202 	} else {
203 		certs_address = USER_SOCKPTR((void __user *)report_req->certs_address);
204 		if (!access_ok(certs_address.user, report_req->certs_len))
205 			return -EFAULT;
206 	}
207 
208 	/*
209 	 * Initialize the intermediate buffer with all zeros. This buffer
210 	 * is used in the guest request message to get the certs blob from
211 	 * the host. If host does not supply any certs in it, then copy
212 	 * zeros to indicate that certificate data was not provided.
213 	 */
214 	npages = report_req->certs_len >> PAGE_SHIFT;
215 	page = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
216 			   get_order(report_req->certs_len));
217 	if (!page)
218 		return -ENOMEM;
219 
220 	req.certs_data = page_address(page);
221 	ret = set_memory_decrypted((unsigned long)req.certs_data, npages);
222 	if (ret) {
223 		pr_err("failed to mark page shared, ret=%d\n", ret);
224 		__free_pages(page, get_order(report_req->certs_len));
225 		return -EFAULT;
226 	}
227 
228 cmd:
229 	/*
230 	 * The intermediate response buffer is used while decrypting the
231 	 * response payload. Make sure that it has enough space to cover the
232 	 * authtag.
233 	 */
234 	resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
235 	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
236 	if (!report_resp) {
237 		ret = -ENOMEM;
238 		goto e_free_data;
239 	}
240 
241 	req.input.data_npages = npages;
242 
243 	req.msg_version = arg->msg_version;
244 	req.msg_type = SNP_MSG_REPORT_REQ;
245 	req.vmpck_id = mdesc->vmpck_id;
246 	req.req_buf = &report_req->data;
247 	req.req_sz = sizeof(report_req->data);
248 	req.resp_buf = report_resp->data;
249 	req.resp_sz = resp_len;
250 	req.exit_code = SVM_VMGEXIT_EXT_GUEST_REQUEST;
251 
252 	ret = snp_send_guest_request(mdesc, &req, arg);
253 
254 	/* If certs length is invalid then copy the returned length */
255 	if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
256 		report_req->certs_len = req.input.data_npages << PAGE_SHIFT;
257 
258 		if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req)))
259 			ret = -EFAULT;
260 	}
261 
262 	if (ret)
263 		goto e_free;
264 
265 	if (npages && copy_to_sockptr(certs_address, req.certs_data, report_req->certs_len)) {
266 		ret = -EFAULT;
267 		goto e_free;
268 	}
269 
270 	if (copy_to_sockptr(io->resp_data, report_resp, sizeof(*report_resp)))
271 		ret = -EFAULT;
272 
273 e_free:
274 	kfree(report_resp);
275 e_free_data:
276 	if (npages) {
277 		if (set_memory_encrypted((unsigned long)req.certs_data, npages))
278 			WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
279 		else
280 			__free_pages(page, get_order(report_req->certs_len));
281 	}
282 	return ret;
283 }
284 
/*
 * Dispatch /dev/sev-guest ioctls. On exit, the snp_guest_request_ioctl
 * struct (including any firmware/VMM error detail filled in during the
 * request) is copied back to user space.
 */
static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	struct snp_guest_dev *snp_dev = to_snp_dev(file);
	void __user *argp = (void __user *)arg;
	struct snp_guest_request_ioctl input;
	struct snp_req_resp io;
	int ret = -ENOTTY;

	if (copy_from_user(&input, argp, sizeof(input)))
		return -EFAULT;

	/*
	 * Non-zero sentinel: the copy_to_user() below is gated on exitinfo2
	 * being non-zero, so this guarantees the struct is copied back even
	 * if no handler updates it.
	 */
	input.exitinfo2 = 0xff;

	/* Message version must be non-zero */
	if (!input.msg_version)
		return -EINVAL;

	switch (ioctl) {
	case SNP_GET_REPORT:
		ret = get_report(snp_dev, &input);
		break;
	case SNP_GET_DERIVED_KEY:
		ret = get_derived_key(snp_dev, &input);
		break;
	case SNP_GET_EXT_REPORT:
		/*
		 * As get_ext_report() may be called from the ioctl() path and a
		 * kernel internal path (configfs-tsm), decorate the passed
		 * buffers as user pointers.
		 */
		io.req_data = USER_SOCKPTR((void __user *)input.req_data);
		io.resp_data = USER_SOCKPTR((void __user *)input.resp_data);
		ret = get_ext_report(snp_dev, &input, &io);
		break;
	default:
		break;
	}

	if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
		return -EFAULT;

	return ret;
}
328 
/* File operations for /dev/sev-guest; all requests go through the ioctl. */
static const struct file_operations snp_guest_fops = {
	.owner	= THIS_MODULE,
	.unlocked_ioctl = snp_guest_ioctl,
};
333 
/*
 * Header at the start of a report response buffer: a firmware status code
 * followed by the size of the report payload after the header (consumed by
 * sev_report_new()).
 */
struct snp_msg_report_resp_hdr {
	u32 status;		/* SEV_RET_* firmware status code */
	u32 report_size;	/* bytes of report data following this header */
	u8 rsvd[24];
};
339 
/*
 * One entry of the certificate GUID table returned by the host. An
 * all-zero entry terminates the table (see sev_report_new()).
 */
struct snp_msg_cert_entry {
	guid_t guid;
	u32 offset;	/* byte offset of this certificate within the blob */
	u32 length;	/* certificate length in bytes */
};
345 
/*
 * Generate an attestation report through the SVSM (Secure VM Service
 * Module) protocol instead of directly through the PSP. On success, fills
 * report->outblob (report), report->manifestblob (service manifest) and,
 * when the SVSM supplied certificates, report->auxblob.
 *
 * Returns 0 on success or a negative errno.
 */
static int sev_svsm_report_new(struct tsm_report *report, void *data)
{
	unsigned int rep_len, man_len, certs_len;
	struct tsm_report_desc *desc = &report->desc;
	struct svsm_attest_call ac = {};
	unsigned int retry_count;
	void *rep, *man, *certs;
	struct svsm_call call;
	unsigned int size;
	bool try_again;
	void *buffer;
	u64 call_id;
	int ret;

	/*
	 * Allocate pages for the request:
	 * - Report blob (4K)
	 * - Manifest blob (4K)
	 * - Certificate blob (16K)
	 *
	 * Above addresses must be 4K aligned
	 */
	rep_len = SZ_4K;
	man_len = SZ_4K;
	certs_len = SEV_FW_BLOB_MAX_SIZE;

	/* A null service GUID requests attestation of all services. */
	if (guid_is_null(&desc->service_guid)) {
		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES);
	} else {
		export_guid(ac.service_guid, &desc->service_guid);
		ac.service_manifest_ver = desc->service_manifest_version;

		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SINGLE_SERVICE);
	}

	retry_count = 0;

retry:
	memset(&call, 0, sizeof(call));

	/* One contiguous, zeroed allocation carved into the three buffers. */
	size = rep_len + man_len + certs_len;
	buffer = alloc_pages_exact(size, __GFP_ZERO);
	if (!buffer)
		return -ENOMEM;

	rep = buffer;
	ac.report_buf.pa = __pa(rep);
	ac.report_buf.len = rep_len;

	man = rep + rep_len;
	ac.manifest_buf.pa = __pa(man);
	ac.manifest_buf.len = man_len;

	certs = man + man_len;
	ac.certificates_buf.pa = __pa(certs);
	ac.certificates_buf.len = certs_len;

	ac.nonce.pa = __pa(desc->inblob);
	ac.nonce.len = desc->inblob_len;

	ret = snp_issue_svsm_attest_req(call_id, &call, &ac);
	if (ret) {
		free_pages_exact(buffer, size);

		switch (call.rax_out) {
		case SVSM_ERR_INVALID_PARAMETER:
			/*
			 * On this error the SVSM left the required size in
			 * each buffer's len field; grow any buffer that was
			 * too small and retry (bounded by SVSM_MAX_RETRIES).
			 */
			try_again = false;

			if (ac.report_buf.len > rep_len) {
				rep_len = PAGE_ALIGN(ac.report_buf.len);
				try_again = true;
			}

			if (ac.manifest_buf.len > man_len) {
				man_len = PAGE_ALIGN(ac.manifest_buf.len);
				try_again = true;
			}

			if (ac.certificates_buf.len > certs_len) {
				certs_len = PAGE_ALIGN(ac.certificates_buf.len);
				try_again = true;
			}

			/* If one of the buffers wasn't large enough, retry the request */
			if (try_again && retry_count < SVSM_MAX_RETRIES) {
				retry_count++;
				goto retry;
			}

			return -EINVAL;
		default:
			pr_err_ratelimited("SVSM attestation request failed (%d / 0x%llx)\n",
					   ret, call.rax_out);
			return -EINVAL;
		}
	}

	/*
	 * Allocate all the blob memory buffers at once so that the cleanup is
	 * done for errors that occur after the first allocation (i.e. before
	 * using no_free_ptr()).
	 */
	rep_len = ac.report_buf.len;
	void *rbuf __free(kvfree) = kvzalloc(rep_len, GFP_KERNEL);

	man_len = ac.manifest_buf.len;
	void *mbuf __free(kvfree) = kvzalloc(man_len, GFP_KERNEL);

	certs_len = ac.certificates_buf.len;
	void *cbuf __free(kvfree) = certs_len ? kvzalloc(certs_len, GFP_KERNEL) : NULL;

	if (!rbuf || !mbuf || (certs_len && !cbuf)) {
		free_pages_exact(buffer, size);
		return -ENOMEM;
	}

	/* Hand ownership of each blob to the tsm_report; no_free_ptr() disarms cleanup. */
	memcpy(rbuf, rep, rep_len);
	report->outblob = no_free_ptr(rbuf);
	report->outblob_len = rep_len;

	memcpy(mbuf, man, man_len);
	report->manifestblob = no_free_ptr(mbuf);
	report->manifestblob_len = man_len;

	if (certs_len) {
		memcpy(cbuf, certs, certs_len);
		report->auxblob = no_free_ptr(cbuf);
		report->auxblob_len = certs_len;
	}

	free_pages_exact(buffer, size);

	return 0;
}
480 
/*
 * tsm_report_ops::report_new callback: produce an SNP attestation report
 * (and optional certificate table) for configfs-tsm. Dispatches to the
 * SVSM flow when the "svsm" service provider is requested; otherwise
 * issues an extended guest request via get_ext_report() using kernel
 * sockptrs.
 */
static int sev_report_new(struct tsm_report *report, void *data)
{
	struct snp_msg_cert_entry *cert_table;
	struct tsm_report_desc *desc = &report->desc;
	struct snp_guest_dev *snp_dev = data;
	struct snp_msg_report_resp_hdr hdr;
	const u32 report_size = SZ_4K;
	const u32 ext_size = SEV_FW_BLOB_MAX_SIZE;
	u32 certs_size, i, size = report_size + ext_size;
	int ret;

	if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE)
		return -EINVAL;

	/* "svsm" is the only recognized service provider. */
	if (desc->service_provider) {
		if (strcmp(desc->service_provider, "svsm"))
			return -EINVAL;

		return sev_svsm_report_new(report, data);
	}

	/* One buffer: report response first, certificate table after it. */
	void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cert_table = buf + report_size;
	struct snp_ext_report_req ext_req = {
		.data = { .vmpl = desc->privlevel },
		.certs_address = (__u64)cert_table,
		.certs_len = ext_size,
	};
	memcpy(&ext_req.data.user_data, desc->inblob, desc->inblob_len);

	struct snp_guest_request_ioctl input = {
		.msg_version = 1,
		.req_data = (__u64)&ext_req,
		.resp_data = (__u64)buf,
		.exitinfo2 = 0xff,
	};
	/* Kernel sockptrs: this is the in-kernel caller of get_ext_report(). */
	struct snp_req_resp io = {
		.req_data = KERNEL_SOCKPTR(&ext_req),
		.resp_data = KERNEL_SOCKPTR(buf),
	};

	ret = get_ext_report(snp_dev, &input, &io);
	if (ret)
		return ret;

	/* Translate the firmware status header into errnos. */
	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.status == SEV_RET_INVALID_PARAM)
		return -EINVAL;
	if (hdr.status == SEV_RET_INVALID_KEY)
		return -EINVAL;
	if (hdr.status)
		return -ENXIO;
	if ((hdr.report_size + sizeof(hdr)) > report_size)
		return -ENOMEM;

	void *rbuf __free(kvfree) = kvzalloc(hdr.report_size, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	memcpy(rbuf, buf + sizeof(hdr), hdr.report_size);
	report->outblob = no_free_ptr(rbuf);
	report->outblob_len = hdr.report_size;

	/* Walk the GUID table to find the total extent of the cert payload. */
	certs_size = 0;
	for (i = 0; i < ext_size / sizeof(struct snp_msg_cert_entry); i++) {
		struct snp_msg_cert_entry *ent = &cert_table[i];

		/* An all-zero entry terminates the table. */
		if (guid_is_null(&ent->guid) && !ent->offset && !ent->length)
			break;
		certs_size = max(certs_size, ent->offset + ent->length);
	}

	/* Suspicious that the response populated entries without populating size */
	if (!certs_size && i)
		dev_warn_ratelimited(snp_dev->dev, "certificate slots conveyed without size\n");

	/* No certs to report */
	if (!certs_size)
		return 0;

	/* Suspicious that the certificate blob size contract was violated
	 */
	if (certs_size > ext_size) {
		dev_warn_ratelimited(snp_dev->dev, "certificate data truncated\n");
		certs_size = ext_size;
	}

	void *cbuf __free(kvfree) = kvzalloc(certs_size, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	memcpy(cbuf, cert_table, certs_size);
	report->auxblob = no_free_ptr(cbuf);
	report->auxblob_len = certs_size;

	return 0;
}
581 
sev_report_attr_visible(int n)582 static bool sev_report_attr_visible(int n)
583 {
584 	switch (n) {
585 	case TSM_REPORT_GENERATION:
586 	case TSM_REPORT_PROVIDER:
587 	case TSM_REPORT_PRIVLEVEL:
588 	case TSM_REPORT_PRIVLEVEL_FLOOR:
589 		return true;
590 	case TSM_REPORT_SERVICE_PROVIDER:
591 	case TSM_REPORT_SERVICE_GUID:
592 	case TSM_REPORT_SERVICE_MANIFEST_VER:
593 		return snp_vmpl;
594 	}
595 
596 	return false;
597 }
598 
sev_report_bin_attr_visible(int n)599 static bool sev_report_bin_attr_visible(int n)
600 {
601 	switch (n) {
602 	case TSM_REPORT_INBLOB:
603 	case TSM_REPORT_OUTBLOB:
604 	case TSM_REPORT_AUXBLOB:
605 		return true;
606 	case TSM_REPORT_MANIFESTBLOB:
607 		return snp_vmpl;
608 	}
609 
610 	return false;
611 }
612 
/* configfs-tsm provider ops; privlevel_floor is filled in at probe time. */
static struct tsm_report_ops sev_tsm_report_ops = {
	.name = KBUILD_MODNAME,
	.report_new = sev_report_new,
	.report_attr_visible = sev_report_attr_visible,
	.report_bin_attr_visible = sev_report_bin_attr_visible,
};
619 
/* devm action callback: drop the TSM report provider registration. */
static void unregister_sev_tsm(void *data)
{
	tsm_report_unregister(&sev_tsm_report_ops);
}
624 
/*
 * Probe the platform device: allocate per-device state and the SNP message
 * descriptor, then register the TSM report provider and the
 * /dev/sev-guest misc device.
 */
static int __init sev_guest_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct snp_guest_dev *snp_dev;
	struct snp_msg_desc *mdesc;
	struct miscdevice *misc;
	int ret;

	BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE);

	/* This driver is only meaningful on SEV-SNP guests. */
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return -ENODEV;

	/* devm-managed: freed automatically on probe failure or device removal. */
	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
	if (!snp_dev)
		return -ENOMEM;

	mdesc = snp_msg_alloc();
	if (IS_ERR_OR_NULL(mdesc))
		return -ENOMEM;

	ret = snp_msg_init(mdesc, vmpck_id);
	if (ret)
		goto e_msg_init;

	platform_set_drvdata(pdev, snp_dev);
	snp_dev->dev = dev;

	misc = &snp_dev->misc;
	misc->minor = MISC_DYNAMIC_MINOR;
	misc->name = DEVICE_NAME;
	misc->fops = &snp_guest_fops;

	/* Set the privlevel_floor attribute based on the vmpck_id */
	sev_tsm_report_ops.privlevel_floor = mdesc->vmpck_id;

	ret = tsm_report_register(&sev_tsm_report_ops, snp_dev);
	if (ret)
		goto e_msg_init;

	/* Unregister the TSM ops automatically on probe failure or unbind. */
	ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL);
	if (ret)
		goto e_msg_init;

	ret =  misc_register(misc);
	if (ret)
		goto e_msg_init;

	snp_dev->msg_desc = mdesc;
	dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n",
		 mdesc->vmpck_id);
	return 0;

e_msg_init:
	snp_msg_free(mdesc);

	return ret;
}
683 
sev_guest_remove(struct platform_device * pdev)684 static void __exit sev_guest_remove(struct platform_device *pdev)
685 {
686 	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
687 
688 	snp_msg_free(snp_dev->msg_desc);
689 	misc_deregister(&snp_dev->misc);
690 }
691 
692 /*
693  * This driver is meant to be a common SEV guest interface driver and to
694  * support any SEV guest API. As such, even though it has been introduced
695  * with the SEV-SNP support, it is named "sev-guest".
696  *
697  * sev_guest_remove() lives in .exit.text. For drivers registered via
698  * module_platform_driver_probe() this is ok because they cannot get unbound
699  * at runtime. So mark the driver struct with __refdata to prevent modpost
700  * triggering a section mismatch warning.
701  */
static struct platform_driver sev_guest_driver __refdata = {
	.remove		= __exit_p(sev_guest_remove),
	.driver		= {
		.name = "sev-guest",
	},
};

/* Register the driver; probing happens exactly once at registration. */
module_platform_driver_probe(sev_guest_driver, sev_guest_probe);

MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD SEV Guest Driver");
MODULE_ALIAS("platform:sev-guest");
716