xref: /linux/drivers/crypto/ccp/sev-dev.c (revision 249872f53d64441690927853e9d3af36394802d5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AMD Secure Encrypted Virtualization (SEV) interface
4  *
5  * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
6  *
7  * Author: Brijesh Singh <brijesh.singh@amd.com>
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/kthread.h>
14 #include <linux/sched.h>
15 #include <linux/interrupt.h>
16 #include <linux/spinlock.h>
17 #include <linux/spinlock_types.h>
18 #include <linux/types.h>
19 #include <linux/mutex.h>
20 #include <linux/delay.h>
21 #include <linux/hw_random.h>
22 #include <linux/ccp.h>
23 #include <linux/firmware.h>
24 #include <linux/panic_notifier.h>
25 #include <linux/gfp.h>
26 #include <linux/cpufeature.h>
27 #include <linux/fs.h>
28 #include <linux/fs_struct.h>
29 #include <linux/psp.h>
30 #include <linux/amd-iommu.h>
31 #include <linux/crash_dump.h>
32 
33 #include <asm/smp.h>
34 #include <asm/cacheflush.h>
35 #include <asm/e820/types.h>
36 #include <asm/sev.h>
37 #include <asm/msr.h>
38 
39 #include "psp-dev.h"
40 #include "sev-dev.h"
41 
42 #define DEVICE_NAME		"sev"
43 #define SEV_FW_FILE		"amd/sev.fw"
44 #define SEV_FW_NAME_SIZE	64
45 
46 /* Minimum firmware version required for the SEV-SNP support */
47 #define SNP_MIN_API_MAJOR	1
48 #define SNP_MIN_API_MINOR	51
49 
50 /*
51  * Maximum number of firmware-writable buffers that might be specified
52  * in the parameters of a legacy SEV command buffer.
53  */
54 #define CMD_BUF_FW_WRITABLE_MAX 2
55 
56 /* Leave room in the descriptor array for an end-of-list indicator. */
57 #define CMD_BUF_DESC_MAX (CMD_BUF_FW_WRITABLE_MAX + 1)
58 
59 static DEFINE_MUTEX(sev_cmd_mutex);
60 static struct sev_misc_dev *misc_dev;
61 
62 static int psp_cmd_timeout = 100;
63 module_param(psp_cmd_timeout, int, 0644);
64 MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
65 
66 static int psp_probe_timeout = 5;
67 module_param(psp_probe_timeout, int, 0644);
68 MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
69 
70 static char *init_ex_path;
71 module_param(init_ex_path, charp, 0444);
72 MODULE_PARM_DESC(init_ex_path, " Path for INIT_EX data; if set try INIT_EX");
73 
74 static bool psp_init_on_probe = true;
75 module_param(psp_init_on_probe, bool, 0444);
76 MODULE_PARM_DESC(psp_init_on_probe, "  if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");
77 
78 #if IS_ENABLED(CONFIG_PCI_TSM)
79 static bool sev_tio_enabled = true;
80 module_param_named(tio, sev_tio_enabled, bool, 0444);
81 MODULE_PARM_DESC(tio, "Enables TIO in SNP_INIT_EX");
82 #else
83 static const bool sev_tio_enabled = false;
84 #endif
85 
86 MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
87 MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
88 MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
89 MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */
90 
91 static bool psp_dead;
92 static int psp_timeout;
93 
94 enum snp_hv_fixed_pages_state {
95 	ALLOCATED,
96 	HV_FIXED,
97 };
98 
99 struct snp_hv_fixed_pages_entry {
100 	struct list_head list;
101 	struct page *page;
102 	unsigned int order;
103 	bool free;
104 	enum snp_hv_fixed_pages_state page_state;
105 };
106 
107 static LIST_HEAD(snp_hv_fixed_pages);
108 
109 /* Trusted Memory Region (TMR):
110  *   The TMR is a 1MB area that must be 1MB aligned.  Use the page allocator
111  *   to allocate the memory, which will return aligned memory for the specified
112  *   allocation order.
113  *
114  * When SEV-SNP is enabled the TMR needs to be 2MB aligned and 2MB sized.
115  */
116 #define SEV_TMR_SIZE		(1024 * 1024)
117 #define SNP_TMR_SIZE		(2 * 1024 * 1024)
118 
119 static void *sev_es_tmr;
120 static size_t sev_es_tmr_size = SEV_TMR_SIZE;
121 
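/*
 * Editorial sketch (not part of this file): the alignment property the
 * comment above relies on. The buddy allocator returns blocks naturally
 * aligned to their order, so get_order(SEV_TMR_SIZE) == 8 (1MB in 4KB
 * pages) yields a 1MB-aligned region, and get_order(SNP_TMR_SIZE) == 9
 * yields a 2MB-aligned one. The helper name below is hypothetical.
 */
static void *tmr_alloc_sketch(size_t size)
{
	struct page *page = alloc_pages(GFP_KERNEL, get_order(size));

	return page ? page_address(page) : NULL;	/* naturally size-aligned */
}
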
122 /* INIT_EX NV Storage:
123  *   The NV Storage is a 32KB area and must be 4KB page aligned.  Use the page
124  *   allocator to allocate the memory, which will return aligned memory for the
125  *   specified allocation order.
126  */
127 #define NV_LENGTH (32 * 1024)
128 static void *sev_init_ex_buffer;
129 
130 /*
131  * SEV_DATA_RANGE_LIST:
132  *   Array containing range of pages that firmware transitions to HV-fixed
133  *   page state.
134  */
135 static struct sev_data_range_list *snp_range_list;
136 
137 static void __sev_firmware_shutdown(struct sev_device *sev, bool panic);
138 
139 static int snp_shutdown_on_panic(struct notifier_block *nb,
140 				 unsigned long reason, void *arg);
141 
142 static struct notifier_block snp_panic_notifier = {
143 	.notifier_call = snp_shutdown_on_panic,
144 };
145 
146 static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
147 {
148 	struct sev_device *sev = psp_master->sev_data;
149 
150 	if (sev->api_major > maj)
151 		return true;
152 
153 	if (sev->api_major == maj && sev->api_minor >= min)
154 		return true;
155 
156 	return false;
157 }
158 
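/*
 * Illustrative use of the helper above (hypothetical snippet, not part
 * of this file): SNP bring-up is gated on the 1.51 firmware minimum in
 * exactly this way in __sev_snp_init_locked() further below.
 */
static inline bool sev_fw_is_snp_capable_sketch(void)
{
	return sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR);
}
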
159 static void sev_irq_handler(int irq, void *data, unsigned int status)
160 {
161 	struct sev_device *sev = data;
162 	int reg;
163 
164 	/* Check if it is command completion: */
165 	if (!(status & SEV_CMD_COMPLETE))
166 		return;
167 
168 	/* Check if it is SEV command completion: */
169 	reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
170 	if (FIELD_GET(PSP_CMDRESP_RESP, reg)) {
171 		sev->int_rcvd = 1;
172 		wake_up(&sev->int_queue);
173 	}
174 }
175 
176 static int sev_wait_cmd_ioc(struct sev_device *sev,
177 			    unsigned int *reg, unsigned int timeout)
178 {
179 	int ret;
180 
181 	/*
182 	 * If invoked during panic handling, local interrupts are disabled,
183 	 * so the PSP command completion interrupt can't be used. Poll for
184 	 * PSP command completion instead.
185 	 */
186 	if (irqs_disabled()) {
187 		unsigned long timeout_usecs = (timeout * USEC_PER_SEC) / 10;
188 
189 		/* Poll for SEV command completion: */
190 		while (timeout_usecs--) {
191 			*reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
192 			if (*reg & PSP_CMDRESP_RESP)
193 				return 0;
194 
195 			udelay(10);
196 		}
197 		return -ETIMEDOUT;
198 	}
199 
200 	ret = wait_event_timeout(sev->int_queue,
201 			sev->int_rcvd, timeout * HZ);
202 	if (!ret)
203 		return -ETIMEDOUT;
204 
205 	*reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
206 
207 	return 0;
208 }
209 
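/*
 * Worked example of the polling budget above (editorial note): with the
 * default psp_cmd_timeout of 100 seconds, timeout_usecs =
 * (100 * USEC_PER_SEC) / 10 = 10,000,000 loop iterations, each ending
 * in udelay(10), i.e. ~100 seconds of polling -- the same budget the
 * interrupt path gets from wait_event_timeout(..., timeout * HZ).
 */
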
210 static int sev_cmd_buffer_len(int cmd)
211 {
212 	switch (cmd) {
213 	case SEV_CMD_INIT:			return sizeof(struct sev_data_init);
214 	case SEV_CMD_INIT_EX:                   return sizeof(struct sev_data_init_ex);
215 	case SEV_CMD_SNP_SHUTDOWN_EX:		return sizeof(struct sev_data_snp_shutdown_ex);
216 	case SEV_CMD_SNP_INIT_EX:		return sizeof(struct sev_data_snp_init_ex);
217 	case SEV_CMD_PLATFORM_STATUS:		return sizeof(struct sev_user_data_status);
218 	case SEV_CMD_PEK_CSR:			return sizeof(struct sev_data_pek_csr);
219 	case SEV_CMD_PEK_CERT_IMPORT:		return sizeof(struct sev_data_pek_cert_import);
220 	case SEV_CMD_PDH_CERT_EXPORT:		return sizeof(struct sev_data_pdh_cert_export);
221 	case SEV_CMD_LAUNCH_START:		return sizeof(struct sev_data_launch_start);
222 	case SEV_CMD_LAUNCH_UPDATE_DATA:	return sizeof(struct sev_data_launch_update_data);
223 	case SEV_CMD_LAUNCH_UPDATE_VMSA:	return sizeof(struct sev_data_launch_update_vmsa);
224 	case SEV_CMD_LAUNCH_FINISH:		return sizeof(struct sev_data_launch_finish);
225 	case SEV_CMD_LAUNCH_MEASURE:		return sizeof(struct sev_data_launch_measure);
226 	case SEV_CMD_ACTIVATE:			return sizeof(struct sev_data_activate);
227 	case SEV_CMD_DEACTIVATE:		return sizeof(struct sev_data_deactivate);
228 	case SEV_CMD_DECOMMISSION:		return sizeof(struct sev_data_decommission);
229 	case SEV_CMD_GUEST_STATUS:		return sizeof(struct sev_data_guest_status);
230 	case SEV_CMD_DBG_DECRYPT:		return sizeof(struct sev_data_dbg);
231 	case SEV_CMD_DBG_ENCRYPT:		return sizeof(struct sev_data_dbg);
232 	case SEV_CMD_SEND_START:		return sizeof(struct sev_data_send_start);
233 	case SEV_CMD_SEND_UPDATE_DATA:		return sizeof(struct sev_data_send_update_data);
234 	case SEV_CMD_SEND_UPDATE_VMSA:		return sizeof(struct sev_data_send_update_vmsa);
235 	case SEV_CMD_SEND_FINISH:		return sizeof(struct sev_data_send_finish);
236 	case SEV_CMD_RECEIVE_START:		return sizeof(struct sev_data_receive_start);
237 	case SEV_CMD_RECEIVE_FINISH:		return sizeof(struct sev_data_receive_finish);
238 	case SEV_CMD_RECEIVE_UPDATE_DATA:	return sizeof(struct sev_data_receive_update_data);
239 	case SEV_CMD_RECEIVE_UPDATE_VMSA:	return sizeof(struct sev_data_receive_update_vmsa);
240 	case SEV_CMD_LAUNCH_UPDATE_SECRET:	return sizeof(struct sev_data_launch_secret);
241 	case SEV_CMD_DOWNLOAD_FIRMWARE:		return sizeof(struct sev_data_download_firmware);
242 	case SEV_CMD_GET_ID:			return sizeof(struct sev_data_get_id);
243 	case SEV_CMD_ATTESTATION_REPORT:	return sizeof(struct sev_data_attestation_report);
244 	case SEV_CMD_SEND_CANCEL:		return sizeof(struct sev_data_send_cancel);
245 	case SEV_CMD_SNP_GCTX_CREATE:		return sizeof(struct sev_data_snp_addr);
246 	case SEV_CMD_SNP_LAUNCH_START:		return sizeof(struct sev_data_snp_launch_start);
247 	case SEV_CMD_SNP_LAUNCH_UPDATE:		return sizeof(struct sev_data_snp_launch_update);
248 	case SEV_CMD_SNP_ACTIVATE:		return sizeof(struct sev_data_snp_activate);
249 	case SEV_CMD_SNP_DECOMMISSION:		return sizeof(struct sev_data_snp_addr);
250 	case SEV_CMD_SNP_PAGE_RECLAIM:		return sizeof(struct sev_data_snp_page_reclaim);
251 	case SEV_CMD_SNP_GUEST_STATUS:		return sizeof(struct sev_data_snp_guest_status);
252 	case SEV_CMD_SNP_LAUNCH_FINISH:		return sizeof(struct sev_data_snp_launch_finish);
253 	case SEV_CMD_SNP_DBG_DECRYPT:		return sizeof(struct sev_data_snp_dbg);
254 	case SEV_CMD_SNP_DBG_ENCRYPT:		return sizeof(struct sev_data_snp_dbg);
255 	case SEV_CMD_SNP_PAGE_UNSMASH:		return sizeof(struct sev_data_snp_page_unsmash);
256 	case SEV_CMD_SNP_PLATFORM_STATUS:	return sizeof(struct sev_data_snp_addr);
257 	case SEV_CMD_SNP_GUEST_REQUEST:		return sizeof(struct sev_data_snp_guest_request);
258 	case SEV_CMD_SNP_CONFIG:		return sizeof(struct sev_user_data_snp_config);
259 	case SEV_CMD_SNP_COMMIT:		return sizeof(struct sev_data_snp_commit);
260 	case SEV_CMD_SNP_FEATURE_INFO:		return sizeof(struct sev_data_snp_feature_info);
261 	case SEV_CMD_SNP_VLEK_LOAD:		return sizeof(struct sev_user_data_snp_vlek_load);
262 	default:				return sev_tio_cmd_buffer_len(cmd);
263 	}
264 
265 	return 0;
266 }
267 
268 static struct file *open_file_as_root(const char *filename, int flags, umode_t mode)
269 {
270 	struct path root __free(path_put) = {};
271 
272 	task_lock(&init_task);
273 	get_fs_root(init_task.fs, &root);
274 	task_unlock(&init_task);
275 
276 	CLASS(prepare_creds, cred)();
277 	if (!cred)
278 		return ERR_PTR(-ENOMEM);
279 
280 	cred->fsuid = GLOBAL_ROOT_UID;
281 
282 	scoped_with_creds(cred)
283 		return file_open_root(&root, filename, flags, mode);
284 }
285 
286 static int sev_read_init_ex_file(void)
287 {
288 	struct sev_device *sev = psp_master->sev_data;
289 	struct file *fp;
290 	ssize_t nread;
291 
292 	lockdep_assert_held(&sev_cmd_mutex);
293 
294 	if (!sev_init_ex_buffer)
295 		return -EOPNOTSUPP;
296 
297 	fp = open_file_as_root(init_ex_path, O_RDONLY, 0);
298 	if (IS_ERR(fp)) {
299 		int ret = PTR_ERR(fp);
300 
301 		if (ret == -ENOENT) {
302 			dev_info(sev->dev,
303 				"SEV: %s does not exist and will be created later.\n",
304 				init_ex_path);
305 			ret = 0;
306 		} else {
307 			dev_err(sev->dev,
308 				"SEV: could not open %s for read, error %d\n",
309 				init_ex_path, ret);
310 		}
311 		return ret;
312 	}
313 
314 	nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL);
315 	if (nread != NV_LENGTH) {
316 		dev_info(sev->dev,
317 			"SEV: could not read %u bytes to non volatile memory area, ret %ld\n",
318 			NV_LENGTH, nread);
319 	}
320 
321 	dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread);
322 	filp_close(fp, NULL);
323 
324 	return 0;
325 }
326 
327 static int sev_write_init_ex_file(void)
328 {
329 	struct sev_device *sev = psp_master->sev_data;
330 	struct file *fp;
331 	loff_t offset = 0;
332 	ssize_t nwrite;
333 
334 	lockdep_assert_held(&sev_cmd_mutex);
335 
336 	if (!sev_init_ex_buffer)
337 		return 0;
338 
339 	fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600);
340 	if (IS_ERR(fp)) {
341 		int ret = PTR_ERR(fp);
342 
343 		dev_err(sev->dev,
344 			"SEV: could not open file for write, error %d\n",
345 			ret);
346 		return ret;
347 	}
348 
349 	nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset);
350 	vfs_fsync(fp, 0);
351 	filp_close(fp, NULL);
352 
353 	if (nwrite != NV_LENGTH) {
354 		dev_err(sev->dev,
355 			"SEV: failed to write %u bytes to non volatile memory area, ret %ld\n",
356 			NV_LENGTH, nwrite);
357 		return -EIO;
358 	}
359 
360 	dev_dbg(sev->dev, "SEV: write successful to NV file\n");
361 
362 	return 0;
363 }
364 
365 static int sev_write_init_ex_file_if_required(int cmd_id)
366 {
367 	lockdep_assert_held(&sev_cmd_mutex);
368 
369 	if (!sev_init_ex_buffer)
370 		return 0;
371 
372 	/*
373 	 * Only a few platform commands modify the SPI/NV area, but none of the
374 	 * non-platform commands do. Only INIT(_EX), FACTORY_RESET, PEK_GEN,
375 	 * PEK_CERT_IMPORT, and PDH_GEN do.
376 	 */
377 	switch (cmd_id) {
378 	case SEV_CMD_FACTORY_RESET:
379 	case SEV_CMD_INIT_EX:
380 	case SEV_CMD_PDH_GEN:
381 	case SEV_CMD_PEK_CERT_IMPORT:
382 	case SEV_CMD_PEK_GEN:
383 		break;
384 	default:
385 		return 0;
386 	}
387 
388 	return sev_write_init_ex_file();
389 }
390 
391 int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
392 {
393 	int ret, err, i;
394 
395 	paddr = __sme_clr(ALIGN_DOWN(paddr, PAGE_SIZE));
396 
397 	for (i = 0; i < npages; i++, paddr += PAGE_SIZE) {
398 		struct sev_data_snp_page_reclaim data = {0};
399 
400 		data.paddr = paddr;
401 
402 		if (locked)
403 			ret = __sev_do_cmd_locked(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
404 		else
405 			ret = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
406 
407 		if (ret)
408 			goto cleanup;
409 
410 		ret = rmp_make_shared(__phys_to_pfn(paddr), PG_LEVEL_4K);
411 		if (ret)
412 			goto cleanup;
413 	}
414 
415 	return 0;
416 
417 cleanup:
418 	/*
419 	 * If there was a failure reclaiming the page then it is no longer safe
420 	 * to release it back to the system; leak it instead.
421 	 */
422 	snp_leak_pages(__phys_to_pfn(paddr), npages - i);
423 	return ret;
424 }
425 EXPORT_SYMBOL_GPL(snp_reclaim_pages);
426 
427 static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked)
428 {
429 	unsigned long pfn = __sme_clr(paddr) >> PAGE_SHIFT;
430 	int rc, i;
431 
432 	for (i = 0; i < npages; i++, pfn++) {
433 		rc = rmp_make_private(pfn, 0, PG_LEVEL_4K, 0, true);
434 		if (rc)
435 			goto cleanup;
436 	}
437 
438 	return 0;
439 
440 cleanup:
441 	/*
442 	 * Try unrolling the firmware state changes by
443 	 * reclaiming the pages which were already changed to the
444 	 * firmware state.
445 	 */
446 	snp_reclaim_pages(paddr, i, locked);
447 
448 	return rc;
449 }
450 
451 static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order, bool locked)
452 {
453 	unsigned long npages = 1ul << order, paddr;
454 	struct sev_device *sev;
455 	struct page *page;
456 
457 	if (!psp_master || !psp_master->sev_data)
458 		return NULL;
459 
460 	page = alloc_pages(gfp_mask, order);
461 	if (!page)
462 		return NULL;
463 
464 	/* If SEV-SNP is initialized then add the page in RMP table. */
465 	sev = psp_master->sev_data;
466 	if (!sev->snp_initialized)
467 		return page;
468 
469 	paddr = __pa((unsigned long)page_address(page));
470 	if (rmp_mark_pages_firmware(paddr, npages, locked))
471 		return NULL;
472 
473 	return page;
474 }
475 
476 void *snp_alloc_firmware_page(gfp_t gfp_mask)
477 {
478 	struct page *page;
479 
480 	page = __snp_alloc_firmware_pages(gfp_mask, 0, false);
481 
482 	return page ? page_address(page) : NULL;
483 }
484 EXPORT_SYMBOL_GPL(snp_alloc_firmware_page);
485 
486 static void __snp_free_firmware_pages(struct page *page, int order, bool locked)
487 {
488 	struct sev_device *sev = psp_master->sev_data;
489 	unsigned long paddr, npages = 1ul << order;
490 
491 	if (!page)
492 		return;
493 
494 	paddr = __pa((unsigned long)page_address(page));
495 	if (sev->snp_initialized &&
496 	    snp_reclaim_pages(paddr, npages, locked))
497 		return;
498 
499 	__free_pages(page, order);
500 }
501 
502 void snp_free_firmware_page(void *addr)
503 {
504 	if (!addr)
505 		return;
506 
507 	__snp_free_firmware_pages(virt_to_page(addr), 0, false);
508 }
509 EXPORT_SYMBOL_GPL(snp_free_firmware_page);
510 
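/*
 * Illustrative round-trip through the exported helpers above (the
 * function name is hypothetical, not an actual caller): callers such
 * as KVM use this pair to obtain pages that are safe to hand to SNP
 * firmware.
 */
static int snp_fw_page_roundtrip_sketch(void)
{
	void *buf = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);

	if (!buf)
		return -ENOMEM;

	/* ... pass __psp_pa(buf) to a firmware command here ... */

	/* Reclaims the page from the RMP (if SNP is live) before freeing. */
	snp_free_firmware_page(buf);

	return 0;
}
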
511 static void *sev_fw_alloc(unsigned long len)
512 {
513 	struct page *page;
514 
515 	page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len), true);
516 	if (!page)
517 		return NULL;
518 
519 	return page_address(page);
520 }
521 
522 /**
523  * struct cmd_buf_desc - descriptors for managing legacy SEV command address
524  * parameters corresponding to buffers that may be written to by firmware.
525  *
526  * @paddr_ptr:  pointer to the address parameter in the command buffer which may
527  *              need to be saved/restored depending on whether a bounce buffer
528  *              is used. In the case of a bounce buffer, the command buffer
529  *              needs to be updated with the address of the new bounce buffer
530  *              snp_map_cmd_buf_desc() has allocated specifically for it. Must
531  *              be NULL if this descriptor is only an end-of-list indicator.
532  *
533  * @paddr_orig: storage for the original address parameter, which can be used to
534  *              restore the original value in @paddr_ptr in cases where it is
535  *              replaced with the address of a bounce buffer.
536  *
537  * @len: length of buffer located at the address originally stored at @paddr_ptr
538  *
539  * @guest_owned: true if the address corresponds to guest-owned pages, in which
540  *               case bounce buffers are not needed.
541  */
542 struct cmd_buf_desc {
543 	u64 *paddr_ptr;
544 	u64 paddr_orig;
545 	u32 len;
546 	bool guest_owned;
547 };
548 
549 /*
550  * If a legacy SEV command parameter is a memory address, those pages in
551  * turn need to be transitioned to/from firmware-owned before/after
552  * executing the firmware command.
553  *
554  * Additionally, in cases where those pages are not guest-owned, a bounce
555  * buffer is needed in place of the original memory address parameter.
556  *
557  * A set of descriptors are used to keep track of this handling, and
558  * initialized here based on the specific commands being executed.
559  */
560 static void snp_populate_cmd_buf_desc_list(int cmd, void *cmd_buf,
561 					   struct cmd_buf_desc *desc_list)
562 {
563 	switch (cmd) {
564 	case SEV_CMD_PDH_CERT_EXPORT: {
565 		struct sev_data_pdh_cert_export *data = cmd_buf;
566 
567 		desc_list[0].paddr_ptr = &data->pdh_cert_address;
568 		desc_list[0].len = data->pdh_cert_len;
569 		desc_list[1].paddr_ptr = &data->cert_chain_address;
570 		desc_list[1].len = data->cert_chain_len;
571 		break;
572 	}
573 	case SEV_CMD_GET_ID: {
574 		struct sev_data_get_id *data = cmd_buf;
575 
576 		desc_list[0].paddr_ptr = &data->address;
577 		desc_list[0].len = data->len;
578 		break;
579 	}
580 	case SEV_CMD_PEK_CSR: {
581 		struct sev_data_pek_csr *data = cmd_buf;
582 
583 		desc_list[0].paddr_ptr = &data->address;
584 		desc_list[0].len = data->len;
585 		break;
586 	}
587 	case SEV_CMD_LAUNCH_UPDATE_DATA: {
588 		struct sev_data_launch_update_data *data = cmd_buf;
589 
590 		desc_list[0].paddr_ptr = &data->address;
591 		desc_list[0].len = data->len;
592 		desc_list[0].guest_owned = true;
593 		break;
594 	}
595 	case SEV_CMD_LAUNCH_UPDATE_VMSA: {
596 		struct sev_data_launch_update_vmsa *data = cmd_buf;
597 
598 		desc_list[0].paddr_ptr = &data->address;
599 		desc_list[0].len = data->len;
600 		desc_list[0].guest_owned = true;
601 		break;
602 	}
603 	case SEV_CMD_LAUNCH_MEASURE: {
604 		struct sev_data_launch_measure *data = cmd_buf;
605 
606 		desc_list[0].paddr_ptr = &data->address;
607 		desc_list[0].len = data->len;
608 		break;
609 	}
610 	case SEV_CMD_LAUNCH_UPDATE_SECRET: {
611 		struct sev_data_launch_secret *data = cmd_buf;
612 
613 		desc_list[0].paddr_ptr = &data->guest_address;
614 		desc_list[0].len = data->guest_len;
615 		desc_list[0].guest_owned = true;
616 		break;
617 	}
618 	case SEV_CMD_DBG_DECRYPT: {
619 		struct sev_data_dbg *data = cmd_buf;
620 
621 		desc_list[0].paddr_ptr = &data->dst_addr;
622 		desc_list[0].len = data->len;
623 		desc_list[0].guest_owned = true;
624 		break;
625 	}
626 	case SEV_CMD_DBG_ENCRYPT: {
627 		struct sev_data_dbg *data = cmd_buf;
628 
629 		desc_list[0].paddr_ptr = &data->dst_addr;
630 		desc_list[0].len = data->len;
631 		desc_list[0].guest_owned = true;
632 		break;
633 	}
634 	case SEV_CMD_ATTESTATION_REPORT: {
635 		struct sev_data_attestation_report *data = cmd_buf;
636 
637 		desc_list[0].paddr_ptr = &data->address;
638 		desc_list[0].len = data->len;
639 		break;
640 	}
641 	case SEV_CMD_SEND_START: {
642 		struct sev_data_send_start *data = cmd_buf;
643 
644 		desc_list[0].paddr_ptr = &data->session_address;
645 		desc_list[0].len = data->session_len;
646 		break;
647 	}
648 	case SEV_CMD_SEND_UPDATE_DATA: {
649 		struct sev_data_send_update_data *data = cmd_buf;
650 
651 		desc_list[0].paddr_ptr = &data->hdr_address;
652 		desc_list[0].len = data->hdr_len;
653 		desc_list[1].paddr_ptr = &data->trans_address;
654 		desc_list[1].len = data->trans_len;
655 		break;
656 	}
657 	case SEV_CMD_SEND_UPDATE_VMSA: {
658 		struct sev_data_send_update_vmsa *data = cmd_buf;
659 
660 		desc_list[0].paddr_ptr = &data->hdr_address;
661 		desc_list[0].len = data->hdr_len;
662 		desc_list[1].paddr_ptr = &data->trans_address;
663 		desc_list[1].len = data->trans_len;
664 		break;
665 	}
666 	case SEV_CMD_RECEIVE_UPDATE_DATA: {
667 		struct sev_data_receive_update_data *data = cmd_buf;
668 
669 		desc_list[0].paddr_ptr = &data->guest_address;
670 		desc_list[0].len = data->guest_len;
671 		desc_list[0].guest_owned = true;
672 		break;
673 	}
674 	case SEV_CMD_RECEIVE_UPDATE_VMSA: {
675 		struct sev_data_receive_update_vmsa *data = cmd_buf;
676 
677 		desc_list[0].paddr_ptr = &data->guest_address;
678 		desc_list[0].len = data->guest_len;
679 		desc_list[0].guest_owned = true;
680 		break;
681 	}
682 	default:
683 		break;
684 	}
685 }
686 
687 static int snp_map_cmd_buf_desc(struct cmd_buf_desc *desc)
688 {
689 	unsigned int npages;
690 
691 	if (!desc->len)
692 		return 0;
693 
694 	/* Allocate a bounce buffer if this isn't a guest owned page. */
695 	if (!desc->guest_owned) {
696 		struct page *page;
697 
698 		page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(desc->len));
699 		if (!page) {
700 			pr_warn("Failed to allocate bounce buffer for SEV legacy command.\n");
701 			return -ENOMEM;
702 		}
703 
704 		desc->paddr_orig = *desc->paddr_ptr;
705 		*desc->paddr_ptr = __psp_pa(page_to_virt(page));
706 	}
707 
708 	npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT;
709 
710 	/* Transition the buffer to firmware-owned. */
711 	if (rmp_mark_pages_firmware(*desc->paddr_ptr, npages, true)) {
712 		pr_warn("Error moving pages to firmware-owned state for SEV legacy command.\n");
713 		return -EFAULT;
714 	}
715 
716 	return 0;
717 }
718 
719 static int snp_unmap_cmd_buf_desc(struct cmd_buf_desc *desc)
720 {
721 	unsigned int npages;
722 
723 	if (!desc->len)
724 		return 0;
725 
726 	npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT;
727 
728 	/* Transition the buffers back to hypervisor-owned. */
729 	if (snp_reclaim_pages(*desc->paddr_ptr, npages, true)) {
730 		pr_warn("Failed to reclaim firmware-owned pages while issuing SEV legacy command.\n");
731 		return -EFAULT;
732 	}
733 
734 	/* Copy data from bounce buffer and then free it. */
735 	if (!desc->guest_owned) {
736 		void *bounce_buf = __va(__sme_clr(*desc->paddr_ptr));
737 		void *dst_buf = __va(__sme_clr(desc->paddr_orig));
738 
739 		memcpy(dst_buf, bounce_buf, desc->len);
740 		__free_pages(virt_to_page(bounce_buf), get_order(desc->len));
741 
742 		/* Restore the original address in the command buffer. */
743 		*desc->paddr_ptr = desc->paddr_orig;
744 	}
745 
746 	return 0;
747 }
748 
749 static int snp_map_cmd_buf_desc_list(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list)
750 {
751 	int i;
752 
753 	snp_populate_cmd_buf_desc_list(cmd, cmd_buf, desc_list);
754 
755 	for (i = 0; i < CMD_BUF_DESC_MAX; i++) {
756 		struct cmd_buf_desc *desc = &desc_list[i];
757 
758 		if (!desc->paddr_ptr)
759 			break;
760 
761 		if (snp_map_cmd_buf_desc(desc))
762 			goto err_unmap;
763 	}
764 
765 	return 0;
766 
767 err_unmap:
768 	for (i--; i >= 0; i--)
769 		snp_unmap_cmd_buf_desc(&desc_list[i]);
770 
771 	return -EFAULT;
772 }
773 
774 static int snp_unmap_cmd_buf_desc_list(struct cmd_buf_desc *desc_list)
775 {
776 	int i, ret = 0;
777 
778 	for (i = 0; i < CMD_BUF_DESC_MAX; i++) {
779 		struct cmd_buf_desc *desc = &desc_list[i];
780 
781 		if (!desc->paddr_ptr)
782 			break;
783 
784 		if (snp_unmap_cmd_buf_desc(&desc_list[i]))
785 			ret = -EFAULT;
786 	}
787 
788 	return ret;
789 }
790 
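/*
 * Condensed illustration (hypothetical wrapper, not a real call path)
 * of how the descriptor helpers above bracket a legacy SEV command
 * while SNP is initialized; __sev_do_cmd_locked() below performs this
 * sequencing internally via snp_prep_cmd_buf()/snp_reclaim_cmd_buf().
 */
static int snp_legacy_bracket_sketch(int cmd, void *cmd_buf)
{
	struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0};

	/* Bounce non-guest-owned buffers and make the pages firmware-owned. */
	if (snp_map_cmd_buf_desc_list(cmd, cmd_buf, desc_list))
		return -EFAULT;

	/* ... issue the firmware command against cmd_buf here ... */

	/* Reclaim pages, copy bounce buffers back, restore addresses. */
	return snp_unmap_cmd_buf_desc_list(desc_list);
}
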
791 static bool sev_cmd_buf_writable(int cmd)
792 {
793 	switch (cmd) {
794 	case SEV_CMD_PLATFORM_STATUS:
795 	case SEV_CMD_GUEST_STATUS:
796 	case SEV_CMD_LAUNCH_START:
797 	case SEV_CMD_RECEIVE_START:
798 	case SEV_CMD_LAUNCH_MEASURE:
799 	case SEV_CMD_SEND_START:
800 	case SEV_CMD_SEND_UPDATE_DATA:
801 	case SEV_CMD_SEND_UPDATE_VMSA:
802 	case SEV_CMD_PEK_CSR:
803 	case SEV_CMD_PDH_CERT_EXPORT:
804 	case SEV_CMD_GET_ID:
805 	case SEV_CMD_ATTESTATION_REPORT:
806 		return true;
807 	default:
808 		return false;
809 	}
810 }
811 
812 /* After SNP is INIT'ed, the behavior of legacy SEV commands is changed. */
813 static bool snp_legacy_handling_needed(int cmd)
814 {
815 	struct sev_device *sev = psp_master->sev_data;
816 
817 	return cmd < SEV_CMD_SNP_INIT && sev->snp_initialized;
818 }
819 
820 static int snp_prep_cmd_buf(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list)
821 {
822 	if (!snp_legacy_handling_needed(cmd))
823 		return 0;
824 
825 	if (snp_map_cmd_buf_desc_list(cmd, cmd_buf, desc_list))
826 		return -EFAULT;
827 
828 	/*
829 	 * Before command execution, the command buffer needs to be put into
830 	 * the firmware-owned state.
831 	 */
832 	if (sev_cmd_buf_writable(cmd)) {
833 		if (rmp_mark_pages_firmware(__pa(cmd_buf), 1, true))
834 			return -EFAULT;
835 	}
836 
837 	return 0;
838 }
839 
840 static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf)
841 {
842 	if (!snp_legacy_handling_needed(cmd))
843 		return 0;
844 
845 	/*
846 	 * After command completion, the command buffer needs to be put back
847 	 * into the hypervisor-owned state.
848 	 */
849 	if (sev_cmd_buf_writable(cmd))
850 		if (snp_reclaim_pages(__pa(cmd_buf), 1, true))
851 			return -EFAULT;
852 
853 	return 0;
854 }
855 
856 int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
857 {
858 	struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0};
859 	struct psp_device *psp = psp_master;
860 	struct sev_device *sev;
861 	unsigned int cmdbuff_hi, cmdbuff_lo;
862 	unsigned int phys_lsb, phys_msb;
863 	unsigned int reg;
864 	void *cmd_buf;
865 	int buf_len;
866 	int ret = 0;
867 
868 	if (!psp || !psp->sev_data)
869 		return -ENODEV;
870 
871 	if (psp_dead)
872 		return -EBUSY;
873 
874 	sev = psp->sev_data;
875 
876 	buf_len = sev_cmd_buffer_len(cmd);
877 	if (WARN_ON_ONCE(!data != !buf_len))
878 		return -EINVAL;
879 
880 	/*
881 	 * Copy the incoming data to driver's scratch buffer as __pa() will not
882 	 * work for some memory, e.g. vmalloc'd addresses, and @data may not be
883 	 * physically contiguous.
884 	 */
885 	if (data) {
886 		/*
887 		 * Commands are generally issued one at a time and require the
888 		 * sev_cmd_mutex, but there could be recursive firmware requests
889 		 * due to SEV_CMD_SNP_PAGE_RECLAIM needing to be issued while
890 		 * preparing buffers for another command. This is the only known
891 		 * case of nesting in the current code, so exactly one
892 		 * additional command buffer is available for that purpose.
893 		 */
894 		if (!sev->cmd_buf_active) {
895 			cmd_buf = sev->cmd_buf;
896 			sev->cmd_buf_active = true;
897 		} else if (!sev->cmd_buf_backup_active) {
898 			cmd_buf = sev->cmd_buf_backup;
899 			sev->cmd_buf_backup_active = true;
900 		} else {
901 			dev_err(sev->dev,
902 				"SEV: too many firmware commands in progress, no command buffers available.\n");
903 			return -EBUSY;
904 		}
905 
906 		memcpy(cmd_buf, data, buf_len);
907 
908 		/*
909 		 * The behavior of the SEV-legacy commands is altered when the
910 		 * SNP firmware is in the INIT state.
911 		 */
912 		ret = snp_prep_cmd_buf(cmd, cmd_buf, desc_list);
913 		if (ret) {
914 			dev_err(sev->dev,
915 				"SEV: failed to prepare buffer for legacy command 0x%x. Error: %d\n",
916 				cmd, ret);
917 			return ret;
918 		}
919 	} else {
920 		cmd_buf = sev->cmd_buf;
921 	}
922 
923 	/* Get the physical address of the command buffer */
924 	phys_lsb = data ? lower_32_bits(__psp_pa(cmd_buf)) : 0;
925 	phys_msb = data ? upper_32_bits(__psp_pa(cmd_buf)) : 0;
926 
927 	dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
928 		cmd, phys_msb, phys_lsb, psp_timeout);
929 
930 	print_hex_dump_debug("(in):  ", DUMP_PREFIX_OFFSET, 16, 2, data,
931 			     buf_len, false);
932 
933 	iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
934 	iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
935 
936 	sev->int_rcvd = 0;
937 
938 	reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd);
939 
940 	/*
941 	 * If invoked during panic handling, local interrupts are disabled so
942 	 * the PSP command completion interrupt can't be used.
943 	 * sev_wait_cmd_ioc() already checks for interrupts disabled and
944 	 * polls for PSP command completion.  Ensure we do not request an
945 	 * interrupt from the PSP if irqs disabled.
946 	 */
947 	if (!irqs_disabled())
948 		reg |= SEV_CMDRESP_IOC;
949 
950 	iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
951 
952 	/* wait for command completion */
953 	ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout);
954 	if (ret) {
955 		if (psp_ret)
956 			*psp_ret = 0;
957 
958 		dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
959 		psp_dead = true;
960 
961 		return ret;
962 	}
963 
964 	psp_timeout = psp_cmd_timeout;
965 
966 	if (psp_ret)
967 		*psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg);
968 
969 	if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
970 		dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n",
971 			cmd, FIELD_GET(PSP_CMDRESP_STS, reg));
972 
973 		/*
974 		 * PSP firmware may report additional error information in the
975 		 * command buffer registers on error. Print contents of command
976 		 * buffer registers if they changed.
977 		 */
978 		cmdbuff_hi = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
979 		cmdbuff_lo = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
980 		if (cmdbuff_hi != phys_msb || cmdbuff_lo != phys_lsb) {
981 			dev_dbg(sev->dev, "Additional error information reported in cmdbuff:");
982 			dev_dbg(sev->dev, "  cmdbuff hi: %#010x\n", cmdbuff_hi);
983 			dev_dbg(sev->dev, "  cmdbuff lo: %#010x\n", cmdbuff_lo);
984 		}
985 		ret = -EIO;
986 	} else {
987 		ret = sev_write_init_ex_file_if_required(cmd);
988 	}
989 
990 	/*
991 	 * Copy potential output from the PSP back to data.  Do this even on
992 	 * failure in case the caller wants to glean something from the error.
993 	 */
994 	if (data) {
995 		int ret_reclaim;
996 		/*
997 		 * Restore the page state after the command completes.
998 		 */
999 		ret_reclaim = snp_reclaim_cmd_buf(cmd, cmd_buf);
1000 		if (ret_reclaim) {
1001 			dev_err(sev->dev,
1002 				"SEV: failed to reclaim buffer for legacy command %#x. Error: %d\n",
1003 				cmd, ret_reclaim);
1004 			return ret_reclaim;
1005 		}
1006 
1007 		memcpy(data, cmd_buf, buf_len);
1008 
1009 		if (sev->cmd_buf_backup_active)
1010 			sev->cmd_buf_backup_active = false;
1011 		else
1012 			sev->cmd_buf_active = false;
1013 
1014 		if (snp_unmap_cmd_buf_desc_list(desc_list))
1015 			return -EFAULT;
1016 	}
1017 
1018 	print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
1019 			     buf_len, false);
1020 
1021 	return ret;
1022 }
1023 
1024 int sev_do_cmd(int cmd, void *data, int *psp_ret)
1025 {
1026 	int rc;
1027 
1028 	mutex_lock(&sev_cmd_mutex);
1029 	rc = __sev_do_cmd_locked(cmd, data, psp_ret);
1030 	mutex_unlock(&sev_cmd_mutex);
1031 
1032 	return rc;
1033 }
1034 EXPORT_SYMBOL_GPL(sev_do_cmd);
1035 
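/*
 * Illustrative caller of sev_do_cmd() (hypothetical, similar to how
 * in-kernel users issue commands): @data may live on the stack because
 * the driver copies it into its own scratch command buffer and back.
 */
static int sev_platform_status_sketch(struct sev_user_data_status *status)
{
	int error = SEV_RET_NO_FW_CALL;
	int rc;

	rc = sev_do_cmd(SEV_CMD_PLATFORM_STATUS, status, &error);
	if (rc)
		pr_debug("PLATFORM_STATUS failed, rc %d fw error %#x\n", rc, error);

	return rc;
}
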
1036 static int __sev_init_locked(int *error)
1037 {
1038 	struct sev_data_init data;
1039 
1040 	memset(&data, 0, sizeof(data));
1041 	if (sev_es_tmr) {
1042 		/*
1043 		 * Do not include the encryption mask on the physical
1044 		 * address of the TMR (firmware should clear it anyway).
1045 		 */
1046 		data.tmr_address = __pa(sev_es_tmr);
1047 
1048 		data.flags |= SEV_INIT_FLAGS_SEV_ES;
1049 		data.tmr_len = sev_es_tmr_size;
1050 	}
1051 
1052 	return __sev_do_cmd_locked(SEV_CMD_INIT, &data, error);
1053 }
1054 
1055 static int __sev_init_ex_locked(int *error)
1056 {
1057 	struct sev_data_init_ex data;
1058 
1059 	memset(&data, 0, sizeof(data));
1060 	data.length = sizeof(data);
1061 	data.nv_address = __psp_pa(sev_init_ex_buffer);
1062 	data.nv_len = NV_LENGTH;
1063 
1064 	if (sev_es_tmr) {
1065 		/*
1066 		 * Do not include the encryption mask on the physical
1067 		 * address of the TMR (firmware should clear it anyway).
1068 		 */
1069 		data.tmr_address = __pa(sev_es_tmr);
1070 
1071 		data.flags |= SEV_INIT_FLAGS_SEV_ES;
1072 		data.tmr_len = sev_es_tmr_size;
1073 	}
1074 
1075 	return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error);
1076 }
1077 
1078 static inline int __sev_do_init_locked(int *psp_ret)
1079 {
1080 	if (sev_init_ex_buffer)
1081 		return __sev_init_ex_locked(psp_ret);
1082 	else
1083 		return __sev_init_locked(psp_ret);
1084 }
1085 
1086 static void snp_set_hsave_pa(void *arg)
1087 {
1088 	wrmsrq(MSR_VM_HSAVE_PA, 0);
1089 }
1090 
1091 /* Hypervisor Fixed pages API interface */
1092 static void snp_hv_fixed_pages_state_update(struct sev_device *sev,
1093 					    enum snp_hv_fixed_pages_state page_state)
1094 {
1095 	struct snp_hv_fixed_pages_entry *entry;
1096 
1097 	/* List is protected by sev_cmd_mutex */
1098 	lockdep_assert_held(&sev_cmd_mutex);
1099 
1100 	if (list_empty(&snp_hv_fixed_pages))
1101 		return;
1102 
1103 	list_for_each_entry(entry, &snp_hv_fixed_pages, list)
1104 		entry->page_state = page_state;
1105 }
1106 
1107 /*
1108  * Allocate HV_FIXED pages in 2MB aligned sizes to ensure the whole
1109  * 2MB pages are marked as HV_FIXED.
1110  */
1111 struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages)
1112 {
1113 	struct psp_device *psp_master = psp_get_master_device();
1114 	struct snp_hv_fixed_pages_entry *entry;
1115 	struct sev_device *sev;
1116 	unsigned int order;
1117 	struct page *page;
1118 
1119 	if (!psp_master || !psp_master->sev_data)
1120 		return NULL;
1121 
1122 	sev = psp_master->sev_data;
1123 
1124 	order = get_order(PMD_SIZE * num_2mb_pages);
1125 
1126 	/*
1127 	 * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
1128 	 * also needs to be protected using the same mutex.
1129 	 */
1130 	guard(mutex)(&sev_cmd_mutex);
1131 
1132 	/*
1133 	 * This API uses SNP_INIT_EX to transition allocated pages to HV_Fixed
1134 	 * page state, so fail if SNP is already initialized.
1135 	 */
1136 	if (sev->snp_initialized)
1137 		return NULL;
1138 
1139 	/* Re-use freed pages that match the request */
1140 	list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
1141 		/* Hypervisor fixed page allocator implements exact fit policy */
1142 		if (entry->order == order && entry->free) {
1143 			entry->free = false;
1144 			memset(page_address(entry->page), 0,
1145 			       (1 << entry->order) * PAGE_SIZE);
1146 			return entry->page;
1147 		}
1148 	}
1149 
1150 	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
1151 	if (!page)
1152 		return NULL;
1153 
1154 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1155 	if (!entry) {
1156 		__free_pages(page, order);
1157 		return NULL;
1158 	}
1159 
1160 	entry->page = page;
1161 	entry->order = order;
1162 	list_add_tail(&entry->list, &snp_hv_fixed_pages);
1163 
1164 	return page;
1165 }
1166 
1167 void snp_free_hv_fixed_pages(struct page *page)
1168 {
1169 	struct psp_device *psp_master = psp_get_master_device();
1170 	struct snp_hv_fixed_pages_entry *entry, *nentry;
1171 
1172 	if (!psp_master || !psp_master->sev_data)
1173 		return;
1174 
1175 	/*
1176 	 * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
1177 	 * also needs to be protected using the same mutex.
1178 	 */
1179 	guard(mutex)(&sev_cmd_mutex);
1180 
1181 	list_for_each_entry_safe(entry, nentry, &snp_hv_fixed_pages, list) {
1182 		if (entry->page != page)
1183 			continue;
1184 
1185 		/*
1186 		 * The HV_FIXED page state cannot be changed until reboot,
1187 		 * and such pages cannot be used by an SNP guest, so they
1188 		 * cannot be returned to the page allocator.
1189 		 * Mark the pages as free internally to allow possible re-use.
1190 		 */
1191 		if (entry->page_state == HV_FIXED) {
1192 			entry->free = true;
1193 		} else {
1194 			__free_pages(page, entry->order);
1195 			list_del(&entry->list);
1196 			kfree(entry);
1197 		}
1198 		return;
1199 	}
1200 }
1201 
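/*
 * Illustrative consumer of the HV_FIXED page API above (hypothetical
 * sub-device code, not an actual caller): pages must be allocated
 * before SNP_INIT_EX runs, are transitioned to HV_FIXED by it, and are
 * afterwards recycled via the exact-fit free list rather than returned
 * to the page allocator.
 */
static int psp_subdev_buffer_sketch(void)
{
	struct page *page = snp_alloc_hv_fixed_pages(1);	/* one 2MB chunk */

	if (!page)
		return -ENOMEM;

	/* ... register page_to_pfn(page) with firmware before SNP_INIT_EX ... */

	/* After SNP_INIT_EX this only marks the entry free for re-use. */
	snp_free_hv_fixed_pages(page);

	return 0;
}
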
1202 static void snp_add_hv_fixed_pages(struct sev_device *sev, struct sev_data_range_list *range_list)
1203 {
1204 	struct snp_hv_fixed_pages_entry *entry;
1205 	struct sev_data_range *range;
1206 	int num_elements;
1207 
1208 	lockdep_assert_held(&sev_cmd_mutex);
1209 
1210 	if (list_empty(&snp_hv_fixed_pages))
1211 		return;
1212 
1213 	num_elements = list_count_nodes(&snp_hv_fixed_pages) +
1214 		       range_list->num_elements;
1215 
1216 	/*
1217 	 * Ensure the list of HV_FIXED pages that will be passed to firmware
1218 	 * does not exceed the page-sized argument buffer.
1219 	 */
1220 	if (num_elements * sizeof(*range) + sizeof(*range_list) > PAGE_SIZE) {
1221 		dev_warn(sev->dev, "Additional HV_Fixed pages cannot be accommodated, omitting\n");
1222 		return;
1223 	}
1224 
1225 	range = &range_list->ranges[range_list->num_elements];
1226 	list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
1227 		range->base = page_to_pfn(entry->page) << PAGE_SHIFT;
1228 		range->page_count = 1 << entry->order;
1229 		range++;
1230 	}
1231 	range_list->num_elements = num_elements;
1232 }
1233 
1234 static void snp_leak_hv_fixed_pages(void)
1235 {
1236 	struct snp_hv_fixed_pages_entry *entry;
1237 
1238 	/* List is protected by sev_cmd_mutex */
1239 	lockdep_assert_held(&sev_cmd_mutex);
1240 
1241 	if (list_empty(&snp_hv_fixed_pages))
1242 		return;
1243 
1244 	list_for_each_entry(entry, &snp_hv_fixed_pages, list)
1245 		if (entry->page_state == HV_FIXED)
1246 			__snp_leak_pages(page_to_pfn(entry->page),
1247 					 1 << entry->order, false);
1248 }
1249 
1250 bool sev_is_snp_ciphertext_hiding_supported(void)
1251 {
1252 	struct psp_device *psp = psp_master;
1253 	struct sev_device *sev;
1254 
1255 	if (!psp || !psp->sev_data)
1256 		return false;
1257 
1258 	sev = psp->sev_data;
1259 
1260 	/*
1261 	 * The feature information indicates whether the CipherTextHiding
1262 	 * feature is supported by the SEV firmware, and the platform status
1263 	 * additionally indicates whether CipherTextHiding is enabled in the
1264 	 * platform BIOS.
1265 	 */
1266 	return ((sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED) &&
1267 		 sev->snp_plat_status.ciphertext_hiding_cap);
1268 }
1269 EXPORT_SYMBOL_GPL(sev_is_snp_ciphertext_hiding_supported);
1270 
1271 static int snp_get_platform_data(struct sev_device *sev, int *error)
1272 {
1273 	struct sev_data_snp_feature_info snp_feat_info;
1274 	struct snp_feature_info *feat_info;
1275 	struct sev_data_snp_addr buf;
1276 	struct page *page;
1277 	int rc;
1278 
1279 	/*
1280 	 * This function is expected to be called before SNP is
1281 	 * initialized.
1282 	 */
1283 	if (sev->snp_initialized)
1284 		return -EINVAL;
1285 
1286 	buf.address = __psp_pa(&sev->snp_plat_status);
1287 	rc = sev_do_cmd(SEV_CMD_SNP_PLATFORM_STATUS, &buf, error);
1288 	if (rc) {
1289 		dev_err(sev->dev, "SNP PLATFORM_STATUS command failed, ret = %d, error = %#x\n",
1290 			rc, *error);
1291 		return rc;
1292 	}
1293 
1294 	sev->api_major = sev->snp_plat_status.api_major;
1295 	sev->api_minor = sev->snp_plat_status.api_minor;
1296 	sev->build = sev->snp_plat_status.build_id;
1297 
1298 	/*
1299 	 * Do feature discovery of the currently loaded firmware,
1300 	 * and cache feature information from CPUID 0x8000_0024,
1301 	 * sub-function 0.
1302 	 */
1303 	if (!sev->snp_plat_status.feature_info)
1304 		return 0;
1305 
1306 	/*
1307 	 * Use a dynamically allocated structure for the SNP_FEATURE_INFO
1308 	 * command to ensure the structure is 8-byte aligned and does not
1309 	 * cross a page boundary.
1310 	 */
1311 	page = alloc_page(GFP_KERNEL);
1312 	if (!page)
1313 		return -ENOMEM;
1314 
1315 	feat_info = page_address(page);
1316 	snp_feat_info.length = sizeof(snp_feat_info);
1317 	snp_feat_info.ecx_in = 0;
1318 	snp_feat_info.feature_info_paddr = __psp_pa(feat_info);
1319 
1320 	rc = sev_do_cmd(SEV_CMD_SNP_FEATURE_INFO, &snp_feat_info, error);
1321 	if (!rc)
1322 		sev->snp_feat_info_0 = *feat_info;
1323 	else
1324 		dev_err(sev->dev, "SNP FEATURE_INFO command failed, ret = %d, error = %#x\n",
1325 			rc, *error);
1326 
1327 	__free_page(page);
1328 
1329 	return rc;
1330 }
1331 
1332 static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
1333 {
1334 	struct sev_data_range_list *range_list = arg;
1335 	struct sev_data_range *range = &range_list->ranges[range_list->num_elements];
1336 	size_t size;
1337 
1338 	/*
1339 	 * Ensure the list of HV_FIXED pages that will be passed to firmware
1340 	 * does not exceed the page-sized argument buffer.
1341 	 */
1342 	if ((range_list->num_elements * sizeof(struct sev_data_range) +
1343 	     sizeof(struct sev_data_range_list)) > PAGE_SIZE)
1344 		return -E2BIG;
1345 
1346 	switch (rs->desc) {
1347 	case E820_TYPE_RESERVED:
1348 	case E820_TYPE_PMEM:
1349 	case E820_TYPE_ACPI:
1350 		range->base = rs->start & PAGE_MASK;
1351 		size = PAGE_ALIGN((rs->end + 1) - rs->start);
1352 		range->page_count = size >> PAGE_SHIFT;
1353 		range_list->num_elements++;
1354 		break;
1355 	default:
1356 		break;
1357 	}
1358 
1359 	return 0;
1360 }
1361 
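/*
 * Worked capacity example for the check above (editorial note, assuming
 * a 16-byte struct sev_data_range and a 4KB page): the single-page
 * argument buffer holds at most
 * (PAGE_SIZE - sizeof(struct sev_data_range_list)) / sizeof(struct sev_data_range),
 * i.e. roughly 250 ranges, before the e820 walk bails out with -E2BIG.
 */
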
1362 static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
1363 {
1364 	struct psp_device *psp = psp_master;
1365 	struct sev_data_snp_init_ex data = {};	/* data.tio_en is read below even on the SNP_INIT path */
1366 	struct sev_device *sev;
1367 	void *arg = &data;
1368 	int cmd, rc = 0;
1369 
1370 	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
1371 		return -ENODEV;
1372 
1373 	sev = psp->sev_data;
1374 
1375 	if (sev->snp_initialized)
1376 		return 0;
1377 
1378 	if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) {
1379 		dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n",
1380 			SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR);
1381 		return -EOPNOTSUPP;
1382 	}
1383 
1384 	/* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */
1385 	on_each_cpu(snp_set_hsave_pa, NULL, 1);
1386 
1387 	/*
1388 	 * Starting in SNP firmware v1.52, the SNP_INIT_EX command takes a list
1389 	 * of system physical address ranges to convert into HV-fixed page
1390 	 * states during the RMP initialization.  For instance, the memory that
1391 	 * UEFI reserves should be included in that list. This allows system
1392 	 * components that occasionally write to memory (e.g. logging to UEFI
1393 	 * reserved regions) to not fail due to RMP initialization and SNP
1394 	 * enablement.
1395 	 *
1396 	 */
1397 	if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) {
1398 		bool tio_supp = !!(sev->snp_feat_info_0.ebx & SNP_SEV_TIO_SUPPORTED);
1399 
1400 		/*
1401 		 * Firmware checks that the pages containing the ranges enumerated
1402 		 * in the RANGES structure are either in the default page state or in the
1403 		 * firmware page state.
1404 		 */
1405 		snp_range_list = kzalloc(PAGE_SIZE, GFP_KERNEL);
1406 		if (!snp_range_list) {
1407 			dev_err(sev->dev,
1408 				"SEV: SNP_INIT_EX range list memory allocation failed\n");
1409 			return -ENOMEM;
1410 		}
1411 
1412 		/*
1413 		 * Retrieve all reserved memory regions from the e820 memory map
1414 		 * to be setup as HV-fixed pages.
1415 		 */
1416 		rc = walk_iomem_res_desc(IORES_DESC_NONE, IORESOURCE_MEM, 0, ~0,
1417 					 snp_range_list, snp_filter_reserved_mem_regions);
1418 		if (rc) {
1419 			dev_err(sev->dev,
1420 				"SEV: SNP_INIT_EX walk_iomem_res_desc failed rc = %d\n", rc);
1421 			return rc;
1422 		}
1423 
1424 		/*
1425 		 * Add HV_Fixed pages from other PSP sub-devices, such as SFS,
1426 		 * to the HV_Fixed page list.
1427 		 */
1428 		snp_add_hv_fixed_pages(sev, snp_range_list);
1429 
1430 		memset(&data, 0, sizeof(data));
1431 
1432 		if (max_snp_asid) {
1433 			data.ciphertext_hiding_en = 1;
1434 			data.max_snp_asid = max_snp_asid;
1435 		}
1436 
1437 		data.init_rmp = 1;
1438 		data.list_paddr_en = 1;
1439 		data.list_paddr = __psp_pa(snp_range_list);
1440 
1441 		data.tio_en = tio_supp && sev_tio_enabled && amd_iommu_sev_tio_supported();
1442 
1443 		/*
1444 		 * When psp_init_on_probe is disabled, userspace calling the
1445 		 * SEV ioctl can inadvertently shut down SNP and SEV-TIO, causing
1446 		 * unexpected state loss.
1447 		 */
1448 		if (data.tio_en && !psp_init_on_probe)
1449 			dev_warn(sev->dev, "SEV-TIO as incompatible with psp_init_on_probe=0\n");
1450 
1451 		cmd = SEV_CMD_SNP_INIT_EX;
1452 	} else {
1453 		cmd = SEV_CMD_SNP_INIT;
1454 		arg = NULL;
1455 	}
1456 
1457 	/*
1458 	 * The following sequence must be issued before launching the first SNP
1459 	 * guest to ensure all dirty cache lines are flushed, including from
1460 	 * updates to the RMP table itself via the RMPUPDATE instruction:
1461 	 *
1462 	 * - WBINVD on all running CPUs
1463 	 * - SEV_CMD_SNP_INIT[_EX] firmware command
1464 	 * - WBINVD on all running CPUs
1465 	 * - SEV_CMD_SNP_DF_FLUSH firmware command
1466 	 */
1467 	wbinvd_on_all_cpus();
1468 
1469 	rc = __sev_do_cmd_locked(cmd, arg, error);
1470 	if (rc) {
1471 		dev_err(sev->dev, "SEV-SNP: %s failed rc %d, error %#x\n",
1472 			cmd == SEV_CMD_SNP_INIT_EX ? "SNP_INIT_EX" : "SNP_INIT",
1473 			rc, *error);
1474 		return rc;
1475 	}
1476 
1477 	/* Prepare for first SNP guest launch after INIT. */
1478 	wbinvd_on_all_cpus();
1479 	rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error);
1480 	if (rc) {
1481 		dev_err(sev->dev, "SEV-SNP: SNP_DF_FLUSH failed rc %d, error %#x\n",
1482 			rc, *error);
1483 		return rc;
1484 	}
1485 
1486 	snp_hv_fixed_pages_state_update(sev, HV_FIXED);
1487 	sev->snp_initialized = true;
1488 	dev_dbg(sev->dev, "SEV-SNP firmware initialized, SEV-TIO is %s\n",
1489 		data.tio_en ? "enabled" : "disabled");
1490 
1491 	dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
1492 		 sev->api_minor, sev->build);
1493 
1494 	atomic_notifier_chain_register(&panic_notifier_list,
1495 				       &snp_panic_notifier);
1496 
1497 	if (data.tio_en) {
1498 		/*
1499 		 * This executes with sev_cmd_mutex held, so further down the stack
1500 		 * snp_reclaim_pages(locked=false) might be needed (which is extremely
1501 		 * unlikely) and would deadlock.
1502 		 * Instead of exporting __snp_alloc_firmware_pages(), allocate a page
1503 		 * for this one call here.
1504 		 */
1505 		struct page *tio_page = __snp_alloc_firmware_pages(
1506 			GFP_KERNEL_ACCOUNT | __GFP_ZERO, 0, true);
1507 
1508 		if (tio_page) {
1509 			sev_tsm_init_locked(sev, page_address(tio_page));
1510 			__snp_free_firmware_pages(tio_page, 0, true);
1511 		}
1512 	}
1513 
1514 	sev_es_tmr_size = SNP_TMR_SIZE;
1515 
1516 	return 0;
1517 }
1518 
1519 static void __sev_platform_init_handle_tmr(struct sev_device *sev)
1520 {
1521 	if (sev_es_tmr)
1522 		return;
1523 
1524 	/* Obtain the TMR memory area for SEV-ES use */
1525 	sev_es_tmr = sev_fw_alloc(sev_es_tmr_size);
1526 	if (sev_es_tmr) {
1527 		/* Must flush the cache before giving it to the firmware */
1528 		if (!sev->snp_initialized)
1529 			clflush_cache_range(sev_es_tmr, sev_es_tmr_size);
1530 	} else {
1531 		dev_warn(sev->dev, "SEV: TMR allocation failed, SEV-ES support unavailable\n");
1532 	}
1533 }
1534 
1535 /*
1536  * If an init_ex_path is provided allocate a buffer for the file and
1537  * read in the contents. Additionally, if SNP is initialized, convert
1538  * the buffer pages to firmware pages.
1539  */
1540 static int __sev_platform_init_handle_init_ex_path(struct sev_device *sev)
1541 {
1542 	struct page *page;
1543 	int rc;
1544 
1545 	if (!init_ex_path)
1546 		return 0;
1547 
1548 	if (sev_init_ex_buffer)
1549 		return 0;
1550 
1551 	page = alloc_pages(GFP_KERNEL, get_order(NV_LENGTH));
1552 	if (!page) {
1553 		dev_err(sev->dev, "SEV: INIT_EX NV memory allocation failed\n");
1554 		return -ENOMEM;
1555 	}
1556 
1557 	sev_init_ex_buffer = page_address(page);
1558 
1559 	rc = sev_read_init_ex_file();
1560 	if (rc)
1561 		return rc;
1562 
1563 	/* If SEV-SNP is initialized, transition to firmware page. */
1564 	if (sev->snp_initialized) {
1565 		unsigned long npages;
1566 
1567 		npages = 1UL << get_order(NV_LENGTH);
1568 		if (rmp_mark_pages_firmware(__pa(sev_init_ex_buffer), npages, false)) {
1569 			dev_err(sev->dev, "SEV: INIT_EX NV memory page state change failed.\n");
1570 			return -ENOMEM;
1571 		}
1572 	}
1573 
1574 	return 0;
1575 }
1576 
1577 static int __sev_platform_init_locked(int *error)
1578 {
1579 	int rc, psp_ret, dfflush_error;
1580 	struct sev_device *sev;
1581 
1582 	psp_ret = dfflush_error = SEV_RET_NO_FW_CALL;
1583 
1584 	if (!psp_master || !psp_master->sev_data)
1585 		return -ENODEV;
1586 
1587 	sev = psp_master->sev_data;
1588 
1589 	if (sev->sev_plat_status.state == SEV_STATE_INIT)
1590 		return 0;
1591 
1592 	__sev_platform_init_handle_tmr(sev);
1593 
1594 	rc = __sev_platform_init_handle_init_ex_path(sev);
1595 	if (rc)
1596 		return rc;
1597 
1598 	rc = __sev_do_init_locked(&psp_ret);
1599 	if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
1600 		/*
1601 		 * Initialization command returned an integrity check failure
1602 		 * status code, meaning that firmware load and validation of SEV
1603 		 * related persistent data has failed. Retrying the
1604 		 * initialization function should succeed by replacing the state
1605 		 * with a reset state.
1606 		 */
1607 		dev_err(sev->dev,
1608 "SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state.");
1609 		rc = __sev_do_init_locked(&psp_ret);
1610 	}
1611 
1612 	if (error)
1613 		*error = psp_ret;
1614 
1615 	if (rc) {
1616 		dev_err(sev->dev, "SEV: %s failed %#x, rc %d\n",
1617 			sev_init_ex_buffer ? "INIT_EX" : "INIT", psp_ret, rc);
1618 		return rc;
1619 	}
1620 
1621 	sev->sev_plat_status.state = SEV_STATE_INIT;
1622 
1623 	/* Prepare for first SEV guest launch after INIT */
1624 	wbinvd_on_all_cpus();
1625 	rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, &dfflush_error);
1626 	if (rc) {
1627 		dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n",
1628 			dfflush_error, rc);
1629 		return rc;
1630 	}
1631 
1632 	dev_dbg(sev->dev, "SEV firmware initialized\n");
1633 
1634 	dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
1635 		 sev->api_minor, sev->build);
1636 
1637 	return 0;
1638 }
1639 
1640 static int _sev_platform_init_locked(struct sev_platform_init_args *args)
1641 {
1642 	struct sev_device *sev;
1643 	int rc;
1644 
1645 	if (!psp_master || !psp_master->sev_data)
1646 		return -ENODEV;
1647 
1648 	/*
1649 	 * Skip SNP/SEV initialization under a kdump kernel as SEV/SNP
1650 	 * may already be initialized in the previous kernel. Since no
1651 	 * SNP/SEV guests are run under a kdump kernel, there is no
1652 	 * need to initialize SNP or SEV during kdump boot.
1653 	 */
1654 	if (is_kdump_kernel())
1655 		return 0;
1656 
1657 	sev = psp_master->sev_data;
1658 
1659 	if (sev->sev_plat_status.state == SEV_STATE_INIT)
1660 		return 0;
1661 
1662 	rc = __sev_snp_init_locked(&args->error, args->max_snp_asid);
1663 	if (rc && rc != -ENODEV)
1664 		return rc;
1665 
1666 	/* Defer legacy SEV/SEV-ES support if allowed by caller/module. */
1667 	if (args->probe && !psp_init_on_probe)
1668 		return 0;
1669 
1670 	return __sev_platform_init_locked(&args->error);
1671 }
1672 
1673 int sev_platform_init(struct sev_platform_init_args *args)
1674 {
1675 	int rc;
1676 
1677 	mutex_lock(&sev_cmd_mutex);
1678 	rc = _sev_platform_init_locked(args);
1679 	mutex_unlock(&sev_cmd_mutex);
1680 
1681 	return rc;
1682 }
1683 EXPORT_SYMBOL_GPL(sev_platform_init);
1684 
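/*
 * Illustrative call into sev_platform_init() (hypothetical caller, not
 * part of this file): when ciphertext hiding is available, a cap on SNP
 * ASIDs can be passed through to SNP_INIT_EX; args.error reports the
 * firmware status code on failure.
 */
static int sev_bringup_sketch(unsigned int max_snp_asid)
{
	struct sev_platform_init_args args = {};
	int rc;

	if (sev_is_snp_ciphertext_hiding_supported())
		args.max_snp_asid = max_snp_asid;

	rc = sev_platform_init(&args);
	if (rc)
		pr_err("SEV/SNP init failed, rc %d fw error %#x\n", rc, args.error);

	return rc;
}
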
1685 static int __sev_platform_shutdown_locked(int *error)
1686 {
1687 	struct psp_device *psp = psp_master;
1688 	struct sev_device *sev;
1689 	int ret;
1690 
1691 	if (!psp || !psp->sev_data)
1692 		return 0;
1693 
1694 	sev = psp->sev_data;
1695 
1696 	if (sev->sev_plat_status.state == SEV_STATE_UNINIT)
1697 		return 0;
1698 
1699 	ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
1700 	if (ret) {
1701 		dev_err(sev->dev, "SEV: failed to SHUTDOWN error %#x, rc %d\n",
1702 			*error, ret);
1703 		return ret;
1704 	}
1705 
1706 	sev->sev_plat_status.state = SEV_STATE_UNINIT;
1707 	dev_dbg(sev->dev, "SEV firmware shutdown\n");
1708 
1709 	return ret;
1710 }
1711 
1712 static int sev_get_platform_state(int *state, int *error)
1713 {
1714 	struct sev_user_data_status data;
1715 	int rc;
1716 
1717 	rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, error);
1718 	if (rc)
1719 		return rc;
1720 
1721 	*state = data.state;
1722 	return rc;
1723 }
1724 
1725 static int sev_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
1726 {
1727 	struct sev_platform_init_args init_args = {0};
1728 	int rc;
1729 
1730 	rc = _sev_platform_init_locked(&init_args);
1731 	if (rc) {
1732 		argp->error = SEV_RET_INVALID_PLATFORM_STATE;
1733 		return rc;
1734 	}
1735 
1736 	*shutdown_required = true;
1737 
1738 	return 0;
1739 }
1740 
1741 static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
1742 {
1743 	int error, rc;
1744 
1745 	rc = __sev_snp_init_locked(&error, 0);
1746 	if (rc) {
1747 		argp->error = SEV_RET_INVALID_PLATFORM_STATE;
1748 		return rc;
1749 	}
1750 
1751 	*shutdown_required = true;
1752 
1753 	return 0;
1754 }
1755 
1756 static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
1757 {
1758 	int state, rc;
1759 
1760 	if (!writable)
1761 		return -EPERM;
1762 
1763 	/*
1764 	 * The SEV spec requires that FACTORY_RESET be issued while the
1765 	 * platform is in the UNINIT state, so first check whether any
1766 	 * guest is active.
1767 	 *
1768 	 * If the FW is in the WORKING state, deny the request; otherwise,
1769 	 * issue the SHUTDOWN command to transition from INIT to UNINIT
1770 	 * before issuing FACTORY_RESET.
1771 	 */
1772 	rc = sev_get_platform_state(&state, &argp->error);
1773 	if (rc)
1774 		return rc;
1775 
1776 	if (state == SEV_STATE_WORKING)
1777 		return -EBUSY;
1778 
1779 	if (state == SEV_STATE_INIT) {
1780 		rc = __sev_platform_shutdown_locked(&argp->error);
1781 		if (rc)
1782 			return rc;
1783 	}
1784 
1785 	return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
1786 }
1787 
1788 static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
1789 {
1790 	struct sev_user_data_status data;
1791 	int ret;
1792 
1793 	memset(&data, 0, sizeof(data));
1794 
1795 	ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error);
1796 	if (ret)
1797 		return ret;
1798 
1799 	if (copy_to_user((void __user *)argp->data, &data, sizeof(data)))
1800 		ret = -EFAULT;
1801 
1802 	return ret;
1803 }
1804 
1805 static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
1806 {
1807 	struct sev_device *sev = psp_master->sev_data;
1808 	bool shutdown_required = false;
1809 	int rc;
1810 
1811 	if (!writable)
1812 		return -EPERM;
1813 
1814 	if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
1815 		rc = sev_move_to_init_state(argp, &shutdown_required);
1816 		if (rc)
1817 			return rc;
1818 	}
1819 
1820 	rc = __sev_do_cmd_locked(cmd, NULL, &argp->error);
1821 
1822 	if (shutdown_required)
1823 		__sev_firmware_shutdown(sev, false);
1824 
1825 	return rc;
1826 }
1827 
1828 static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
1829 {
1830 	struct sev_device *sev = psp_master->sev_data;
1831 	struct sev_user_data_pek_csr input;
1832 	bool shutdown_required = false;
1833 	struct sev_data_pek_csr data;
1834 	void __user *input_address;
1835 	void *blob = NULL;
1836 	int ret;
1837 
1838 	if (!writable)
1839 		return -EPERM;
1840 
1841 	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
1842 		return -EFAULT;
1843 
1844 	memset(&data, 0, sizeof(data));
1845 
1846 	/* userspace wants to query CSR length */
1847 	if (!input.address || !input.length)
1848 		goto cmd;
1849 
1850 	/* allocate a physically contiguous buffer to store the CSR blob */
1851 	input_address = (void __user *)input.address;
1852 	if (input.length > SEV_FW_BLOB_MAX_SIZE)
1853 		return -EFAULT;
1854 
1855 	blob = kzalloc(input.length, GFP_KERNEL);
1856 	if (!blob)
1857 		return -ENOMEM;
1858 
1859 	data.address = __psp_pa(blob);
1860 	data.len = input.length;
1861 
1862 cmd:
1863 	if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
1864 		ret = sev_move_to_init_state(argp, &shutdown_required);
1865 		if (ret)
1866 			goto e_free_blob;
1867 	}
1868 
1869 	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, &data, &argp->error);
1870 
1871 	/* The FW returns the CSR length (required or actually written); pass it to userspace. */
1872 	input.length = data.len;
1873 
1874 	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
1875 		ret = -EFAULT;
1876 		goto e_free_blob;
1877 	}
1878 
1879 	if (blob) {
1880 		if (copy_to_user(input_address, blob, input.length))
1881 			ret = -EFAULT;
1882 	}
1883 
1884 e_free_blob:
1885 	if (shutdown_required)
1886 		__sev_firmware_shutdown(sev, false);
1887 
1888 	kfree(blob);
1889 	return ret;
1890 }
1891 
1892 void *psp_copy_user_blob(u64 uaddr, u32 len)
1893 {
1894 	if (!uaddr || !len)
1895 		return ERR_PTR(-EINVAL);
1896 
1897 	/* verify that blob length does not exceed our limit */
1898 	if (len > SEV_FW_BLOB_MAX_SIZE)
1899 		return ERR_PTR(-EINVAL);
1900 
1901 	return memdup_user((void __user *)uaddr, len);
1902 }
1903 EXPORT_SYMBOL_GPL(psp_copy_user_blob);
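/*
 * Example (illustrative only): callers duplicate a user buffer into
 * kernel memory and must check the result with IS_ERR():
 *
 *	void *blob = psp_copy_user_blob(uaddr, len);
 *
 *	if (IS_ERR(blob))
 *		return PTR_ERR(blob);
 *	...use the blob, eventually kfree(blob)...
 */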
1904 
1905 static int sev_get_api_version(void)
1906 {
1907 	struct sev_device *sev = psp_master->sev_data;
1908 	struct sev_user_data_status status;
1909 	int error = 0, ret;
1910 
1911 	/*
1912 	 * Cache SNP platform status and SNP feature information
1913 	 * if SNP is available.
1914 	 */
1915 	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP)) {
1916 		ret = snp_get_platform_data(sev, &error);
1917 		if (ret)
1918 			return 1;
1919 	}
1920 
1921 	ret = sev_platform_status(&status, &error);
1922 	if (ret) {
1923 		dev_err(sev->dev,
1924 			"SEV: failed to get status. Error: %#x\n", error);
1925 		return 1;
1926 	}
1927 
1928 	/* Cache SEV platform status */
1929 	sev->sev_plat_status = status;
1930 
1931 	sev->api_major = status.api_major;
1932 	sev->api_minor = status.api_minor;
1933 	sev->build = status.build;
1934 
1935 	return 0;
1936 }
1937 
1938 static int sev_get_firmware(struct device *dev,
1939 			    const struct firmware **firmware)
1940 {
1941 	char fw_name_specific[SEV_FW_NAME_SIZE];
1942 	char fw_name_subset[SEV_FW_NAME_SIZE];
1943 
1944 	snprintf(fw_name_specific, sizeof(fw_name_specific),
1945 		 "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
1946 		 boot_cpu_data.x86, boot_cpu_data.x86_model);
1947 
1948 	snprintf(fw_name_subset, sizeof(fw_name_subset),
1949 		 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
1950 		 boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
1951 
1952 	/* Check for SEV FW for a particular model.
1953 	 * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
1954 	 *
1955 	 * or
1956 	 *
1957 	 * Check for SEV FW common to a subset of models.
1958 	 * Ex. amd_sev_fam17h_model0xh.sbin for
1959 	 *     Family 17h Model 00h -- Family 17h Model 0Fh
1960 	 *
1961 	 * or
1962 	 *
1963 	 * Fall-back to using generic name: sev.fw
1964 	 */
1965 	if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
1966 	    (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
1967 	    (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
1968 		return 0;
1969 
1970 	return -ENOENT;
1971 }
1972 
1973 /* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
1974 static int sev_update_firmware(struct device *dev)
1975 {
1976 	struct sev_data_download_firmware *data;
1977 	const struct firmware *firmware;
1978 	int ret, error, order;
1979 	struct page *p;
1980 	u64 data_size;
1981 
1982 	if (!sev_version_greater_or_equal(0, 15)) {
1983 		dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n");
1984 		return -1;
1985 	}
1986 
1987 	if (sev_get_firmware(dev, &firmware) == -ENOENT) {
1988 		dev_dbg(dev, "No SEV firmware file present\n");
1989 		return -1;
1990 	}
1991 
1992 	/*
1993 	 * SEV FW expects the physical address given to it to be 32-byte
1994 	 * aligned. The allocated memory has the data structure placed at
1995 	 * the beginning, followed by the firmware image that is passed to
1996 	 * the SEV FW. Allocate enough memory for the data structure +
1997 	 * alignment padding + the SEV FW image.
1998 	 */
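	/*
	 * Resulting layout (illustrative):
	 *
	 *	page_address(p)
	 *	|<---- data_size (32-byte aligned) ---->|<- firmware->size ->|
	 *	[ struct sev_data_download_firmware|pad ][  firmware image   ]
	 *	^                                        ^
	 *	data                                     data->address (phys)
	 */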
1999 	data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
2000 
2001 	order = get_order(firmware->size + data_size);
2002 	p = alloc_pages(GFP_KERNEL, order);
2003 	if (!p) {
2004 		ret = -1;
2005 		goto fw_err;
2006 	}
2007 
2008 	/*
2009 	 * Copy firmware data to a kernel allocated contiguous
2010 	 * memory region.
2011 	 */
2012 	data = page_address(p);
2013 	memcpy(page_address(p) + data_size, firmware->data, firmware->size);
2014 
2015 	data->address = __psp_pa(page_address(p) + data_size);
2016 	data->len = firmware->size;
2017 
2018 	ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
2019 
2020 	/*
2021 	 * A quirk to fix the committed TCB version when upgrading from a
2022 	 * firmware version earlier than 1.50: issue DOWNLOAD_FIRMWARE again.
2023 	 */
2024 	if (!ret && !sev_version_greater_or_equal(1, 50))
2025 		ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
2026 
2027 	if (ret)
2028 		dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
2029 
2030 	__free_pages(p, order);
2031 
2032 fw_err:
2033 	release_firmware(firmware);
2034 
2035 	return ret;
2036 }
2037 
2038 static int __sev_snp_shutdown_locked(int *error, bool panic)
2039 {
2040 	struct psp_device *psp = psp_master;
2041 	struct sev_device *sev;
2042 	struct sev_data_snp_shutdown_ex data;
2043 	int ret;
2044 
2045 	if (!psp || !psp->sev_data)
2046 		return 0;
2047 
2048 	sev = psp->sev_data;
2049 
2050 	if (!sev->snp_initialized)
2051 		return 0;
2052 
2053 	memset(&data, 0, sizeof(data));
2054 	data.len = sizeof(data);
2055 	data.iommu_snp_shutdown = 1;
2056 
2057 	/*
2058 	 * If invoked during panic handling, local interrupts are disabled
2059 	 * and all CPUs are stopped, so wbinvd_on_all_cpus() can't be called.
2060 	 * In that case, a wbinvd() is done on remote CPUs via the NMI
2061 	 * callback, so only a local wbinvd() is needed here.
2062 	 */
2063 	if (!panic)
2064 		wbinvd_on_all_cpus();
2065 	else
2066 		wbinvd();
2067 
2068 	ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error);
2069 	/* SHUTDOWN may require DF_FLUSH */
2070 	if (*error == SEV_RET_DFFLUSH_REQUIRED) {
2071 		int dfflush_error = SEV_RET_NO_FW_CALL;
2072 
2073 		ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, &dfflush_error);
2074 		if (ret) {
2075 			dev_err(sev->dev, "SEV-SNP DF_FLUSH failed, ret = %d, error = %#x\n",
2076 				ret, dfflush_error);
2077 			return ret;
2078 		}
2079 		/* reissue the shutdown command */
2080 		ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data,
2081 					  error);
2082 	}
2083 	if (ret) {
2084 		dev_err(sev->dev, "SEV-SNP firmware shutdown failed, rc %d, error %#x\n",
2085 			ret, *error);
2086 		return ret;
2087 	}
2088 
2089 	/*
2090 	 * SNP_SHUTDOWN_EX with IOMMU_SNP_SHUTDOWN set to 1 disables SNP
2091 	 * enforcement by the IOMMU and also transitions all pages
2092 	 * associated with the IOMMU to the Reclaim state.
2093 	 * Before version 1.53, firmware transitioned the IOMMU pages to
2094 	 * the Hypervisor state, but accounted for the number of assigned
2095 	 * 4kB pages in a 2M page incorrectly by not transitioning them to
2096 	 * the Reclaim state, resulting in an RMP #PF when the containing
2097 	 * 2M page was later accessed during kexec boot. Hence, firmware
2098 	 * now transitions these pages to the Reclaim state and the
2099 	 * hypervisor must transition them to the Shared state. SNP
2100 	 * firmware version 1.53 or later is needed for kexec boot.
2101 	 */
2102 	ret = amd_iommu_snp_disable();
2103 	if (ret) {
2104 		dev_err(sev->dev, "SNP IOMMU shutdown failed\n");
2105 		return ret;
2106 	}
2107 
2108 	snp_leak_hv_fixed_pages();
2109 	sev->snp_initialized = false;
2110 	dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
2111 
2112 	/*
2113 	 * Don't unregister the panic notifier here during a panic: the
2114 	 * notifier chain is traversed with the RCU read lock held, and
2115 	 * unregistration does RCU synchronization, which would deadlock.
2116 	 */
2117 	if (!panic)
2118 		atomic_notifier_chain_unregister(&panic_notifier_list,
2119 						 &snp_panic_notifier);
2120 
2121 	/* Reset TMR size back to default */
2122 	sev_es_tmr_size = SEV_TMR_SIZE;
2123 
2124 	return ret;
2125 }
2126 
2127 static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
2128 {
2129 	struct sev_device *sev = psp_master->sev_data;
2130 	struct sev_user_data_pek_cert_import input;
2131 	struct sev_data_pek_cert_import data;
2132 	bool shutdown_required = false;
2133 	void *pek_blob, *oca_blob;
2134 	int ret;
2135 
2136 	if (!writable)
2137 		return -EPERM;
2138 
2139 	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
2140 		return -EFAULT;
2141 
2142 	/* copy the PEK certificate blob from userspace */
2143 	pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
2144 	if (IS_ERR(pek_blob))
2145 		return PTR_ERR(pek_blob);
2146 
2147 	data.reserved = 0;
2148 	data.pek_cert_address = __psp_pa(pek_blob);
2149 	data.pek_cert_len = input.pek_cert_len;
2150 
2151 	/* copy the OCA certificate blob from userspace */
2152 	oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
2153 	if (IS_ERR(oca_blob)) {
2154 		ret = PTR_ERR(oca_blob);
2155 		goto e_free_pek;
2156 	}
2157 
2158 	data.oca_cert_address = __psp_pa(oca_blob);
2159 	data.oca_cert_len = input.oca_cert_len;
2160 
2161 	/* If platform is not in INIT state then transition it to INIT */
2162 	if (sev->sev_plat_status.state != SEV_STATE_INIT) {
2163 		ret = sev_move_to_init_state(argp, &shutdown_required);
2164 		if (ret)
2165 			goto e_free_oca;
2166 	}
2167 
2168 	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error);
2169 
2170 e_free_oca:
2171 	if (shutdown_required)
2172 		__sev_firmware_shutdown(sev, false);
2173 
2174 	kfree(oca_blob);
2175 e_free_pek:
2176 	kfree(pek_blob);
2177 	return ret;
2178 }
2179 
2180 static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
2181 {
2182 	struct sev_user_data_get_id2 input;
2183 	struct sev_data_get_id data;
2184 	void __user *input_address;
2185 	void *id_blob = NULL;
2186 	int ret;
2187 
2188 	/* SEV GET_ID is available from SEV API v0.16 and up */
2189 	if (!sev_version_greater_or_equal(0, 16))
2190 		return -ENOTSUPP;
2191 
2192 	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
2193 		return -EFAULT;
2194 
2195 	input_address = (void __user *)input.address;
2196 
2197 	if (input.address && input.length) {
2198 		/*
2199 		 * The length of the ID shouldn't be assumed by software since
2200 		 * it may change in the future.  The allocation size is limited
2201 		 * to 1 << (PAGE_SHIFT + MAX_PAGE_ORDER) by the page allocator.
2202 		 * If the allocation fails, simply return ENOMEM rather than
2203 		 * warning in the kernel log.
2204 		 */
2205 		id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN);
2206 		if (!id_blob)
2207 			return -ENOMEM;
2208 
2209 		data.address = __psp_pa(id_blob);
2210 		data.len = input.length;
2211 	} else {
2212 		data.address = 0;
2213 		data.len = 0;
2214 	}
2215 
2216 	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, &data, &argp->error);
2217 
2218 	/*
2219 	 * Firmware will return the length of the ID value (either the minimum
2220 	 * required length or the actual length written), return it to the user.
2221 	 */
2222 	input.length = data.len;
2223 
2224 	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
2225 		ret = -EFAULT;
2226 		goto e_free;
2227 	}
2228 
2229 	if (id_blob) {
2230 		if (copy_to_user(input_address, id_blob, data.len)) {
2231 			ret = -EFAULT;
2232 			goto e_free;
2233 		}
2234 	}
2235 
2236 e_free:
2237 	kfree(id_blob);
2238 
2239 	return ret;
2240 }
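/*
 * Example (illustrative only): the intended userspace pattern for
 * SEV_GET_ID2 is two SEV_ISSUE_CMD ioctl calls on /dev/sev. The first
 * call, with a zeroed address/length, fails with an INVALID_LEN firmware
 * error but reports the required length; the second call fills a buffer
 * of that size:
 *
 *	struct sev_user_data_get_id2 id = {};
 *	struct sev_issue_cmd cmd = {
 *		.cmd  = SEV_GET_ID2,
 *		.data = (__u64)(uintptr_t)&id,
 *	};
 *
 *	ioctl(fd, SEV_ISSUE_CMD, &cmd);		// id.length is now set
 *	id.address = (__u64)(uintptr_t)malloc(id.length);
 *	ioctl(fd, SEV_ISSUE_CMD, &cmd);		// buffer now holds the ID
 */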
2241 
2242 static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
2243 {
2244 	struct sev_data_get_id *data;
2245 	u64 data_size, user_size;
2246 	void *id_blob, *mem;
2247 	int ret;
2248 
2249 	/* SEV GET_ID available from SEV API v0.16 and up */
2250 	if (!sev_version_greater_or_equal(0, 16))
2251 		return -ENOTSUPP;
2252 
2253 	/* SEV FW expects the buffer it fills with the ID to be
2254 	 * 8-byte aligned. Memory allocated should be enough to
2255 	 * hold data structure + alignment padding + memory
2256 	 * where SEV FW writes the ID.
2257 	 */
2258 	data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
2259 	user_size = sizeof(struct sev_user_data_get_id);
2260 
2261 	mem = kzalloc(data_size + user_size, GFP_KERNEL);
2262 	if (!mem)
2263 		return -ENOMEM;
2264 
2265 	data = mem;
2266 	id_blob = mem + data_size;
2267 
2268 	data->address = __psp_pa(id_blob);
2269 	data->len = user_size;
2270 
2271 	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
2272 	if (!ret) {
2273 		if (copy_to_user((void __user *)argp->data, id_blob, data->len))
2274 			ret = -EFAULT;
2275 	}
2276 
2277 	kfree(mem);
2278 
2279 	return ret;
2280 }
2281 
2282 static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
2283 {
2284 	struct sev_device *sev = psp_master->sev_data;
2285 	struct sev_user_data_pdh_cert_export input;
2286 	void *pdh_blob = NULL, *cert_blob = NULL;
2287 	struct sev_data_pdh_cert_export data;
2288 	void __user *input_cert_chain_address;
2289 	void __user *input_pdh_cert_address;
2290 	bool shutdown_required = false;
2291 	int ret;
2292 
2293 	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
2294 		return -EFAULT;
2295 
2296 	memset(&data, 0, sizeof(data));
2297 
2298 	input_pdh_cert_address = (void __user *)input.pdh_cert_address;
2299 	input_cert_chain_address = (void __user *)input.cert_chain_address;
2300 
2301 	/* Userspace wants to query the certificate length. */
2302 	if (!input.pdh_cert_address ||
2303 	    !input.pdh_cert_len ||
2304 	    !input.cert_chain_address)
2305 		goto cmd;
2306 
2307 	/* Allocate a physically contiguous buffer to store the PDH blob. */
2308 	if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE)
2309 		return -EFAULT;
2310 
2311 	/* Allocate a physically contiguous buffer to store the cert chain blob. */
2312 	if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE)
2313 		return -EFAULT;
2314 
2315 	pdh_blob = kzalloc(input.pdh_cert_len, GFP_KERNEL);
2316 	if (!pdh_blob)
2317 		return -ENOMEM;
2318 
2319 	data.pdh_cert_address = __psp_pa(pdh_blob);
2320 	data.pdh_cert_len = input.pdh_cert_len;
2321 
2322 	cert_blob = kzalloc(input.cert_chain_len, GFP_KERNEL);
2323 	if (!cert_blob) {
2324 		ret = -ENOMEM;
2325 		goto e_free_pdh;
2326 	}
2327 
2328 	data.cert_chain_address = __psp_pa(cert_blob);
2329 	data.cert_chain_len = input.cert_chain_len;
2330 
2331 cmd:
2332 	/* If platform is not in INIT state then transition it to INIT. */
2333 	if (sev->sev_plat_status.state != SEV_STATE_INIT) {
2334 		if (!writable) {
2335 			ret = -EPERM;
2336 			goto e_free_cert;
2337 		}
2338 		ret = sev_move_to_init_state(argp, &shutdown_required);
2339 		if (ret)
2340 			goto e_free_cert;
2341 	}
2342 
2343 	ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error);
2344 
2345 	/* If we query the length, FW responded with expected data. */
2346 	input.cert_chain_len = data.cert_chain_len;
2347 	input.pdh_cert_len = data.pdh_cert_len;
2348 
2349 	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
2350 		ret = -EFAULT;
2351 		goto e_free_cert;
2352 	}
2353 
2354 	if (pdh_blob) {
2355 		if (copy_to_user(input_pdh_cert_address,
2356 				 pdh_blob, input.pdh_cert_len)) {
2357 			ret = -EFAULT;
2358 			goto e_free_cert;
2359 		}
2360 	}
2361 
2362 	if (cert_blob) {
2363 		if (copy_to_user(input_cert_chain_address,
2364 				 cert_blob, input.cert_chain_len))
2365 			ret = -EFAULT;
2366 	}
2367 
2368 e_free_cert:
2369 	if (shutdown_required)
2370 		__sev_firmware_shutdown(sev, false);
2371 
2372 	kfree(cert_blob);
2373 e_free_pdh:
2374 	kfree(pdh_blob);
2375 	return ret;
2376 }
2377 
2378 static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
2379 {
2380 	struct sev_device *sev = psp_master->sev_data;
2381 	bool shutdown_required = false;
2382 	struct sev_data_snp_addr buf;
2383 	struct page *status_page;
2384 	int ret, error;
2385 	void *data;
2386 
2387 	if (!argp->data)
2388 		return -EINVAL;
2389 
2390 	status_page = alloc_page(GFP_KERNEL_ACCOUNT);
2391 	if (!status_page)
2392 		return -ENOMEM;
2393 
2394 	data = page_address(status_page);
2395 
2396 	if (!sev->snp_initialized) {
2397 		ret = snp_move_to_init_state(argp, &shutdown_required);
2398 		if (ret)
2399 			goto cleanup;
2400 	}
2401 
2402 	/*
2403 	 * Firmware expects the status page to be in the firmware-owned state,
2404 	 * otherwise it will report firmware error code INVALID_PAGE_STATE (0x1A).
2405 	 */
2406 	if (rmp_mark_pages_firmware(__pa(data), 1, true)) {
2407 		ret = -EFAULT;
2408 		goto cleanup;
2409 	}
2410 
2411 	buf.address = __psp_pa(data);
2412 	ret = __sev_do_cmd_locked(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &argp->error);
2413 
2414 	/*
2415 	 * The status page is transitioned to the Reclaim state on success,
2416 	 * or left in the Firmware state on failure. Use snp_reclaim_pages()
2417 	 * to transition either case back to the Hypervisor-owned state.
2418 	 */
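	/*
	 * If reclaim fails, the page is still firmware-owned and is
	 * intentionally leaked rather than freed, since handing it back
	 * to the page allocator could trigger RMP faults on later use.
	 */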
2419 	if (snp_reclaim_pages(__pa(data), 1, true))
2420 		return -EFAULT;
2421 
2422 	if (ret)
2423 		goto cleanup;
2424 
2425 	if (copy_to_user((void __user *)argp->data, data,
2426 			 sizeof(struct sev_user_data_snp_status)))
2427 		ret = -EFAULT;
2428 
2429 cleanup:
2430 	if (shutdown_required)
2431 		__sev_snp_shutdown_locked(&error, false);
2432 
2433 	__free_pages(status_page, 0);
2434 	return ret;
2435 }
2436 
2437 static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp)
2438 {
2439 	struct sev_device *sev = psp_master->sev_data;
2440 	struct sev_data_snp_commit buf;
2441 	bool shutdown_required = false;
2442 	int ret, error;
2443 
2444 	if (!sev->snp_initialized) {
2445 		ret = snp_move_to_init_state(argp, &shutdown_required);
2446 		if (ret)
2447 			return ret;
2448 	}
2449 
2450 	buf.len = sizeof(buf);
2451 
2452 	ret = __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
2453 
2454 	if (shutdown_required)
2455 		__sev_snp_shutdown_locked(&error, false);
2456 
2457 	return ret;
2458 }
2459 
2460 static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable)
2461 {
2462 	struct sev_device *sev = psp_master->sev_data;
2463 	struct sev_user_data_snp_config config;
2464 	bool shutdown_required = false;
2465 	int ret, error;
2466 
2467 	if (!argp->data)
2468 		return -EINVAL;
2469 
2470 	if (!writable)
2471 		return -EPERM;
2472 
2473 	if (copy_from_user(&config, (void __user *)argp->data, sizeof(config)))
2474 		return -EFAULT;
2475 
2476 	if (!sev->snp_initialized) {
2477 		ret = snp_move_to_init_state(argp, &shutdown_required);
2478 		if (ret)
2479 			return ret;
2480 	}
2481 
2482 	ret = __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
2483 
2484 	if (shutdown_required)
2485 		__sev_snp_shutdown_locked(&error, false);
2486 
2487 	return ret;
2488 }
2489 
2490 static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable)
2491 {
2492 	struct sev_device *sev = psp_master->sev_data;
2493 	struct sev_user_data_snp_vlek_load input;
2494 	bool shutdown_required = false;
2495 	int ret, error;
2496 	void *blob;
2497 
2498 	if (!argp->data)
2499 		return -EINVAL;
2500 
2501 	if (!writable)
2502 		return -EPERM;
2503 
2504 	if (copy_from_user(&input, u64_to_user_ptr(argp->data), sizeof(input)))
2505 		return -EFAULT;
2506 
2507 	if (input.len != sizeof(input) || input.vlek_wrapped_version != 0)
2508 		return -EINVAL;
2509 
2510 	blob = psp_copy_user_blob(input.vlek_wrapped_address,
2511 				  sizeof(struct sev_user_data_snp_wrapped_vlek_hashstick));
2512 	if (IS_ERR(blob))
2513 		return PTR_ERR(blob);
2514 
2515 	input.vlek_wrapped_address = __psp_pa(blob);
2516 
2517 	if (!sev->snp_initialized) {
2518 		ret = snp_move_to_init_state(argp, &shutdown_required);
2519 		if (ret)
2520 			goto cleanup;
2521 	}
2522 
2523 	ret = __sev_do_cmd_locked(SEV_CMD_SNP_VLEK_LOAD, &input, &argp->error);
2524 
2525 	if (shutdown_required)
2526 		__sev_snp_shutdown_locked(&error, false);
2527 
2528 cleanup:
2529 	kfree(blob);
2530 
2531 	return ret;
2532 }
2533 
2534 static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
2535 {
2536 	void __user *argp = (void __user *)arg;
2537 	struct sev_issue_cmd input;
2538 	int ret = -EFAULT;
2539 	bool writable = file->f_mode & FMODE_WRITE;
2540 
2541 	if (!psp_master || !psp_master->sev_data)
2542 		return -ENODEV;
2543 
2544 	if (ioctl != SEV_ISSUE_CMD)
2545 		return -EINVAL;
2546 
2547 	if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
2548 		return -EFAULT;
2549 
2550 	if (input.cmd > SEV_MAX)
2551 		return -EINVAL;
2552 
2553 	mutex_lock(&sev_cmd_mutex);
2554 
2555 	switch (input.cmd) {
2556 
2557 	case SEV_FACTORY_RESET:
2558 		ret = sev_ioctl_do_reset(&input, writable);
2559 		break;
2560 	case SEV_PLATFORM_STATUS:
2561 		ret = sev_ioctl_do_platform_status(&input);
2562 		break;
2563 	case SEV_PEK_GEN:
2564 		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable);
2565 		break;
2566 	case SEV_PDH_GEN:
2567 		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable);
2568 		break;
2569 	case SEV_PEK_CSR:
2570 		ret = sev_ioctl_do_pek_csr(&input, writable);
2571 		break;
2572 	case SEV_PEK_CERT_IMPORT:
2573 		ret = sev_ioctl_do_pek_import(&input, writable);
2574 		break;
2575 	case SEV_PDH_CERT_EXPORT:
2576 		ret = sev_ioctl_do_pdh_export(&input, writable);
2577 		break;
2578 	case SEV_GET_ID:
2579 		pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
2580 		ret = sev_ioctl_do_get_id(&input);
2581 		break;
2582 	case SEV_GET_ID2:
2583 		ret = sev_ioctl_do_get_id2(&input);
2584 		break;
2585 	case SNP_PLATFORM_STATUS:
2586 		ret = sev_ioctl_do_snp_platform_status(&input);
2587 		break;
2588 	case SNP_COMMIT:
2589 		ret = sev_ioctl_do_snp_commit(&input);
2590 		break;
2591 	case SNP_SET_CONFIG:
2592 		ret = sev_ioctl_do_snp_set_config(&input, writable);
2593 		break;
2594 	case SNP_VLEK_LOAD:
2595 		ret = sev_ioctl_do_snp_vlek_load(&input, writable);
2596 		break;
2597 	default:
2598 		ret = -EINVAL;
2599 		goto out;
2600 	}
2601 
2602 	if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
2603 		ret = -EFAULT;
2604 out:
2605 	mutex_unlock(&sev_cmd_mutex);
2606 
2607 	return ret;
2608 }
2609 
2610 static const struct file_operations sev_fops = {
2611 	.owner	= THIS_MODULE,
2612 	.unlocked_ioctl = sev_ioctl,
2613 };
2614 
2615 int sev_platform_status(struct sev_user_data_status *data, int *error)
2616 {
2617 	return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
2618 }
2619 EXPORT_SYMBOL_GPL(sev_platform_status);
2620 
2621 int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
2622 {
2623 	return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
2624 }
2625 EXPORT_SYMBOL_GPL(sev_guest_deactivate);
2626 
2627 int sev_guest_activate(struct sev_data_activate *data, int *error)
2628 {
2629 	return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
2630 }
2631 EXPORT_SYMBOL_GPL(sev_guest_activate);
2632 
2633 int sev_guest_decommission(struct sev_data_decommission *data, int *error)
2634 {
2635 	return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
2636 }
2637 EXPORT_SYMBOL_GPL(sev_guest_decommission);
2638 
2639 int sev_guest_df_flush(int *error)
2640 {
2641 	return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
2642 }
2643 EXPORT_SYMBOL_GPL(sev_guest_df_flush);
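/*
 * Example (illustrative only): a hypervisor typically tears down an SEV
 * guest by deactivating its ASID, flushing the data fabric, and then
 * decommissioning the guest context, in that order:
 *
 *	struct sev_data_deactivate deactivate = { .handle = handle };
 *	struct sev_data_decommission decommission = { .handle = handle };
 *
 *	sev_guest_deactivate(&deactivate, &error);
 *	sev_guest_df_flush(&error);
 *	sev_guest_decommission(&decommission, &error);
 */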
2644 
2645 static void sev_exit(struct kref *ref)
2646 {
2647 	misc_deregister(&misc_dev->misc);
2648 	kfree(misc_dev);
2649 	misc_dev = NULL;
2650 }
2651 
2652 static int sev_misc_init(struct sev_device *sev)
2653 {
2654 	struct device *dev = sev->dev;
2655 	int ret;
2656 
2657 	/*
2658 	 * SEV feature support can be detected on multiple devices, but SEV
2659 	 * FW commands must be issued on the master. During probe, we do not
2660 	 * yet know the master, so we create /dev/sev on the first device
2661 	 * probe. sev_do_cmd() finds the right master device to which to
2662 	 * issue the firmware command.
2663 	 */
2664 	if (!misc_dev) {
2665 		struct miscdevice *misc;
2666 
2667 		misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
2668 		if (!misc_dev)
2669 			return -ENOMEM;
2670 
2671 		misc = &misc_dev->misc;
2672 		misc->minor = MISC_DYNAMIC_MINOR;
2673 		misc->name = DEVICE_NAME;
2674 		misc->fops = &sev_fops;
2675 
2676 		ret = misc_register(misc);
2677 		if (ret)
2678 			return ret;
2679 
2680 		kref_init(&misc_dev->refcount);
2681 	} else {
2682 		kref_get(&misc_dev->refcount);
2683 	}
2684 
2685 	init_waitqueue_head(&sev->int_queue);
2686 	sev->misc = misc_dev;
2687 	dev_dbg(dev, "registered SEV device\n");
2688 
2689 	return 0;
2690 }
2691 
2692 int sev_dev_init(struct psp_device *psp)
2693 {
2694 	struct device *dev = psp->dev;
2695 	struct sev_device *sev;
2696 	int ret = -ENOMEM;
2697 
2698 	if (!boot_cpu_has(X86_FEATURE_SEV)) {
2699 		dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n");
2700 		return 0;
2701 	}
2702 
2703 	sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
2704 	if (!sev)
2705 		goto e_err;
2706 
2707 	sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 1);
2708 	if (!sev->cmd_buf)
2709 		goto e_sev;
2710 
2711 	sev->cmd_buf_backup = (uint8_t *)sev->cmd_buf + PAGE_SIZE;
2712 
2713 	psp->sev_data = sev;
2714 
2715 	sev->dev = dev;
2716 	sev->psp = psp;
2717 
2718 	sev->io_regs = psp->io_regs;
2719 
2720 	sev->vdata = (struct sev_vdata *)psp->vdata->sev;
2721 	if (!sev->vdata) {
2722 		ret = -ENODEV;
2723 		dev_err(dev, "sev: missing driver data\n");
2724 		goto e_buf;
2725 	}
2726 
2727 	psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
2728 
2729 	ret = sev_misc_init(sev);
2730 	if (ret)
2731 		goto e_irq;
2732 
2733 	dev_notice(dev, "sev enabled\n");
2734 
2735 	return 0;
2736 
2737 e_irq:
2738 	psp_clear_sev_irq_handler(psp);
2739 e_buf:
2740 	devm_free_pages(dev, (unsigned long)sev->cmd_buf);
2741 e_sev:
2742 	devm_kfree(dev, sev);
2743 e_err:
2744 	psp->sev_data = NULL;
2745 
2746 	dev_notice(dev, "sev initialization failed\n");
2747 
2748 	return ret;
2749 }
2750 
2751 static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
2752 {
2753 	int error;
2754 
2755 	__sev_platform_shutdown_locked(&error);
2756 
2757 	if (sev_es_tmr) {
2758 		/*
2759 		 * The TMR area was encrypted, flush it from the cache.
2760 		 *
2761 		 * If invoked during panic handling, local interrupts are
2762 		 * disabled and all CPUs are stopped, so wbinvd_on_all_cpus()
2763 		 * can't be used. In that case, wbinvd() is done on remote CPUs
2764 		 * via the NMI callback, and done for this CPU later during
2765 		 * SNP shutdown, so wbinvd_on_all_cpus() can be skipped.
2766 		 */
2767 		if (!panic)
2768 			wbinvd_on_all_cpus();
2769 
2770 		__snp_free_firmware_pages(virt_to_page(sev_es_tmr),
2771 					  get_order(sev_es_tmr_size),
2772 					  true);
2773 		sev_es_tmr = NULL;
2774 	}
2775 
2776 	if (sev_init_ex_buffer) {
2777 		__snp_free_firmware_pages(virt_to_page(sev_init_ex_buffer),
2778 					  get_order(NV_LENGTH),
2779 					  true);
2780 		sev_init_ex_buffer = NULL;
2781 	}
2782 
2783 	if (snp_range_list) {
2784 		kfree(snp_range_list);
2785 		snp_range_list = NULL;
2786 	}
2787 
2788 	__sev_snp_shutdown_locked(&error, panic);
2789 }
2790 
2791 static void sev_firmware_shutdown(struct sev_device *sev)
2792 {
2793 	/*
2794 	 * Call without sev_cmd_mutex held: the TSM will likely disconnect
2795 	 * IDE, which ends up calling sev_do_cmd(), which takes sev_cmd_mutex.
2796 	 */
2797 	if (sev->tio_status)
2798 		sev_tsm_uninit(sev);
2799 
2800 	mutex_lock(&sev_cmd_mutex);
2801 
2802 	__sev_firmware_shutdown(sev, false);
2803 
2804 	kfree(sev->tio_status);
2805 	sev->tio_status = NULL;
2806 
2807 	mutex_unlock(&sev_cmd_mutex);
2808 }
2809 
2810 void sev_platform_shutdown(void)
2811 {
2812 	if (!psp_master || !psp_master->sev_data)
2813 		return;
2814 
2815 	sev_firmware_shutdown(psp_master->sev_data);
2816 }
2817 EXPORT_SYMBOL_GPL(sev_platform_shutdown);
2818 
2819 u64 sev_get_snp_policy_bits(void)
2820 {
2821 	struct psp_device *psp = psp_master;
2822 	struct sev_device *sev;
2823 	u64 policy_bits;
2824 
2825 	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
2826 		return 0;
2827 
2828 	if (!psp || !psp->sev_data)
2829 		return 0;
2830 
2831 	sev = psp->sev_data;
2832 
2833 	policy_bits = SNP_POLICY_MASK_BASE;
2834 
2835 	if (sev->snp_plat_status.feature_info) {
2836 		if (sev->snp_feat_info_0.ecx & SNP_RAPL_DISABLE_SUPPORTED)
2837 			policy_bits |= SNP_POLICY_MASK_RAPL_DIS;
2838 
2839 		if (sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED)
2840 			policy_bits |= SNP_POLICY_MASK_CIPHERTEXT_HIDING_DRAM;
2841 
2842 		if (sev->snp_feat_info_0.ecx & SNP_AES_256_XTS_POLICY_SUPPORTED)
2843 			policy_bits |= SNP_POLICY_MASK_MEM_AES_256_XTS;
2844 
2845 		if (sev->snp_feat_info_0.ecx & SNP_CXL_ALLOW_POLICY_SUPPORTED)
2846 			policy_bits |= SNP_POLICY_MASK_CXL_ALLOW;
2847 
2848 		if (sev_version_greater_or_equal(1, 58))
2849 			policy_bits |= SNP_POLICY_MASK_PAGE_SWAP_DISABLE;
2850 	}
2851 
2852 	return policy_bits;
2853 }
2854 EXPORT_SYMBOL_GPL(sev_get_snp_policy_bits);
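/*
 * Example (illustrative only): a caller validating a guest's requested
 * SNP policy can mask it against what this platform supports:
 *
 *	u64 allowed = sev_get_snp_policy_bits();
 *
 *	if (requested_policy & ~allowed)
 *		return -EINVAL;
 */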
2855 
2856 void sev_dev_destroy(struct psp_device *psp)
2857 {
2858 	struct sev_device *sev = psp->sev_data;
2859 
2860 	if (!sev)
2861 		return;
2862 
2863 	sev_firmware_shutdown(sev);
2864 
2865 	if (sev->misc)
2866 		kref_put(&misc_dev->refcount, sev_exit);
2867 
2868 	psp_clear_sev_irq_handler(psp);
2869 }
2870 
2871 static int snp_shutdown_on_panic(struct notifier_block *nb,
2872 				 unsigned long reason, void *arg)
2873 {
2874 	struct sev_device *sev = psp_master->sev_data;
2875 
2876 	/*
2877 	 * If sev_cmd_mutex is already acquired, then it's likely
2878 	 * another PSP command is in flight and issuing a shutdown
2879 	 * would fail in unexpected ways. Rather than create even
2880 	 * more confusion during a panic, just bail out here.
2881 	 */
2882 	if (mutex_is_locked(&sev_cmd_mutex))
2883 		return NOTIFY_DONE;
2884 
2885 	__sev_firmware_shutdown(sev, true);
2886 
2887 	return NOTIFY_DONE;
2888 }
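/*
 * Note (hedged): the matching atomic_notifier_chain_register() of
 * snp_panic_notifier is expected to happen when SNP firmware is
 * initialized; see __sev_snp_shutdown_locked() above for why the
 * unregistration is skipped during panic.
 */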
2889 
2890 int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
2891 				void *data, int *error)
2892 {
2893 	if (!filep || filep->f_op != &sev_fops)
2894 		return -EBADF;
2895 
2896 	return sev_do_cmd(cmd, data, error);
2897 }
2898 EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
2899 
2900 void sev_pci_init(void)
2901 {
2902 	struct sev_device *sev = psp_master->sev_data;
2903 	u8 api_major, api_minor, build;
2904 
2905 	if (!sev)
2906 		return;
2907 
2908 	psp_timeout = psp_probe_timeout;
2909 
2910 	if (sev_get_api_version())
2911 		goto err;
2912 
2913 	api_major = sev->api_major;
2914 	api_minor = sev->api_minor;
2915 	build     = sev->build;
2916 
2917 	if (sev_update_firmware(sev->dev) == 0)
2918 		sev_get_api_version();
2919 
2920 	if (api_major != sev->api_major || api_minor != sev->api_minor ||
2921 	    build != sev->build)
2922 		dev_info(sev->dev, "SEV firmware updated from %d.%d.%d to %d.%d.%d\n",
2923 			 api_major, api_minor, build,
2924 			 sev->api_major, sev->api_minor, sev->build);
2925 
2926 	return;
2927 
2928 err:
2929 	sev_dev_destroy(psp_master);
2930 
2931 	psp_master->sev_data = NULL;
2932 }
2933 
2934 void sev_pci_exit(void)
2935 {
2936 	struct sev_device *sev = psp_master->sev_data;
2937 
2938 	if (!sev)
2939 		return;
2940 
2941 	sev_firmware_shutdown(sev);
2942 }
2943