// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory Encryption Support Common Code
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/cc_platform.h>
#include <linux/mem_encrypt.h>
#include <linux/virtio_anchor.h>

#include <asm/sev.h>

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
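/*
 * Called from the dma-direct path (e.g. dma_direct_alloc() in
 * kernel/dma/direct.c) to decide whether memory used for DMA by a
 * device must be mapped and accessed unencrypted.
 */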
bool force_dma_unencrypted(struct device *dev)
{
	/*
	 * For SEV, all DMA must be to unencrypted addresses.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return true;

	/*
	 * For SME, all DMA must be to unencrypted addresses if the
	 * device does not support DMA to addresses that include the
	 * encryption mask.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
						dev->bus_dma_limit);
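
		/*
		 * Illustrative example (the C-bit position is
		 * CPU-specific; bit 47 is assumed here): sme_me_mask is
		 * then 1ULL << 47 and dma_enc_mask is DMA_BIT_MASK(47),
		 * so a device limited to 32-bit DMA cannot address the
		 * encryption bit and is forced to unencrypted DMA,
		 * while a 64-bit-capable device passes the check.
		 */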
		if (dma_dev_mask <= dma_enc_mask)
			return true;
	}

	return false;
}

static void print_mem_encrypt_feature_info(void)
{
	pr_info("Memory Encryption Features active: ");

	switch (cc_vendor) {
	case CC_VENDOR_INTEL:
		pr_cont("Intel TDX\n");
		break;
	case CC_VENDOR_AMD:
		pr_cont("AMD");

		/* Secure Memory Encryption */
		if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
			/*
			 * SME is mutually exclusive with any of the SEV
			 * features below.
			 */
			pr_cont(" SME\n");
			return;
		}

		/* Secure Encrypted Virtualization */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			pr_cont(" SEV");

		/* Encrypted Register State */
		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
			pr_cont(" SEV-ES");

		/* Secure Nested Paging */
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
			pr_cont(" SEV-SNP");

		pr_cont("\n");

		sev_show_status();

		break;
	default:
		pr_cont("Unknown\n");
	}
}

/* Architecture __weak replacement functions */
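/*
 * mem_encrypt_setup_arch() runs early, from setup_arch(), before the
 * SWIOTLB buffer is allocated; mem_encrypt_init() runs later in boot,
 * once the SWIOTLB buffer exists and its attributes can be updated.
 */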
void __init mem_encrypt_init(void)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	snp_secure_tsc_prepare();

	print_mem_encrypt_feature_info();
}

void __init mem_encrypt_setup_arch(void)
{
	phys_addr_t total_mem = memblock_phys_mem_size();
	unsigned long size;

	/*
	 * Do RMP table fixups after the e820 tables have been set up by
	 * e820__memory_setup().
	 */
	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		snp_fixup_e820_tables();

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	/*
	 * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
	 * The kernel uses SWIOTLB to make this happen without changing device
	 * drivers. However, depending on the workload being run, the
	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
	 * run out of buffers for DMA, resulting in I/O errors and/or
	 * performance degradation especially with high I/O workloads.
	 *
	 * Adjust the default size of SWIOTLB using a percentage of guest
	 * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
	 * memory is allocated from low memory, ensure that the adjusted size
	 * is within the limits of low available memory.
	 *
	 * The percentage of guest memory used here for SWIOTLB buffers
	 * approximates the earlier static adjustment, which used 64MB for
	 * guests below 1G and ~128MB to 256MB for guests between 1G and
	 * 4G, i.e., roughly 6% of guest memory.
	 */
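	/*
	 * Illustrative arithmetic (hypothetical guest sizes): a 4GB
	 * guest gets 4GB * 6 / 100 ~= 245MB of bounce buffers; a 512MB
	 * guest computes ~31MB but is raised to IO_TLB_DEFAULT_SIZE
	 * (64MB); guests above roughly 16.7GB are capped at SZ_1G.
	 */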
	size = total_mem * 6 / 100;
	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
	swiotlb_adjust_size(size);

	/* Set restricted memory access for virtio. */
	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
}