// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory Encryption Support Common Code
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/cc_platform.h>
#include <linux/mem_encrypt.h>
#include <linux/memblock.h>	/* memblock_phys_mem_size() */
#include <linux/virtio_anchor.h>

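/*
 * For context: the DMA-direct core consults force_dma_unencrypted()
 * below when translating buffer addresses for a device. A simplified
 * sketch of that consumer (not the exact upstream code):
 *
 *	if (force_dma_unencrypted(dev))
 *		return phys_to_dma_unencrypted(dev, phys);
 *	return phys_to_dma(dev, phys);
 */
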
/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
	/*
	 * For SEV and TDX guests, all DMA must be to unencrypted
	 * addresses.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return true;

	/*
	 * For SME, all DMA must be to unencrypted addresses if the
	 * device does not support DMA to addresses that include the
	 * encryption mask.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
						dev->bus_dma_limit);

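		/*
		 * Illustrative example (assumed values, not taken from this
		 * file): with the C-bit at position 47, __ffs64() returns 47
		 * and dma_enc_mask = DMA_BIT_MASK(47) = 0x7fffffffffff. A
		 * device limited to 32-bit DMA (dma_dev_mask = 0xffffffff)
		 * cannot address the encryption bit, so the check below
		 * forces its DMA through unencrypted bounce buffers.
		 */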
		if (dma_dev_mask <= dma_enc_mask)
			return true;
	}

	return false;
}

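/*
 * Example boot log output (illustrative; the exact set of features
 * printed depends on the platform). An SEV-SNP guest would log:
 *
 *   Memory Encryption Features active: AMD SEV SEV-ES SEV-SNP
 */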
static void print_mem_encrypt_feature_info(void)
{
	pr_info("Memory Encryption Features active:");

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
		pr_cont(" Intel TDX\n");
		return;
	}

	pr_cont(" AMD");

	/* Secure Memory Encryption */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		/*
		 * SME is mutually exclusive with any of the SEV
		 * features below.
		 */
		pr_cont(" SME\n");
		return;
	}

	/* Secure Encrypted Virtualization */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		pr_cont(" SEV");

	/* Encrypted Register State */
	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		pr_cont(" SEV-ES");

	/* Secure Nested Paging */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		pr_cont(" SEV-SNP");

	pr_cont("\n");
}

/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	print_mem_encrypt_feature_info();
}

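/*
 * Called from the x86 setup_arch() path. The swiotlb_adjust_size()
 * call below only takes effect if it runs before the SWIOTLB buffer
 * is allocated, which is why this hook is invoked this early.
 */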
void __init mem_encrypt_setup_arch(void)
{
	phys_addr_t total_mem = memblock_phys_mem_size();
	unsigned long size;

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	/*
	 * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
	 * The kernel uses SWIOTLB to make this happen without changing device
	 * drivers. However, depending on the workload being run, the
	 * default 64MB of SWIOTLB might not be enough and SWIOTLB may
	 * run out of buffers for DMA, resulting in I/O errors and/or
	 * performance degradation, especially with high I/O workloads.
	 *
	 * Adjust the default size of SWIOTLB using a percentage of guest
	 * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
	 * memory is allocated from low memory, ensure that the adjusted size
	 * is within the limits of low available memory.
	 *
	 * The percentage of guest memory used here for SWIOTLB buffers
	 * approximates the earlier static adjustment: 64MB for guests
	 * with less than 1GB of memory, and roughly 128MB to 256MB for
	 * 1GB-to-4GB guests, i.e., about 6% of guest memory.
	 */
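	/*
	 * Worked example: a guest with 4GB of memory gives
	 * size = 4096MB * 6 / 100 = ~245MB, which clamp_val() then bounds
	 * to the range [IO_TLB_DEFAULT_SIZE (64MB), SZ_1G].
	 */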
	size = total_mem * 6 / 100;
	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
	swiotlb_adjust_size(size);

	/* Set restricted memory access for virtio. */
	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
}