/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>

/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 *
 * sme_me_mask holds the encryption bit to OR into page-table entries; a
 * value of zero means SME is not active.
 */
unsigned long sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL_GPL(sme_me_mask);

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * Transform the contents of the physical range [paddr, paddr + size) so
 * that they read back correctly under the desired mapping: encrypt them
 * when @enc is true, decrypt them otherwise.
 *
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	/* Nothing to do if SME is not active (mask is zero) */
	if (!sme_me_mask)
		return;

	/*
	 * Flush the TLB and write back/invalidate caches before operating
	 * on the raw memory contents, per the APM encrypt-in-place steps.
	 */
	local_flush_tlb();
	wbinvd();

	/*
	 * There are limited number of early mapping slots, so map (at most)
	 * one page at time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		memcpy(sme_early_buffer, src, len);
		memcpy(dst, sme_early_buffer, len);

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}

/* Encrypt-in-place the physical range [paddr, paddr + size) */
void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

/* Decrypt-in-place the physical range [paddr, paddr + size) */
void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}

/*
 * Map (@map == true) or unmap (@map == false) the range starting at
 * direct-map address @vaddr, @size bytes long, via the early page-table
 * helper. Mappings are created without the encryption bit, PMD_SIZE at a
 * time; unmapping installs a zero PMD entry.
 */
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	/* vaddr is assumed to be a direct-map address — derive its physical */
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		/* Clamp at zero so the loop terminates on the last chunk */
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	/* Flush stale translations for the entries just rewritten */
	__native_flush_tlb();
}

/*
 * Tear down the early (unencrypted) mappings of the boot_params structure
 * and of the kernel command line it points to. No-op unless SME is active.
 */
void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

/*
 * Create early unencrypted mappings of the boot_params structure and of
 * the kernel command line it points to, so the (unencrypted) data placed
 * there by the boot loader can be read. No-op unless SME is active.
 */
void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

/*
 * Propagate the SME encryption mask into the page-table flag templates
 * used for all subsequent mappings. No-op unless SME is active.
 */
void __init sme_early_init(void)
{
	unsigned int i;

	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);
}

/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
	if (!sme_me_mask)
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();
}

/*
 * Mark the given page-aligned buffer as decrypted so it can be used for
 * (unencrypted) DMA by the SWIOTLB bounce-buffer code.
 */
void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
{
	WARN(PAGE_ALIGN(size) != size,
	     "size is not page-aligned (%#lx)\n", size);

	/* Make the SWIOTLB buffer area decrypted */
	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
}

/*
 * Stub: in-place encryption of the kernel image is not implemented in
 * this version of the file.
 */
void __init sme_encrypt_kernel(void)
{
}

/*
 * Stub: SME detection/enablement is not implemented in this version of
 * the file; sme_me_mask remains zero unless set elsewhere.
 */
void __init sme_enable(void)
{
}