xref: /linux/arch/arm64/kernel/rsi.c (revision 165160265e4be3a6639dd4ea5ca0953a858e2156)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2023 ARM Ltd.
4  */
5 
6 #include <linux/jump_label.h>
7 #include <linux/memblock.h>
8 #include <linux/psci.h>
9 #include <linux/swiotlb.h>
10 #include <linux/cc_platform.h>
11 #include <linux/platform_device.h>
12 
13 #include <asm/io.h>
14 #include <asm/mem_encrypt.h>
15 #include <asm/pgtable.h>
16 #include <asm/rsi.h>
17 
/* Realm configuration fetched from the RMM at init; ipa_bits is read below. */
static struct realm_config config;

/*
 * PTE attribute derived from the top IPA bit (BIT(config.ipa_bits - 1));
 * used to mark a mapping as shared with the non-secure host.
 */
unsigned long prot_ns_shared;
EXPORT_SYMBOL(prot_ns_shared);

/* Flipped on at the very end of arm64_rsi_init(), once RSI setup succeeded. */
DEFINE_STATIC_KEY_FALSE_RO(rsi_present);
EXPORT_SYMBOL(rsi_present);
25 
cc_platform_has(enum cc_attr attr)26 bool cc_platform_has(enum cc_attr attr)
27 {
28 	switch (attr) {
29 	case CC_ATTR_MEM_ENCRYPT:
30 		return is_realm_world();
31 	default:
32 		return false;
33 	}
34 }
35 EXPORT_SYMBOL_GPL(cc_platform_has);
36 
rsi_version_matches(void)37 static bool rsi_version_matches(void)
38 {
39 	unsigned long ver_lower, ver_higher;
40 	unsigned long ret = rsi_request_version(RSI_ABI_VERSION,
41 						&ver_lower,
42 						&ver_higher);
43 
44 	if (ret == SMCCC_RET_NOT_SUPPORTED)
45 		return false;
46 
47 	if (ret != RSI_SUCCESS) {
48 		pr_err("RME: RMM doesn't support RSI version %lu.%lu. Supported range: %lu.%lu-%lu.%lu\n",
49 		       RSI_ABI_VERSION_MAJOR, RSI_ABI_VERSION_MINOR,
50 		       RSI_ABI_VERSION_GET_MAJOR(ver_lower),
51 		       RSI_ABI_VERSION_GET_MINOR(ver_lower),
52 		       RSI_ABI_VERSION_GET_MAJOR(ver_higher),
53 		       RSI_ABI_VERSION_GET_MINOR(ver_higher));
54 		return false;
55 	}
56 
57 	pr_info("RME: Using RSI version %lu.%lu\n",
58 		RSI_ABI_VERSION_GET_MAJOR(ver_lower),
59 		RSI_ABI_VERSION_GET_MINOR(ver_lower));
60 
61 	return true;
62 }
63 
arm64_rsi_setup_memory(void)64 static void __init arm64_rsi_setup_memory(void)
65 {
66 	u64 i;
67 	phys_addr_t start, end;
68 
69 	/*
70 	 * Iterate over the available memory ranges and convert the state to
71 	 * protected memory. We should take extra care to ensure that we DO NOT
72 	 * permit any "DESTROYED" pages to be converted to "RAM".
73 	 *
74 	 * panic() is used because if the attempt to switch the memory to
75 	 * protected has failed here, then future accesses to the memory are
76 	 * simply going to be reflected as a SEA (Synchronous External Abort)
77 	 * which we can't handle.  Bailing out early prevents the guest limping
78 	 * on and dying later.
79 	 */
80 	for_each_mem_range(i, &start, &end) {
81 		if (rsi_set_memory_range_protected_safe(start, end)) {
82 			panic("Failed to set memory range to protected: %pa-%pa",
83 			      &start, &end);
84 		}
85 	}
86 }
87 
88 /*
89  * Check if a given PA range is Trusted (e.g., Protected memory, a Trusted Device
90  * mapping, or an MMIO emulated in the Realm world).
91  *
92  * We can rely on the RIPAS value of the region to detect if a given region is
93  * protected.
94  *
 *  RIPAS_DEV - A trusted device memory or a trusted emulated MMIO (in the Realm
 *		world)
97  *  RIPAS_RAM - Memory (RAM), protected by the RMM guarantees. (e.g., Firmware
98  *		reserved regions for data sharing).
99  *
100  *  RIPAS_DESTROYED is a special case of one of the above, where the host did
101  *  something without our permission and as such we can't do anything about it.
102  *
103  * The only case where something is emulated by the untrusted hypervisor or is
104  * backed by shared memory is indicated by RSI_RIPAS_EMPTY.
105  */
arm64_rsi_is_protected(phys_addr_t base,size_t size)106 bool arm64_rsi_is_protected(phys_addr_t base, size_t size)
107 {
108 	enum ripas ripas;
109 	phys_addr_t end, top;
110 
111 	/* Overflow ? */
112 	if (WARN_ON(base + size <= base))
113 		return false;
114 
115 	end = ALIGN(base + size, RSI_GRANULE_SIZE);
116 	base = ALIGN_DOWN(base, RSI_GRANULE_SIZE);
117 
118 	while (base < end) {
119 		if (WARN_ON(rsi_ipa_state_get(base, end, &ripas, &top)))
120 			break;
121 		if (WARN_ON(top <= base))
122 			break;
123 		if (ripas == RSI_RIPAS_EMPTY)
124 			break;
125 		base = top;
126 	}
127 
128 	return base >= end;
129 }
130 EXPORT_SYMBOL(arm64_rsi_is_protected);
131 
/*
 * ioremap() hook: map trusted (protected) ranges encrypted, and anything
 * shared with the host decrypted.
 */
static int realm_ioremap_hook(phys_addr_t phys, size_t size, pgprot_t *prot)
{
	bool encrypted = arm64_rsi_is_protected(phys, size);

	*prot = encrypted ? pgprot_encrypted(*prot) : pgprot_decrypted(*prot);

	return 0;
}
141 
void __init arm64_rsi_init(void)
{
	/* RSI calls are issued via SMC; bail out on any other conduit. */
	if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_SMC)
		return;
	/* No RMM, or no mutually supported RSI ABI version. */
	if (!rsi_version_matches())
		return;
	if (WARN_ON(rsi_get_realm_config(&config)))
		return;
	/* Top IPA bit distinguishes shared-with-host (NS) mappings. */
	prot_ns_shared = __phys_to_pte_val(BIT(config.ipa_bits - 1));

	/*
	 * The ioremap hook and mem-encrypt ops must be in place before any
	 * memory is converted, and before rsi_present makes is_realm_world()
	 * observable to other code.
	 */
	if (arm64_ioremap_prot_hook_register(realm_ioremap_hook))
		return;

	if (realm_register_memory_enc_ops())
		return;

	arm64_rsi_setup_memory();

	/* Only now advertise RSI: everything above succeeded. */
	static_branch_enable(&rsi_present);
}
162 
/*
 * Dummy platform device, registered below only when running in a realm,
 * giving RSI-dependent drivers a device to bind against.
 */
static struct platform_device rsi_dev = {
	.name = RSI_PDEV_NAME,
	.id = PLATFORM_DEVID_NONE
};
167 
arm64_create_dummy_rsi_dev(void)168 static int __init arm64_create_dummy_rsi_dev(void)
169 {
170 	if (is_realm_world() &&
171 	    platform_device_register(&rsi_dev))
172 		pr_err("failed to register rsi platform device\n");
173 	return 0;
174 }
175 
176 arch_initcall(arm64_create_dummy_rsi_dev)
177