arch/arm64/kvm/hyp/nvhe/psci-relay.c (revision 6607aa6f6b68fc9b5955755f1b1be125cf2a9d03)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google LLC
 * Author: David Brazdil <dbrazdil@google.com>
 */

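/*
 * PSCI relay for the nVHE hypervisor: EL2 intercepts the host's PSCI calls
 * so that CPU boot and resume always enter the hypervisor first, before
 * control returns to the host kernel at the address it asked for.
 */
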
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <uapi/linux/psci.h>

#include <nvhe/trap_handler.h>

void kvm_hyp_cpu_entry(unsigned long r0);
void kvm_hyp_cpu_resume(unsigned long r0);

void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);

/* Config options set by the host. */
struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
s64 __ro_after_init hyp_physvirt_offset;

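/*
 * PSCI expects physical entry point addresses (secondaries start with the
 * MMU off), so hyp VAs are converted using the PA-VA offset the host set
 * up at init time.
 */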
#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)

#define INVALID_CPU_ID	UINT_MAX

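/*
 * Per-CPU mailbox for passing the host's requested entry point (pc) and
 * context argument (r0) to a CPU coming online or out of a deep sleep.
 * The lock serializes racing CPU_ON calls targeting the same CPU.
 */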
struct psci_boot_args {
	atomic_t lock;
	unsigned long pc;
	unsigned long r0;
};

#define PSCI_BOOT_ARGS_UNLOCKED		0
#define PSCI_BOOT_ARGS_LOCKED		1

#define PSCI_BOOT_ARGS_INIT					\
	((struct psci_boot_args){				\
		.lock = ATOMIC_INIT(PSCI_BOOT_ARGS_UNLOCKED),	\
	})

static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;

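/*
 * PSCI 0.1 does not define fixed function IDs; the host discovers them from
 * firmware (e.g. devicetree) and hands them down, so each function must be
 * matched explicitly, and only if it was actually implemented.
 */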
#define	is_psci_0_1(what, func_id)					\
	(kvm_host_psci_config.psci_0_1_ ## what ## _implemented &&	\
	 (func_id) == kvm_host_psci_config.function_ids_0_1.what)

static bool is_psci_0_1_call(u64 func_id)
{
	return (is_psci_0_1(cpu_suspend, func_id) ||
		is_psci_0_1(cpu_on, func_id) ||
		is_psci_0_1(cpu_off, func_id) ||
		is_psci_0_1(migrate, func_id));
}

static bool is_psci_0_2_call(u64 func_id)
{
	/*
	 * SMCCC reserves IDs 0x00-1F with the given 32/64-bit bases for PSCI,
	 * i.e. 0x8400_0000-0x8400_001f and 0xc400_0000-0xc400_001f.
	 */
	return (PSCI_0_2_FN(0) <= func_id && func_id <= PSCI_0_2_FN(31)) ||
	       (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
}

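/* Issue the PSCI call via SMC as per SMCCC; PSCI returns its result in x0. */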
static unsigned long psci_call(unsigned long fn, unsigned long arg0,
			       unsigned long arg1, unsigned long arg2)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(fn, arg0, arg1, arg2, &res);
	return res.a0;
}

static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
{
	return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),
			 cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
}

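/* Forward calls that are not expected to return (SYSTEM_OFF/SYSTEM_RESET). */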
static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
{
	psci_forward(host_ctxt);
	hyp_panic(); /* unreachable */
}

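/*
 * Map an MPIDR onto the logical CPU ID assigned by the host at boot, using
 * hyp's own copy of the logical map. A linear scan is acceptable on this
 * slow path.
 */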
static unsigned int find_cpu_id(u64 mpidr)
{
	unsigned int i;

	/* Reject invalid MPIDRs */
	if (mpidr & ~MPIDR_HWID_BITMASK)
		return INVALID_CPU_ID;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_logical_map(i) == mpidr)
			return i;
	}

	return INVALID_CPU_ID;
}

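/*
 * The boot args lock is a bare atomic flag rather than a spinlock: a loser
 * of a CPU_ON race must back off with PSCI_RET_ALREADY_ON instead of
 * spinning. Acquire/release semantics keep the pc/r0 accesses inside the
 * critical section.
 */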
static __always_inline bool try_acquire_boot_args(struct psci_boot_args *args)
{
	return atomic_cmpxchg_acquire(&args->lock,
				      PSCI_BOOT_ARGS_UNLOCKED,
				      PSCI_BOOT_ARGS_LOCKED) ==
		PSCI_BOOT_ARGS_UNLOCKED;
}

static __always_inline void release_boot_args(struct psci_boot_args *args)
{
	atomic_set_release(&args->lock, PSCI_BOOT_ARGS_UNLOCKED);
}

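/*
 * CPU_ON relay: boot the target CPU into hyp's own entry point with its
 * nVHE init params, stashing the host's requested (pc, r0) in the per-CPU
 * mailbox so that kvm_host_psci_cpu_entry() can hand control back to it.
 */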
static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, mpidr, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);

	unsigned int cpu_id;
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;
	int ret;

	/*
	 * Find the logical CPU ID for the given MPIDR. The search set is
	 * the set of CPUs that were online at the point of KVM initialization.
	 * Booting other CPUs is rejected because their cpufeatures were not
	 * checked against the finalized capabilities. This could be relaxed
	 * by doing the feature checks in hyp.
	 */
	cpu_id = find_cpu_id(mpidr);
	if (cpu_id == INVALID_CPU_ID)
		return PSCI_RET_INVALID_PARAMS;

	boot_args = per_cpu_ptr(hyp_symbol_addr(cpu_on_args), cpu_id);
	init_params = per_cpu_ptr(hyp_symbol_addr(kvm_init_params), cpu_id);

	/* Check if the target CPU is already being booted. */
	if (!try_acquire_boot_args(boot_args))
		return PSCI_RET_ALREADY_ON;

	boot_args->pc = pc;
	boot_args->r0 = r0;
	/* Order the mailbox writes before the call that boots the target. */
	wmb();

	ret = psci_call(func_id, mpidr,
			__hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_entry)),
			__hyp_pa(init_params));

	/* If successful, the lock will be released by the target CPU. */
	if (ret != PSCI_RET_SUCCESS)
		release_boot_args(boot_args);

	return ret;
}

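/*
 * CPU_SUSPEND relay: substitute hyp's resume entry point for the host's so
 * that a wake-up from a power-down state re-enters EL2 first.
 */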
static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, power_state, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
	init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	/*
	 * Will either return directly (shallow sleep state) or wake up at the
	 * entry point (deep sleep state).
	 */
	return psci_call(func_id, power_state,
			 __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
			 __hyp_pa(init_params));
}

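/* SYSTEM_SUSPEND relay: as for CPU_SUSPEND, but for the whole system. */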
static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, pc, host_ctxt, 1);
	DECLARE_REG(unsigned long, r0, host_ctxt, 2);

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
	init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	/* Will only return on error. */
	return psci_call(func_id,
			 __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
			 __hyp_pa(init_params), 0);
}

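/*
 * Entry point for a CPU handed back to the host: called once EL2 state has
 * been restored by kvm_hyp_cpu_entry()/kvm_hyp_cpu_resume(). Loads the
 * stashed (pc, r0) and returns to the host via __host_enter().
 */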
asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
{
	struct psci_boot_args *boot_args;
	struct kvm_cpu_context *host_ctxt;

	host_ctxt = &this_cpu_ptr(hyp_symbol_addr(kvm_host_data))->host_ctxt;

	if (is_cpu_on)
		boot_args = this_cpu_ptr(hyp_symbol_addr(cpu_on_args));
	else
		boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));

	cpu_reg(host_ctxt, 0) = boot_args->r0;
	write_sysreg_el2(boot_args->pc, SYS_ELR);

	/* The CPU_ON caller locked the args; release them now they are read. */
	if (is_cpu_on)
		release_boot_args(boot_args);

	__host_enter(host_ctxt);
}

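/*
 * PSCI 0.1: only the four functions whose IDs the host discovered can get
 * this far. CPU_ON and CPU_SUSPEND need the relay treatment; CPU_OFF and
 * MIGRATE are forwarded unchanged.
 */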
static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
		return psci_forward(host_ctxt);
	if (is_psci_0_1(cpu_on, func_id))
		return psci_cpu_on(func_id, host_ctxt);
	if (is_psci_0_1(cpu_suspend, func_id))
		return psci_cpu_suspend(func_id, host_ctxt);

	return PSCI_RET_NOT_SUPPORTED;
}

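/*
 * PSCI 0.2 fixes the function IDs in the spec. Only the 64-bit
 * CPU_SUSPEND/CPU_ON variants are relayed; the 32-bit ones fall through
 * to NOT_SUPPORTED below.
 */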
static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_0_2_FN_PSCI_VERSION:
	case PSCI_0_2_FN_CPU_OFF:
	case PSCI_0_2_FN64_AFFINITY_INFO:
	case PSCI_0_2_FN64_MIGRATE:
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
		return psci_forward(host_ctxt);
	case PSCI_0_2_FN_SYSTEM_OFF:
	case PSCI_0_2_FN_SYSTEM_RESET:
		psci_forward_noreturn(host_ctxt);
		unreachable();
	case PSCI_0_2_FN64_CPU_SUSPEND:
		return psci_cpu_suspend(func_id, host_ctxt);
	case PSCI_0_2_FN64_CPU_ON:
		return psci_cpu_on(func_id, host_ctxt);
	default:
		return PSCI_RET_NOT_SUPPORTED;
	}
}

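/* PSCI 1.x adds to the 0.2 set; unknown IDs fall back to the 0.2 handler. */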
static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_1_0_FN_PSCI_FEATURES:
	case PSCI_1_0_FN_SET_SUSPEND_MODE:
	case PSCI_1_1_FN64_SYSTEM_RESET2:
		return psci_forward(host_ctxt);
	case PSCI_1_0_FN64_SYSTEM_SUSPEND:
		return psci_system_suspend(func_id, host_ctxt);
	default:
		return psci_0_2_handler(func_id, host_ctxt);
	}
}

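/*
 * Host PSCI dispatcher. Returns true if this was a PSCI call handled here,
 * with the result placed in the host's x0, or false to let the caller treat
 * the SMC as a non-PSCI call.
 */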
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, func_id, host_ctxt, 0);
	unsigned long ret;

	switch (kvm_host_psci_config.version) {
	case PSCI_VERSION(0, 1):
		if (!is_psci_0_1_call(func_id))
			return false;
		ret = psci_0_1_handler(func_id, host_ctxt);
		break;
	case PSCI_VERSION(0, 2):
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_0_2_handler(func_id, host_ctxt);
		break;
	default:
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_1_0_handler(func_id, host_ctxt);
		break;
	}

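	/* PSCI only returns a value in x0; scrub the remaining result regs. */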
	cpu_reg(host_ctxt, 0) = ret;
	cpu_reg(host_ctxt, 1) = 0;
	cpu_reg(host_ctxt, 2) = 0;
	cpu_reg(host_ctxt, 3) = 0;
	return true;
}