1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
4  * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
5  * Framework for Arm A-profile", which is specified by Arm in document
6  * number DEN0077.
7  *
8  * Copyright (C) 2022 - Google LLC
9  * Author: Andrew Walbran <qwandor@google.com>
10  *
11  * This driver hooks into the SMC trapping logic for the host and intercepts
12  * all calls falling within the FF-A range. Each call is either:
13  *
14  *	- Forwarded on unmodified to the SPMD at EL3
15  *	- Rejected as "unsupported"
16  *	- Accompanied by a host stage-2 page-table check/update and reissued
17  *
18  * Consequently, any attempts by the host to make guest memory pages
19  * accessible to the secure world using FF-A will be detected either here
20  * (in the case that the memory is already owned by the guest) or during
21  * donation to the guest (in the case that the memory was previously shared
22  * with the secure world).
23  *
24  * To allow the rolling-back of page-table updates and FF-A calls in the
25  * event of failure, operations involving the RXTX buffers are locked for
26  * the duration and are therefore serialised.
27  */
28 
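/*
 * High-level flow (summary of the code below): the host's FF-A SMCs are
 * trapped at EL2 and routed to kvm_host_ffa_handler(), which either handles
 * the call here (RXTX map/unmap, memory share/lend/reclaim, version and
 * feature queries) or lets it pass through to EL3. Results are written back
 * into the host's GPRs by ffa_set_retval().
 */
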
29 #include <linux/arm-smccc.h>
30 #include <linux/arm_ffa.h>
31 #include <asm/kvm_pkvm.h>
32 
33 #include <nvhe/ffa.h>
34 #include <nvhe/mem_protect.h>
35 #include <nvhe/memory.h>
36 #include <nvhe/trap_handler.h>
37 #include <nvhe/spinlock.h>
38 
39 /*
40  * "ID value 0 must be returned at the Non-secure physical FF-A instance"
41  * We share this ID with the host.
42  */
43 #define HOST_FFA_ID	0
44 
45 /*
46  * A buffer to hold the maximum descriptor size we can see from the host,
47  * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
48  * when resolving the handle on the reclaim path.
49  */
50 struct kvm_ffa_descriptor_buffer {
51 	void	*buf;
52 	size_t	len;
53 };
54 
55 static struct kvm_ffa_descriptor_buffer ffa_desc_buf;
56 
57 struct kvm_ffa_buffers {
58 	hyp_spinlock_t lock;
59 	void *tx;
60 	void *rx;
61 };
62 
63 /*
64  * Note that we don't currently lock these buffers explicitly, instead
65  * relying on the locking of the host FFA buffers as we only have one
66  * client.
67  */
68 static struct kvm_ffa_buffers hyp_buffers;
69 static struct kvm_ffa_buffers host_buffers;
70 static u32 hyp_ffa_version;
71 static bool has_version_negotiated;
72 static hyp_spinlock_t version_lock;
73 
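/*
 * Helpers to convert an FF-A status code into the SMCCC register set
 * returned to the caller: errors land in x0/x2 as FFA_ERROR/errno,
 * successes as FFA_SUCCESS with an optional property value in x2.
 */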
74 static void ffa_to_smccc_error(struct arm_smccc_1_2_regs *res, u64 ffa_errno)
75 {
76 	*res = (struct arm_smccc_1_2_regs) {
77 		.a0	= FFA_ERROR,
78 		.a2	= ffa_errno,
79 	};
80 }
81 
82 static void ffa_to_smccc_res_prop(struct arm_smccc_1_2_regs *res, int ret, u64 prop)
83 {
84 	if (ret == FFA_RET_SUCCESS) {
85 		*res = (struct arm_smccc_1_2_regs) { .a0 = FFA_SUCCESS,
86 						      .a2 = prop };
87 	} else {
88 		ffa_to_smccc_error(res, ret);
89 	}
90 }
91 
92 static void ffa_to_smccc_res(struct arm_smccc_1_2_regs *res, int ret)
93 {
94 	ffa_to_smccc_res_prop(res, ret, 0);
95 }
96 
97 static void ffa_set_retval(struct kvm_cpu_context *ctxt,
98 			   struct arm_smccc_1_2_regs *res)
99 {
100 	cpu_reg(ctxt, 0) = res->a0;
101 	cpu_reg(ctxt, 1) = res->a1;
102 	cpu_reg(ctxt, 2) = res->a2;
103 	cpu_reg(ctxt, 3) = res->a3;
104 	cpu_reg(ctxt, 4) = res->a4;
105 	cpu_reg(ctxt, 5) = res->a5;
106 	cpu_reg(ctxt, 6) = res->a6;
107 	cpu_reg(ctxt, 7) = res->a7;
108 
109 	/*
110 	 * DEN0028C 2.6: SMC32/HVC32 call from aarch64 must preserve x8-x30.
111 	 *
112 	 * In FF-A 1.2, we cannot rely on the function ID sent by the caller to
113 	 * detect 32-bit calls because the CPU cycle management interfaces (e.g.
114 	 * FFA_MSG_WAIT, FFA_RUN) are 32-bit only but can have 64-bit responses.
115 	 *
116 	 * FF-A 1.3 introduces 64-bit variants of the CPU cycle management
117 	 * interfaces. Moreover, FF-A 1.3 clarifies that SMC32 direct requests
118 	 * complete with SMC32 direct responses which *should* allow us to use the
119 	 * function ID sent by the caller to determine whether to return x8-x17.
120 	 *
121 	 * Note that we also cannot rely on function IDs in the response.
122 	 *
123 	 * Given the above, assume SMC64 and send back x0-x17 unconditionally,
124 	 * as the passthrough code (__kvm_hyp_host_forward_smc) does.
125 	 */
126 	cpu_reg(ctxt, 8) = res->a8;
127 	cpu_reg(ctxt, 9) = res->a9;
128 	cpu_reg(ctxt, 10) = res->a10;
129 	cpu_reg(ctxt, 11) = res->a11;
130 	cpu_reg(ctxt, 12) = res->a12;
131 	cpu_reg(ctxt, 13) = res->a13;
132 	cpu_reg(ctxt, 14) = res->a14;
133 	cpu_reg(ctxt, 15) = res->a15;
134 	cpu_reg(ctxt, 16) = res->a16;
135 	cpu_reg(ctxt, 17) = res->a17;
136 }
137 
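/*
 * An FF-A call is a fast SMC in the standard service range whose function
 * number falls within the window reserved for FF-A.
 */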
138 static bool is_ffa_call(u64 func_id)
139 {
140 	return ARM_SMCCC_IS_FAST_CALL(func_id) &&
141 	       ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
142 	       ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
143 	       ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
144 }
145 
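/* Register the hypervisor's own RX/TX mailbox pair with the SPMD at EL3. */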
146 static int ffa_map_hyp_buffers(u64 ffa_page_count)
147 {
148 	struct arm_smccc_1_2_regs res;
149 
150 	arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
151 		.a0 = FFA_FN64_RXTX_MAP,
152 		.a1 = hyp_virt_to_phys(hyp_buffers.tx),
153 		.a2 = hyp_virt_to_phys(hyp_buffers.rx),
154 		.a3 = ffa_page_count,
155 	}, &res);
156 
157 	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
158 }
159 
160 static int ffa_unmap_hyp_buffers(void)
161 {
162 	struct arm_smccc_1_2_regs res;
163 
164 	arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
165 		.a0 = FFA_RXTX_UNMAP,
166 		.a1 = HOST_FFA_ID,
167 	}, &res);
168 
169 	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
170 }
171 
172 static void ffa_mem_frag_tx(struct arm_smccc_1_2_regs *res, u32 handle_lo,
173 			     u32 handle_hi, u32 fraglen, u32 endpoint_id)
174 {
175 	arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
176 		.a0 = FFA_MEM_FRAG_TX,
177 		.a1 = handle_lo,
178 		.a2 = handle_hi,
179 		.a3 = fraglen,
180 		.a4 = endpoint_id,
181 	}, res);
182 }
183 
184 static void ffa_mem_frag_rx(struct arm_smccc_1_2_regs *res, u32 handle_lo,
185 			     u32 handle_hi, u32 fragoff)
186 {
187 	arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
188 		.a0 = FFA_MEM_FRAG_RX,
189 		.a1 = handle_lo,
190 		.a2 = handle_hi,
191 		.a3 = fragoff,
192 		.a4 = HOST_FFA_ID,
193 	}, res);
194 }
195 
196 static void ffa_mem_xfer(struct arm_smccc_1_2_regs *res, u64 func_id, u32 len,
197 			  u32 fraglen)
198 {
199 	arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
200 		.a0 = func_id,
201 		.a1 = len,
202 		.a2 = fraglen,
203 	}, res);
204 }
205 
206 static void ffa_mem_reclaim(struct arm_smccc_1_2_regs *res, u32 handle_lo,
207 			     u32 handle_hi, u32 flags)
208 {
209 	arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
210 		.a0 = FFA_MEM_RECLAIM,
211 		.a1 = handle_lo,
212 		.a2 = handle_hi,
213 		.a3 = flags,
214 	}, res);
215 }
216 
217 static void ffa_retrieve_req(struct arm_smccc_1_2_regs *res, u32 len)
218 {
219 	arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
220 		.a0 = FFA_FN64_MEM_RETRIEVE_REQ,
221 		.a1 = len,
222 		.a2 = len,
223 	}, res);
224 }
225 
226 static void ffa_rx_release(struct arm_smccc_1_2_regs *res)
227 {
228 	arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
229 		.a0 = FFA_RX_RELEASE,
230 	}, res);
231 }
232 
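/*
 * FFA_FN64_RXTX_MAP handler: validate the host's mailbox pair, register the
 * hypervisor's own mailboxes with the SPMD, then share and pin the host
 * pages so they remain mapped at EL2. Failures unwind in reverse order.
 */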
233 static void do_ffa_rxtx_map(struct arm_smccc_1_2_regs *res,
234 			    struct kvm_cpu_context *ctxt)
235 {
236 	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
237 	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
238 	DECLARE_REG(u32, npages, ctxt, 3);
239 	int ret = 0;
240 	void *rx_virt, *tx_virt;
241 
242 	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
243 		ret = FFA_RET_INVALID_PARAMETERS;
244 		goto out;
245 	}
246 
247 	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
248 		ret = FFA_RET_INVALID_PARAMETERS;
249 		goto out;
250 	}
251 
252 	hyp_spin_lock(&host_buffers.lock);
253 	if (host_buffers.tx) {
254 		ret = FFA_RET_DENIED;
255 		goto out_unlock;
256 	}
257 
258 	/*
259 	 * Map our hypervisor buffers into the SPMD before mapping and
260 	 * pinning the host buffers in our own address space.
261 	 */
262 	ret = ffa_map_hyp_buffers(npages);
263 	if (ret)
264 		goto out_unlock;
265 
266 	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
267 	if (ret) {
268 		ret = FFA_RET_INVALID_PARAMETERS;
269 		goto err_unmap;
270 	}
271 
272 	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
273 	if (ret) {
274 		ret = FFA_RET_INVALID_PARAMETERS;
275 		goto err_unshare_tx;
276 	}
277 
278 	tx_virt = hyp_phys_to_virt(tx);
279 	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
280 	if (ret) {
281 		ret = FFA_RET_INVALID_PARAMETERS;
282 		goto err_unshare_rx;
283 	}
284 
285 	rx_virt = hyp_phys_to_virt(rx);
286 	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
287 	if (ret) {
288 		ret = FFA_RET_INVALID_PARAMETERS;
289 		goto err_unpin_tx;
290 	}
291 
292 	host_buffers.tx = tx_virt;
293 	host_buffers.rx = rx_virt;
294 
295 out_unlock:
296 	hyp_spin_unlock(&host_buffers.lock);
297 out:
298 	ffa_to_smccc_res(res, ret);
299 	return;
300 
301 err_unpin_tx:
302 	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
303 err_unshare_rx:
304 	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
305 err_unshare_tx:
306 	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
307 err_unmap:
308 	ffa_unmap_hyp_buffers();
309 	goto out_unlock;
310 }
311 
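/*
 * FFA_RXTX_UNMAP handler: the reverse of do_ffa_rxtx_map(). Unpin and
 * unshare the host mailboxes, then drop the hypervisor's SPMD mapping.
 */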
312 static void do_ffa_rxtx_unmap(struct arm_smccc_1_2_regs *res,
313 			      struct kvm_cpu_context *ctxt)
314 {
315 	DECLARE_REG(u32, id, ctxt, 1);
316 	int ret = 0;
317 
318 	if (id != HOST_FFA_ID) {
319 		ret = FFA_RET_INVALID_PARAMETERS;
320 		goto out;
321 	}
322 
323 	hyp_spin_lock(&host_buffers.lock);
324 	if (!host_buffers.tx) {
325 		ret = FFA_RET_INVALID_PARAMETERS;
326 		goto out_unlock;
327 	}
328 
329 	hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
330 	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
331 	host_buffers.tx = NULL;
332 
333 	hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
334 	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
335 	host_buffers.rx = NULL;
336 
337 	ffa_unmap_hyp_buffers();
338 
339 out_unlock:
340 	hyp_spin_unlock(&host_buffers.lock);
341 out:
342 	ffa_to_smccc_res(res, ret);
343 }
344 
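/*
 * Transition each constituent range from host-owned to shared-with-FF-A in
 * the host's stage-2. Returns the number of ranges successfully shared so
 * that a partial failure can be unwound by the caller.
 */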
345 static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
346 				   u32 nranges)
347 {
348 	u32 i;
349 
350 	for (i = 0; i < nranges; ++i) {
351 		struct ffa_mem_region_addr_range *range = &ranges[i];
352 		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
353 		u64 pfn = hyp_phys_to_pfn(range->address);
354 
355 		if (!PAGE_ALIGNED(sz))
356 			break;
357 
358 		if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
359 			break;
360 	}
361 
362 	return i;
363 }
364 
365 static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
366 				     u32 nranges)
367 {
368 	u32 i;
369 
370 	for (i = 0; i < nranges; ++i) {
371 		struct ffa_mem_region_addr_range *range = &ranges[i];
372 		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
373 		u64 pfn = hyp_phys_to_pfn(range->address);
374 
375 		if (!PAGE_ALIGNED(sz))
376 			break;
377 
378 		if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
379 			break;
380 	}
381 
382 	return i;
383 }
384 
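/*
 * All-or-nothing wrappers around the helpers above: if any range fails to
 * transition, roll back those that succeeded and report FFA_RET_DENIED.
 */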
385 static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
386 				 u32 nranges)
387 {
388 	u32 nshared = __ffa_host_share_ranges(ranges, nranges);
389 	int ret = 0;
390 
391 	if (nshared != nranges) {
392 		WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
393 		ret = FFA_RET_DENIED;
394 	}
395 
396 	return ret;
397 }
398 
399 static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
400 				   u32 nranges)
401 {
402 	u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
403 	int ret = 0;
404 
405 	if (nunshared != nranges) {
406 		WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
407 		ret = FFA_RET_DENIED;
408 	}
409 
410 	return ret;
411 }
412 
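/*
 * FFA_MEM_FRAG_TX handler: a follow-up fragment consists purely of
 * constituent address ranges, so share them with FF-A before forwarding the
 * fragment to the SPMD. If sharing fails, the handle is reclaimed to abort
 * the transaction, as earlier fragments cannot be replayed.
 */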
413 static void do_ffa_mem_frag_tx(struct arm_smccc_1_2_regs *res,
414 			       struct kvm_cpu_context *ctxt)
415 {
416 	DECLARE_REG(u32, handle_lo, ctxt, 1);
417 	DECLARE_REG(u32, handle_hi, ctxt, 2);
418 	DECLARE_REG(u32, fraglen, ctxt, 3);
419 	DECLARE_REG(u32, endpoint_id, ctxt, 4);
420 	struct ffa_mem_region_addr_range *buf;
421 	int ret = FFA_RET_INVALID_PARAMETERS;
422 	u32 nr_ranges;
423 
424 	if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
425 		goto out;
426 
427 	if (fraglen % sizeof(*buf))
428 		goto out;
429 
430 	hyp_spin_lock(&host_buffers.lock);
431 	if (!host_buffers.tx)
432 		goto out_unlock;
433 
434 	buf = hyp_buffers.tx;
435 	memcpy(buf, host_buffers.tx, fraglen);
436 	nr_ranges = fraglen / sizeof(*buf);
437 
438 	ret = ffa_host_share_ranges(buf, nr_ranges);
439 	if (ret) {
440 		/*
441 		 * We're effectively aborting the transaction, so we need
442 		 * to restore the global state back to what it was prior to
443 		 * transmission of the first fragment.
444 		 */
445 		ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
446 		WARN_ON(res->a0 != FFA_SUCCESS);
447 		goto out_unlock;
448 	}
449 
450 	ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
451 	if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
452 		WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));
453 
454 out_unlock:
455 	hyp_spin_unlock(&host_buffers.lock);
456 out:
457 	if (ret)
458 		ffa_to_smccc_res(res, ret);
459 
460 	/*
461 	 * If for any reason this did not succeed, we're in trouble as we have
462 	 * now lost the content of the previous fragments and we can't rollback
463 	 * the host stage-2 changes. The pages previously marked as shared will
464 	 * remain stuck in that state forever, hence preventing the host from
465 	 * sharing/donating them again and may possibly lead to subsequent
466 	 * failures, but this will not compromise confidentiality.
467 	 */
468 	return;
469 }
470 
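/*
 * Common FFA_MEM_SHARE/FFA_MEM_LEND handler: copy the first fragment of the
 * transaction descriptor out of the host's TX mailbox, sanity-check it,
 * update the host's stage-2 for every constituent range and only then
 * forward the call to the SPMD. The stage-2 changes are reverted if the
 * SPMD rejects the transfer.
 *
 * The first fragment is expected to contain, in order (offsets resolved via
 * ffa_mem_desc_offset() for the negotiated version):
 *
 *	struct ffa_mem_region			(transaction header)
 *	struct ffa_mem_region_attributes	(single endpoint, composite_off)
 *	struct ffa_composite_mem_region		(at composite_off)
 *	struct ffa_mem_region_addr_range[]	(constituents)
 */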
471 static void __do_ffa_mem_xfer(const u64 func_id,
472 			      struct arm_smccc_1_2_regs *res,
473 			      struct kvm_cpu_context *ctxt)
474 {
475 	DECLARE_REG(u32, len, ctxt, 1);
476 	DECLARE_REG(u32, fraglen, ctxt, 2);
477 	DECLARE_REG(u64, addr_mbz, ctxt, 3);
478 	DECLARE_REG(u32, npages_mbz, ctxt, 4);
479 	struct ffa_mem_region_attributes *ep_mem_access;
480 	struct ffa_composite_mem_region *reg;
481 	struct ffa_mem_region *buf;
482 	u32 offset, nr_ranges, checked_offset;
483 	int ret = 0;
484 
485 	if (addr_mbz || npages_mbz || fraglen > len ||
486 	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
487 		ret = FFA_RET_INVALID_PARAMETERS;
488 		goto out;
489 	}
490 
491 	if (fraglen < sizeof(struct ffa_mem_region) +
492 		      sizeof(struct ffa_mem_region_attributes)) {
493 		ret = FFA_RET_INVALID_PARAMETERS;
494 		goto out;
495 	}
496 
497 	hyp_spin_lock(&host_buffers.lock);
498 	if (!host_buffers.tx) {
499 		ret = FFA_RET_INVALID_PARAMETERS;
500 		goto out_unlock;
501 	}
502 
503 	if (len > ffa_desc_buf.len) {
504 		ret = FFA_RET_NO_MEMORY;
505 		goto out_unlock;
506 	}
507 
508 	buf = hyp_buffers.tx;
509 	memcpy(buf, host_buffers.tx, fraglen);
510 
511 	ep_mem_access = (void *)buf +
512 			ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
513 	offset = ep_mem_access->composite_off;
514 	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
515 		ret = FFA_RET_INVALID_PARAMETERS;
516 		goto out_unlock;
517 	}
518 
519 	if (check_add_overflow(offset, sizeof(struct ffa_composite_mem_region), &checked_offset)) {
520 		ret = FFA_RET_INVALID_PARAMETERS;
521 		goto out_unlock;
522 	}
523 
524 	if (fraglen < checked_offset) {
525 		ret = FFA_RET_INVALID_PARAMETERS;
526 		goto out_unlock;
527 	}
528 
529 	reg = (void *)buf + offset;
530 	nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
531 	if (nr_ranges % sizeof(reg->constituents[0])) {
532 		ret = FFA_RET_INVALID_PARAMETERS;
533 		goto out_unlock;
534 	}
535 
536 	nr_ranges /= sizeof(reg->constituents[0]);
537 	ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
538 	if (ret)
539 		goto out_unlock;
540 
541 	ffa_mem_xfer(res, func_id, len, fraglen);
542 	if (fraglen != len) {
543 		if (res->a0 != FFA_MEM_FRAG_RX)
544 			goto err_unshare;
545 
546 		if (res->a3 != fraglen)
547 			goto err_unshare;
548 	} else if (res->a0 != FFA_SUCCESS) {
549 		goto err_unshare;
550 	}
551 
552 out_unlock:
553 	hyp_spin_unlock(&host_buffers.lock);
554 out:
555 	if (ret)
556 		ffa_to_smccc_res(res, ret);
557 	return;
558 
559 err_unshare:
560 	WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
561 	goto out_unlock;
562 }
563 
564 #define do_ffa_mem_xfer(fid, res, ctxt)				\
565 	do {							\
566 		BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE &&	\
567 			     (fid) != FFA_FN64_MEM_LEND);	\
568 		__do_ffa_mem_xfer((fid), (res), (ctxt));	\
569 	} while (0);
570 
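/*
 * FFA_MEM_RECLAIM handler: retrieve the full (possibly fragmented)
 * descriptor for the handle from the SPMD so the original constituent
 * ranges are known, issue the reclaim, and only then return the pages to
 * the host in its stage-2.
 */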
571 static void do_ffa_mem_reclaim(struct arm_smccc_1_2_regs *res,
572 			       struct kvm_cpu_context *ctxt)
573 {
574 	DECLARE_REG(u32, handle_lo, ctxt, 1);
575 	DECLARE_REG(u32, handle_hi, ctxt, 2);
576 	DECLARE_REG(u32, flags, ctxt, 3);
577 	struct ffa_mem_region_attributes *ep_mem_access;
578 	struct ffa_composite_mem_region *reg;
579 	u32 offset, len, fraglen, fragoff;
580 	struct ffa_mem_region *buf;
581 	int ret = 0;
582 	u64 handle;
583 
584 	handle = PACK_HANDLE(handle_lo, handle_hi);
585 
586 	hyp_spin_lock(&host_buffers.lock);
587 
588 	buf = hyp_buffers.tx;
589 	*buf = (struct ffa_mem_region) {
590 		.sender_id	= HOST_FFA_ID,
591 		.handle		= handle,
592 	};
593 
594 	ffa_retrieve_req(res, sizeof(*buf));
595 	buf = hyp_buffers.rx;
596 	if (res->a0 != FFA_MEM_RETRIEVE_RESP)
597 		goto out_unlock;
598 
599 	len = res->a1;
600 	fraglen = res->a2;
601 
602 	ep_mem_access = (void *)buf +
603 			ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
604 	offset = ep_mem_access->composite_off;
605 	/*
606 	 * We can trust the SPMD to get this right, but let's at least
607 	 * check that we end up with something that doesn't look _completely_
608 	 * bogus.
609 	 */
610 	if (WARN_ON(offset > len ||
611 		    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
612 		ret = FFA_RET_ABORTED;
613 		ffa_rx_release(res);
614 		goto out_unlock;
615 	}
616 
617 	if (len > ffa_desc_buf.len) {
618 		ret = FFA_RET_NO_MEMORY;
619 		ffa_rx_release(res);
620 		goto out_unlock;
621 	}
622 
623 	buf = ffa_desc_buf.buf;
624 	memcpy(buf, hyp_buffers.rx, fraglen);
625 	ffa_rx_release(res);
626 
627 	for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
628 		ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
629 		if (res->a0 != FFA_MEM_FRAG_TX) {
630 			ret = FFA_RET_INVALID_PARAMETERS;
631 			goto out_unlock;
632 		}
633 
634 		fraglen = res->a3;
635 		memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
636 		ffa_rx_release(res);
637 	}
638 
639 	ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
640 	if (res->a0 != FFA_SUCCESS)
641 		goto out_unlock;
642 
643 	reg = (void *)buf + offset;
644 	/* If the SPMD was happy, then we should be too. */
645 	WARN_ON(ffa_host_unshare_ranges(reg->constituents,
646 					reg->addr_range_cnt));
647 out_unlock:
648 	hyp_spin_unlock(&host_buffers.lock);
649 
650 	if (ret)
651 		ffa_to_smccc_res(res, ret);
652 }
653 
654 /*
655  * Is a given FFA function supported, either by forwarding on directly
656  * or by handling at EL2?
657  */
658 static bool ffa_call_supported(u64 func_id)
659 {
660 	switch (func_id) {
661 	/* Unsupported memory management calls */
662 	case FFA_FN64_MEM_RETRIEVE_REQ:
663 	case FFA_MEM_RETRIEVE_RESP:
664 	case FFA_MEM_RELINQUISH:
665 	case FFA_MEM_OP_PAUSE:
666 	case FFA_MEM_OP_RESUME:
667 	case FFA_MEM_FRAG_RX:
668 	case FFA_FN64_MEM_DONATE:
669 	/* Indirect message passing via RX/TX buffers */
670 	case FFA_MSG_SEND:
671 	case FFA_MSG_POLL:
672 	case FFA_MSG_WAIT:
673 	/* 32-bit variants of 64-bit calls */
674 	case FFA_MSG_SEND_DIRECT_RESP:
675 	case FFA_RXTX_MAP:
676 	case FFA_MEM_DONATE:
677 	case FFA_MEM_RETRIEVE_REQ:
678 	/* Optional notification interfaces added in FF-A 1.1 */
679 	case FFA_NOTIFICATION_BITMAP_CREATE:
680 	case FFA_NOTIFICATION_BITMAP_DESTROY:
681 	case FFA_NOTIFICATION_BIND:
682 	case FFA_NOTIFICATION_UNBIND:
683 	case FFA_NOTIFICATION_SET:
684 	case FFA_NOTIFICATION_GET:
685 	case FFA_NOTIFICATION_INFO_GET:
686 	/* Optional interfaces added in FF-A 1.2 */
687 	case FFA_MSG_SEND_DIRECT_REQ2:		/* Optional per 7.5.1 */
688 	case FFA_MSG_SEND_DIRECT_RESP2:		/* Optional per 7.5.1 */
689 	case FFA_CONSOLE_LOG:			/* Optional per 13.1: not in Table 13.1 */
690 	case FFA_PARTITION_INFO_GET_REGS:	/* Optional for virtual instances per 13.1 */
691 		return false;
692 	}
693 
694 	return true;
695 }
696 
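/*
 * FFA_FEATURES handler: report the memory-sharing interfaces we proxy
 * ourselves, reject anything on the unsupported list, and let everything
 * else fall through to EL3.
 */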
697 static bool do_ffa_features(struct arm_smccc_1_2_regs *res,
698 			    struct kvm_cpu_context *ctxt)
699 {
700 	DECLARE_REG(u32, id, ctxt, 1);
701 	u64 prop = 0;
702 	int ret = 0;
703 
704 	if (!ffa_call_supported(id)) {
705 		ret = FFA_RET_NOT_SUPPORTED;
706 		goto out_handled;
707 	}
708 
709 	switch (id) {
710 	case FFA_MEM_SHARE:
711 	case FFA_FN64_MEM_SHARE:
712 	case FFA_MEM_LEND:
713 	case FFA_FN64_MEM_LEND:
714 		ret = FFA_RET_SUCCESS;
715 		prop = 0; /* No support for dynamic buffers */
716 		goto out_handled;
717 	default:
718 		return false;
719 	}
720 
721 out_handled:
722 	ffa_to_smccc_res_prop(res, ret, prop);
723 	return true;
724 }
725 
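/*
 * Once a version has been negotiated, check that EL3 assigned us the
 * expected FF-A ID and that the firmware's minimum RXTX buffer size does
 * not exceed our page size.
 */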
726 static int hyp_ffa_post_init(void)
727 {
728 	size_t min_rxtx_sz;
729 	struct arm_smccc_1_2_regs res;
730 
731 	arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs){
732 		.a0 = FFA_ID_GET,
733 	}, &res);
734 	if (res.a0 != FFA_SUCCESS)
735 		return -EOPNOTSUPP;
736 
737 	if (res.a2 != HOST_FFA_ID)
738 		return -EINVAL;
739 
740 	arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs){
741 		.a0 = FFA_FEATURES,
742 		.a1 = FFA_FN64_RXTX_MAP,
743 	}, &res);
744 	if (res.a0 != FFA_SUCCESS)
745 		return -EOPNOTSUPP;
746 
747 	switch (res.a2 & FFA_FEAT_RXTX_MIN_SZ_MASK) {
748 	case FFA_FEAT_RXTX_MIN_SZ_4K:
749 		min_rxtx_sz = SZ_4K;
750 		break;
751 	case FFA_FEAT_RXTX_MIN_SZ_16K:
752 		min_rxtx_sz = SZ_16K;
753 		break;
754 	case FFA_FEAT_RXTX_MIN_SZ_64K:
755 		min_rxtx_sz = SZ_64K;
756 		break;
757 	default:
758 		return -EINVAL;
759 	}
760 
761 	if (min_rxtx_sz > PAGE_SIZE)
762 		return -EOPNOTSUPP;
763 
764 	return 0;
765 }
766 
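/*
 * FFA_VERSION handler: negotiate a major-version-1 ABI with the host. A
 * downgrade request is first offered to the TEE; once a version has been
 * negotiated, it is fixed for all subsequent callers.
 */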
767 static void do_ffa_version(struct arm_smccc_1_2_regs *res,
768 			   struct kvm_cpu_context *ctxt)
769 {
770 	DECLARE_REG(u32, ffa_req_version, ctxt, 1);
771 
772 	if (FFA_MAJOR_VERSION(ffa_req_version) != 1) {
773 		res->a0 = FFA_RET_NOT_SUPPORTED;
774 		return;
775 	}
776 
777 	hyp_spin_lock(&version_lock);
778 	if (has_version_negotiated) {
779 		if (FFA_MINOR_VERSION(ffa_req_version) < FFA_MINOR_VERSION(hyp_ffa_version))
780 			res->a0 = FFA_RET_NOT_SUPPORTED;
781 		else
782 			res->a0 = hyp_ffa_version;
783 		goto unlock;
784 	}
785 
786 	/*
787 	 * If the client driver tries to downgrade the version, we need to ask
788 	 * first if TEE supports it.
789 	 */
790 	if (FFA_MINOR_VERSION(ffa_req_version) < FFA_MINOR_VERSION(hyp_ffa_version)) {
791 		arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
792 			.a0 = FFA_VERSION,
793 			.a1 = ffa_req_version,
794 		}, res);
795 		if (res->a0 == FFA_RET_NOT_SUPPORTED)
796 			goto unlock;
797 
798 		hyp_ffa_version = ffa_req_version;
799 	}
800 
801 	if (hyp_ffa_post_init()) {
802 		res->a0 = FFA_RET_NOT_SUPPORTED;
803 	} else {
804 		smp_store_release(&has_version_negotiated, true);
805 		res->a0 = hyp_ffa_version;
806 	}
807 unlock:
808 	hyp_spin_unlock(&version_lock);
809 }
810 
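/*
 * FFA_PARTITION_INFO_GET handler: forward the query to EL3 using the
 * hypervisor's RX mailbox and copy the returned partition descriptors into
 * the host's RX mailbox, bounding the copy by the mailbox size.
 */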
811 static void do_ffa_part_get(struct arm_smccc_1_2_regs *res,
812 			    struct kvm_cpu_context *ctxt)
813 {
814 	DECLARE_REG(u32, uuid0, ctxt, 1);
815 	DECLARE_REG(u32, uuid1, ctxt, 2);
816 	DECLARE_REG(u32, uuid2, ctxt, 3);
817 	DECLARE_REG(u32, uuid3, ctxt, 4);
818 	DECLARE_REG(u32, flags, ctxt, 5);
819 	u32 count, partition_sz, copy_sz;
820 
821 	hyp_spin_lock(&host_buffers.lock);
822 	if (!host_buffers.rx) {
823 		ffa_to_smccc_res(res, FFA_RET_BUSY);
824 		goto out_unlock;
825 	}
826 
827 	arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
828 		.a0 = FFA_PARTITION_INFO_GET,
829 		.a1 = uuid0,
830 		.a2 = uuid1,
831 		.a3 = uuid2,
832 		.a4 = uuid3,
833 		.a5 = flags,
834 	}, res);
835 
836 	if (res->a0 != FFA_SUCCESS)
837 		goto out_unlock;
838 
839 	count = res->a2;
840 	if (!count)
841 		goto out_unlock;
842 
843 	if (hyp_ffa_version > FFA_VERSION_1_0) {
844 		/* Get the number of partitions deployed in the system */
845 		if (flags & 0x1)
846 			goto out_unlock;
847 
848 		partition_sz  = res->a3;
849 	} else {
850 		/* FFA_VERSION_1_0 lacks the size in the response */
851 		partition_sz = FFA_1_0_PARTITON_INFO_SZ;
852 	}
853 
854 	copy_sz = partition_sz * count;
855 	if (copy_sz > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
856 		ffa_to_smccc_res(res, FFA_RET_ABORTED);
857 		goto out_unlock;
858 	}
859 
860 	memcpy(host_buffers.rx, hyp_buffers.rx, copy_sz);
861 out_unlock:
862 	hyp_spin_unlock(&host_buffers.lock);
863 }
864 
865 bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
866 {
867 	struct arm_smccc_1_2_regs res;
868 
869 	/*
870 	 * There's no way we can tell what a non-standard SMC call might
871 	 * be up to. Ideally, we would terminate these here and return
872 	 * an error to the host, but sadly devices make use of custom
873 	 * firmware calls for things like power management, debugging,
874 	 * RNG access and crash reporting.
875 	 *
876 	 * Given that the architecture requires us to trust EL3 anyway,
877 	 * we forward unrecognised calls on under the assumption that
878 	 * the firmware doesn't expose a mechanism to access arbitrary
879 	 * non-secure memory. Short of a per-device table of SMCs, this
880 	 * is the best we can do.
881 	 */
882 	if (!is_ffa_call(func_id))
883 		return false;
884 
885 	if (func_id != FFA_VERSION &&
886 	    !smp_load_acquire(&has_version_negotiated)) {
887 		ffa_to_smccc_error(&res, FFA_RET_INVALID_PARAMETERS);
888 		goto out_handled;
889 	}
890 
891 	switch (func_id) {
892 	case FFA_FEATURES:
893 		if (!do_ffa_features(&res, host_ctxt))
894 			return false;
895 		goto out_handled;
896 	/* Memory management */
897 	case FFA_FN64_RXTX_MAP:
898 		do_ffa_rxtx_map(&res, host_ctxt);
899 		goto out_handled;
900 	case FFA_RXTX_UNMAP:
901 		do_ffa_rxtx_unmap(&res, host_ctxt);
902 		goto out_handled;
903 	case FFA_MEM_SHARE:
904 	case FFA_FN64_MEM_SHARE:
905 		do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
906 		goto out_handled;
907 	case FFA_MEM_RECLAIM:
908 		do_ffa_mem_reclaim(&res, host_ctxt);
909 		goto out_handled;
910 	case FFA_MEM_LEND:
911 	case FFA_FN64_MEM_LEND:
912 		do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
913 		goto out_handled;
914 	case FFA_MEM_FRAG_TX:
915 		do_ffa_mem_frag_tx(&res, host_ctxt);
916 		goto out_handled;
917 	case FFA_VERSION:
918 		do_ffa_version(&res, host_ctxt);
919 		goto out_handled;
920 	case FFA_PARTITION_INFO_GET:
921 		do_ffa_part_get(&res, host_ctxt);
922 		goto out_handled;
923 	}
924 
925 	if (ffa_call_supported(func_id))
926 		return false; /* Pass through */
927 
928 	ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
929 out_handled:
930 	ffa_set_retval(host_ctxt, &res);
931 	return true;
932 }
933 
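/*
 * Probe the firmware, negotiate the highest mutually supported FF-A version
 * (capped at 1.2) and carve up the donated pages as follows:
 *
 *	pages[0 .. KVM_FFA_MBOX_NR_PAGES)		: hypervisor TX mailbox
 *	pages[.. 2 * KVM_FFA_MBOX_NR_PAGES)		: hypervisor RX mailbox
 *	remainder (hyp_ffa_proxy_pages() in total)	: ffa_desc_buf scratch space
 */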
934 int hyp_ffa_init(void *pages)
935 {
936 	struct arm_smccc_1_2_regs res;
937 	void *tx, *rx;
938 
939 	if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
940 		return 0;
941 
942 	arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
943 		.a0 = FFA_VERSION,
944 		.a1 = FFA_VERSION_1_2,
945 	}, &res);
946 	if (res.a0 == FFA_RET_NOT_SUPPORTED)
947 		return 0;
948 
949 	/*
950 	 * Firmware returns the maximum supported version of the FF-A
951 	 * implementation. Check that the returned version is
952 	 * backwards-compatible with the hyp according to the rules in DEN0077A
953 	 * v1.1 REL0 13.2.1.
954 	 *
955 	 * Of course, things are never simple when dealing with firmware. v1.1
956 	 * broke ABI with v1.0 on several structures, which is itself
957 	 * incompatible with the aforementioned versioning scheme. The
958 	 * expectation is that v1.x implementations that do not support the v1.0
959 	 * ABI return NOT_SUPPORTED rather than a version number, according to
960 	 * DEN0077A v1.1 REL0 18.6.4.
961 	 */
962 	if (FFA_MAJOR_VERSION(res.a0) != 1)
963 		return -EOPNOTSUPP;
964 
965 	if (FFA_MINOR_VERSION(res.a0) < FFA_MINOR_VERSION(FFA_VERSION_1_2))
966 		hyp_ffa_version = res.a0;
967 	else
968 		hyp_ffa_version = FFA_VERSION_1_2;
969 
970 	tx = pages;
971 	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
972 	rx = pages;
973 	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
974 
975 	ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
976 		.buf	= pages,
977 		.len	= PAGE_SIZE *
978 			  (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
979 	};
980 
981 	hyp_buffers = (struct kvm_ffa_buffers) {
982 		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
983 		.tx	= tx,
984 		.rx	= rx,
985 	};
986 
987 	host_buffers = (struct kvm_ffa_buffers) {
988 		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
989 	};
990 
991 	version_lock = __HYP_SPIN_LOCK_UNLOCKED;
992 	return 0;
993 }
994