// SPDX-License-Identifier: GPL-2.0-only
/*
 * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
 * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
 * Framework for Arm A-profile", which is specified by Arm in document
 * number DEN0077.
 *
 * Copyright (C) 2022 - Google LLC
 * Author: Andrew Walbran <qwandor@google.com>
 *
 * This driver hooks into the SMC trapping logic for the host and intercepts
 * all calls falling within the FF-A range. Each call is either:
 *
 *	- Forwarded on unmodified to the SPMD at EL3
 *	- Rejected as "unsupported"
 *	- Accompanied by a host stage-2 page-table check/update and reissued
 *
 * Consequently, any attempts by the host to make guest memory pages
 * accessible to the secure world using FF-A will be detected either here
 * (in the case that the memory is already owned by the guest) or during
 * donation to the guest (in the case that the memory was previously shared
 * with the secure world).
 *
 * To allow the rolling-back of page-table updates and FF-A calls in the
 * event of failure, operations involving the RXTX buffers are locked for
 * the duration and are therefore serialised.
 */

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>
#include <nvhe/spinlock.h>

/*
 * "ID value 0 must be returned at the Non-secure physical FF-A instance"
 * We share this ID with the host.
 */
#define HOST_FFA_ID	0

/*
 * A buffer to hold descriptors of up to the maximum size we can see from
 * the host, which is required when the SPMD returns a fragmented
 * FFA_MEM_RETRIEVE_RESP while resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
	void	*buf;
	size_t	len;
};

static struct kvm_ffa_descriptor_buffer ffa_desc_buf;

struct kvm_ffa_buffers {
	hyp_spinlock_t lock;
	void *tx;
	void *rx;
};

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers;

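/* Encode an FF-A error code in the SMCCC return registers (w0/w2). */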
static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
	*res = (struct arm_smccc_res) {
		.a0	= FFA_ERROR,
		.a2	= ffa_errno,
	};
}

static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
{
	if (ret == FFA_RET_SUCCESS) {
		*res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS,
						.a2 = prop };
	} else {
		ffa_to_smccc_error(res, ret);
	}
}

static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
	ffa_to_smccc_res_prop(res, ret, 0);
}

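/* Copy the SMC return values back into the host's registers (x0-x3). */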
static void ffa_set_retval(struct kvm_cpu_context *ctxt,
			   struct arm_smccc_res *res)
{
	cpu_reg(ctxt, 0) = res->a0;
	cpu_reg(ctxt, 1) = res->a1;
	cpu_reg(ctxt, 2) = res->a2;
	cpu_reg(ctxt, 3) = res->a3;
}

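/*
 * FF-A function IDs are fast calls in the standard service range, with
 * function numbers between FFA_MIN_FUNC_NUM and FFA_MAX_FUNC_NUM.
 */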
static bool is_ffa_call(u64 func_id)
{
	return ARM_SMCCC_IS_FAST_CALL(func_id) &&
	       ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
	       ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
	       ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
}

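/*
 * Register the hypervisor's own TX/RX mailboxes with the SPMD at EL3 so
 * that descriptors can be relayed on behalf of the host.
 */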
static int ffa_map_hyp_buffers(u64 ffa_page_count)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
			  hyp_virt_to_phys(hyp_buffers.tx),
			  hyp_virt_to_phys(hyp_buffers.rx),
			  ffa_page_count,
			  0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static int ffa_unmap_hyp_buffers(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
			  HOST_FFA_ID,
			  0, 0, 0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

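/* Thin wrappers around the FF-A memory management SMCs issued to EL3. */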
static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
			  handle_lo, handle_hi, fraglen, endpoint_id,
			  0, 0, 0,
			  res);
}

static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 fragoff)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
			  handle_lo, handle_hi, fragoff, HOST_FFA_ID,
			  0, 0, 0,
			  res);
}

static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
			  u32 fraglen)
{
	arm_smccc_1_1_smc(func_id, len, fraglen,
			  0, 0, 0, 0, 0,
			  res);
}

static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 flags)
{
	arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
			  handle_lo, handle_hi, flags,
			  0, 0, 0, 0,
			  res);
}

static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
{
	arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
			  len, len,
			  0, 0, 0, 0, 0,
			  res);
}

static void ffa_rx_release(struct arm_smccc_res *res)
{
	arm_smccc_1_1_smc(FFA_RX_RELEASE,
			  0, 0,
			  0, 0, 0, 0, 0,
			  res);
}

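/*
 * FFA_FN64_RXTX_MAP handler: validate the host's mailbox pair, map our
 * own buffers at EL3, then share and pin the host pages so that they
 * can't be donated or unmapped while we're using them.
 */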
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
	DECLARE_REG(u32, npages, ctxt, 3);
	int ret = 0;
	void *rx_virt, *tx_virt;

	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (host_buffers.tx) {
		ret = FFA_RET_DENIED;
		goto out_unlock;
	}

	/*
	 * Map our hypervisor buffers into the SPMD before mapping and
	 * pinning the host buffers in our own address space.
	 */
	ret = ffa_map_hyp_buffers(npages);
	if (ret)
		goto out_unlock;

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unmap;
	}

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_tx;
	}

	tx_virt = hyp_phys_to_virt(tx);
	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_rx;
	}

	rx_virt = hyp_phys_to_virt(rx);
	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unpin_tx;
	}

	host_buffers.tx = tx_virt;
	host_buffers.rx = rx_virt;

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
	return;

err_unpin_tx:
	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
	ffa_unmap_hyp_buffers();
	goto out_unlock;
}

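/*
 * FFA_RXTX_UNMAP handler: tear down in the reverse order of
 * do_ffa_rxtx_map(), unpinning and unsharing the host pages before
 * unmapping our own buffers at EL3.
 */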
static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	int ret = 0;

	if (id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
	host_buffers.tx = NULL;

	hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
	host_buffers.rx = NULL;

	ffa_unmap_hyp_buffers();

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
}

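/*
 * Update the host stage-2 for each constituent range, stopping at the
 * first failure. Returns the number of ranges successfully shared so
 * that the caller can unwind.
 */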
static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}

static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				     u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}

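/* All-or-nothing wrappers: roll back the host stage-2 on partial failure. */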
static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				 u32 nranges)
{
	u32 nshared = __ffa_host_share_ranges(ranges, nranges);
	int ret = 0;

	if (nshared != nranges) {
		WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}

static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
	int ret = 0;

	if (nunshared != nranges) {
		WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}

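/*
 * FFA_MEM_FRAG_TX handler: fragments beyond the first consist solely of
 * constituent address ranges, so share them with the secure world before
 * forwarding. If the sharing fails, abort the whole transaction with a
 * reclaim, as we no longer have the earlier fragments to hand.
 */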
static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, fraglen, ctxt, 3);
	DECLARE_REG(u32, endpoint_id, ctxt, 4);
	struct ffa_mem_region_addr_range *buf;
	int ret = FFA_RET_INVALID_PARAMETERS;
	u32 nr_ranges;

	if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
		goto out;

	if (fraglen % sizeof(*buf))
		goto out;

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx)
		goto out_unlock;

	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);
	nr_ranges = fraglen / sizeof(*buf);

	ret = ffa_host_share_ranges(buf, nr_ranges);
	if (ret) {
		/*
		 * We're effectively aborting the transaction, so we need
		 * to restore the global state back to what it was prior to
		 * transmission of the first fragment.
		 */
		ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
		WARN_ON(res->a0 != FFA_SUCCESS);
		goto out_unlock;
	}

	ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
	if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
		WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);

	/*
	 * If for any reason this did not succeed, we're in trouble as we have
	 * now lost the content of the previous fragments and we can't roll
	 * back the host stage-2 changes. The pages previously marked as
	 * shared will remain stuck in that state forever, preventing the
	 * host from sharing/donating them again and possibly leading to
	 * subsequent failures, but this will not compromise confidentiality.
	 */
	return;
}

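/*
 * FFA_MEM_SHARE/FFA_MEM_LEND handler: validate the first fragment of the
 * descriptor in the host's TX buffer, transition the constituent ranges
 * in the host stage-2 and then forward the call on to the SPMD. The
 * sharing is undone if EL3 rejects the transfer.
 */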
static __always_inline void do_ffa_mem_xfer(const u64 func_id,
					    struct arm_smccc_res *res,
					    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, len, ctxt, 1);
	DECLARE_REG(u32, fraglen, ctxt, 2);
	DECLARE_REG(u64, addr_mbz, ctxt, 3);
	DECLARE_REG(u32, npages_mbz, ctxt, 4);
	struct ffa_mem_region_attributes *ep_mem_access;
	struct ffa_composite_mem_region *reg;
	struct ffa_mem_region *buf;
	u32 offset, nr_ranges;
	int ret = 0;

	BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
		     func_id != FFA_FN64_MEM_LEND);

	if (addr_mbz || npages_mbz || fraglen > len ||
	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (fraglen < sizeof(struct ffa_mem_region) +
		      sizeof(struct ffa_mem_region_attributes)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);

	ep_mem_access = (void *)buf +
			ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_0);
	offset = ep_mem_access->composite_off;
	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	reg = (void *)buf + offset;
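	/*
	 * nr_ranges initially holds the byte length of the constituent
	 * array; convert it to a count once we know it's a whole multiple
	 * of the range size.
	 */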
	nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
	if (nr_ranges % sizeof(reg->constituents[0])) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	nr_ranges /= sizeof(reg->constituents[0]);
	ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
	if (ret)
		goto out_unlock;

	ffa_mem_xfer(res, func_id, len, fraglen);
	if (fraglen != len) {
		if (res->a0 != FFA_MEM_FRAG_RX)
			goto err_unshare;

		if (res->a3 != fraglen)
			goto err_unshare;
	} else if (res->a0 != FFA_SUCCESS) {
		goto err_unshare;
	}

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);
	return;

err_unshare:
	WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
	goto out_unlock;
}

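/*
 * FFA_MEM_RECLAIM handler: retrieve the full descriptor for the handle
 * from the SPMD (fragment by fragment if necessary) so that we know which
 * constituent ranges to return to the host stage-2 once the reclaim has
 * succeeded.
 */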
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, flags, ctxt, 3);
	struct ffa_mem_region_attributes *ep_mem_access;
	struct ffa_composite_mem_region *reg;
	u32 offset, len, fraglen, fragoff;
	struct ffa_mem_region *buf;
	int ret = 0;
	u64 handle;

	handle = PACK_HANDLE(handle_lo, handle_hi);

	hyp_spin_lock(&host_buffers.lock);

	buf = hyp_buffers.tx;
	*buf = (struct ffa_mem_region) {
		.sender_id	= HOST_FFA_ID,
		.handle		= handle,
	};

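	/* Ask the SPMD for the descriptor; the response lands in our RX buffer. */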
	ffa_retrieve_req(res, sizeof(*buf));
	buf = hyp_buffers.rx;
	if (res->a0 != FFA_MEM_RETRIEVE_RESP)
		goto out_unlock;

	len = res->a1;
	fraglen = res->a2;

	ep_mem_access = (void *)buf +
			ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_0);
	offset = ep_mem_access->composite_off;
	/*
	 * We can trust the SPMD to get this right, but let's at least
	 * check that we end up with something that doesn't look _completely_
	 * bogus.
	 */
	if (WARN_ON(offset > len ||
		    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
		ret = FFA_RET_ABORTED;
		ffa_rx_release(res);
		goto out_unlock;
	}

	if (len > ffa_desc_buf.len) {
		ret = FFA_RET_NO_MEMORY;
		ffa_rx_release(res);
		goto out_unlock;
	}

	buf = ffa_desc_buf.buf;
	memcpy(buf, hyp_buffers.rx, fraglen);
	ffa_rx_release(res);

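	/* Pull in any remaining fragments of the descriptor. */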
	for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
		ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
		if (res->a0 != FFA_MEM_FRAG_TX) {
			ret = FFA_RET_INVALID_PARAMETERS;
			goto out_unlock;
		}

		fraglen = res->a3;
		memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
		ffa_rx_release(res);
	}

	ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
	if (res->a0 != FFA_SUCCESS)
		goto out_unlock;

	reg = (void *)buf + offset;
	/* If the SPMD was happy, then we should be too. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents,
					reg->addr_range_cnt));
out_unlock:
	hyp_spin_unlock(&host_buffers.lock);

	if (ret)
		ffa_to_smccc_res(res, ret);
}

/*
 * Is a given FFA function supported, either by forwarding on directly
 * or by handling at EL2?
 */
static bool ffa_call_supported(u64 func_id)
{
	switch (func_id) {
	/* Unsupported memory management calls */
	case FFA_FN64_MEM_RETRIEVE_REQ:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_OP_PAUSE:
	case FFA_MEM_OP_RESUME:
	case FFA_MEM_FRAG_RX:
	case FFA_FN64_MEM_DONATE:
	/* Indirect message passing via RX/TX buffers */
	case FFA_MSG_SEND:
	case FFA_MSG_POLL:
	case FFA_MSG_WAIT:
	/* 32-bit variants of 64-bit calls */
	case FFA_MSG_SEND_DIRECT_RESP:
	case FFA_RXTX_MAP:
	case FFA_MEM_DONATE:
	case FFA_MEM_RETRIEVE_REQ:
		return false;
	}

	return true;
}

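/*
 * FFA_FEATURES handler for the function IDs we implement ourselves;
 * returning false here causes the query to be forwarded to the firmware.
 */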
static bool do_ffa_features(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	u64 prop = 0;
	int ret = 0;

	if (!ffa_call_supported(id)) {
		ret = FFA_RET_NOT_SUPPORTED;
		goto out_handled;
	}

	switch (id) {
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		ret = FFA_RET_SUCCESS;
		prop = 0; /* No support for dynamic buffers */
		goto out_handled;
	default:
		return false;
	}

out_handled:
	ffa_to_smccc_res_prop(res, ret, prop);
	return true;
}

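/*
 * Entry point from the host SMC handler. Returns true if the call was
 * handled here, with the result written back to the host context, and
 * false if it should be passed through to EL3.
 */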
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
	struct arm_smccc_res res;

	/*
	 * There's no way we can tell what a non-standard SMC call might
	 * be up to. Ideally, we would terminate these here and return
	 * an error to the host, but sadly devices make use of custom
	 * firmware calls for things like power management, debugging,
	 * RNG access and crash reporting.
	 *
	 * Given that the architecture requires us to trust EL3 anyway,
	 * we forward unrecognised calls on under the assumption that
	 * the firmware doesn't expose a mechanism to access arbitrary
	 * non-secure memory. Short of a per-device table of SMCs, this
	 * is the best we can do.
	 */
	if (!is_ffa_call(func_id))
		return false;

	switch (func_id) {
	case FFA_FEATURES:
		if (!do_ffa_features(&res, host_ctxt))
			return false;
		goto out_handled;
	/* Memory management */
	case FFA_FN64_RXTX_MAP:
		do_ffa_rxtx_map(&res, host_ctxt);
		goto out_handled;
	case FFA_RXTX_UNMAP:
		do_ffa_rxtx_unmap(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
		do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_RECLAIM:
		do_ffa_mem_reclaim(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_FRAG_TX:
		do_ffa_mem_frag_tx(&res, host_ctxt);
		goto out_handled;
	}

	if (ffa_call_supported(func_id))
		return false; /* Pass through */

	ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
out_handled:
	ffa_set_retval(host_ctxt, &res);
	return true;
}

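/*
 * Probe the firmware's FF-A implementation and, if it's usable, carve the
 * pages donated by the host into our TX/RX mailboxes and the descriptor
 * buffer.
 */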
int hyp_ffa_init(void *pages)
{
	struct arm_smccc_res res;
	size_t min_rxtx_sz;
	void *tx, *rx;

	if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
		return 0;

	arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == FFA_RET_NOT_SUPPORTED)
		return 0;

	/*
	 * Firmware returns the maximum supported version of the FF-A
	 * implementation. Check that the returned version is
	 * backwards-compatible with the hyp according to the rules in DEN0077A
	 * v1.1 REL0 13.2.1.
	 *
	 * Of course, things are never simple when dealing with firmware. v1.1
	 * broke ABI with v1.0 on several structures, which is itself
	 * incompatible with the aforementioned versioning scheme. The
	 * expectation is that v1.x implementations that do not support the v1.0
	 * ABI return NOT_SUPPORTED rather than a version number, according to
	 * DEN0077A v1.1 REL0 18.6.4.
	 */
	if (FFA_MAJOR_VERSION(res.a0) != 1)
		return -EOPNOTSUPP;

	arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	if (res.a2 != HOST_FFA_ID)
		return -EINVAL;

	arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
			  0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	switch (res.a2) {
	case FFA_FEAT_RXTX_MIN_SZ_4K:
		min_rxtx_sz = SZ_4K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_16K:
		min_rxtx_sz = SZ_16K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_64K:
		min_rxtx_sz = SZ_64K;
		break;
	default:
		return -EINVAL;
	}

	if (min_rxtx_sz > PAGE_SIZE)
		return -EOPNOTSUPP;

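	/*
	 * Carve out the mailbox pair first; whatever is left over becomes
	 * the buffer for reassembling fragmented descriptors on reclaim.
	 */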
	tx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
	rx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;

	ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
		.buf	= pages,
		.len	= PAGE_SIZE *
			  (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
	};

	hyp_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
		.tx	= tx,
		.rx	= rx,
	};

	host_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
	};

	return 0;
}