// SPDX-License-Identifier: GPL-2.0-only
/*
 * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
 * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
 * Framework for Arm A-profile", which is specified by Arm in document
 * number DEN0077A.
 *
 * Copyright (C) 2022 - Google LLC
 * Author: Andrew Walbran <qwandor@google.com>
 *
 * This driver hooks into the SMC trapping logic for the host and intercepts
 * all calls falling within the FF-A range. Each call is either:
 *
 *	- Forwarded on unmodified to the SPMD at EL3
 *	- Rejected as "unsupported"
 *	- Accompanied by a host stage-2 page-table check/update and reissued
 *
 * Consequently, any attempts by the host to make guest memory pages
 * accessible to the secure world using FF-A will be detected either here
 * (in the case that the memory is already owned by the guest) or during
 * donation to the guest (in the case that the memory was previously shared
 * with the secure world).
 *
 * To allow the rolling-back of page-table updates and FF-A calls in the
 * event of failure, operations involving the RXTX buffers are locked for
 * the duration and are therefore serialised.
 */

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>
#include <nvhe/spinlock.h>

/*
 * "ID value 0 must be returned at the Non-secure physical FF-A instance"
 * We share this ID with the host.
 */
#define HOST_FFA_ID	0

/*
 * A buffer to hold the maximum descriptor size we can see from the host,
 * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
 * when resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
	void	*buf;
	size_t	len;
};

static struct kvm_ffa_descriptor_buffer ffa_desc_buf;

struct kvm_ffa_buffers {
	hyp_spinlock_t lock;
	void *tx;
	void *rx;
};

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers;
static u32 hyp_ffa_version;
static bool has_version_negotiated;
static hyp_spinlock_t version_lock;

static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
	*res = (struct arm_smccc_res) {
		.a0	= FFA_ERROR,
		.a2	= ffa_errno,
	};
}

static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
{
	if (ret == FFA_RET_SUCCESS) {
		*res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS,
						.a2 = prop };
	} else {
		ffa_to_smccc_error(res, ret);
	}
}

static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
	ffa_to_smccc_res_prop(res, ret, 0);
}

static void ffa_set_retval(struct kvm_cpu_context *ctxt,
			   struct arm_smccc_res *res)
{
	cpu_reg(ctxt, 0) = res->a0;
	cpu_reg(ctxt, 1) = res->a1;
	cpu_reg(ctxt, 2) = res->a2;
	cpu_reg(ctxt, 3) = res->a3;
}

static bool is_ffa_call(u64 func_id)
{
	return ARM_SMCCC_IS_FAST_CALL(func_id) &&
	       ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
	       ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
	       ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
}
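
/*
 * For orientation only (the real constants live in <linux/arm_ffa.h> and the
 * SMCCC headers): an FF-A function ID is a fast SMCCC call in the standard
 * secure service range, e.g. FFA_VERSION is 0x84000063 (bit 31 = fast call,
 * owner field 4 = standard service, function number 0x63), and the SMC64
 * variants additionally set bit 30, e.g. FFA_FN64_MEM_SHARE is 0xc4000073.
 * The check above simply matches that encoding against the FF-A
 * function-number window.
 */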

static int ffa_map_hyp_buffers(u64 ffa_page_count)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
			  hyp_virt_to_phys(hyp_buffers.tx),
			  hyp_virt_to_phys(hyp_buffers.rx),
			  ffa_page_count,
			  0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static int ffa_unmap_hyp_buffers(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
			  HOST_FFA_ID,
			  0, 0, 0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
			  handle_lo, handle_hi, fraglen, endpoint_id,
			  0, 0, 0,
			  res);
}

static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 fragoff)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
			  handle_lo, handle_hi, fragoff, HOST_FFA_ID,
			  0, 0, 0,
			  res);
}

static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
			  u32 fraglen)
{
	arm_smccc_1_1_smc(func_id, len, fraglen,
			  0, 0, 0, 0, 0,
			  res);
}

static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 flags)
{
	arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
			  handle_lo, handle_hi, flags,
			  0, 0, 0, 0,
			  res);
}

static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
{
	arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
			  len, len,
			  0, 0, 0, 0, 0,
			  res);
}

static void do_ffa_rxtx_map(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
	DECLARE_REG(u32, npages, ctxt, 3);
	int ret = 0;
	void *rx_virt, *tx_virt;

	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (host_buffers.tx) {
		ret = FFA_RET_DENIED;
		goto out_unlock;
	}

	/*
	 * Map our hypervisor buffers into the SPMD before mapping and
	 * pinning the host buffers in our own address space.
	 */
	ret = ffa_map_hyp_buffers(npages);
	if (ret)
		goto out_unlock;

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unmap;
	}

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_tx;
	}

	tx_virt = hyp_phys_to_virt(tx);
	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_rx;
	}

	rx_virt = hyp_phys_to_virt(rx);
	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unpin_tx;
	}

	host_buffers.tx = tx_virt;
	host_buffers.rx = rx_virt;

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
	return;

err_unpin_tx:
	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
	ffa_unmap_hyp_buffers();
	goto out_unlock;
}
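
/*
 * Worked example for the 'npages' check in do_ffa_rxtx_map(), assuming
 * KVM_FFA_MBOX_NR_PAGES is 1: the mailbox size is expressed in FFA_PAGE_SIZE
 * (4KiB) units, so the host must pass npages == 1 on a 4KiB-page kernel,
 * npages == 4 with 16KiB pages and npages == 16 with 64KiB pages. Anything
 * else is rejected with FFA_RET_INVALID_PARAMETERS before any buffer is
 * mapped or shared.
 */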

static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	int ret = 0;

	if (id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
	host_buffers.tx = NULL;

	hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
	host_buffers.rx = NULL;

	ffa_unmap_hyp_buffers();

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
}

static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}

static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				     u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}
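
/*
 * Note on units in the two walkers above: range->pg_cnt is a count of
 * FFA_PAGE_SIZE (4KiB) pages, whereas the host stage-2 is managed in
 * PAGE_SIZE units, hence the conversion and the PAGE_ALIGNED() check. For
 * example, a constituent with pg_cnt == 4 describes 16KiB: four pages on a
 * 4KiB kernel, exactly one page on a 16KiB kernel, and a misaligned (and
 * therefore rejected) request on a 64KiB kernel.
 */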

static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				 u32 nranges)
{
	u32 nshared = __ffa_host_share_ranges(ranges, nranges);
	int ret = 0;

	if (nshared != nranges) {
		WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}

static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
	int ret = 0;

	if (nunshared != nranges) {
		WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}

static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, fraglen, ctxt, 3);
	DECLARE_REG(u32, endpoint_id, ctxt, 4);
	struct ffa_mem_region_addr_range *buf;
	int ret = FFA_RET_INVALID_PARAMETERS;
	u32 nr_ranges;

	if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
		goto out;

	if (fraglen % sizeof(*buf))
		goto out;

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx)
		goto out_unlock;

	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);
	nr_ranges = fraglen / sizeof(*buf);

	ret = ffa_host_share_ranges(buf, nr_ranges);
	if (ret) {
		/*
		 * We're effectively aborting the transaction, so we need
		 * to restore the global state to what it was prior to
		 * transmission of the first fragment.
		 */
		ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
		WARN_ON(res->a0 != FFA_SUCCESS);
		goto out_unlock;
	}

	ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
	if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
		WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);

	/*
	 * If for any reason this did not succeed, we're in trouble as we have
	 * now lost the content of the previous fragments and we can't roll
	 * back the host stage-2 changes. The pages previously marked as
	 * shared will remain stuck in that state forever, preventing the host
	 * from sharing/donating them again and possibly leading to subsequent
	 * failures, but this will not compromise confidentiality.
	 */
	return;
}

static __always_inline void do_ffa_mem_xfer(const u64 func_id,
					    struct arm_smccc_res *res,
					    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, len, ctxt, 1);
	DECLARE_REG(u32, fraglen, ctxt, 2);
	DECLARE_REG(u64, addr_mbz, ctxt, 3);
	DECLARE_REG(u32, npages_mbz, ctxt, 4);
	struct ffa_mem_region_attributes *ep_mem_access;
	struct ffa_composite_mem_region *reg;
	struct ffa_mem_region *buf;
	u32 offset, nr_ranges;
	int ret = 0;

	BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
		     func_id != FFA_FN64_MEM_LEND);

	if (addr_mbz || npages_mbz || fraglen > len ||
	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (fraglen < sizeof(struct ffa_mem_region) +
		      sizeof(struct ffa_mem_region_attributes)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);

	ep_mem_access = (void *)buf +
			ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
	offset = ep_mem_access->composite_off;
	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	reg = (void *)buf + offset;
	nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
	if (nr_ranges % sizeof(reg->constituents[0])) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	nr_ranges /= sizeof(reg->constituents[0]);
	ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
	if (ret)
		goto out_unlock;

	ffa_mem_xfer(res, func_id, len, fraglen);
	if (fraglen != len) {
		if (res->a0 != FFA_MEM_FRAG_RX)
			goto err_unshare;

		if (res->a3 != fraglen)
			goto err_unshare;
	} else if (res->a0 != FFA_SUCCESS) {
		goto err_unshare;
	}

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);
	return;

err_unshare:
	WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
	goto out_unlock;
}
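
/*
 * Rough sketch of the transaction descriptor parsed by do_ffa_mem_xfer(),
 * as copied into the hypervisor TX buffer (exact offsets differ between
 * FF-A v1.0 and v1.1, which is what ffa_mem_desc_offset() hides):
 *
 *	struct ffa_mem_region			<- buf
 *	struct ffa_mem_region_attributes[]	<- buf + ffa_mem_desc_offset()
 *	struct ffa_composite_mem_region		<- buf + composite_off
 *	struct ffa_mem_region_addr_range[]	<- reg->constituents
 *
 * Only the constituent ranges of the single permitted endpoint are
 * transitioned in the host stage-2 before the descriptor is forwarded on.
 */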

static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, flags, ctxt, 3);
	struct ffa_mem_region_attributes *ep_mem_access;
	struct ffa_composite_mem_region *reg;
	u32 offset, len, fraglen, fragoff;
	struct ffa_mem_region *buf;
	int ret = 0;
	u64 handle;

	handle = PACK_HANDLE(handle_lo, handle_hi);

	hyp_spin_lock(&host_buffers.lock);

	buf = hyp_buffers.tx;
	*buf = (struct ffa_mem_region) {
		.sender_id	= HOST_FFA_ID,
		.handle		= handle,
	};

	ffa_retrieve_req(res, sizeof(*buf));
	buf = hyp_buffers.rx;
	if (res->a0 != FFA_MEM_RETRIEVE_RESP)
		goto out_unlock;

	len = res->a1;
	fraglen = res->a2;

	ep_mem_access = (void *)buf +
			ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
	offset = ep_mem_access->composite_off;
	/*
	 * We can trust the SPMD to get this right, but let's at least
	 * check that we end up with something that doesn't look _completely_
	 * bogus.
	 */
	if (WARN_ON(offset > len ||
		    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
		ret = FFA_RET_ABORTED;
		goto out_unlock;
	}

	if (len > ffa_desc_buf.len) {
		ret = FFA_RET_NO_MEMORY;
		goto out_unlock;
	}

	buf = ffa_desc_buf.buf;
	memcpy(buf, hyp_buffers.rx, fraglen);

	for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
		ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
		if (res->a0 != FFA_MEM_FRAG_TX) {
			ret = FFA_RET_INVALID_PARAMETERS;
			goto out_unlock;
		}

		fraglen = res->a3;
		memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
	}

	ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
	if (res->a0 != FFA_SUCCESS)
		goto out_unlock;

	reg = (void *)buf + offset;
	/* If the SPMD was happy, then we should be too. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents,
					reg->addr_range_cnt));
out_unlock:
	hyp_spin_unlock(&host_buffers.lock);

	if (ret)
		ffa_to_smccc_res(res, ret);
}
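
/*
 * For reference, the reclaim path operates on the 64-bit handle originally
 * returned by the SPMD for the transaction: the host passes it back as two
 * 32-bit halves (low word in x1, high word in x2) and PACK_HANDLE() simply
 * recombines them. The full descriptor is then re-fetched, fragment by
 * fragment if necessary, purely so that the constituent ranges can be
 * returned to the host stage-2 once the SPMD has accepted the reclaim.
 */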

/*
 * Is a given FFA function supported, either by forwarding on directly
 * or by handling at EL2?
 */
static bool ffa_call_supported(u64 func_id)
{
	switch (func_id) {
	/* Unsupported memory management calls */
	case FFA_FN64_MEM_RETRIEVE_REQ:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_OP_PAUSE:
	case FFA_MEM_OP_RESUME:
	case FFA_MEM_FRAG_RX:
	case FFA_FN64_MEM_DONATE:
	/* Indirect message passing via RX/TX buffers */
	case FFA_MSG_SEND:
	case FFA_MSG_POLL:
	case FFA_MSG_WAIT:
	/* 32-bit variants of 64-bit calls */
	case FFA_MSG_SEND_DIRECT_RESP:
	case FFA_RXTX_MAP:
	case FFA_MEM_DONATE:
	case FFA_MEM_RETRIEVE_REQ:
		return false;
	}

	return true;
}

static bool do_ffa_features(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	u64 prop = 0;
	int ret = 0;

	if (!ffa_call_supported(id)) {
		ret = FFA_RET_NOT_SUPPORTED;
		goto out_handled;
	}

	switch (id) {
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		ret = FFA_RET_SUCCESS;
		prop = 0; /* No support for dynamic buffers */
		goto out_handled;
	default:
		return false;
	}

out_handled:
	ffa_to_smccc_res_prop(res, ret, prop);
	return true;
}
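
/*
 * Example of the FFA_FEATURES flow, as a sketch: a host query for
 * FFA_FN64_MEM_SHARE is answered directly at EL2 with FFA_SUCCESS and a zero
 * properties value (no support for dynamically allocated buffers, per the
 * comment above), a query for an ID rejected by ffa_call_supported() gets
 * FFA_RET_NOT_SUPPORTED, and any other query is passed through to the
 * firmware untouched.
 */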

static int hyp_ffa_post_init(void)
{
	size_t min_rxtx_sz;
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	if (res.a2 != HOST_FFA_ID)
		return -EINVAL;

	arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
			  0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	switch (res.a2) {
	case FFA_FEAT_RXTX_MIN_SZ_4K:
		min_rxtx_sz = SZ_4K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_16K:
		min_rxtx_sz = SZ_16K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_64K:
		min_rxtx_sz = SZ_64K;
		break;
	default:
		return -EINVAL;
	}

	if (min_rxtx_sz > PAGE_SIZE)
		return -EOPNOTSUPP;

	return 0;
}

static void do_ffa_version(struct arm_smccc_res *res,
			   struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, ffa_req_version, ctxt, 1);

	if (FFA_MAJOR_VERSION(ffa_req_version) != 1) {
		res->a0 = FFA_RET_NOT_SUPPORTED;
		return;
	}

	hyp_spin_lock(&version_lock);
	if (has_version_negotiated) {
		res->a0 = hyp_ffa_version;
		goto unlock;
	}

	/*
	 * If the client driver tries to downgrade the version, we need to
	 * check first whether the TEE supports it.
	 */
	if (FFA_MINOR_VERSION(ffa_req_version) < FFA_MINOR_VERSION(hyp_ffa_version)) {
		arm_smccc_1_1_smc(FFA_VERSION, ffa_req_version, 0,
				  0, 0, 0, 0, 0,
				  res);
		if (res->a0 == FFA_RET_NOT_SUPPORTED)
			goto unlock;

		hyp_ffa_version = ffa_req_version;
	}

	if (hyp_ffa_post_init())
		res->a0 = FFA_RET_NOT_SUPPORTED;
	else {
		has_version_negotiated = true;
		res->a0 = hyp_ffa_version;
	}
unlock:
	hyp_spin_unlock(&version_lock);
}
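
/*
 * FF-A version numbers pack the major version in bits [30:16] and the minor
 * version in bits [15:0], so FFA_VERSION_1_1 is 0x10001 and a host requesting
 * v1.0 passes 0x10000. The first FFA_VERSION call from the host therefore
 * fixes hyp_ffa_version (downgrading it only if the TEE agrees), and every
 * subsequent call simply reports the negotiated value back.
 */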

static void do_ffa_part_get(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, uuid0, ctxt, 1);
	DECLARE_REG(u32, uuid1, ctxt, 2);
	DECLARE_REG(u32, uuid2, ctxt, 3);
	DECLARE_REG(u32, uuid3, ctxt, 4);
	DECLARE_REG(u32, flags, ctxt, 5);
	u32 count, partition_sz, copy_sz;

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.rx) {
		ffa_to_smccc_res(res, FFA_RET_BUSY);
		goto out_unlock;
	}

	arm_smccc_1_1_smc(FFA_PARTITION_INFO_GET, uuid0, uuid1,
			  uuid2, uuid3, flags, 0, 0,
			  res);

	if (res->a0 != FFA_SUCCESS)
		goto out_unlock;

	count = res->a2;
	if (!count)
		goto out_unlock;

	if (hyp_ffa_version > FFA_VERSION_1_0) {
		/* Get the number of partitions deployed in the system */
		if (flags & 0x1)
			goto out_unlock;

		partition_sz = res->a3;
	} else {
		/* FFA_VERSION_1_0 lacks the size in the response */
		partition_sz = FFA_1_0_PARTITON_INFO_SZ;
	}

	copy_sz = partition_sz * count;
	if (copy_sz > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
		ffa_to_smccc_res(res, FFA_RET_ABORTED);
		goto out_unlock;
	}

	memcpy(host_buffers.rx, hyp_buffers.rx, copy_sz);
out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
}
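
/*
 * Sizing example for the copy above, assuming the v1.0 partition descriptor
 * (FFA_1_0_PARTITON_INFO_SZ) is 8 bytes: if the firmware reports 3
 * partitions, a v1.0 host has 3 * 8 = 24 bytes copied from the hypervisor RX
 * buffer into its own, whereas on v1.1+ the per-entry size comes from the
 * firmware's response in res->a3. Either way the copy is refused with
 * FFA_RET_ABORTED if it would overflow the mailbox.
 */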

bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
	struct arm_smccc_res res;

	/*
	 * There's no way we can tell what a non-standard SMC call might
	 * be up to. Ideally, we would terminate these here and return
	 * an error to the host, but sadly devices make use of custom
	 * firmware calls for things like power management, debugging,
	 * RNG access and crash reporting.
	 *
	 * Given that the architecture requires us to trust EL3 anyway,
	 * we forward unrecognised calls on under the assumption that
	 * the firmware doesn't expose a mechanism to access arbitrary
	 * non-secure memory. Short of a per-device table of SMCs, this
	 * is the best we can do.
	 */
	if (!is_ffa_call(func_id))
		return false;

	if (!has_version_negotiated && func_id != FFA_VERSION) {
		ffa_to_smccc_error(&res, FFA_RET_INVALID_PARAMETERS);
		goto out_handled;
	}

	switch (func_id) {
	case FFA_FEATURES:
		if (!do_ffa_features(&res, host_ctxt))
			return false;
		goto out_handled;
	/* Memory management */
	case FFA_FN64_RXTX_MAP:
		do_ffa_rxtx_map(&res, host_ctxt);
		goto out_handled;
	case FFA_RXTX_UNMAP:
		do_ffa_rxtx_unmap(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
		do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_RECLAIM:
		do_ffa_mem_reclaim(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_FRAG_TX:
		do_ffa_mem_frag_tx(&res, host_ctxt);
		goto out_handled;
	case FFA_VERSION:
		do_ffa_version(&res, host_ctxt);
		goto out_handled;
	case FFA_PARTITION_INFO_GET:
		do_ffa_part_get(&res, host_ctxt);
		goto out_handled;
	}

	if (ffa_call_supported(func_id))
		return false; /* Pass through */

	ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
out_handled:
	ffa_set_retval(host_ctxt, &res);
	return true;
}
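
/*
 * A sketch of how hyp_ffa_init() carves up the 'pages' donation it is given
 * (sized via hyp_ffa_proxy_pages() by the rest of pKVM):
 *
 *	[ hyp TX mailbox ][ hyp RX mailbox ][ descriptor reassembly buffer ]
 *	  KVM_FFA_MBOX_NR_PAGES each	      remainder, owned by ffa_desc_buf
 *
 * The host's own mailboxes are not allocated here; they are shared with the
 * hypervisor later, when the host issues FFA_FN64_RXTX_MAP, and tracked in
 * host_buffers.
 */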
int hyp_ffa_init(void *pages)
{
	struct arm_smccc_res res;
	void *tx, *rx;

	if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
		return 0;

	arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_1, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == FFA_RET_NOT_SUPPORTED)
		return 0;

	/*
	 * Firmware returns the maximum supported version of the FF-A
	 * implementation. Check that the returned version is
	 * backwards-compatible with the hyp according to the rules in DEN0077A
	 * v1.1 REL0 13.2.1.
	 *
	 * Of course, things are never simple when dealing with firmware. v1.1
	 * broke ABI with v1.0 on several structures, which is itself
	 * incompatible with the aforementioned versioning scheme. The
	 * expectation is that v1.x implementations that do not support the v1.0
	 * ABI return NOT_SUPPORTED rather than a version number, according to
	 * DEN0077A v1.1 REL0 18.6.4.
	 */
	if (FFA_MAJOR_VERSION(res.a0) != 1)
		return -EOPNOTSUPP;

	if (FFA_MINOR_VERSION(res.a0) < FFA_MINOR_VERSION(FFA_VERSION_1_1))
		hyp_ffa_version = res.a0;
	else
		hyp_ffa_version = FFA_VERSION_1_1;

	tx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
	rx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;

	ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
		.buf	= pages,
		.len	= PAGE_SIZE *
			  (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
	};

	hyp_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
		.tx	= tx,
		.rx	= rx,
	};

	host_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
	};

	version_lock = __HYP_SPIN_LOCK_UNLOCKED;
	return 0;
}