1048be5feSWill Deacon // SPDX-License-Identifier: GPL-2.0-only
2048be5feSWill Deacon /*
3048be5feSWill Deacon * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
4048be5feSWill Deacon * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
5048be5feSWill Deacon * Framework for Arm A-profile", which is specified by Arm in document
6048be5feSWill Deacon * number DEN0077.
7048be5feSWill Deacon *
8048be5feSWill Deacon * Copyright (C) 2022 - Google LLC
9048be5feSWill Deacon * Author: Andrew Walbran <qwandor@google.com>
10048be5feSWill Deacon *
11048be5feSWill Deacon * This driver hooks into the SMC trapping logic for the host and intercepts
12048be5feSWill Deacon * all calls falling within the FF-A range. Each call is either:
13048be5feSWill Deacon *
14048be5feSWill Deacon * - Forwarded on unmodified to the SPMD at EL3
15048be5feSWill Deacon * - Rejected as "unsupported"
16048be5feSWill Deacon * - Accompanied by a host stage-2 page-table check/update and reissued
17048be5feSWill Deacon *
18048be5feSWill Deacon * Consequently, any attempts by the host to make guest memory pages
19048be5feSWill Deacon * accessible to the secure world using FF-A will be detected either here
20048be5feSWill Deacon * (in the case that the memory is already owned by the guest) or during
21048be5feSWill Deacon * donation to the guest (in the case that the memory was previously shared
22048be5feSWill Deacon * with the secure world).
23048be5feSWill Deacon *
24048be5feSWill Deacon * To allow the rolling-back of page-table updates and FF-A calls in the
25048be5feSWill Deacon * event of failure, operations involving the RXTX buffers are locked for
26048be5feSWill Deacon * the duration and are therefore serialised.
27048be5feSWill Deacon */
28048be5feSWill Deacon
29048be5feSWill Deacon #include <linux/arm-smccc.h>
30048be5feSWill Deacon #include <linux/arm_ffa.h>
31bc3888a0SWill Deacon #include <asm/kvm_pkvm.h>
32bc3888a0SWill Deacon
33048be5feSWill Deacon #include <nvhe/ffa.h>
349d0c6a9aSWill Deacon #include <nvhe/mem_protect.h>
359d0c6a9aSWill Deacon #include <nvhe/memory.h>
36048be5feSWill Deacon #include <nvhe/trap_handler.h>
37bc3888a0SWill Deacon #include <nvhe/spinlock.h>
38048be5feSWill Deacon
3912bdce4fSWill Deacon /*
4012bdce4fSWill Deacon * "ID value 0 must be returned at the Non-secure physical FF-A instance"
4112bdce4fSWill Deacon * We share this ID with the host.
4212bdce4fSWill Deacon */
4312bdce4fSWill Deacon #define HOST_FFA_ID 0
4412bdce4fSWill Deacon
/*
 * A buffer to hold the maximum descriptor size we can see from the host,
 * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
 * when resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
	void *buf;	/* Hypervisor-private scratch area for reassembling descriptors. */
	size_t len;	/* Size of @buf in bytes. */
};

static struct kvm_ffa_descriptor_buffer ffa_desc_buf;
560a9f15fdSQuentin Perret
/* A pair of FF-A RX/TX mailbox buffers and the lock serialising their use. */
struct kvm_ffa_buffers {
	hyp_spinlock_t lock;
	void *tx;	/* TX (transmit) mailbox; NULL until RXTX_MAP succeeds. */
	void *rx;	/* RX (receive) mailbox; NULL until RXTX_MAP succeeds. */
};

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers;
/* FF-A version used when parsing descriptors (selects mem-desc layout). */
static u32 hyp_ffa_version;
/* NOTE(review): presumably set under version_lock once the host picks a version — confirm in the FFA_VERSION handler (not in this chunk). */
static bool has_version_negotiated;
static hyp_spinlock_t version_lock;
73bc3888a0SWill Deacon
/*
 * Encode a failed FF-A call as an SMCCC result: FFA_ERROR in w0 with the
 * FF-A error code in w2 (remaining registers zeroed).
 */
static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
	struct arm_smccc_res err = {
		.a0 = FFA_ERROR,
		.a2 = ffa_errno,
	};

	*res = err;
}
81048be5feSWill Deacon
/*
 * Encode the outcome of an FF-A call: FFA_SUCCESS with an optional
 * properties value in w2, or FFA_ERROR carrying @ret on failure.
 */
static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
{
	if (ret != FFA_RET_SUCCESS) {
		ffa_to_smccc_error(res, ret);
		return;
	}

	*res = (struct arm_smccc_res) {
		.a0 = FFA_SUCCESS,
		.a2 = prop,
	};
}
919d0c6a9aSWill Deacon
/* As ffa_to_smccc_res_prop(), but with no properties to report. */
static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
	ffa_to_smccc_res_prop(res, ret, 0);
}
9620936cd1SFuad Tabba
/*
 * Copy the SMCCC result registers into the host vCPU context so the host
 * observes them in x0-x3 when it resumes from the trapped call.
 */
static void ffa_set_retval(struct kvm_cpu_context *ctxt,
			   struct arm_smccc_res *res)
{
	cpu_reg(ctxt, 0) = res->a0;
	cpu_reg(ctxt, 1) = res->a1;
	cpu_reg(ctxt, 2) = res->a2;
	cpu_reg(ctxt, 3) = res->a3;
}
105048be5feSWill Deacon
is_ffa_call(u64 func_id)106048be5feSWill Deacon static bool is_ffa_call(u64 func_id)
107048be5feSWill Deacon {
108048be5feSWill Deacon return ARM_SMCCC_IS_FAST_CALL(func_id) &&
109048be5feSWill Deacon ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
110048be5feSWill Deacon ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
111048be5feSWill Deacon ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
112048be5feSWill Deacon }
113048be5feSWill Deacon
/*
 * Register the hypervisor's own RX/TX mailboxes with the SPMD via
 * FFA_FN64_RXTX_MAP. @ffa_page_count is the buffer size in FFA_PAGE_SIZE
 * units. Returns FFA_RET_SUCCESS or the FF-A error code from w2.
 */
static int ffa_map_hyp_buffers(u64 ffa_page_count)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
			  hyp_virt_to_phys(hyp_buffers.tx),
			  hyp_virt_to_phys(hyp_buffers.rx),
			  ffa_page_count,
			  0, 0, 0, 0,
			  &res);

	/* On FFA_ERROR, the error code is conveyed in w2. */
	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}
1279d0c6a9aSWill Deacon
/*
 * Unregister the hypervisor's RX/TX mailboxes from the SPMD via
 * FFA_RXTX_UNMAP, identifying ourselves with HOST_FFA_ID (we share the
 * host's ID). Returns FFA_RET_SUCCESS or the FF-A error code from w2.
 */
static int ffa_unmap_hyp_buffers(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
			  HOST_FFA_ID,
			  0, 0, 0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}
1399d0c6a9aSWill Deacon
/*
 * Forward an FFA_MEM_FRAG_TX to the SPMD: transmit the next @fraglen-byte
 * fragment for the transaction identified by the split 64-bit handle.
 */
static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
			  handle_lo, handle_hi, fraglen, endpoint_id,
			  0, 0, 0,
			  res);
}
1480a9f15fdSQuentin Perret
/*
 * Issue an FFA_MEM_FRAG_RX to the SPMD: request the descriptor fragment
 * starting at @fragoff for the transaction identified by the split handle,
 * on behalf of HOST_FFA_ID.
 */
static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 fragoff)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
			  handle_lo, handle_hi, fragoff, HOST_FFA_ID,
			  0, 0, 0,
			  res);
}
1570a9f15fdSQuentin Perret
/*
 * Issue a memory-transfer call (@func_id is MEM_SHARE or MEM_LEND) to the
 * SPMD: @len is the total descriptor length, @fraglen the length of the
 * fragment currently sitting in the TX buffer.
 */
static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
			 u32 fraglen)
{
	arm_smccc_1_1_smc(func_id, len, fraglen,
			  0, 0, 0, 0, 0,
			  res);
}
16543609000SWill Deacon
/*
 * Issue an FFA_MEM_RECLAIM to the SPMD for the transaction identified by
 * the split 64-bit handle, passing the caller-supplied @flags through.
 */
static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 flags)
{
	arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
			  handle_lo, handle_hi, flags,
			  0, 0, 0, 0,
			  res);
}
1740e3bcb49SWill Deacon
/*
 * Issue an FFA_FN64_MEM_RETRIEVE_REQ to the SPMD. The request descriptor
 * is expected to already be in the hyp TX buffer; total and fragment
 * lengths are both @len (single-fragment request).
 */
static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
{
	arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
			  len, len,
			  0, 0, 0, 0, 0,
			  res);
}
1820e3bcb49SWill Deacon
/* Release ownership of the RX buffer back to its producer via FFA_RX_RELEASE. */
static void ffa_rx_release(struct arm_smccc_res *res)
{
	arm_smccc_1_1_smc(FFA_RX_RELEASE,
			  0, 0,
			  0, 0, 0, 0, 0,
			  res);
}
190d66e50beSVincent Donnefort
/*
 * Handle FFA_FN64_RXTX_MAP from the host: register the host's mailbox pair.
 *
 * The hypervisor's own buffers are mapped with the SPMD first, then the
 * host's TX/RX pages are shared into the hyp stage-1 and pinned so that
 * their contents cannot be pulled out from under us. Any failure unwinds
 * the earlier steps in reverse order (see the err_* labels).
 */
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
	DECLARE_REG(u32, npages, ctxt, 3);
	int ret = 0;
	void *rx_virt, *tx_virt;

	/* npages is in FFA_PAGE_SIZE units; insist on exactly one mailbox's worth. */
	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	/* Already mapped: FF-A forbids re-registration without an unmap. */
	if (host_buffers.tx) {
		ret = FFA_RET_DENIED;
		goto out_unlock;
	}

	/*
	 * Map our hypervisor buffers into the SPMD before mapping and
	 * pinning the host buffers in our own address space.
	 */
	ret = ffa_map_hyp_buffers(npages);
	if (ret)
		goto out_unlock;

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unmap;
	}

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_tx;
	}

	tx_virt = hyp_phys_to_virt(tx);
	/* Pin the (single-page) buffer so the host can't unshare it while in use. */
	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_rx;
	}

	rx_virt = hyp_phys_to_virt(rx);
	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unpin_tx;
	}

	host_buffers.tx = tx_virt;
	host_buffers.rx = rx_virt;

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
	return;

	/* Error unwind: undo each completed step in reverse order. */
err_unpin_tx:
	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
	ffa_unmap_hyp_buffers();
	goto out_unlock;
}
2699d0c6a9aSWill Deacon
/*
 * Handle FFA_RXTX_UNMAP from the host: tear down the host mailboxes set up
 * by do_ffa_rxtx_map() (unpin, unshare, then unmap our own buffers from
 * the SPMD). The unshare calls cannot fail for pages we pinned ourselves,
 * hence the WARN_ONs.
 */
static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	int ret = 0;

	/* We only service the host's own endpoint ID. */
	if (id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	/* Nothing mapped: nothing to unmap. */
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
	host_buffers.tx = NULL;

	hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
	host_buffers.rx = NULL;

	ffa_unmap_hyp_buffers();

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
}
3029d0c6a9aSWill Deacon
/*
 * Walk @ranges, transitioning each constituent range from host-owned to
 * shared-with-FFA in the host stage-2. Stops at the first range that is
 * misaligned or refused, returning the number successfully shared.
 */
static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 nr_shared;

	for (nr_shared = 0; nr_shared < nranges; nr_shared++) {
		u64 size = (u64)ranges[nr_shared].pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(ranges[nr_shared].address);

		/* FFA_PAGE_SIZE may be smaller than PAGE_SIZE: insist on whole pages. */
		if (!PAGE_ALIGNED(size))
			break;

		if (__pkvm_host_share_ffa(pfn, size / PAGE_SIZE))
			break;
	}

	return nr_shared;
}
32243609000SWill Deacon
/*
 * Walk @ranges, reverting each constituent range from shared-with-FFA back
 * to host-owned in the host stage-2. Stops at the first range that is
 * misaligned or refused, returning the number successfully unshared.
 */
static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				     u32 nranges)
{
	u32 nr_unshared;

	for (nr_unshared = 0; nr_unshared < nranges; nr_unshared++) {
		u64 size = (u64)ranges[nr_unshared].pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(ranges[nr_unshared].address);

		/* FFA_PAGE_SIZE may be smaller than PAGE_SIZE: insist on whole pages. */
		if (!PAGE_ALIGNED(size))
			break;

		if (__pkvm_host_unshare_ffa(pfn, size / PAGE_SIZE))
			break;
	}

	return nr_unshared;
}
34243609000SWill Deacon
/*
 * Share all of @ranges with the secure world, or none: a partial failure
 * rolls back the ranges already shared and reports FFA_RET_DENIED.
 */
static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				 u32 nranges)
{
	u32 nshared = __ffa_host_share_ranges(ranges, nranges);

	if (nshared == nranges)
		return 0;

	/* Roll back what we managed to share; that must not fail. */
	WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
	return FFA_RET_DENIED;
}
35643609000SWill Deacon
/*
 * Unshare all of @ranges from the secure world, or none: a partial failure
 * re-shares the ranges already reverted and reports FFA_RET_DENIED.
 */
static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);

	if (nunshared == nranges)
		return 0;

	/* Roll back what we managed to unshare; that must not fail. */
	WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
	return FFA_RET_DENIED;
}
37043609000SWill Deacon
/*
 * Handle FFA_MEM_FRAG_TX from the host: a follow-up fragment of a
 * multi-fragment MEM_SHARE/MEM_LEND. Continuation fragments consist purely
 * of constituent address ranges, so the whole fragment is validated as an
 * array of ffa_mem_region_addr_range, shared in the host stage-2, and
 * forwarded to the SPMD.
 */
static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, fraglen, ctxt, 3);
	DECLARE_REG(u32, endpoint_id, ctxt, 4);
	struct ffa_mem_region_addr_range *buf;
	int ret = FFA_RET_INVALID_PARAMETERS;
	u32 nr_ranges;

	/* Fragment must fit in the mailbox... */
	if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
		goto out;

	/* ...and be a whole number of address-range entries. */
	if (fraglen % sizeof(*buf))
		goto out;

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx)
		goto out_unlock;

	/* Snapshot the host TX buffer so it can't change under our feet. */
	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);
	nr_ranges = fraglen / sizeof(*buf);

	ret = ffa_host_share_ranges(buf, nr_ranges);
	if (ret) {
		/*
		 * We're effectively aborting the transaction, so we need
		 * to restore the global state back to what it was prior to
		 * transmission of the first fragment.
		 */
		ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
		WARN_ON(res->a0 != FFA_SUCCESS);
		goto out_unlock;
	}

	ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
	/* SPMD rejected the fragment: revert this fragment's stage-2 changes. */
	if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
		WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);

	/*
	 * If for any reason this did not succeed, we're in trouble as we have
	 * now lost the content of the previous fragments and we can't rollback
	 * the host stage-2 changes. The pages previously marked as shared will
	 * remain stuck in that state forever, hence preventing the host from
	 * sharing/donating them again and may possibly lead to subsequent
	 * failures, but this will not compromise confidentiality.
	 */
	return;
}
4280a9f15fdSQuentin Perret
/*
 * Handle FFA_FN64_MEM_SHARE/FFA_FN64_MEM_LEND from the host (@func_id
 * selects which): validate the transmit descriptor, transition the
 * constituent pages to the FFA-shared state in the host stage-2 and
 * forward the (first fragment of the) call to the SPMD. If the SPMD
 * rejects the transfer, the stage-2 changes are rolled back.
 */
static void __do_ffa_mem_xfer(const u64 func_id,
			      struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, len, ctxt, 1);
	DECLARE_REG(u32, fraglen, ctxt, 2);
	DECLARE_REG(u64, addr_mbz, ctxt, 3);
	DECLARE_REG(u32, npages_mbz, ctxt, 4);
	struct ffa_mem_region_attributes *ep_mem_access;
	struct ffa_composite_mem_region *reg;
	struct ffa_mem_region *buf;
	u32 offset, nr_ranges;
	int ret = 0;

	/* Address/page-count must be zero (no dynamic buffers) and the fragment sane. */
	if (addr_mbz || npages_mbz || fraglen > len ||
	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	/* Need at least the region header plus one endpoint access descriptor. */
	if (fraglen < sizeof(struct ffa_mem_region) +
		      sizeof(struct ffa_mem_region_attributes)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	/* The full descriptor must fit in our scratch buffer on the reclaim path. */
	if (len > ffa_desc_buf.len) {
		ret = FFA_RET_NO_MEMORY;
		goto out_unlock;
	}

	/* Snapshot the host TX buffer so it can't change under our feet. */
	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);

	/* Endpoint descriptor location depends on the negotiated FF-A version. */
	ep_mem_access = (void *)buf +
			ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
	offset = ep_mem_access->composite_off;
	/* Exactly one endpoint, and the sender must be the host itself. */
	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	reg = (void *)buf + offset;
	/* Bytes of constituent array present in this fragment... */
	nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
	if (nr_ranges % sizeof(reg->constituents[0])) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	/* ...converted to a count of entries. */
	nr_ranges /= sizeof(reg->constituents[0]);
	ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
	if (ret)
		goto out_unlock;

	ffa_mem_xfer(res, func_id, len, fraglen);
	if (fraglen != len) {
		/* Multi-fragment: SPMD must ask for more, acknowledging @fraglen bytes. */
		if (res->a0 != FFA_MEM_FRAG_RX)
			goto err_unshare;

		if (res->a3 != fraglen)
			goto err_unshare;
	} else if (res->a0 != FFA_SUCCESS) {
		goto err_unshare;
	}

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);
	return;

err_unshare:
	/* SPMD refused: revert the stage-2 transition for this fragment. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
	goto out_unlock;
}
51643609000SWill Deacon
/*
 * Statically restrict __do_ffa_mem_xfer() to the SHARE/LEND function IDs
 * and forward the call.
 *
 * Fix: the do/while(0) wrapper must NOT carry its own trailing semicolon,
 * otherwise the macro expands to two statements (the loop plus an empty
 * statement from the caller's ';'), which breaks compilation when the
 * macro is used as the body of an unbraced if/else.
 */
#define do_ffa_mem_xfer(fid, res, ctxt)				\
	do {							\
		BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE &&	\
			     (fid) != FFA_FN64_MEM_LEND);	\
		__do_ffa_mem_xfer((fid), (res), (ctxt));	\
	} while (0)
523*f26a525bSSnehal Koukuntla
/*
 * Handle FFA_MEM_RECLAIM from the host: before forwarding the reclaim, the
 * original descriptor is retrieved from the SPMD (reassembling fragments
 * into ffa_desc_buf if needed) so that, once the SPMD accepts the reclaim,
 * the full list of constituent ranges can be reverted in the host stage-2.
 */
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, flags, ctxt, 3);
	struct ffa_mem_region_attributes *ep_mem_access;
	struct ffa_composite_mem_region *reg;
	u32 offset, len, fraglen, fragoff;
	struct ffa_mem_region *buf;
	int ret = 0;
	u64 handle;

	handle = PACK_HANDLE(handle_lo, handle_hi);

	hyp_spin_lock(&host_buffers.lock);

	/* Build a minimal retrieve request in our TX buffer. */
	buf = hyp_buffers.tx;
	*buf = (struct ffa_mem_region) {
		.sender_id	= HOST_FFA_ID,
		.handle		= handle,
	};

	ffa_retrieve_req(res, sizeof(*buf));
	/* The response descriptor lands in our RX buffer. */
	buf = hyp_buffers.rx;
	if (res->a0 != FFA_MEM_RETRIEVE_RESP)
		goto out_unlock;

	len = res->a1;		/* Total descriptor length. */
	fraglen = res->a2;	/* Length of this first fragment. */

	ep_mem_access = (void *)buf +
			ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
	offset = ep_mem_access->composite_off;
	/*
	 * We can trust the SPMD to get this right, but let's at least
	 * check that we end up with something that doesn't look _completely_
	 * bogus.
	 */
	if (WARN_ON(offset > len ||
		    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
		ret = FFA_RET_ABORTED;
		ffa_rx_release(res);
		goto out_unlock;
	}

	if (len > ffa_desc_buf.len) {
		ret = FFA_RET_NO_MEMORY;
		ffa_rx_release(res);
		goto out_unlock;
	}

	/* Reassemble the full descriptor, fragment by fragment, into ffa_desc_buf. */
	buf = ffa_desc_buf.buf;
	memcpy(buf, hyp_buffers.rx, fraglen);
	ffa_rx_release(res);

	for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
		ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
		if (res->a0 != FFA_MEM_FRAG_TX) {
			ret = FFA_RET_INVALID_PARAMETERS;
			goto out_unlock;
		}

		fraglen = res->a3;
		memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
		ffa_rx_release(res);
	}

	ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
	if (res->a0 != FFA_SUCCESS)
		goto out_unlock;

	reg = (void *)buf + offset;
	/* If the SPMD was happy, then we should be too. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents,
					reg->addr_range_cnt));
out_unlock:
	hyp_spin_unlock(&host_buffers.lock);

	if (ret)
		ffa_to_smccc_res(res, ret);
}
6060e3bcb49SWill Deacon
607048be5feSWill Deacon /*
608048be5feSWill Deacon * Is a given FFA function supported, either by forwarding on directly
609048be5feSWill Deacon * or by handling at EL2?
610048be5feSWill Deacon */
ffa_call_supported(u64 func_id)611048be5feSWill Deacon static bool ffa_call_supported(u64 func_id)
612048be5feSWill Deacon {
613048be5feSWill Deacon switch (func_id) {
614048be5feSWill Deacon /* Unsupported memory management calls */
615048be5feSWill Deacon case FFA_FN64_MEM_RETRIEVE_REQ:
616048be5feSWill Deacon case FFA_MEM_RETRIEVE_RESP:
617048be5feSWill Deacon case FFA_MEM_RELINQUISH:
618048be5feSWill Deacon case FFA_MEM_OP_PAUSE:
619048be5feSWill Deacon case FFA_MEM_OP_RESUME:
620048be5feSWill Deacon case FFA_MEM_FRAG_RX:
621048be5feSWill Deacon case FFA_FN64_MEM_DONATE:
622048be5feSWill Deacon /* Indirect message passing via RX/TX buffers */
623048be5feSWill Deacon case FFA_MSG_SEND:
624048be5feSWill Deacon case FFA_MSG_POLL:
625048be5feSWill Deacon case FFA_MSG_WAIT:
626048be5feSWill Deacon /* 32-bit variants of 64-bit calls */
627048be5feSWill Deacon case FFA_MSG_SEND_DIRECT_RESP:
628048be5feSWill Deacon case FFA_RXTX_MAP:
629048be5feSWill Deacon case FFA_MEM_DONATE:
630048be5feSWill Deacon case FFA_MEM_RETRIEVE_REQ:
631048be5feSWill Deacon return false;
632048be5feSWill Deacon }
633048be5feSWill Deacon
634048be5feSWill Deacon return true;
635048be5feSWill Deacon }
636048be5feSWill Deacon
do_ffa_features(struct arm_smccc_res * res,struct kvm_cpu_context * ctxt)63720936cd1SFuad Tabba static bool do_ffa_features(struct arm_smccc_res *res,
63820936cd1SFuad Tabba struct kvm_cpu_context *ctxt)
63920936cd1SFuad Tabba {
64020936cd1SFuad Tabba DECLARE_REG(u32, id, ctxt, 1);
64120936cd1SFuad Tabba u64 prop = 0;
64220936cd1SFuad Tabba int ret = 0;
64320936cd1SFuad Tabba
64420936cd1SFuad Tabba if (!ffa_call_supported(id)) {
64520936cd1SFuad Tabba ret = FFA_RET_NOT_SUPPORTED;
64620936cd1SFuad Tabba goto out_handled;
64720936cd1SFuad Tabba }
64820936cd1SFuad Tabba
64920936cd1SFuad Tabba switch (id) {
65020936cd1SFuad Tabba case FFA_MEM_SHARE:
65120936cd1SFuad Tabba case FFA_FN64_MEM_SHARE:
65220936cd1SFuad Tabba case FFA_MEM_LEND:
65320936cd1SFuad Tabba case FFA_FN64_MEM_LEND:
65420936cd1SFuad Tabba ret = FFA_RET_SUCCESS;
65520936cd1SFuad Tabba prop = 0; /* No support for dynamic buffers */
65620936cd1SFuad Tabba goto out_handled;
65720936cd1SFuad Tabba default:
65820936cd1SFuad Tabba return false;
65920936cd1SFuad Tabba }
66020936cd1SFuad Tabba
66120936cd1SFuad Tabba out_handled:
66220936cd1SFuad Tabba ffa_to_smccc_res_prop(res, ret, prop);
66320936cd1SFuad Tabba return true;
66420936cd1SFuad Tabba }
66520936cd1SFuad Tabba
hyp_ffa_post_init(void)666c9c01262SSebastian Ene static int hyp_ffa_post_init(void)
667048be5feSWill Deacon {
6689d0c6a9aSWill Deacon size_t min_rxtx_sz;
669c9c01262SSebastian Ene struct arm_smccc_res res;
67012bdce4fSWill Deacon
67112bdce4fSWill Deacon arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
67212bdce4fSWill Deacon if (res.a0 != FFA_SUCCESS)
67312bdce4fSWill Deacon return -EOPNOTSUPP;
67412bdce4fSWill Deacon
67512bdce4fSWill Deacon if (res.a2 != HOST_FFA_ID)
67612bdce4fSWill Deacon return -EINVAL;
67712bdce4fSWill Deacon
6789d0c6a9aSWill Deacon arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
6799d0c6a9aSWill Deacon 0, 0, 0, 0, 0, 0, &res);
6809d0c6a9aSWill Deacon if (res.a0 != FFA_SUCCESS)
6819d0c6a9aSWill Deacon return -EOPNOTSUPP;
6829d0c6a9aSWill Deacon
6839d0c6a9aSWill Deacon switch (res.a2) {
6849d0c6a9aSWill Deacon case FFA_FEAT_RXTX_MIN_SZ_4K:
6859d0c6a9aSWill Deacon min_rxtx_sz = SZ_4K;
6869d0c6a9aSWill Deacon break;
6879d0c6a9aSWill Deacon case FFA_FEAT_RXTX_MIN_SZ_16K:
6889d0c6a9aSWill Deacon min_rxtx_sz = SZ_16K;
6899d0c6a9aSWill Deacon break;
6909d0c6a9aSWill Deacon case FFA_FEAT_RXTX_MIN_SZ_64K:
6919d0c6a9aSWill Deacon min_rxtx_sz = SZ_64K;
6929d0c6a9aSWill Deacon break;
6939d0c6a9aSWill Deacon default:
6949d0c6a9aSWill Deacon return -EINVAL;
6959d0c6a9aSWill Deacon }
6969d0c6a9aSWill Deacon
6979d0c6a9aSWill Deacon if (min_rxtx_sz > PAGE_SIZE)
6989d0c6a9aSWill Deacon return -EOPNOTSUPP;
6999d0c6a9aSWill Deacon
700c9c01262SSebastian Ene return 0;
701c9c01262SSebastian Ene }
702c9c01262SSebastian Ene
/*
 * Handle FFA_VERSION from the host: negotiate the FF-A version used between
 * the host and the hyp proxy.
 *
 * Only major version 1 is accepted. Once a version has been negotiated it is
 * sticky — later calls simply return it. All negotiation state
 * (has_version_negotiated, hyp_ffa_version) is protected by version_lock.
 */
static void do_ffa_version(struct arm_smccc_res *res,
			   struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, ffa_req_version, ctxt, 1);

	if (FFA_MAJOR_VERSION(ffa_req_version) != 1) {
		res->a0 = FFA_RET_NOT_SUPPORTED;
		return;
	}

	hyp_spin_lock(&version_lock);
	if (has_version_negotiated) {
		/* Re-negotiation is not supported; report what we settled on. */
		res->a0 = hyp_ffa_version;
		goto unlock;
	}

	/*
	 * If the client driver tries to downgrade the version, we need to ask
	 * first if TEE supports it.
	 */
	if (FFA_MINOR_VERSION(ffa_req_version) < FFA_MINOR_VERSION(hyp_ffa_version)) {
		arm_smccc_1_1_smc(FFA_VERSION, ffa_req_version, 0,
				  0, 0, 0, 0, 0,
				  res);
		if (res->a0 == FFA_RET_NOT_SUPPORTED)
			goto unlock;

		/*
		 * NOTE(review): only NOT_SUPPORTED is treated as refusal here;
		 * presumably any other answer means TEE can work at the
		 * requested version — confirm against DEN0077A 13.2.1.
		 */
		hyp_ffa_version = ffa_req_version;
	}

	/* Finish init at the agreed version before declaring success. */
	if (hyp_ffa_post_init())
		res->a0 = FFA_RET_NOT_SUPPORTED;
	else {
		has_version_negotiated = true;
		res->a0 = hyp_ffa_version;
	}
unlock:
	hyp_spin_unlock(&version_lock);
}
742c9c01262SSebastian Ene
do_ffa_part_get(struct arm_smccc_res * res,struct kvm_cpu_context * ctxt)74389437638SSebastian Ene static void do_ffa_part_get(struct arm_smccc_res *res,
74489437638SSebastian Ene struct kvm_cpu_context *ctxt)
74589437638SSebastian Ene {
74689437638SSebastian Ene DECLARE_REG(u32, uuid0, ctxt, 1);
74789437638SSebastian Ene DECLARE_REG(u32, uuid1, ctxt, 2);
74889437638SSebastian Ene DECLARE_REG(u32, uuid2, ctxt, 3);
74989437638SSebastian Ene DECLARE_REG(u32, uuid3, ctxt, 4);
75089437638SSebastian Ene DECLARE_REG(u32, flags, ctxt, 5);
75189437638SSebastian Ene u32 count, partition_sz, copy_sz;
75289437638SSebastian Ene
75389437638SSebastian Ene hyp_spin_lock(&host_buffers.lock);
75489437638SSebastian Ene if (!host_buffers.rx) {
75589437638SSebastian Ene ffa_to_smccc_res(res, FFA_RET_BUSY);
75689437638SSebastian Ene goto out_unlock;
75789437638SSebastian Ene }
75889437638SSebastian Ene
75989437638SSebastian Ene arm_smccc_1_1_smc(FFA_PARTITION_INFO_GET, uuid0, uuid1,
76089437638SSebastian Ene uuid2, uuid3, flags, 0, 0,
76189437638SSebastian Ene res);
76289437638SSebastian Ene
76389437638SSebastian Ene if (res->a0 != FFA_SUCCESS)
76489437638SSebastian Ene goto out_unlock;
76589437638SSebastian Ene
76689437638SSebastian Ene count = res->a2;
76789437638SSebastian Ene if (!count)
76889437638SSebastian Ene goto out_unlock;
76989437638SSebastian Ene
77089437638SSebastian Ene if (hyp_ffa_version > FFA_VERSION_1_0) {
77189437638SSebastian Ene /* Get the number of partitions deployed in the system */
77289437638SSebastian Ene if (flags & 0x1)
77389437638SSebastian Ene goto out_unlock;
77489437638SSebastian Ene
77589437638SSebastian Ene partition_sz = res->a3;
77689437638SSebastian Ene } else {
77789437638SSebastian Ene /* FFA_VERSION_1_0 lacks the size in the response */
77889437638SSebastian Ene partition_sz = FFA_1_0_PARTITON_INFO_SZ;
77989437638SSebastian Ene }
78089437638SSebastian Ene
78189437638SSebastian Ene copy_sz = partition_sz * count;
78289437638SSebastian Ene if (copy_sz > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
78389437638SSebastian Ene ffa_to_smccc_res(res, FFA_RET_ABORTED);
78489437638SSebastian Ene goto out_unlock;
78589437638SSebastian Ene }
78689437638SSebastian Ene
78789437638SSebastian Ene memcpy(host_buffers.rx, hyp_buffers.rx, copy_sz);
78889437638SSebastian Ene out_unlock:
78989437638SSebastian Ene hyp_spin_unlock(&host_buffers.lock);
79089437638SSebastian Ene }
79189437638SSebastian Ene
kvm_host_ffa_handler(struct kvm_cpu_context * host_ctxt,u32 func_id)792c9c01262SSebastian Ene bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
793c9c01262SSebastian Ene {
794c9c01262SSebastian Ene struct arm_smccc_res res;
795c9c01262SSebastian Ene
796c9c01262SSebastian Ene /*
797c9c01262SSebastian Ene * There's no way we can tell what a non-standard SMC call might
798c9c01262SSebastian Ene * be up to. Ideally, we would terminate these here and return
799c9c01262SSebastian Ene * an error to the host, but sadly devices make use of custom
800c9c01262SSebastian Ene * firmware calls for things like power management, debugging,
801c9c01262SSebastian Ene * RNG access and crash reporting.
802c9c01262SSebastian Ene *
803c9c01262SSebastian Ene * Given that the architecture requires us to trust EL3 anyway,
804c9c01262SSebastian Ene * we forward unrecognised calls on under the assumption that
805c9c01262SSebastian Ene * the firmware doesn't expose a mechanism to access arbitrary
806c9c01262SSebastian Ene * non-secure memory. Short of a per-device table of SMCs, this
807c9c01262SSebastian Ene * is the best we can do.
808c9c01262SSebastian Ene */
809c9c01262SSebastian Ene if (!is_ffa_call(func_id))
810c9c01262SSebastian Ene return false;
811c9c01262SSebastian Ene
812c9c01262SSebastian Ene if (!has_version_negotiated && func_id != FFA_VERSION) {
813c9c01262SSebastian Ene ffa_to_smccc_error(&res, FFA_RET_INVALID_PARAMETERS);
814c9c01262SSebastian Ene goto out_handled;
815c9c01262SSebastian Ene }
816c9c01262SSebastian Ene
817c9c01262SSebastian Ene switch (func_id) {
818c9c01262SSebastian Ene case FFA_FEATURES:
819c9c01262SSebastian Ene if (!do_ffa_features(&res, host_ctxt))
820c9c01262SSebastian Ene return false;
821c9c01262SSebastian Ene goto out_handled;
822c9c01262SSebastian Ene /* Memory management */
823c9c01262SSebastian Ene case FFA_FN64_RXTX_MAP:
824c9c01262SSebastian Ene do_ffa_rxtx_map(&res, host_ctxt);
825c9c01262SSebastian Ene goto out_handled;
826c9c01262SSebastian Ene case FFA_RXTX_UNMAP:
827c9c01262SSebastian Ene do_ffa_rxtx_unmap(&res, host_ctxt);
828c9c01262SSebastian Ene goto out_handled;
829c9c01262SSebastian Ene case FFA_MEM_SHARE:
830c9c01262SSebastian Ene case FFA_FN64_MEM_SHARE:
831c9c01262SSebastian Ene do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
832c9c01262SSebastian Ene goto out_handled;
833c9c01262SSebastian Ene case FFA_MEM_RECLAIM:
834c9c01262SSebastian Ene do_ffa_mem_reclaim(&res, host_ctxt);
835c9c01262SSebastian Ene goto out_handled;
836c9c01262SSebastian Ene case FFA_MEM_LEND:
837c9c01262SSebastian Ene case FFA_FN64_MEM_LEND:
838c9c01262SSebastian Ene do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
839c9c01262SSebastian Ene goto out_handled;
840c9c01262SSebastian Ene case FFA_MEM_FRAG_TX:
841c9c01262SSebastian Ene do_ffa_mem_frag_tx(&res, host_ctxt);
842c9c01262SSebastian Ene goto out_handled;
843c9c01262SSebastian Ene case FFA_VERSION:
844c9c01262SSebastian Ene do_ffa_version(&res, host_ctxt);
845c9c01262SSebastian Ene goto out_handled;
84689437638SSebastian Ene case FFA_PARTITION_INFO_GET:
84789437638SSebastian Ene do_ffa_part_get(&res, host_ctxt);
84889437638SSebastian Ene goto out_handled;
849c9c01262SSebastian Ene }
850c9c01262SSebastian Ene
851c9c01262SSebastian Ene if (ffa_call_supported(func_id))
852c9c01262SSebastian Ene return false; /* Pass through */
853c9c01262SSebastian Ene
854c9c01262SSebastian Ene ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
855c9c01262SSebastian Ene out_handled:
856c9c01262SSebastian Ene ffa_set_retval(host_ctxt, &res);
857c9c01262SSebastian Ene return true;
858c9c01262SSebastian Ene }
859c9c01262SSebastian Ene
hyp_ffa_init(void * pages)860c9c01262SSebastian Ene int hyp_ffa_init(void *pages)
861c9c01262SSebastian Ene {
862c9c01262SSebastian Ene struct arm_smccc_res res;
863c9c01262SSebastian Ene void *tx, *rx;
864c9c01262SSebastian Ene
865c9c01262SSebastian Ene if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
866c9c01262SSebastian Ene return 0;
867c9c01262SSebastian Ene
86842fb33ddSSebastian Ene arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_1, 0, 0, 0, 0, 0, 0, &res);
869c9c01262SSebastian Ene if (res.a0 == FFA_RET_NOT_SUPPORTED)
870c9c01262SSebastian Ene return 0;
871c9c01262SSebastian Ene
872c9c01262SSebastian Ene /*
873c9c01262SSebastian Ene * Firmware returns the maximum supported version of the FF-A
874c9c01262SSebastian Ene * implementation. Check that the returned version is
875c9c01262SSebastian Ene * backwards-compatible with the hyp according to the rules in DEN0077A
876c9c01262SSebastian Ene * v1.1 REL0 13.2.1.
877c9c01262SSebastian Ene *
878c9c01262SSebastian Ene * Of course, things are never simple when dealing with firmware. v1.1
879c9c01262SSebastian Ene * broke ABI with v1.0 on several structures, which is itself
880c9c01262SSebastian Ene * incompatible with the aforementioned versioning scheme. The
881c9c01262SSebastian Ene * expectation is that v1.x implementations that do not support the v1.0
882c9c01262SSebastian Ene * ABI return NOT_SUPPORTED rather than a version number, according to
883c9c01262SSebastian Ene * DEN0077A v1.1 REL0 18.6.4.
884c9c01262SSebastian Ene */
885c9c01262SSebastian Ene if (FFA_MAJOR_VERSION(res.a0) != 1)
886c9c01262SSebastian Ene return -EOPNOTSUPP;
887c9c01262SSebastian Ene
88842fb33ddSSebastian Ene if (FFA_MINOR_VERSION(res.a0) < FFA_MINOR_VERSION(FFA_VERSION_1_1))
88942fb33ddSSebastian Ene hyp_ffa_version = res.a0;
89042fb33ddSSebastian Ene else
89142fb33ddSSebastian Ene hyp_ffa_version = FFA_VERSION_1_1;
89242fb33ddSSebastian Ene
8930a9f15fdSQuentin Perret tx = pages;
8940a9f15fdSQuentin Perret pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
8950a9f15fdSQuentin Perret rx = pages;
8960a9f15fdSQuentin Perret pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
8970a9f15fdSQuentin Perret
8980a9f15fdSQuentin Perret ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
8990a9f15fdSQuentin Perret .buf = pages,
9000a9f15fdSQuentin Perret .len = PAGE_SIZE *
9010a9f15fdSQuentin Perret (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
9020a9f15fdSQuentin Perret };
9030a9f15fdSQuentin Perret
904bc3888a0SWill Deacon hyp_buffers = (struct kvm_ffa_buffers) {
905bc3888a0SWill Deacon .lock = __HYP_SPIN_LOCK_UNLOCKED,
9060a9f15fdSQuentin Perret .tx = tx,
9070a9f15fdSQuentin Perret .rx = rx,
908bc3888a0SWill Deacon };
909bc3888a0SWill Deacon
9109d0c6a9aSWill Deacon host_buffers = (struct kvm_ffa_buffers) {
9119d0c6a9aSWill Deacon .lock = __HYP_SPIN_LOCK_UNLOCKED,
9129d0c6a9aSWill Deacon };
9139d0c6a9aSWill Deacon
914c9c01262SSebastian Ene version_lock = __HYP_SPIN_LOCK_UNLOCKED;
91512bdce4fSWill Deacon return 0;
91612bdce4fSWill Deacon }
917