// SPDX-License-Identifier: GPL-2.0-only
/*
 * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
 * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
 * Framework for Arm A-profile", which is specified by Arm in document
 * number DEN0077.
 *
 * Copyright (C) 2022 - Google LLC
 * Author: Andrew Walbran <qwandor@google.com>
 *
 * This driver hooks into the SMC trapping logic for the host and intercepts
 * all calls falling within the FF-A range. Each call is either:
 *
 *	- Forwarded on unmodified to the SPMD at EL3
 *	- Rejected as "unsupported"
 *	- Accompanied by a host stage-2 page-table check/update and reissued
 *
 * Consequently, any attempts by the host to make guest memory pages
 * accessible to the secure world using FF-A will be detected either here
 * (in the case that the memory is already owned by the guest) or during
 * donation to the guest (in the case that the memory was previously shared
 * with the secure world).
 *
 * To allow the rolling-back of page-table updates and FF-A calls in the
 * event of failure, operations involving the RXTX buffers are locked for
 * the duration and are therefore serialised.
 */

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>
#include <nvhe/spinlock.h>

/*
 * "ID value 0 must be returned at the Non-secure physical FF-A instance"
 * We share this ID with the host.
 */
#define HOST_FFA_ID	0

/*
 * A buffer to hold the maximum descriptor size we can see from the host,
 * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
 * when resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
	void	*buf;
	size_t	len;
};

static struct kvm_ffa_descriptor_buffer ffa_desc_buf;

struct kvm_ffa_buffers {
	hyp_spinlock_t lock;
	void *tx;
	void *rx;
};

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers;
static u32 hyp_ffa_version;
static bool has_version_negotiated;
static hyp_spinlock_t version_lock;

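/*
 * Helpers for converting an FF-A status code into the SMCCC register values
 * returned to the host: errors become FFA_ERROR with the status in w2,
 * successes become FFA_SUCCESS with an optional property value in w2.
 */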
static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
	*res = (struct arm_smccc_res) {
		.a0	= FFA_ERROR,
		.a2	= ffa_errno,
	};
}

static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
{
	if (ret == FFA_RET_SUCCESS) {
		*res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS,
						.a2 = prop };
	} else {
		ffa_to_smccc_error(res, ret);
	}
}

static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
	ffa_to_smccc_res_prop(res, ret, 0);
}

static void ffa_set_retval(struct kvm_cpu_context *ctxt,
			   struct arm_smccc_res *res)
{
	cpu_reg(ctxt, 0) = res->a0;
	cpu_reg(ctxt, 1) = res->a1;
	cpu_reg(ctxt, 2) = res->a2;
	cpu_reg(ctxt, 3) = res->a3;
}

static bool is_ffa_call(u64 func_id)
{
	return ARM_SMCCC_IS_FAST_CALL(func_id) &&
	       ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
	       ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
	       ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
}

static int ffa_map_hyp_buffers(u64 ffa_page_count)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
			  hyp_virt_to_phys(hyp_buffers.tx),
			  hyp_virt_to_phys(hyp_buffers.rx),
			  ffa_page_count,
			  0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static int ffa_unmap_hyp_buffers(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
			  HOST_FFA_ID,
			  0, 0, 0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

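/*
 * Thin wrappers issuing the corresponding FF-A ABI calls to the SPMD at EL3
 * and handing the raw SMCCC result back to the caller.
 */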
static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
			  handle_lo, handle_hi, fraglen, endpoint_id,
			  0, 0, 0,
			  res);
}

static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 fragoff)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
			  handle_lo, handle_hi, fragoff, HOST_FFA_ID,
			  0, 0, 0,
			  res);
}

static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
			 u32 fraglen)
{
	arm_smccc_1_1_smc(func_id, len, fraglen,
			  0, 0, 0, 0, 0,
			  res);
}

static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 flags)
{
	arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
			  handle_lo, handle_hi, flags,
			  0, 0, 0, 0,
			  res);
}

static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
{
	arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
			  len, len,
			  0, 0, 0, 0, 0,
			  res);
}

static void ffa_rx_release(struct arm_smccc_res *res)
{
	arm_smccc_1_1_smc(FFA_RX_RELEASE,
			  0, 0,
			  0, 0, 0, 0, 0,
			  res);
}

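/*
 * FFA_FN64_RXTX_MAP handler: register the hypervisor's own RX/TX buffers
 * with the SPMD, then share the host's buffer pages with the hypervisor and
 * pin them so that descriptors can safely be copied out of them later on.
 * Any failure unwinds the earlier steps before returning an error.
 */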
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
	DECLARE_REG(u32, npages, ctxt, 3);
	int ret = 0;
	void *rx_virt, *tx_virt;

	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (host_buffers.tx) {
		ret = FFA_RET_DENIED;
		goto out_unlock;
	}

	/*
	 * Map our hypervisor buffers into the SPMD before mapping and
	 * pinning the host buffers in our own address space.
	 */
	ret = ffa_map_hyp_buffers(npages);
	if (ret)
		goto out_unlock;

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unmap;
	}

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_tx;
	}

	tx_virt = hyp_phys_to_virt(tx);
	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_rx;
	}

	rx_virt = hyp_phys_to_virt(rx);
	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unpin_tx;
	}

	host_buffers.tx = tx_virt;
	host_buffers.rx = rx_virt;

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
	return;

err_unpin_tx:
	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
	ffa_unmap_hyp_buffers();
	goto out_unlock;
}

static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	int ret = 0;

	if (id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
	host_buffers.tx = NULL;

	hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
	host_buffers.rx = NULL;

	ffa_unmap_hyp_buffers();

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
}

static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}

static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				     u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}

static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				 u32 nranges)
{
	u32 nshared = __ffa_host_share_ranges(ranges, nranges);
	int ret = 0;

	if (nshared != nranges) {
		WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}

static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
	int ret = 0;

	if (nunshared != nranges) {
		WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}

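/*
 * FFA_MEM_FRAG_TX handler: the host is continuing a fragmented FFA_MEM_SHARE
 * or FFA_MEM_LEND transaction. Update the host stage-2 for the address
 * ranges carried by this fragment before forwarding it on; if the update
 * fails, reclaim the handle to abort the whole transaction.
 */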
static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, fraglen, ctxt, 3);
	DECLARE_REG(u32, endpoint_id, ctxt, 4);
	struct ffa_mem_region_addr_range *buf;
	int ret = FFA_RET_INVALID_PARAMETERS;
	u32 nr_ranges;

	if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
		goto out;

	if (fraglen % sizeof(*buf))
		goto out;

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx)
		goto out_unlock;

	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);
	nr_ranges = fraglen / sizeof(*buf);

	ret = ffa_host_share_ranges(buf, nr_ranges);
	if (ret) {
		/*
		 * We're effectively aborting the transaction, so we need
		 * to restore the global state back to what it was prior to
		 * transmission of the first fragment.
		 */
		ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
		WARN_ON(res->a0 != FFA_SUCCESS);
		goto out_unlock;
	}

	ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
	if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
		WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);

	/*
	 * If for any reason this did not succeed, we're in trouble as we have
	 * now lost the content of the previous fragments and we can't rollback
	 * the host stage-2 changes. The pages previously marked as shared will
	 * remain stuck in that state forever, hence preventing the host from
	 * sharing/donating them again and may possibly lead to subsequent
	 * failures, but this will not compromise confidentiality.
	 */
	return;
}

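/*
 * Common handler for FFA_MEM_SHARE and FFA_MEM_LEND: validate the transmit
 * descriptor supplied by the host, transition the constituent ranges in the
 * host stage-2 and only then forward the call to the SPMD. If the SPMD
 * rejects the call, the stage-2 changes are undone.
 */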
static __always_inline void do_ffa_mem_xfer(const u64 func_id,
					    struct arm_smccc_res *res,
					    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, len, ctxt, 1);
	DECLARE_REG(u32, fraglen, ctxt, 2);
	DECLARE_REG(u64, addr_mbz, ctxt, 3);
	DECLARE_REG(u32, npages_mbz, ctxt, 4);
	struct ffa_mem_region_attributes *ep_mem_access;
	struct ffa_composite_mem_region *reg;
	struct ffa_mem_region *buf;
	u32 offset, nr_ranges;
	int ret = 0;

	BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
		     func_id != FFA_FN64_MEM_LEND);

	if (addr_mbz || npages_mbz || fraglen > len ||
	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (fraglen < sizeof(struct ffa_mem_region) +
		      sizeof(struct ffa_mem_region_attributes)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);

	ep_mem_access = (void *)buf +
			ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
	offset = ep_mem_access->composite_off;
	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	reg = (void *)buf + offset;
	nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
	if (nr_ranges % sizeof(reg->constituents[0])) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	nr_ranges /= sizeof(reg->constituents[0]);
	ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
	if (ret)
		goto out_unlock;

	ffa_mem_xfer(res, func_id, len, fraglen);
	if (fraglen != len) {
		if (res->a0 != FFA_MEM_FRAG_RX)
			goto err_unshare;

		if (res->a3 != fraglen)
			goto err_unshare;
	} else if (res->a0 != FFA_SUCCESS) {
		goto err_unshare;
	}

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);
	return;

err_unshare:
	WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
	goto out_unlock;
}

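/*
 * FFA_MEM_RECLAIM handler: retrieve the full (possibly fragmented)
 * descriptor for the handle from the SPMD so that we know which ranges were
 * originally shared, issue the reclaim, and on success return the pages to
 * the host in the host stage-2.
 */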
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, flags, ctxt, 3);
	struct ffa_mem_region_attributes *ep_mem_access;
	struct ffa_composite_mem_region *reg;
	u32 offset, len, fraglen, fragoff;
	struct ffa_mem_region *buf;
	int ret = 0;
	u64 handle;

	handle = PACK_HANDLE(handle_lo, handle_hi);

	hyp_spin_lock(&host_buffers.lock);

	buf = hyp_buffers.tx;
	*buf = (struct ffa_mem_region) {
		.sender_id	= HOST_FFA_ID,
		.handle		= handle,
	};

	ffa_retrieve_req(res, sizeof(*buf));
	buf = hyp_buffers.rx;
	if (res->a0 != FFA_MEM_RETRIEVE_RESP)
		goto out_unlock;

	len = res->a1;
	fraglen = res->a2;

	ep_mem_access = (void *)buf +
			ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
	offset = ep_mem_access->composite_off;
	/*
	 * We can trust the SPMD to get this right, but let's at least
	 * check that we end up with something that doesn't look _completely_
	 * bogus.
	 */
	if (WARN_ON(offset > len ||
		    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
		ret = FFA_RET_ABORTED;
		ffa_rx_release(res);
		goto out_unlock;
	}

	if (len > ffa_desc_buf.len) {
		ret = FFA_RET_NO_MEMORY;
		ffa_rx_release(res);
		goto out_unlock;
	}

	buf = ffa_desc_buf.buf;
	memcpy(buf, hyp_buffers.rx, fraglen);
	ffa_rx_release(res);

	for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
		ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
		if (res->a0 != FFA_MEM_FRAG_TX) {
			ret = FFA_RET_INVALID_PARAMETERS;
			goto out_unlock;
		}

		fraglen = res->a3;
		memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
		ffa_rx_release(res);
	}

	ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
	if (res->a0 != FFA_SUCCESS)
		goto out_unlock;

	reg = (void *)buf + offset;
	/* If the SPMD was happy, then we should be too. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents,
					reg->addr_range_cnt));
out_unlock:
	hyp_spin_unlock(&host_buffers.lock);

	if (ret)
		ffa_to_smccc_res(res, ret);
}

/*
 * Is a given FFA function supported, either by forwarding on directly
 * or by handling at EL2?
 */
static bool ffa_call_supported(u64 func_id)
{
	switch (func_id) {
	/* Unsupported memory management calls */
	case FFA_FN64_MEM_RETRIEVE_REQ:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_OP_PAUSE:
	case FFA_MEM_OP_RESUME:
	case FFA_MEM_FRAG_RX:
	case FFA_FN64_MEM_DONATE:
	/* Indirect message passing via RX/TX buffers */
	case FFA_MSG_SEND:
	case FFA_MSG_POLL:
	case FFA_MSG_WAIT:
	/* 32-bit variants of 64-bit calls */
	case FFA_MSG_SEND_DIRECT_RESP:
	case FFA_RXTX_MAP:
	case FFA_MEM_DONATE:
	case FFA_MEM_RETRIEVE_REQ:
		return false;
	}

	return true;
}

static bool do_ffa_features(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	u64 prop = 0;
	int ret = 0;

	if (!ffa_call_supported(id)) {
		ret = FFA_RET_NOT_SUPPORTED;
		goto out_handled;
	}

	switch (id) {
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		ret = FFA_RET_SUCCESS;
		prop = 0; /* No support for dynamic buffers */
		goto out_handled;
	default:
		return false;
	}

out_handled:
	ffa_to_smccc_res_prop(res, ret, prop);
	return true;
}

static int hyp_ffa_post_init(void)
{
	size_t min_rxtx_sz;
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	if (res.a2 != HOST_FFA_ID)
		return -EINVAL;

	arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
			  0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	switch (res.a2) {
	case FFA_FEAT_RXTX_MIN_SZ_4K:
		min_rxtx_sz = SZ_4K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_16K:
		min_rxtx_sz = SZ_16K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_64K:
		min_rxtx_sz = SZ_64K;
		break;
	default:
		return -EINVAL;
	}

	if (min_rxtx_sz > PAGE_SIZE)
		return -EOPNOTSUPP;

	return 0;
}

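/*
 * FFA_VERSION handler: negotiate the version used by the host. Only major
 * version 1 is accepted, a request to downgrade below the hypervisor's
 * preferred minor version is first checked against the SPMD, and the
 * negotiated version is latched on the first successful call.
 */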
static void do_ffa_version(struct arm_smccc_res *res,
			   struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, ffa_req_version, ctxt, 1);

	if (FFA_MAJOR_VERSION(ffa_req_version) != 1) {
		res->a0 = FFA_RET_NOT_SUPPORTED;
		return;
	}

	hyp_spin_lock(&version_lock);
	if (has_version_negotiated) {
		res->a0 = hyp_ffa_version;
		goto unlock;
	}

	/*
	 * If the client driver tries to downgrade the version, we need to ask
	 * first if TEE supports it.
	 */
	if (FFA_MINOR_VERSION(ffa_req_version) < FFA_MINOR_VERSION(hyp_ffa_version)) {
		arm_smccc_1_1_smc(FFA_VERSION, ffa_req_version, 0,
				  0, 0, 0, 0, 0,
				  res);
		if (res->a0 == FFA_RET_NOT_SUPPORTED)
			goto unlock;

		hyp_ffa_version = ffa_req_version;
	}

	if (hyp_ffa_post_init())
		res->a0 = FFA_RET_NOT_SUPPORTED;
	else {
		has_version_negotiated = true;
		res->a0 = hyp_ffa_version;
	}
unlock:
	hyp_spin_unlock(&version_lock);
}

static void do_ffa_part_get(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, uuid0, ctxt, 1);
	DECLARE_REG(u32, uuid1, ctxt, 2);
	DECLARE_REG(u32, uuid2, ctxt, 3);
	DECLARE_REG(u32, uuid3, ctxt, 4);
	DECLARE_REG(u32, flags, ctxt, 5);
	u32 count, partition_sz, copy_sz;

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.rx) {
		ffa_to_smccc_res(res, FFA_RET_BUSY);
		goto out_unlock;
	}

	arm_smccc_1_1_smc(FFA_PARTITION_INFO_GET, uuid0, uuid1,
			  uuid2, uuid3, flags, 0, 0,
			  res);

	if (res->a0 != FFA_SUCCESS)
		goto out_unlock;

	count = res->a2;
	if (!count)
		goto out_unlock;

	if (hyp_ffa_version > FFA_VERSION_1_0) {
		/* Get the number of partitions deployed in the system */
		if (flags & 0x1)
			goto out_unlock;

		partition_sz = res->a3;
	} else {
		/* FFA_VERSION_1_0 lacks the size in the response */
		partition_sz = FFA_1_0_PARTITON_INFO_SZ;
	}

	copy_sz = partition_sz * count;
	if (copy_sz > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
		ffa_to_smccc_res(res, FFA_RET_ABORTED);
		goto out_unlock;
	}

	memcpy(host_buffers.rx, hyp_buffers.rx, copy_sz);
out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
}

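/*
 * Entry point for host SMCs in the FF-A range. Returns true if the call was
 * handled here (with the result written back to the host context), or false
 * if it should be forwarded on to EL3 unmodified.
 */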
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
	struct arm_smccc_res res;

	/*
	 * There's no way we can tell what a non-standard SMC call might
	 * be up to. Ideally, we would terminate these here and return
	 * an error to the host, but sadly devices make use of custom
	 * firmware calls for things like power management, debugging,
	 * RNG access and crash reporting.
	 *
	 * Given that the architecture requires us to trust EL3 anyway,
	 * we forward unrecognised calls on under the assumption that
	 * the firmware doesn't expose a mechanism to access arbitrary
	 * non-secure memory. Short of a per-device table of SMCs, this
	 * is the best we can do.
	 */
	if (!is_ffa_call(func_id))
		return false;

	if (!has_version_negotiated && func_id != FFA_VERSION) {
		ffa_to_smccc_error(&res, FFA_RET_INVALID_PARAMETERS);
		goto out_handled;
	}

	switch (func_id) {
	case FFA_FEATURES:
		if (!do_ffa_features(&res, host_ctxt))
			return false;
		goto out_handled;
	/* Memory management */
	case FFA_FN64_RXTX_MAP:
		do_ffa_rxtx_map(&res, host_ctxt);
		goto out_handled;
	case FFA_RXTX_UNMAP:
		do_ffa_rxtx_unmap(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
		do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_RECLAIM:
		do_ffa_mem_reclaim(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_FRAG_TX:
		do_ffa_mem_frag_tx(&res, host_ctxt);
		goto out_handled;
	case FFA_VERSION:
		do_ffa_version(&res, host_ctxt);
		goto out_handled;
	case FFA_PARTITION_INFO_GET:
		do_ffa_part_get(&res, host_ctxt);
		goto out_handled;
	}

	if (ffa_call_supported(func_id))
		return false; /* Pass through */

	ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
out_handled:
	ffa_set_retval(host_ctxt, &res);
	return true;
}

int hyp_ffa_init(void *pages)
{
	struct arm_smccc_res res;
	void *tx, *rx;

	if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
		return 0;

	arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_1, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == FFA_RET_NOT_SUPPORTED)
		return 0;

	/*
	 * Firmware returns the maximum supported version of the FF-A
	 * implementation. Check that the returned version is
	 * backwards-compatible with the hyp according to the rules in DEN0077A
	 * v1.1 REL0 13.2.1.
	 *
	 * Of course, things are never simple when dealing with firmware. v1.1
	 * broke ABI with v1.0 on several structures, which is itself
	 * incompatible with the aforementioned versioning scheme. The
	 * expectation is that v1.x implementations that do not support the v1.0
	 * ABI return NOT_SUPPORTED rather than a version number, according to
	 * DEN0077A v1.1 REL0 18.6.4.
	 */
	if (FFA_MAJOR_VERSION(res.a0) != 1)
		return -EOPNOTSUPP;

	if (FFA_MINOR_VERSION(res.a0) < FFA_MINOR_VERSION(FFA_VERSION_1_1))
		hyp_ffa_version = res.a0;
	else
		hyp_ffa_version = FFA_VERSION_1_1;

	tx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
	rx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;

	ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
		.buf	= pages,
		.len	= PAGE_SIZE *
			  (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
	};

	hyp_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
		.tx	= tx,
		.rx	= rx,
	};

	host_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
	};

	version_lock = __HYP_SPIN_LOCK_UNLOCKED;
	return 0;
}