// SPDX-License-Identifier: GPL-2.0-only
/*
 * Arm Firmware Framework for ARMv8-A (FFA) interface driver
 *
 * The Arm FFA specification[1] describes a software architecture to
 * leverage the virtualization extension to isolate software images
 * provided by an ecosystem of vendors from each other and describes
 * interfaces that standardize communication between the various software
 * images including communication between images in the Secure world and
 * Normal world. Any Hypervisor could use the FFA interfaces to enable
 * communication between VMs it manages.
 *
 * The Hypervisor, a.k.a. the Partition Manager in FFA terminology, can
 * assign system resources (memory regions, devices, CPU cycles) to the
 * partitions and manage isolation amongst them.
 *
 * [1] https://developer.arm.com/docs/den0077/latest
 *
 * Copyright (C) 2021 ARM Ltd.
 */

#define DRIVER_NAME	"ARM FF-A"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#include <linux/acpi.h>
#include <linux/arm_ffa.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/device.h>
#include <linux/hashtable.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_irq.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

#include "common.h"

#define FFA_DRIVER_VERSION	FFA_VERSION_1_1
#define FFA_MIN_VERSION		FFA_VERSION_1_0

#define SENDER_ID_MASK		GENMASK(31, 16)
#define RECEIVER_ID_MASK	GENMASK(15, 0)
#define SENDER_ID(x)		((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
#define RECEIVER_ID(x)		((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
#define PACK_TARGET_INFO(s, r)		\
	(FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))

/*
 * Keep the RX/TX buffer size as 4K for now
 * 64K may be preferred to keep it at least a page in a 64K PAGE_SIZE config
 */
#define RXTX_BUFFER_SIZE	SZ_4K

#define FFA_MAX_NOTIFICATIONS		64

static ffa_fn *invoke_ffa_fn;

static const int ffa_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,		/* FFA_RET_SUCCESS */
	-EOPNOTSUPP,	/* FFA_RET_NOT_SUPPORTED */
	-EINVAL,	/* FFA_RET_INVALID_PARAMETERS */
	-ENOMEM,	/* FFA_RET_NO_MEMORY */
	-EBUSY,		/* FFA_RET_BUSY */
	-EINTR,		/* FFA_RET_INTERRUPTED */
	-EACCES,	/* FFA_RET_DENIED */
	-EAGAIN,	/* FFA_RET_RETRY */
	-ECANCELED,	/* FFA_RET_ABORTED */
	-ENODATA,	/* FFA_RET_NO_DATA */
	-EAGAIN,	/* FFA_RET_NOT_READY */
};

static inline int ffa_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
		return ffa_linux_errmap[err_idx];
	return -EINVAL;
}

struct ffa_pcpu_irq {
	struct ffa_drv_info *info;
};

struct ffa_drv_info {
	u32 version;
	u16 vm_id;
	struct mutex rx_lock; /* lock to protect Rx buffer */
	struct mutex tx_lock; /* lock to protect Tx buffer */
	void *rx_buffer;
	void *tx_buffer;
	bool mem_ops_native;
	bool msg_direct_req2_supp;
	bool bitmap_created;
	bool notif_enabled;
	unsigned int sched_recv_irq;
	unsigned int notif_pend_irq;
	unsigned int cpuhp_state;
	struct ffa_pcpu_irq __percpu *irq_pcpu;
	struct workqueue_struct *notif_pcpu_wq;
	struct work_struct notif_pcpu_work;
	struct work_struct sched_recv_irq_work;
	struct xarray partition_info;
	DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
	struct mutex notify_lock; /* lock to protect notifier hashtable */
};

static struct ffa_drv_info *drv_info;
static void ffa_partitions_cleanup(void);

/*
 * The driver must be able to support all the versions from the earliest
 * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
 * The specification states that if firmware supports an FFA implementation
 * that is incompatible with and at a greater version number than specified
 * by the caller (FFA_DRIVER_VERSION passed as a parameter to FFA_VERSION),
 * it must return the NOT_SUPPORTED error code.
 */
static u32 ffa_compatible_version_find(u32 version)
{
	u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version);
	u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION);
	u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION);

	if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
		return version;

	pr_info("Firmware version higher than driver version, downgrading\n");
	return FFA_DRIVER_VERSION;
}

static int ffa_version_check(u32 *version)
{
	ffa_value_t ver;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
		      }, &ver);

	if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
		pr_info("FFA_VERSION returned not supported\n");
		return -EOPNOTSUPP;
	}

	if (ver.a0 < FFA_MIN_VERSION) {
		pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
		       FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
		       FFA_MAJOR_VERSION(FFA_MIN_VERSION),
		       FFA_MINOR_VERSION(FFA_MIN_VERSION));
		return -EINVAL;
	}

	pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
		FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
	pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0),
		FFA_MINOR_VERSION(ver.a0));
	*version = ffa_compatible_version_find(ver.a0);

	return 0;
}

static int ffa_rx_release(void)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_RX_RELEASE,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	/* check for ret.a0 == FFA_RX_RELEASE ? */

	return 0;
}

static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_FN_NATIVE(RXTX_MAP),
		      .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static int ffa_rxtx_unmap(u16 vm_id)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static int ffa_features(u32 func_feat_id, u32 input_props,
			u32 *if_props_1, u32 *if_props_2)
{
	ffa_value_t id;

	if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
		pr_err("%s: Invalid Parameters: %x, %x", __func__,
		       func_feat_id, input_props);
		return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
	}

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
		      }, &id);

	if (id.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)id.a2);

	if (if_props_1)
		*if_props_1 = id.a2;
	if (if_props_2)
		*if_props_2 = id.a3;

	return 0;
}

#define PARTITION_INFO_GET_RETURN_COUNT_ONLY	BIT(0)

/* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
static int
__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
			 struct ffa_partition_info *buffer, int num_partitions)
{
	int idx, count, flags = 0, sz, buf_sz;
	ffa_value_t partition_info;

	if (drv_info->version > FFA_VERSION_1_0 &&
	    (!buffer || !num_partitions)) /* Just get the count for now */
		flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;

	mutex_lock(&drv_info->rx_lock);
	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_PARTITION_INFO_GET,
		      .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
		      .a5 = flags,
		      }, &partition_info);

	if (partition_info.a0 == FFA_ERROR) {
		mutex_unlock(&drv_info->rx_lock);
		return ffa_to_linux_errno((int)partition_info.a2);
	}

	count = partition_info.a2;

	if (drv_info->version > FFA_VERSION_1_0) {
		buf_sz = sz = partition_info.a3;
		if (sz > sizeof(*buffer))
			buf_sz = sizeof(*buffer);
	} else {
		/* FFA_VERSION_1_0 lacks size in the response */
		buf_sz = sz = 8;
	}

	if (buffer && count <= num_partitions)
		for (idx = 0; idx < count; idx++)
			memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
			       buf_sz);

	ffa_rx_release();

	mutex_unlock(&drv_info->rx_lock);

	return count;
}

#define LAST_INDEX_MASK		GENMASK(15, 0)
#define CURRENT_INDEX_MASK	GENMASK(31, 16)
#define UUID_INFO_TAG_MASK	GENMASK(47, 32)
#define PARTITION_INFO_SZ_MASK	GENMASK(63, 48)
#define PARTITION_COUNT(x)	((u16)(FIELD_GET(LAST_INDEX_MASK, (x))) + 1)
#define CURRENT_INDEX(x)	((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x))))
#define UUID_INFO_TAG(x)	((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x))))
#define PARTITION_INFO_SZ(x)	((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x))))
static int
__ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
			      struct ffa_partition_info *buffer, int num_parts)
{
	u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0;
	ffa_value_t partition_info;

	do {
		start_idx = prev_idx ? prev_idx + 1 : 0;

		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_PARTITION_INFO_GET_REGS,
			      .a1 = (u64)uuid1 << 32 | uuid0,
			      .a2 = (u64)uuid3 << 32 | uuid2,
			      .a3 = start_idx | tag << 16,
			      }, &partition_info);

		if (partition_info.a0 == FFA_ERROR)
			return ffa_to_linux_errno((int)partition_info.a2);

		if (!count)
			count = PARTITION_COUNT(partition_info.a2);
		if (!buffer || !num_parts) /* count only */
			return count;

		cur_idx = CURRENT_INDEX(partition_info.a2);
		tag = UUID_INFO_TAG(partition_info.a2);
		buf_sz = PARTITION_INFO_SZ(partition_info.a2);
		if (buf_sz > sizeof(*buffer))
			buf_sz = sizeof(*buffer);

		memcpy(buffer + prev_idx * buf_sz, &partition_info.a3,
		       (cur_idx - start_idx + 1) * buf_sz);
		prev_idx = cur_idx;

	} while (cur_idx < (count - 1));

	return count;
}

/* buffer is allocated and caller must free the same if returned count > 0 */
static int
ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
{
	int count;
	u32 uuid0_4[4];
	bool reg_mode = false;
	struct ffa_partition_info *pbuf;

	if (!ffa_features(FFA_PARTITION_INFO_GET_REGS, 0, NULL, NULL))
		reg_mode = true;

	export_uuid((u8 *)uuid0_4, uuid);
	if (reg_mode)
		count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
						      uuid0_4[2], uuid0_4[3],
						      NULL, 0);
	else
		count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
						 uuid0_4[2], uuid0_4[3],
						 NULL, 0);
	if (count <= 0)
		return count;

	pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
	if (!pbuf)
		return -ENOMEM;

	if (reg_mode)
		count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
						      uuid0_4[2], uuid0_4[3],
						      pbuf, count);
	else
		count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
						 uuid0_4[2], uuid0_4[3],
						 pbuf, count);
	if (count <= 0)
		kfree(pbuf);
	else
		*buffer = pbuf;

	return count;
}

#define VM_ID_MASK	GENMASK(15, 0)
static int ffa_id_get(u16 *vm_id)
{
	ffa_value_t id;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_ID_GET,
		      }, &id);

	if (id.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)id.a2);

	*vm_id = FIELD_GET(VM_ID_MASK, (id.a2));

	return 0;
}

static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
				   struct ffa_send_direct_data *data)
{
	u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
	ffa_value_t ret;

	if (mode_32bit) {
		req_id = FFA_MSG_SEND_DIRECT_REQ;
		resp_id = FFA_MSG_SEND_DIRECT_RESP;
	} else {
		req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
		resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
	}

	invoke_ffa_fn((ffa_value_t){
		      .a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
		      .a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
		      .a6 = data->data3, .a7 = data->data4,
		      }, &ret);

	while (ret.a0 == FFA_INTERRUPT)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_RUN, .a1 = ret.a1,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == resp_id) {
		data->data0 = ret.a3;
		data->data1 = ret.a4;
		data->data2 = ret.a5;
		data->data3 = ret.a6;
		data->data4 = ret.a7;
		return 0;
	}

	return -EINVAL;
}

static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
{
	u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
	struct ffa_indirect_msg_hdr *msg;
	ffa_value_t ret;
	int retval = 0;

	if (sz > (RXTX_BUFFER_SIZE - sizeof(*msg)))
		return -ERANGE;

	mutex_lock(&drv_info->tx_lock);

	msg = drv_info->tx_buffer;
	msg->flags = 0;
	msg->res0 = 0;
	msg->offset = sizeof(*msg);
	msg->send_recv_id = src_dst_ids;
	msg->size = sz;
	memcpy((u8 *)msg + msg->offset, buf, sz);

	/* flags = 0, sender VMID = 0 works for both physical/virtual NS */
	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_MSG_SEND2, .a1 = 0, .a2 = 0
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		retval = ffa_to_linux_errno((int)ret.a2);

	mutex_unlock(&drv_info->tx_lock);
	return retval;
}

static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid,
				    struct ffa_send_direct_data2 *data)
{
	u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
	ffa_value_t ret, args = {
		.a0 = FFA_MSG_SEND_DIRECT_REQ2, .a1 = src_dst_ids,
	};

	export_uuid((u8 *)&args.a2, uuid);
	memcpy((void *)&args + offsetof(ffa_value_t, a4), data, sizeof(*data));

	invoke_ffa_fn(args, &ret);

	while (ret.a0 == FFA_INTERRUPT)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_RUN, .a1 = ret.a1,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == FFA_MSG_SEND_DIRECT_RESP2) {
		memcpy(data, &ret.a4, sizeof(*data));
		return 0;
	}

	return -EINVAL;
}

static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
			      u32 frag_len, u32 len, u64 *handle)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = func_id, .a1 = len, .a2 = frag_len,
		      .a3 = buf, .a4 = buf_sz,
		      }, &ret);

	while (ret.a0 == FFA_MEM_OP_PAUSE)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_MEM_OP_RESUME,
			      .a1 = ret.a1, .a2 = ret.a2,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == FFA_SUCCESS) {
		if (handle)
			*handle = PACK_HANDLE(ret.a2, ret.a3);
	} else if (ret.a0 == FFA_MEM_FRAG_RX) {
		if (handle)
			*handle = PACK_HANDLE(ret.a1, ret.a2);
	} else {
		return -EOPNOTSUPP;
	}

	return frag_len;
}

static int ffa_mem_next_frag(u64 handle, u32 frag_len)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_MEM_FRAG_TX,
		      .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
		      .a3 = frag_len,
		      }, &ret);

	while (ret.a0 == FFA_MEM_OP_PAUSE)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_MEM_OP_RESUME,
			      .a1 = ret.a1, .a2 = ret.a2,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == FFA_MEM_FRAG_RX)
		return ret.a3;
	else if (ret.a0 == FFA_SUCCESS)
		return 0;

	return -EOPNOTSUPP;
}

static int
ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
		      u32 len, u64 *handle, bool first)
{
	if (!first)
		return ffa_mem_next_frag(*handle, frag_len);

	return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
}

static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
{
	u32 num_pages = 0;

	do {
		num_pages += sg->length / FFA_PAGE_SIZE;
	} while ((sg = sg_next(sg)));

	return num_pages;
}

static u16 ffa_memory_attributes_get(u32 func_id)
{
	/*
	 * For the memory lend or donate operation, if the receiver is a PE or
	 * a proxy endpoint, the owner/sender must not specify the attributes
	 */
	if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
	    func_id == FFA_MEM_LEND)
		return 0;

	return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
}

static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
		       struct ffa_mem_ops_args *args)
{
	int rc = 0;
	bool first = true;
	u32 composite_offset;
	phys_addr_t addr = 0;
	struct ffa_mem_region *mem_region = buffer;
	struct ffa_composite_mem_region *composite;
	struct ffa_mem_region_addr_range *constituents;
	struct ffa_mem_region_attributes *ep_mem_access;
	u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);

	mem_region->tag = args->tag;
	mem_region->flags = args->flags;
	mem_region->sender_id = drv_info->vm_id;
	mem_region->attributes = ffa_memory_attributes_get(func_id);
	ep_mem_access = buffer +
			ffa_mem_desc_offset(buffer, 0, drv_info->version);
	composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
					       drv_info->version);

	for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
		ep_mem_access->receiver = args->attrs[idx].receiver;
		ep_mem_access->attrs = args->attrs[idx].attrs;
		ep_mem_access->composite_off = composite_offset;
		ep_mem_access->flag = 0;
		ep_mem_access->reserved = 0;
	}
	mem_region->handle = 0;
	mem_region->ep_count = args->nattrs;
	if (drv_info->version <= FFA_VERSION_1_0) {
		mem_region->ep_mem_size = 0;
	} else {
		mem_region->ep_mem_size = sizeof(*ep_mem_access);
		mem_region->ep_mem_offset = sizeof(*mem_region);
		memset(mem_region->reserved, 0, 12);
	}

	composite = buffer + composite_offset;
	composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
	composite->addr_range_cnt = num_entries;
	composite->reserved = 0;

	length = composite_offset + CONSTITUENTS_OFFSET(num_entries);
	frag_len = composite_offset + CONSTITUENTS_OFFSET(0);
	if (frag_len > max_fragsize)
		return -ENXIO;

	if (!args->use_txbuf) {
		addr = virt_to_phys(buffer);
		buf_sz = max_fragsize / FFA_PAGE_SIZE;
	}

	constituents = buffer + frag_len;
	idx = 0;
	do {
		if (frag_len == max_fragsize) {
			rc = ffa_transmit_fragment(func_id, addr, buf_sz,
						   frag_len, length,
						   &args->g_handle, first);
			if (rc < 0)
				return -ENXIO;

			first = false;
			idx = 0;
			frag_len = 0;
			constituents = buffer;
		}

		if ((void *)constituents - buffer > max_fragsize) {
			pr_err("Memory Region Fragment > Tx Buffer size\n");
			return -EFAULT;
		}

		constituents->address = sg_phys(args->sg);
		constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
		constituents->reserved = 0;
		constituents++;
		frag_len += sizeof(struct ffa_mem_region_addr_range);
	} while ((args->sg = sg_next(args->sg)));

	return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
				     length, &args->g_handle, first);
}

static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
{
	int ret;
	void *buffer;

	if (!args->use_txbuf) {
		buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;
	} else {
		buffer = drv_info->tx_buffer;
		mutex_lock(&drv_info->tx_lock);
	}

	ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);

	if (args->use_txbuf)
		mutex_unlock(&drv_info->tx_lock);
	else
		free_pages_exact(buffer, RXTX_BUFFER_SIZE);

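	/*
	 * A non-negative return from ffa_setup_and_transmit() is the length
	 * of the last transmitted fragment; report success as 0 here.
	 */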
	return ret < 0 ? ret : 0;
}

static int ffa_memory_reclaim(u64 g_handle, u32 flags)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_MEM_RECLAIM,
		      .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
		      .a3 = flags,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static int ffa_notification_bitmap_create(void)
{
	ffa_value_t ret;
	u16 vcpu_count = nr_cpu_ids;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_BITMAP_CREATE,
		      .a1 = drv_info->vm_id, .a2 = vcpu_count,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static int ffa_notification_bitmap_destroy(void)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_BITMAP_DESTROY,
		      .a1 = drv_info->vm_id,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

#define NOTIFICATION_LOW_MASK		GENMASK(31, 0)
#define NOTIFICATION_HIGH_MASK		GENMASK(63, 32)
#define NOTIFICATION_BITMAP_HIGH(x)	\
	((u32)(FIELD_GET(NOTIFICATION_HIGH_MASK, (x))))
#define NOTIFICATION_BITMAP_LOW(x)	\
	((u32)(FIELD_GET(NOTIFICATION_LOW_MASK, (x))))
#define PACK_NOTIFICATION_BITMAP(low, high)	\
	(FIELD_PREP(NOTIFICATION_LOW_MASK, (low)) | \
	 FIELD_PREP(NOTIFICATION_HIGH_MASK, (high)))

#define RECEIVER_VCPU_MASK		GENMASK(31, 16)
#define PACK_NOTIFICATION_GET_RECEIVER_INFO(vcpu_r, r)	\
	(FIELD_PREP(RECEIVER_VCPU_MASK, (vcpu_r)) | \
	 FIELD_PREP(RECEIVER_ID_MASK, (r)))

#define NOTIFICATION_INFO_GET_MORE_PEND_MASK	BIT(0)
#define NOTIFICATION_INFO_GET_ID_COUNT		GENMASK(11, 7)
#define ID_LIST_MASK_64				GENMASK(51, 12)
#define ID_LIST_MASK_32				GENMASK(31, 12)
#define MAX_IDS_64				20
#define MAX_IDS_32				10

#define PER_VCPU_NOTIFICATION_FLAG	BIT(0)
#define SECURE_PARTITION_BITMAP		BIT(0)
#define NON_SECURE_VM_BITMAP		BIT(1)
#define SPM_FRAMEWORK_BITMAP		BIT(2)
#define NS_HYP_FRAMEWORK_BITMAP		BIT(3)

static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
					u32 flags, bool is_bind)
{
	ffa_value_t ret;
	u32 func, src_dst_ids = PACK_TARGET_INFO(dst_id, drv_info->vm_id);

	func = is_bind ? FFA_NOTIFICATION_BIND : FFA_NOTIFICATION_UNBIND;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = func, .a1 = src_dst_ids, .a2 = flags,
		      .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
		      .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL;

	return 0;
}

static
int ffa_notification_set(u16 src_id, u16 dst_id, u32 flags, u64 bitmap)
{
	ffa_value_t ret;
	u32 src_dst_ids = PACK_TARGET_INFO(dst_id, src_id);

	invoke_ffa_fn((ffa_value_t) {
		      .a0 = FFA_NOTIFICATION_SET, .a1 = src_dst_ids, .a2 = flags,
		      .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
		      .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL;

	return 0;
}

struct ffa_notify_bitmaps {
	u64 sp_map;
	u64 vm_map;
	u64 arch_map;
};

static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
{
	ffa_value_t ret;
	u16 src_id = drv_info->vm_id;
	u16 cpu_id = smp_processor_id();
	u32 rec_vcpu_ids = PACK_NOTIFICATION_GET_RECEIVER_INFO(cpu_id, src_id);

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_GET, .a1 = rec_vcpu_ids, .a2 = flags,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL; /* Something else went wrong. */

	notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
	notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
	notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);

	return 0;
}

struct ffa_dev_part_info {
	ffa_sched_recv_cb callback;
	void *cb_data;
	rwlock_t rw_lock;
};

static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
{
	struct ffa_dev_part_info *partition;
	ffa_sched_recv_cb callback;
	void *cb_data;

	partition = xa_load(&drv_info->partition_info, part_id);
	if (!partition) {
		pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
		return;
	}

	read_lock(&partition->rw_lock);
	callback = partition->callback;
	cb_data = partition->cb_data;
	read_unlock(&partition->rw_lock);

	if (callback)
		callback(vcpu, is_per_vcpu, cb_data);
}

static void ffa_notification_info_get(void)
{
	int idx, list, max_ids, lists_cnt, ids_processed, ids_count[MAX_IDS_64];
	bool is_64b_resp;
	ffa_value_t ret;
	u64 id_list;

	do {
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_FN_NATIVE(NOTIFICATION_INFO_GET),
			      }, &ret);

		if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
			if (ret.a2 != FFA_RET_NO_DATA)
				pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
				       ret.a0, ret.a2);
			return;
		}

		is_64b_resp = (ret.a0 == FFA_FN64_SUCCESS);

		ids_processed = 0;
		lists_cnt = FIELD_GET(NOTIFICATION_INFO_GET_ID_COUNT, ret.a2);
		if (is_64b_resp) {
			max_ids = MAX_IDS_64;
			id_list = FIELD_GET(ID_LIST_MASK_64, ret.a2);
		} else {
			max_ids = MAX_IDS_32;
			id_list = FIELD_GET(ID_LIST_MASK_32, ret.a2);
		}

		for (idx = 0; idx < lists_cnt; idx++, id_list >>= 2)
			ids_count[idx] = (id_list & 0x3) + 1;

		/* Process IDs */
		for (list = 0; list < lists_cnt; list++) {
			u16 vcpu_id, part_id, *packed_id_list = (u16 *)&ret.a3;

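			/*
			 * Each ID list is packed as 16-bit values starting at
			 * a3: the first entry is the partition ID, any further
			 * entries are the vCPU IDs with pending per-vCPU
			 * notifications.
			 */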
			if (ids_processed >= max_ids - 1)
				break;

			part_id = packed_id_list[ids_processed++];

			if (ids_count[list] == 1) { /* Global Notification */
				__do_sched_recv_cb(part_id, 0, false);
				continue;
			}

			/* Per vCPU Notification */
			for (idx = 0; idx < ids_count[list]; idx++) {
				if (ids_processed >= max_ids - 1)
					break;

				vcpu_id = packed_id_list[ids_processed++];

				__do_sched_recv_cb(part_id, vcpu_id, true);
			}
		}
	} while (ret.a2 & NOTIFICATION_INFO_GET_MORE_PEND_MASK);
}

static int ffa_run(struct ffa_device *dev, u16 vcpu)
{
	ffa_value_t ret;
	u32 target = dev->vm_id << 16 | vcpu;

	invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = target, }, &ret);

	while (ret.a0 == FFA_INTERRUPT)
		invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = ret.a1, },
			      &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static void ffa_drvinfo_flags_init(void)
{
	if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
	    !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
		drv_info->mem_ops_native = true;

	if (!ffa_features(FFA_MSG_SEND_DIRECT_REQ2, 0, NULL, NULL) ||
	    !ffa_features(FFA_MSG_SEND_DIRECT_RESP2, 0, NULL, NULL))
		drv_info->msg_direct_req2_supp = true;
}

static u32 ffa_api_version_get(void)
{
	return drv_info->version;
}

static int ffa_partition_info_get(const char *uuid_str,
				  struct ffa_partition_info *buffer)
{
	int count;
	uuid_t uuid;
	struct ffa_partition_info *pbuf;

	if (uuid_parse(uuid_str, &uuid)) {
		pr_err("invalid uuid (%s)\n", uuid_str);
		return -ENODEV;
	}

	count = ffa_partition_probe(&uuid, &pbuf);
	if (count <= 0)
		return -ENOENT;

	memcpy(buffer, pbuf, sizeof(*pbuf) * count);
	kfree(pbuf);
	return 0;
}

static void ffa_mode_32bit_set(struct ffa_device *dev)
{
	dev->mode_32bit = true;
}

static int ffa_sync_send_receive(struct ffa_device *dev,
				 struct ffa_send_direct_data *data)
{
	return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
				       dev->mode_32bit, data);
}

static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
{
	return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
}

static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid,
				  struct ffa_send_direct_data2 *data)
{
	if (!drv_info->msg_direct_req2_supp)
		return -EOPNOTSUPP;

	return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id,
					uuid, data);
}

static int ffa_memory_share(struct ffa_mem_ops_args *args)
{
	if (drv_info->mem_ops_native)
		return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);

	return ffa_memory_ops(FFA_MEM_SHARE, args);
}

static int ffa_memory_lend(struct ffa_mem_ops_args *args)
{
	/* Note that upon a successful MEM_LEND request the caller
	 * must ensure that the memory region specified is not accessed
	 * until a successful MEM_RECLAIM call has been made.
	 * On systems with a hypervisor present this will be enforced,
	 * however on systems without a hypervisor the responsibility
	 * falls to the calling kernel driver to prevent access.
	 */
	if (drv_info->mem_ops_native)
		return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);

	return ffa_memory_ops(FFA_MEM_LEND, args);
}

#define FFA_SECURE_PARTITION_ID_FLAG	BIT(15)

#define ffa_notifications_disabled()	(!drv_info->notif_enabled)

enum notify_type {
	NON_SECURE_VM,
	SECURE_PARTITION,
	FRAMEWORK,
};

struct notifier_cb_info {
	struct hlist_node hnode;
	ffa_notifier_cb cb;
	void *cb_data;
	enum notify_type type;
};

static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
				    void *cb_data, bool is_registration)
{
	struct ffa_dev_part_info *partition;
	bool cb_valid;

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	partition = xa_load(&drv_info->partition_info, part_id);
	if (!partition) {
		pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
		return -EINVAL;
	}

	write_lock(&partition->rw_lock);

	cb_valid = !!partition->callback;
	if (!(is_registration ^ cb_valid)) {
		write_unlock(&partition->rw_lock);
		return -EINVAL;
	}

	partition->callback = callback;
	partition->cb_data = cb_data;

	write_unlock(&partition->rw_lock);
	return 0;
}

static int ffa_sched_recv_cb_register(struct ffa_device *dev,
				      ffa_sched_recv_cb cb, void *cb_data)
{
	return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
}

static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
{
	return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
}

static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
{
	return ffa_notification_bind_common(dst_id, bitmap, flags, true);
}

static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
{
	return ffa_notification_bind_common(dst_id, bitmap, 0, false);
}

/* Should be called while the notify_lock is taken */
static struct notifier_cb_info *
notifier_hash_node_get(u16 notify_id, enum notify_type type)
{
	struct notifier_cb_info *node;

	hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
		if (type == node->type)
			return node;

	return NULL;
}

static int
update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
		   void *cb_data, bool is_registration)
{
	struct notifier_cb_info *cb_info = NULL;
	bool cb_found;

	cb_info = notifier_hash_node_get(notify_id, type);
	cb_found = !!cb_info;

	if (!(is_registration ^ cb_found))
		return -EINVAL;

	if (is_registration) {
		cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
		if (!cb_info)
			return -ENOMEM;

		cb_info->type = type;
		cb_info->cb = cb;
		cb_info->cb_data = cb_data;

		hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
	} else {
		hash_del(&cb_info->hnode);
	}

	return 0;
}

static enum notify_type ffa_notify_type_get(u16 vm_id)
{
	if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
		return SECURE_PARTITION;
	else
		return NON_SECURE_VM;
}

static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
{
	int rc;
	enum notify_type type = ffa_notify_type_get(dev->vm_id);

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	if (notify_id >= FFA_MAX_NOTIFICATIONS)
		return -EINVAL;

	mutex_lock(&drv_info->notify_lock);

	rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
	if (rc) {
		pr_err("Could not unregister notification callback\n");
		mutex_unlock(&drv_info->notify_lock);
		return rc;
	}

	rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));

	mutex_unlock(&drv_info->notify_lock);

	return rc;
}

static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
			      ffa_notifier_cb cb, void *cb_data, int notify_id)
{
	int rc;
	u32 flags = 0;
	enum notify_type type = ffa_notify_type_get(dev->vm_id);

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	if (notify_id >= FFA_MAX_NOTIFICATIONS)
		return -EINVAL;

	mutex_lock(&drv_info->notify_lock);

	if (is_per_vcpu)
		flags = PER_VCPU_NOTIFICATION_FLAG;

	rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
	if (rc) {
		mutex_unlock(&drv_info->notify_lock);
		return rc;
	}

	rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
	if (rc) {
		pr_err("Failed to register callback for %d - %d\n",
		       notify_id, rc);
		ffa_notification_unbind(dev->vm_id, BIT(notify_id));
	}
	mutex_unlock(&drv_info->notify_lock);

	return rc;
}

static int ffa_notify_send(struct ffa_device *dev, int notify_id,
			   bool is_per_vcpu, u16 vcpu)
{
	u32 flags = 0;

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	if (is_per_vcpu)
		flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16);

	return ffa_notification_set(dev->vm_id, drv_info->vm_id, flags,
				    BIT(notify_id));
}

static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
{
	int notify_id;
	struct notifier_cb_info *cb_info = NULL;

	for (notify_id = 0; notify_id <= FFA_MAX_NOTIFICATIONS && bitmap;
	     notify_id++, bitmap >>= 1) {
		if (!(bitmap & 1))
			continue;

		mutex_lock(&drv_info->notify_lock);
		cb_info = notifier_hash_node_get(notify_id, type);
		mutex_unlock(&drv_info->notify_lock);

		if (cb_info && cb_info->cb)
			cb_info->cb(notify_id, cb_info->cb_data);
	}
}

static void notif_get_and_handle(void *unused)
{
	int rc;
	struct ffa_notify_bitmaps bitmaps;

	rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
				  SPM_FRAMEWORK_BITMAP, &bitmaps);
	if (rc) {
		pr_err("Failed to retrieve notifications with %d!\n", rc);
		return;
	}

	handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
	handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
	handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
}

static void
ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
{
	struct ffa_drv_info *info = cb_data;

	if (!is_per_vcpu)
		notif_get_and_handle(info);
	else
		smp_call_function_single(vcpu, notif_get_and_handle, info, 0);
}

static void notif_pcpu_irq_work_fn(struct work_struct *work)
{
	struct ffa_drv_info *info = container_of(work, struct ffa_drv_info,
						 notif_pcpu_work);

	ffa_self_notif_handle(smp_processor_id(), true, info);
}

static const struct ffa_info_ops ffa_drv_info_ops = {
	.api_version_get = ffa_api_version_get,
	.partition_info_get = ffa_partition_info_get,
};

static const struct ffa_msg_ops ffa_drv_msg_ops = {
	.mode_32bit_set = ffa_mode_32bit_set,
	.sync_send_receive = ffa_sync_send_receive,
	.indirect_send = ffa_indirect_msg_send,
	.sync_send_receive2 = ffa_sync_send_receive2,
};

static const struct ffa_mem_ops ffa_drv_mem_ops = {
	.memory_reclaim = ffa_memory_reclaim,
	.memory_share = ffa_memory_share,
	.memory_lend = ffa_memory_lend,
};

static const struct ffa_cpu_ops ffa_drv_cpu_ops = {
	.run = ffa_run,
};

static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
	.sched_recv_cb_register = ffa_sched_recv_cb_register,
	.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
	.notify_request = ffa_notify_request,
	.notify_relinquish = ffa_notify_relinquish,
	.notify_send = ffa_notify_send,
};

static const struct ffa_ops ffa_drv_ops = {
	.info_ops = &ffa_drv_info_ops,
	.msg_ops = &ffa_drv_msg_ops,
	.mem_ops = &ffa_drv_mem_ops,
	.cpu_ops = &ffa_drv_cpu_ops,
	.notifier_ops = &ffa_drv_notifier_ops,
};

void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
{
	int count, idx;
	struct ffa_partition_info *pbuf, *tpbuf;

	count = ffa_partition_probe(uuid, &pbuf);
	if (count <= 0)
		return;

	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
		if (tpbuf->id == ffa_dev->vm_id)
			uuid_copy(&ffa_dev->uuid, uuid);
	kfree(pbuf);
}

static int
ffa_bus_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct ffa_device *fdev = to_ffa_dev(dev);

	if (action == BUS_NOTIFY_BIND_DRIVER) {
		struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
		const struct ffa_device_id *id_table = ffa_drv->id_table;

		/*
		 * FF-A v1.1 provides the UUID for each partition as part of
		 * the discovery API; the discovered UUID must be populated in
		 * the device's UUID and there is no need to work around this
		 * by copying the same from the driver table.
		 */
		if (uuid_is_null(&fdev->uuid))
			ffa_device_match_uuid(fdev, &id_table->uuid);

		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static struct notifier_block ffa_bus_nb = {
	.notifier_call = ffa_bus_notifier,
};

static int ffa_setup_partitions(void)
{
	int count, idx, ret;
	uuid_t uuid;
	struct ffa_device *ffa_dev;
	struct ffa_dev_part_info *info;
	struct ffa_partition_info *pbuf, *tpbuf;

	if (drv_info->version == FFA_VERSION_1_0) {
		ret = bus_register_notifier(&ffa_bus_type, &ffa_bus_nb);
		if (ret)
			pr_err("Failed to register FF-A bus notifiers\n");
	}

	count = ffa_partition_probe(&uuid_null, &pbuf);
	if (count <= 0) {
		pr_info("%s: No partitions found, error %d\n", __func__, count);
		return -EINVAL;
	}

	xa_init(&drv_info->partition_info);
	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
		import_uuid(&uuid, (u8 *)tpbuf->uuid);

		/* Note that if the UUID is uuid_null, ffa_bus_notifier() will
		 * need to find the UUID of this partition ID with the help of
		 * ffa_device_match_uuid(). FF-A v1.1 and above provides the
		 * UUID here for each partition as part of the discovery API
		 * and the same is passed.
		 */
		ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
		if (!ffa_dev) {
			pr_err("%s: failed to register partition ID 0x%x\n",
			       __func__, tpbuf->id);
			continue;
		}

		ffa_dev->properties = tpbuf->properties;

		if (drv_info->version > FFA_VERSION_1_0 &&
		    !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
			ffa_mode_32bit_set(ffa_dev);

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			ffa_device_unregister(ffa_dev);
			continue;
		}
		rwlock_init(&info->rw_lock);
		ret = xa_insert(&drv_info->partition_info, tpbuf->id,
				info, GFP_KERNEL);
		if (ret) {
			pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
			       __func__, tpbuf->id, ret);
			ffa_device_unregister(ffa_dev);
			kfree(info);
		}
	}

	kfree(pbuf);

	/* Allocate for the host */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		/* Already registered devices are freed on bus_exit */
		ffa_partitions_cleanup();
		return -ENOMEM;
	}

	rwlock_init(&info->rw_lock);
	ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
			info, GFP_KERNEL);
	if (ret) {
		pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
		       __func__, drv_info->vm_id, ret);
		kfree(info);
		/* Already registered devices are freed on bus_exit */
		ffa_partitions_cleanup();
	}

	return ret;
}

static void ffa_partitions_cleanup(void)
{
	struct ffa_dev_part_info *info;
	unsigned long idx;

	xa_for_each(&drv_info->partition_info, idx, info) {
		xa_erase(&drv_info->partition_info, idx);
		kfree(info);
	}

	xa_destroy(&drv_info->partition_info);
}

/* FFA FEATURE IDs */
#define FFA_FEAT_NOTIFICATION_PENDING_INT	(1)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT		(2)
#define FFA_FEAT_MANAGED_EXIT_INT		(3)

static irqreturn_t ffa_sched_recv_irq_handler(int irq, void *irq_data)
{
	struct ffa_pcpu_irq *pcpu = irq_data;
	struct ffa_drv_info *info = pcpu->info;

	queue_work(info->notif_pcpu_wq, &info->sched_recv_irq_work);

	return IRQ_HANDLED;
}

static irqreturn_t notif_pend_irq_handler(int irq, void *irq_data)
{
	struct ffa_pcpu_irq *pcpu = irq_data;
	struct ffa_drv_info *info = pcpu->info;

	queue_work_on(smp_processor_id(), info->notif_pcpu_wq,
		      &info->notif_pcpu_work);

	return IRQ_HANDLED;
}

static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
{
	ffa_notification_info_get();
}

static int ffa_irq_map(u32 id)
{
	char *err_str;
	int ret, irq, intid;

	if (id == FFA_FEAT_NOTIFICATION_PENDING_INT)
		err_str = "Notification Pending Interrupt";
	else if (id == FFA_FEAT_SCHEDULE_RECEIVER_INT)
		err_str = "Schedule Receiver Interrupt";
	else
		err_str = "Unknown ID";

	/* The returned intid is assumed to be SGI donated to NS world */
	ret = ffa_features(id, 0, &intid, NULL);
	if (ret < 0) {
		if (ret != -EOPNOTSUPP)
			pr_err("Failed to retrieve FF-A %s %u\n", err_str, id);
		return ret;
	}

	if (acpi_disabled) {
		struct of_phandle_args oirq = {};
		struct device_node *gic;

		/* Only GICv3 supported currently with the device tree */
		gic = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
		if (!gic)
			return -ENXIO;

		oirq.np = gic;
		oirq.args_count = 1;
		oirq.args[0] = intid;
		irq = irq_create_of_mapping(&oirq);
		of_node_put(gic);
#ifdef CONFIG_ACPI
	} else {
		irq = acpi_register_gsi(NULL, intid, ACPI_EDGE_SENSITIVE,
					ACPI_ACTIVE_HIGH);
#endif
	}

	if (irq <= 0) {
		pr_err("Failed to create IRQ mapping!\n");
		return -ENODATA;
	}

	return irq;
}

static void ffa_irq_unmap(unsigned int irq)
{
	if (!irq)
		return;
	irq_dispose_mapping(irq);
}

static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
{
	if (drv_info->sched_recv_irq)
		enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
	if (drv_info->notif_pend_irq)
		enable_percpu_irq(drv_info->notif_pend_irq, IRQ_TYPE_NONE);
	return 0;
}

static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
{
	if (drv_info->sched_recv_irq)
		disable_percpu_irq(drv_info->sched_recv_irq);
	if (drv_info->notif_pend_irq)
		disable_percpu_irq(drv_info->notif_pend_irq);
	return 0;
}

static void ffa_uninit_pcpu_irq(void)
{
	if (drv_info->cpuhp_state) {
		cpuhp_remove_state(drv_info->cpuhp_state);
		drv_info->cpuhp_state = 0;
	}

	if (drv_info->notif_pcpu_wq) {
		destroy_workqueue(drv_info->notif_pcpu_wq);
		drv_info->notif_pcpu_wq = NULL;
	}

	if (drv_info->sched_recv_irq)
		free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);

	if (drv_info->notif_pend_irq)
		free_percpu_irq(drv_info->notif_pend_irq, drv_info->irq_pcpu);

	if (drv_info->irq_pcpu) {
		free_percpu(drv_info->irq_pcpu);
		drv_info->irq_pcpu = NULL;
	}
}

static int ffa_init_pcpu_irq(void)
{
	struct ffa_pcpu_irq __percpu *irq_pcpu;
	int ret, cpu;

	irq_pcpu = alloc_percpu(struct ffa_pcpu_irq);
	if (!irq_pcpu)
		return -ENOMEM;

	for_each_present_cpu(cpu)
		per_cpu_ptr(irq_pcpu, cpu)->info = drv_info;

	drv_info->irq_pcpu = irq_pcpu;

	if (drv_info->sched_recv_irq) {
		ret = request_percpu_irq(drv_info->sched_recv_irq,
					 ffa_sched_recv_irq_handler,
					 "ARM-FFA-SRI", irq_pcpu);
		if (ret) {
			pr_err("Error registering percpu SRI nIRQ %d : %d\n",
			       drv_info->sched_recv_irq, ret);
			drv_info->sched_recv_irq = 0;
			return ret;
		}
	}

	if (drv_info->notif_pend_irq) {
		ret = request_percpu_irq(drv_info->notif_pend_irq,
					 notif_pend_irq_handler,
					 "ARM-FFA-NPI", irq_pcpu);
		if (ret) {
			pr_err("Error registering percpu NPI nIRQ %d : %d\n",
			       drv_info->notif_pend_irq, ret);
			drv_info->notif_pend_irq = 0;
			return ret;
		}
	}

	INIT_WORK(&drv_info->sched_recv_irq_work, ffa_sched_recv_irq_work_fn);
	INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
	drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
	if (!drv_info->notif_pcpu_wq)
		return -EINVAL;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ffa/pcpu-irq:starting",
				ffa_cpuhp_pcpu_irq_enable,
				ffa_cpuhp_pcpu_irq_disable);

	if (ret < 0)
		return ret;

	drv_info->cpuhp_state = ret;
	return 0;
}

static void ffa_notifications_cleanup(void)
{
	ffa_uninit_pcpu_irq();
	ffa_irq_unmap(drv_info->sched_recv_irq);
	drv_info->sched_recv_irq = 0;
	ffa_irq_unmap(drv_info->notif_pend_irq);
	drv_info->notif_pend_irq = 0;

	if (drv_info->bitmap_created) {
		ffa_notification_bitmap_destroy();
		drv_info->bitmap_created = false;
	}
	drv_info->notif_enabled = false;
}

static void ffa_notifications_setup(void)
{
	int ret;

	ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
	if (!ret) {
		ret = ffa_notification_bitmap_create();
		if (ret) {
			pr_err("Notification bitmap create error %d\n", ret);
			return;
		}

		drv_info->bitmap_created = true;
	}

	ret = ffa_irq_map(FFA_FEAT_SCHEDULE_RECEIVER_INT);
	if (ret > 0)
		drv_info->sched_recv_irq = ret;

	ret = ffa_irq_map(FFA_FEAT_NOTIFICATION_PENDING_INT);
	if (ret > 0)
		drv_info->notif_pend_irq = ret;

	if (!drv_info->sched_recv_irq && !drv_info->notif_pend_irq)
		goto cleanup;

	ret = ffa_init_pcpu_irq();
	if (ret)
		goto cleanup;

	hash_init(drv_info->notifier_hash);
	mutex_init(&drv_info->notify_lock);

	drv_info->notif_enabled = true;
	return;
cleanup:
	pr_info("Notification setup failed %d, not enabled\n", ret);
	ffa_notifications_cleanup();
}

static int __init ffa_init(void)
{
	int ret;

	ret = ffa_transport_init(&invoke_ffa_fn);
	if (ret)
		return ret;

	drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
	if (!drv_info)
		return -ENOMEM;

	ret = ffa_version_check(&drv_info->version);
	if (ret)
		goto free_drv_info;

	if (ffa_id_get(&drv_info->vm_id)) {
		pr_err("failed to obtain VM id for self\n");
		ret = -ENODEV;
		goto free_drv_info;
	}

	drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
	if (!drv_info->rx_buffer) {
		ret = -ENOMEM;
		goto free_pages;
	}

	drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
	if (!drv_info->tx_buffer) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
			   virt_to_phys(drv_info->rx_buffer),
			   RXTX_BUFFER_SIZE / FFA_PAGE_SIZE);
	if (ret) {
		pr_err("failed to register FFA RxTx buffers\n");
		goto free_pages;
	}

	mutex_init(&drv_info->rx_lock);
	mutex_init(&drv_info->tx_lock);

	ffa_drvinfo_flags_init();

	ffa_notifications_setup();

	ret = ffa_setup_partitions();
	if (ret) {
		pr_err("failed to setup partitions\n");
		goto cleanup_notifs;
	}

	ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
				       drv_info, true);
	if (ret)
		pr_info("Failed to register driver sched callback %d\n", ret);

	return 0;

cleanup_notifs:
	ffa_notifications_cleanup();
free_pages:
	if (drv_info->tx_buffer)
		free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
	free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
free_drv_info:
	kfree(drv_info);
	return ret;
}
module_init(ffa_init);

static void __exit ffa_exit(void)
{
	ffa_notifications_cleanup();
	ffa_partitions_cleanup();
	ffa_rxtx_unmap(drv_info->vm_id);
	free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
	free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
	kfree(drv_info);
}
module_exit(ffa_exit);

MODULE_ALIAS("arm-ffa");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("Arm FF-A interface driver");
MODULE_LICENSE("GPL v2");