// SPDX-License-Identifier: GPL-2.0-only
/*
 * Arm Firmware Framework for ARMv8-A (FFA) interface driver
 *
 * The Arm FFA specification[1] describes a software architecture to
 * leverage the virtualization extension to isolate software images
 * provided by an ecosystem of vendors from each other and describes
 * interfaces that standardize communication between the various software
 * images including communication between images in the Secure world and
 * Normal world. Any Hypervisor could use the FFA interfaces to enable
 * communication between VMs it manages.
 *
 * The Hypervisor, a.k.a. the Partition Manager in FFA terminology, can
 * assign system resources (memory regions, devices, CPU cycles) to the
 * partitions and manage isolation amongst them.
 *
 * [1] https://developer.arm.com/docs/den0077/latest
 *
 * Copyright (C) 2021 ARM Ltd.
 */

#define DRIVER_NAME	"ARM FF-A"
#define pr_fmt(fmt)	DRIVER_NAME ": " fmt

#include <linux/acpi.h>
#include <linux/arm_ffa.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/device.h>
#include <linux/hashtable.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_irq.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

#include "common.h"

#define FFA_DRIVER_VERSION	FFA_VERSION_1_1
#define FFA_MIN_VERSION		FFA_VERSION_1_0

#define SENDER_ID_MASK		GENMASK(31, 16)
#define RECEIVER_ID_MASK	GENMASK(15, 0)
#define SENDER_ID(x)		((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
#define RECEIVER_ID(x)		((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
#define PACK_TARGET_INFO(s, r)		\
	(FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))

/*
 * Keep the RX/TX buffer size as 4K for now.
 * 64K may be preferred so that it remains at least one page in a
 * 64K PAGE_SIZE configuration.
 */
#define RXTX_BUFFER_SIZE	SZ_4K

#define FFA_MAX_NOTIFICATIONS	64

static ffa_fn *invoke_ffa_fn;

static const int ffa_linux_errmap[] = {
	/* better than a switch case as long as the return values are contiguous */
	0,		/* FFA_RET_SUCCESS */
	-EOPNOTSUPP,	/* FFA_RET_NOT_SUPPORTED */
	-EINVAL,	/* FFA_RET_INVALID_PARAMETERS */
	-ENOMEM,	/* FFA_RET_NO_MEMORY */
	-EBUSY,		/* FFA_RET_BUSY */
	-EINTR,		/* FFA_RET_INTERRUPTED */
	-EACCES,	/* FFA_RET_DENIED */
	-EAGAIN,	/* FFA_RET_RETRY */
	-ECANCELED,	/* FFA_RET_ABORTED */
	-ENODATA,	/* FFA_RET_NO_DATA */
};

static inline int ffa_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
		return ffa_linux_errmap[err_idx];
	return -EINVAL;
}

struct ffa_pcpu_irq {
	struct ffa_drv_info *info;
};

struct ffa_drv_info {
	u32 version;
	u16 vm_id;
	struct mutex rx_lock; /* lock to protect Rx buffer */
	struct mutex tx_lock; /* lock to protect Tx buffer */
	void *rx_buffer;
	void *tx_buffer;
	bool mem_ops_native;
	bool bitmap_created;
	unsigned int sched_recv_irq;
	unsigned int cpuhp_state;
	struct ffa_pcpu_irq __percpu *irq_pcpu;
	struct workqueue_struct *notif_pcpu_wq;
	struct work_struct notif_pcpu_work;
	struct work_struct irq_work;
	struct xarray partition_info;
	unsigned int partition_count;
	DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
	struct mutex notify_lock; /* lock to protect notifier hashtable */
};

static struct ffa_drv_info *drv_info;

/*
 * The driver must be able to support all the versions from the earliest
 * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
 * The specification states that if firmware supports an FFA implementation
 * that is incompatible with and at a greater version number than specified
 * by the caller (FFA_DRIVER_VERSION passed as a parameter to FFA_VERSION),
 * it must return the NOT_SUPPORTED error code.
 */
static u32 ffa_compatible_version_find(u32 version)
{
	u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version);
	u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION);
	u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION);

	if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
		return version;

	pr_info("Firmware version higher than driver version, downgrading\n");
	return FFA_DRIVER_VERSION;
}

static int ffa_version_check(u32 *version)
{
	ffa_value_t ver;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
		      }, &ver);

	if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
		pr_info("FFA_VERSION returned not supported\n");
		return -EOPNOTSUPP;
	}

	if (ver.a0 < FFA_MIN_VERSION) {
		pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
		       FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
		       FFA_MAJOR_VERSION(FFA_MIN_VERSION),
		       FFA_MINOR_VERSION(FFA_MIN_VERSION));
		return -EINVAL;
	}

	pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
		FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
	pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0),
		FFA_MINOR_VERSION(ver.a0));
	*version = ffa_compatible_version_find(ver.a0);

	return 0;
}

static int ffa_rx_release(void)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_RX_RELEASE,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	/* check for ret.a0 == FFA_RX_RELEASE ? */

	return 0;
}
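
/*
 * ffa_rxtx_map() below registers the driver's RX/TX buffer pair with the
 * SPM/hypervisor. pg_cnt is the buffer size expressed in FFA_PAGE_SIZE (4K)
 * pages, e.g. RXTX_BUFFER_SIZE / FFA_PAGE_SIZE as passed from ffa_init().
 */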

static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_FN_NATIVE(RXTX_MAP),
		      .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static int ffa_rxtx_unmap(u16 vm_id)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

#define PARTITION_INFO_GET_RETURN_COUNT_ONLY	BIT(0)

/* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
static int
__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
			 struct ffa_partition_info *buffer, int num_partitions)
{
	int idx, count, flags = 0, sz, buf_sz;
	ffa_value_t partition_info;

	if (drv_info->version > FFA_VERSION_1_0 &&
	    (!buffer || !num_partitions)) /* Just get the count for now */
		flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;

	mutex_lock(&drv_info->rx_lock);
	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_PARTITION_INFO_GET,
		      .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
		      .a5 = flags,
		      }, &partition_info);

	if (partition_info.a0 == FFA_ERROR) {
		mutex_unlock(&drv_info->rx_lock);
		return ffa_to_linux_errno((int)partition_info.a2);
	}

	count = partition_info.a2;

	if (drv_info->version > FFA_VERSION_1_0) {
		buf_sz = sz = partition_info.a3;
		if (sz > sizeof(*buffer))
			buf_sz = sizeof(*buffer);
	} else {
		/* FFA_VERSION_1_0 lacks the size in the response */
		buf_sz = sz = 8;
	}

	if (buffer && count <= num_partitions)
		for (idx = 0; idx < count; idx++)
			memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
			       buf_sz);

	ffa_rx_release();

	mutex_unlock(&drv_info->rx_lock);

	return count;
}

/* buffer is allocated here and the caller must free it if the returned count > 0 */
static int
ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
{
	int count;
	u32 uuid0_4[4];
	struct ffa_partition_info *pbuf;

	export_uuid((u8 *)uuid0_4, uuid);
	count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
					 uuid0_4[3], NULL, 0);
	if (count <= 0)
		return count;

	pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
	if (!pbuf)
		return -ENOMEM;

	count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
					 uuid0_4[3], pbuf, count);
	if (count <= 0)
		kfree(pbuf);
	else
		*buffer = pbuf;

	return count;
}

#define VM_ID_MASK	GENMASK(15, 0)
static int ffa_id_get(u16 *vm_id)
{
	ffa_value_t id;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_ID_GET,
		      }, &id);

	if (id.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)id.a2);

	*vm_id = FIELD_GET(VM_ID_MASK, (id.a2));

	return 0;
}
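
/*
 * ffa_msg_send_direct_req() below sends a direct message request: the
 * source/destination IDs are packed into a1 and the five payload words go
 * in a3-a7, using either the 32-bit or the native (64-bit) function IDs
 * depending on mode_32bit. If the callee is pre-empted (FFA_INTERRUPT),
 * it is resumed with FFA_RUN until the matching direct response or an
 * error is returned.
 */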

static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
				   struct ffa_send_direct_data *data)
{
	u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
	ffa_value_t ret;

	if (mode_32bit) {
		req_id = FFA_MSG_SEND_DIRECT_REQ;
		resp_id = FFA_MSG_SEND_DIRECT_RESP;
	} else {
		req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
		resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
	}

	invoke_ffa_fn((ffa_value_t){
		      .a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
		      .a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
		      .a6 = data->data3, .a7 = data->data4,
		      }, &ret);

	while (ret.a0 == FFA_INTERRUPT)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_RUN, .a1 = ret.a1,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == resp_id) {
		data->data0 = ret.a3;
		data->data1 = ret.a4;
		data->data2 = ret.a5;
		data->data3 = ret.a6;
		data->data4 = ret.a7;
		return 0;
	}

	return -EINVAL;
}

static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
			      u32 frag_len, u32 len, u64 *handle)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = func_id, .a1 = len, .a2 = frag_len,
		      .a3 = buf, .a4 = buf_sz,
		      }, &ret);

	while (ret.a0 == FFA_MEM_OP_PAUSE)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_MEM_OP_RESUME,
			      .a1 = ret.a1, .a2 = ret.a2,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == FFA_SUCCESS) {
		if (handle)
			*handle = PACK_HANDLE(ret.a2, ret.a3);
	} else if (ret.a0 == FFA_MEM_FRAG_RX) {
		if (handle)
			*handle = PACK_HANDLE(ret.a1, ret.a2);
	} else {
		return -EOPNOTSUPP;
	}

	return frag_len;
}

static int ffa_mem_next_frag(u64 handle, u32 frag_len)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_MEM_FRAG_TX,
		      .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
		      .a3 = frag_len,
		      }, &ret);

	while (ret.a0 == FFA_MEM_OP_PAUSE)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_MEM_OP_RESUME,
			      .a1 = ret.a1, .a2 = ret.a2,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == FFA_MEM_FRAG_RX)
		return ret.a3;
	else if (ret.a0 == FFA_SUCCESS)
		return 0;

	return -EOPNOTSUPP;
}

static int
ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
		      u32 len, u64 *handle, bool first)
{
	if (!first)
		return ffa_mem_next_frag(*handle, frag_len);

	return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
}

static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
{
	u32 num_pages = 0;

	do {
		num_pages += sg->length / FFA_PAGE_SIZE;
	} while ((sg = sg_next(sg)));

	return num_pages;
}

static u16 ffa_memory_attributes_get(u32 func_id)
{
	/*
	 * For the memory lend or donate operation, if the receiver is a PE or
	 * a proxy endpoint, the owner/sender must not specify the attributes
	 */
	if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
	    func_id == FFA_MEM_LEND)
		return 0;

	return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
}
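
/*
 * ffa_setup_and_transmit() below builds the memory transaction descriptor
 * in @buffer: the ffa_mem_region header, one ffa_mem_region_attributes
 * entry per receiver in args->nattrs, the composite memory region header
 * and finally the constituent address ranges taken from args->sg. When the
 * constituents do not all fit in @max_fragsize, the descriptor is sent in
 * fragments via ffa_transmit_fragment()/FFA_MEM_FRAG_TX.
 */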

static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
		       struct ffa_mem_ops_args *args)
{
	int rc = 0;
	bool first = true;
	u32 composite_offset;
	phys_addr_t addr = 0;
	struct ffa_mem_region *mem_region = buffer;
	struct ffa_composite_mem_region *composite;
	struct ffa_mem_region_addr_range *constituents;
	struct ffa_mem_region_attributes *ep_mem_access;
	u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);

	mem_region->tag = args->tag;
	mem_region->flags = args->flags;
	mem_region->sender_id = drv_info->vm_id;
	mem_region->attributes = ffa_memory_attributes_get(func_id);
	ep_mem_access = buffer +
			ffa_mem_desc_offset(buffer, 0, drv_info->version);
	composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
					       drv_info->version);

	for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
		ep_mem_access->receiver = args->attrs[idx].receiver;
		ep_mem_access->attrs = args->attrs[idx].attrs;
		ep_mem_access->composite_off = composite_offset;
		ep_mem_access->flag = 0;
		ep_mem_access->reserved = 0;
	}
	mem_region->handle = 0;
	mem_region->ep_count = args->nattrs;
	if (drv_info->version <= FFA_VERSION_1_0) {
		mem_region->ep_mem_size = 0;
	} else {
		mem_region->ep_mem_size = sizeof(*ep_mem_access);
		mem_region->ep_mem_offset = sizeof(*mem_region);
		memset(mem_region->reserved, 0, 12);
	}

	composite = buffer + composite_offset;
	composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
	composite->addr_range_cnt = num_entries;
	composite->reserved = 0;

	length = composite_offset + CONSTITUENTS_OFFSET(num_entries);
	frag_len = composite_offset + CONSTITUENTS_OFFSET(0);
	if (frag_len > max_fragsize)
		return -ENXIO;

	if (!args->use_txbuf) {
		addr = virt_to_phys(buffer);
		buf_sz = max_fragsize / FFA_PAGE_SIZE;
	}

	constituents = buffer + frag_len;
	idx = 0;
	do {
		if (frag_len == max_fragsize) {
			rc = ffa_transmit_fragment(func_id, addr, buf_sz,
						   frag_len, length,
						   &args->g_handle, first);
			if (rc < 0)
				return -ENXIO;

			first = false;
			idx = 0;
			frag_len = 0;
			constituents = buffer;
		}

		if ((void *)constituents - buffer > max_fragsize) {
			pr_err("Memory Region Fragment > Tx Buffer size\n");
			return -EFAULT;
		}

		constituents->address = sg_phys(args->sg);
		constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
		constituents->reserved = 0;
		constituents++;
		frag_len += sizeof(struct ffa_mem_region_addr_range);
	} while ((args->sg = sg_next(args->sg)));

	return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
				     length, &args->g_handle, first);
}
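
/*
 * ffa_memory_ops() below is the common helper for the share/lend paths:
 * it uses the driver's TX buffer (serialised by tx_lock) when
 * args->use_txbuf is set, otherwise it allocates a temporary buffer whose
 * physical address and page count are passed to the firmware in the first
 * fragment.
 */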

static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
{
	int ret;
	void *buffer;

	if (!args->use_txbuf) {
		buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;
	} else {
		buffer = drv_info->tx_buffer;
		mutex_lock(&drv_info->tx_lock);
	}

	ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);

	if (args->use_txbuf)
		mutex_unlock(&drv_info->tx_lock);
	else
		free_pages_exact(buffer, RXTX_BUFFER_SIZE);

	return ret < 0 ? ret : 0;
}

static int ffa_memory_reclaim(u64 g_handle, u32 flags)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_MEM_RECLAIM,
		      .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
		      .a3 = flags,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static int ffa_features(u32 func_feat_id, u32 input_props,
			u32 *if_props_1, u32 *if_props_2)
{
	ffa_value_t id;

	if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
		pr_err("%s: Invalid Parameters: %x, %x", __func__,
		       func_feat_id, input_props);
		return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
	}

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
		      }, &id);

	if (id.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)id.a2);

	if (if_props_1)
		*if_props_1 = id.a2;
	if (if_props_2)
		*if_props_2 = id.a3;

	return 0;
}

static int ffa_notification_bitmap_create(void)
{
	ffa_value_t ret;
	u16 vcpu_count = nr_cpu_ids;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_BITMAP_CREATE,
		      .a1 = drv_info->vm_id, .a2 = vcpu_count,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static int ffa_notification_bitmap_destroy(void)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_BITMAP_DESTROY,
		      .a1 = drv_info->vm_id,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

#define NOTIFICATION_LOW_MASK		GENMASK(31, 0)
#define NOTIFICATION_HIGH_MASK		GENMASK(63, 32)
#define NOTIFICATION_BITMAP_HIGH(x)	\
	((u32)(FIELD_GET(NOTIFICATION_HIGH_MASK, (x))))
#define NOTIFICATION_BITMAP_LOW(x)	\
	((u32)(FIELD_GET(NOTIFICATION_LOW_MASK, (x))))
#define PACK_NOTIFICATION_BITMAP(low, high)	\
	(FIELD_PREP(NOTIFICATION_LOW_MASK, (low)) |	\
	 FIELD_PREP(NOTIFICATION_HIGH_MASK, (high)))

#define RECEIVER_VCPU_MASK		GENMASK(31, 16)
#define PACK_NOTIFICATION_GET_RECEIVER_INFO(vcpu_r, r)	\
	(FIELD_PREP(RECEIVER_VCPU_MASK, (vcpu_r)) |	\
	 FIELD_PREP(RECEIVER_ID_MASK, (r)))

#define NOTIFICATION_INFO_GET_MORE_PEND_MASK	BIT(0)
#define NOTIFICATION_INFO_GET_ID_COUNT		GENMASK(11, 7)
#define ID_LIST_MASK_64				GENMASK(51, 12)
#define ID_LIST_MASK_32				GENMASK(31, 12)
#define MAX_IDS_64				20
#define MAX_IDS_32				10

#define PER_VCPU_NOTIFICATION_FLAG	BIT(0)
#define SECURE_PARTITION_BITMAP		BIT(0)
#define NON_SECURE_VM_BITMAP		BIT(1)
#define SPM_FRAMEWORK_BITMAP		BIT(2)
#define NS_HYP_FRAMEWORK_BITMAP		BIT(3)

static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
					u32 flags, bool is_bind)
{
	ffa_value_t ret;
	u32 func, src_dst_ids = PACK_TARGET_INFO(dst_id, drv_info->vm_id);

	func = is_bind ? FFA_NOTIFICATION_BIND : FFA_NOTIFICATION_UNBIND;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = func, .a1 = src_dst_ids, .a2 = flags,
		      .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
		      .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL;

	return 0;
}

static
int ffa_notification_set(u16 src_id, u16 dst_id, u32 flags, u64 bitmap)
{
	ffa_value_t ret;
	u32 src_dst_ids = PACK_TARGET_INFO(dst_id, src_id);

	invoke_ffa_fn((ffa_value_t) {
		      .a0 = FFA_NOTIFICATION_SET, .a1 = src_dst_ids, .a2 = flags,
		      .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
		      .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL;

	return 0;
}

struct ffa_notify_bitmaps {
	u64 sp_map;
	u64 vm_map;
	u64 arch_map;
};

static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
{
	ffa_value_t ret;
	u16 src_id = drv_info->vm_id;
	u16 cpu_id = smp_processor_id();
	u32 rec_vcpu_ids = PACK_NOTIFICATION_GET_RECEIVER_INFO(cpu_id, src_id);

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_GET, .a1 = rec_vcpu_ids, .a2 = flags,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL; /* Something else went wrong. */

	notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
	notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
	notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);

	return 0;
}

struct ffa_dev_part_info {
	ffa_sched_recv_cb callback;
	void *cb_data;
	rwlock_t rw_lock;
};

static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
{
	struct ffa_dev_part_info *partition;
	ffa_sched_recv_cb callback;
	void *cb_data;

	partition = xa_load(&drv_info->partition_info, part_id);
	read_lock(&partition->rw_lock);
	callback = partition->callback;
	cb_data = partition->cb_data;
	read_unlock(&partition->rw_lock);

	if (callback)
		callback(vcpu, is_per_vcpu, cb_data);
}
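
/*
 * ffa_notification_info_get() below parses the FFA_NOTIFICATION_INFO_GET
 * response: a2 carries the number of ID lists and, in 2-bit fields, the
 * count of vCPU IDs in each list, while the packed 16-bit IDs themselves
 * start at a3. A list with no vCPU IDs is treated as a global notification
 * for that partition; otherwise the scheduler receiver callback runs once
 * per listed vCPU. The call is repeated while the "more pending" flag is
 * set.
 */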

static void ffa_notification_info_get(void)
{
	int idx, list, max_ids, lists_cnt, ids_processed, ids_count[MAX_IDS_64];
	bool is_64b_resp;
	ffa_value_t ret;
	u64 id_list;

	do {
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_FN_NATIVE(NOTIFICATION_INFO_GET),
			      }, &ret);

		if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
			if (ret.a2 != FFA_RET_NO_DATA)
				pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
				       ret.a0, ret.a2);
			return;
		}

		is_64b_resp = (ret.a0 == FFA_FN64_SUCCESS);

		ids_processed = 0;
		lists_cnt = FIELD_GET(NOTIFICATION_INFO_GET_ID_COUNT, ret.a2);
		if (is_64b_resp) {
			max_ids = MAX_IDS_64;
			id_list = FIELD_GET(ID_LIST_MASK_64, ret.a2);
		} else {
			max_ids = MAX_IDS_32;
			id_list = FIELD_GET(ID_LIST_MASK_32, ret.a2);
		}

		for (idx = 0; idx < lists_cnt; idx++, id_list >>= 2)
			ids_count[idx] = (id_list & 0x3) + 1;

		/* Process IDs */
		for (list = 0; list < lists_cnt; list++) {
			u16 vcpu_id, part_id, *packed_id_list = (u16 *)&ret.a3;

			if (ids_processed >= max_ids - 1)
				break;

			part_id = packed_id_list[++ids_processed];

			if (!ids_count[list]) { /* Global Notification */
				__do_sched_recv_cb(part_id, 0, false);
				continue;
			}

			/* Per vCPU Notification */
			for (idx = 0; idx < ids_count[list]; idx++) {
				if (ids_processed >= max_ids - 1)
					break;

				vcpu_id = packed_id_list[++ids_processed];

				__do_sched_recv_cb(part_id, vcpu_id, true);
			}
		}
	} while (ret.a2 & NOTIFICATION_INFO_GET_MORE_PEND_MASK);
}

static int ffa_run(struct ffa_device *dev, u16 vcpu)
{
	ffa_value_t ret;
	u32 target = dev->vm_id << 16 | vcpu;

	invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = target, }, &ret);

	while (ret.a0 == FFA_INTERRUPT)
		invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = ret.a1, },
			      &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static void ffa_set_up_mem_ops_native_flag(void)
{
	if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
	    !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
		drv_info->mem_ops_native = true;
}

static u32 ffa_api_version_get(void)
{
	return drv_info->version;
}

static int ffa_partition_info_get(const char *uuid_str,
				  struct ffa_partition_info *buffer)
{
	int count;
	uuid_t uuid;
	struct ffa_partition_info *pbuf;

	if (uuid_parse(uuid_str, &uuid)) {
		pr_err("invalid uuid (%s)\n", uuid_str);
		return -ENODEV;
	}

	count = ffa_partition_probe(&uuid, &pbuf);
	if (count <= 0)
		return -ENOENT;

	memcpy(buffer, pbuf, sizeof(*pbuf) * count);
	kfree(pbuf);
	return 0;
}

static void ffa_mode_32bit_set(struct ffa_device *dev)
{
	dev->mode_32bit = true;
}

static int ffa_sync_send_receive(struct ffa_device *dev,
				 struct ffa_send_direct_data *data)
{
	return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
				       dev->mode_32bit, data);
}

static int ffa_memory_share(struct ffa_mem_ops_args *args)
{
	if (drv_info->mem_ops_native)
		return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);

	return ffa_memory_ops(FFA_MEM_SHARE, args);
}

static int ffa_memory_lend(struct ffa_mem_ops_args *args)
{
	/* Note that upon a successful MEM_LEND request the caller
	 * must ensure that the memory region specified is not accessed
	 * until a successful MEM_RECLAIM call has been made.
	 * On systems with a hypervisor present this will be enforced,
	 * however on systems without a hypervisor the responsibility
	 * falls to the calling kernel driver to prevent access.
	 */
	if (drv_info->mem_ops_native)
		return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);

	return ffa_memory_ops(FFA_MEM_LEND, args);
}

#define FFA_SECURE_PARTITION_ID_FLAG	BIT(15)

enum notify_type {
	NON_SECURE_VM,
	SECURE_PARTITION,
	FRAMEWORK,
};

struct notifier_cb_info {
	struct hlist_node hnode;
	ffa_notifier_cb cb;
	void *cb_data;
	enum notify_type type;
};

static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
				    void *cb_data, bool is_registration)
{
	struct ffa_dev_part_info *partition;
	bool cb_valid;

	partition = xa_load(&drv_info->partition_info, part_id);
	write_lock(&partition->rw_lock);

	cb_valid = !!partition->callback;
	if (!(is_registration ^ cb_valid)) {
		write_unlock(&partition->rw_lock);
		return -EINVAL;
	}

	partition->callback = callback;
	partition->cb_data = cb_data;

	write_unlock(&partition->rw_lock);
	return 0;
}

static int ffa_sched_recv_cb_register(struct ffa_device *dev,
				      ffa_sched_recv_cb cb, void *cb_data)
{
	return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
}

static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
{
	return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
}

static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
{
	return ffa_notification_bind_common(dst_id, bitmap, flags, true);
}

static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
{
	return ffa_notification_bind_common(dst_id, bitmap, 0, false);
}

/* Should be called while the notify_lock is taken */
static struct notifier_cb_info *
notifier_hash_node_get(u16 notify_id, enum notify_type type)
{
	struct notifier_cb_info *node;

	hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
		if (type == node->type)
			return node;

	return NULL;
}

static int
update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
		   void *cb_data, bool is_registration)
{
	struct notifier_cb_info *cb_info = NULL;
	bool cb_found;

	cb_info = notifier_hash_node_get(notify_id, type);
	cb_found = !!cb_info;

	if (!(is_registration ^ cb_found))
		return -EINVAL;

	if (is_registration) {
		cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
		if (!cb_info)
			return -ENOMEM;

		cb_info->type = type;
		cb_info->cb = cb;
		cb_info->cb_data = cb_data;

		hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
	} else {
		hash_del(&cb_info->hnode);
		kfree(cb_info);	/* free the entry allocated at registration */
	}

	return 0;
}

static enum notify_type ffa_notify_type_get(u16 vm_id)
{
	if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
		return SECURE_PARTITION;
	else
		return NON_SECURE_VM;
}

static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
{
	int rc;
	enum notify_type type = ffa_notify_type_get(dev->vm_id);

	if (notify_id >= FFA_MAX_NOTIFICATIONS)
		return -EINVAL;

	mutex_lock(&drv_info->notify_lock);

	rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
	if (rc) {
		pr_err("Could not unregister notification callback\n");
		mutex_unlock(&drv_info->notify_lock);
		return rc;
	}

	rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));

	mutex_unlock(&drv_info->notify_lock);

	return rc;
}
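
/*
 * ffa_notify_request() below binds the notification ID to this endpoint
 * first and only then records the callback in the notifier hash table,
 * unbinding again if the callback registration fails. Both steps run
 * under notify_lock.
 */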

static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
			      ffa_notifier_cb cb, void *cb_data, int notify_id)
{
	int rc;
	u32 flags = 0;
	enum notify_type type = ffa_notify_type_get(dev->vm_id);

	if (notify_id >= FFA_MAX_NOTIFICATIONS)
		return -EINVAL;

	mutex_lock(&drv_info->notify_lock);

	if (is_per_vcpu)
		flags = PER_VCPU_NOTIFICATION_FLAG;

	rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
	if (rc) {
		mutex_unlock(&drv_info->notify_lock);
		return rc;
	}

	rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
	if (rc) {
		pr_err("Failed to register callback for %d - %d\n",
		       notify_id, rc);
		ffa_notification_unbind(dev->vm_id, BIT(notify_id));
	}
	mutex_unlock(&drv_info->notify_lock);

	return rc;
}

static int ffa_notify_send(struct ffa_device *dev, int notify_id,
			   bool is_per_vcpu, u16 vcpu)
{
	u32 flags = 0;

	if (is_per_vcpu)
		flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16);

	return ffa_notification_set(dev->vm_id, drv_info->vm_id, flags,
				    BIT(notify_id));
}

static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
{
	int notify_id;
	struct notifier_cb_info *cb_info = NULL;

	for (notify_id = 0; notify_id <= FFA_MAX_NOTIFICATIONS && bitmap;
	     notify_id++, bitmap >>= 1) {
		if (!(bitmap & 1))
			continue;

		mutex_lock(&drv_info->notify_lock);
		cb_info = notifier_hash_node_get(notify_id, type);
		mutex_unlock(&drv_info->notify_lock);

		if (cb_info && cb_info->cb)
			cb_info->cb(notify_id, cb_info->cb_data);
	}
}

static void notif_pcpu_irq_work_fn(struct work_struct *work)
{
	int rc;
	struct ffa_notify_bitmaps bitmaps;

	rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
				  SPM_FRAMEWORK_BITMAP, &bitmaps);
	if (rc) {
		pr_err("Failed to retrieve notifications with %d!\n", rc);
		return;
	}

	handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
	handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
	handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
}

static void
ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
{
	struct ffa_drv_info *info = cb_data;

	if (!is_per_vcpu)
		notif_pcpu_irq_work_fn(&info->notif_pcpu_work);
	else
		queue_work_on(vcpu, info->notif_pcpu_wq,
			      &info->notif_pcpu_work);
}

static const struct ffa_info_ops ffa_drv_info_ops = {
	.api_version_get = ffa_api_version_get,
	.partition_info_get = ffa_partition_info_get,
};

static const struct ffa_msg_ops ffa_drv_msg_ops = {
	.mode_32bit_set = ffa_mode_32bit_set,
	.sync_send_receive = ffa_sync_send_receive,
};

static const struct ffa_mem_ops ffa_drv_mem_ops = {
	.memory_reclaim = ffa_memory_reclaim,
	.memory_share = ffa_memory_share,
	.memory_lend = ffa_memory_lend,
};

static const struct ffa_cpu_ops ffa_drv_cpu_ops = {
	.run = ffa_run,
};

static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
	.sched_recv_cb_register = ffa_sched_recv_cb_register,
	.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
	.notify_request = ffa_notify_request,
	.notify_relinquish = ffa_notify_relinquish,
	.notify_send = ffa_notify_send,
};

static const struct ffa_ops ffa_drv_ops = {
	.info_ops = &ffa_drv_info_ops,
	.msg_ops = &ffa_drv_msg_ops,
	.mem_ops = &ffa_drv_mem_ops,
	.cpu_ops = &ffa_drv_cpu_ops,
	.notifier_ops = &ffa_drv_notifier_ops,
};

void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
{
	int count, idx;
	struct ffa_partition_info *pbuf, *tpbuf;

	/*
	 * FF-A v1.1 provides the UUID for each partition as part of the
	 * discovery API, so the discovered UUID is already populated in the
	 * device's UUID and there is no need to copy it from the driver
	 * table here.
	 */
	if (drv_info->version > FFA_VERSION_1_0)
		return;

	count = ffa_partition_probe(uuid, &pbuf);
	if (count <= 0)
		return;

	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
		if (tpbuf->id == ffa_dev->vm_id)
			uuid_copy(&ffa_dev->uuid, uuid);
	kfree(pbuf);
}

static void ffa_setup_partitions(void)
{
	int count, idx;
	uuid_t uuid;
	struct ffa_device *ffa_dev;
	struct ffa_dev_part_info *info;
	struct ffa_partition_info *pbuf, *tpbuf;

	count = ffa_partition_probe(&uuid_null, &pbuf);
	if (count <= 0) {
		pr_info("%s: No partitions found, error %d\n", __func__, count);
		return;
	}

	xa_init(&drv_info->partition_info);
	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
		import_uuid(&uuid, (u8 *)tpbuf->uuid);

		/* Note that if the UUID is uuid_null, ffa_device_match()
		 * will rely on ffa_device_match_uuid() to find the UUID of
		 * this partition ID. FF-A v1.1 and above provides the UUID
		 * here for each partition as part of the discovery API and
		 * the same is passed along.
		 */
		ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
		if (!ffa_dev) {
			pr_err("%s: failed to register partition ID 0x%x\n",
			       __func__, tpbuf->id);
			continue;
		}

		if (drv_info->version > FFA_VERSION_1_0 &&
		    !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
			ffa_mode_32bit_set(ffa_dev);

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			ffa_device_unregister(ffa_dev);
			continue;
		}
		xa_store(&drv_info->partition_info, tpbuf->id, info, GFP_KERNEL);
	}
	drv_info->partition_count = count;

	kfree(pbuf);

	/* Allocate for the host */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return;
	xa_store(&drv_info->partition_info, drv_info->vm_id, info, GFP_KERNEL);
	drv_info->partition_count++;
}

static void ffa_partitions_cleanup(void)
{
	struct ffa_dev_part_info **info;
	int idx, count = drv_info->partition_count;

	if (!count)
		return;

	info = kcalloc(count, sizeof(**info), GFP_KERNEL);
	if (!info)
		return;

	xa_extract(&drv_info->partition_info, (void **)info, 0, VM_ID_MASK,
		   count, XA_PRESENT);

	for (idx = 0; idx < count; idx++)
		kfree(info[idx]);
	kfree(info);

	drv_info->partition_count = 0;
	xa_destroy(&drv_info->partition_info);
}

/* FFA FEATURE IDs */
#define FFA_FEAT_NOTIFICATION_PENDING_INT	(1)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT		(2)
#define FFA_FEAT_MANAGED_EXIT_INT		(3)
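
/*
 * irq_handler() below is the per-CPU Schedule Receiver interrupt handler;
 * it only queues irq_work, and the actual FFA_NOTIFICATION_INFO_GET
 * handling runs from the workqueue in ffa_sched_recv_irq_work_fn().
 */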

static irqreturn_t irq_handler(int irq, void *irq_data)
{
	struct ffa_pcpu_irq *pcpu = irq_data;
	struct ffa_drv_info *info = pcpu->info;

	queue_work(info->notif_pcpu_wq, &info->irq_work);

	return IRQ_HANDLED;
}

static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
{
	ffa_notification_info_get();
}

static int ffa_sched_recv_irq_map(void)
{
	int ret, irq, sr_intid;

	/* The returned sr_intid is assumed to be an SGI donated to the NS world */
	ret = ffa_features(FFA_FEAT_SCHEDULE_RECEIVER_INT, 0, &sr_intid, NULL);
	if (ret < 0) {
		if (ret != -EOPNOTSUPP)
			pr_err("Failed to retrieve scheduler Rx interrupt\n");
		return ret;
	}

	if (acpi_disabled) {
		struct of_phandle_args oirq = {};
		struct device_node *gic;

		/* Only GICv3 supported currently with the device tree */
		gic = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
		if (!gic)
			return -ENXIO;

		oirq.np = gic;
		oirq.args_count = 1;
		oirq.args[0] = sr_intid;
		irq = irq_create_of_mapping(&oirq);
		of_node_put(gic);
#ifdef CONFIG_ACPI
	} else {
		irq = acpi_register_gsi(NULL, sr_intid, ACPI_EDGE_SENSITIVE,
					ACPI_ACTIVE_HIGH);
#endif
	}

	if (irq <= 0) {
		pr_err("Failed to create IRQ mapping!\n");
		return -ENODATA;
	}

	return irq;
}

static void ffa_sched_recv_irq_unmap(void)
{
	if (drv_info->sched_recv_irq)
		irq_dispose_mapping(drv_info->sched_recv_irq);
}

static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
{
	enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
	return 0;
}

static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
{
	disable_percpu_irq(drv_info->sched_recv_irq);
	return 0;
}

static void ffa_uninit_pcpu_irq(void)
{
	if (drv_info->cpuhp_state)
		cpuhp_remove_state(drv_info->cpuhp_state);

	if (drv_info->notif_pcpu_wq)
		destroy_workqueue(drv_info->notif_pcpu_wq);

	if (drv_info->sched_recv_irq)
		free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);

	if (drv_info->irq_pcpu)
		free_percpu(drv_info->irq_pcpu);
}

static int ffa_init_pcpu_irq(unsigned int irq)
{
	struct ffa_pcpu_irq __percpu *irq_pcpu;
	int ret, cpu;

	irq_pcpu = alloc_percpu(struct ffa_pcpu_irq);
	if (!irq_pcpu)
		return -ENOMEM;

	for_each_present_cpu(cpu)
		per_cpu_ptr(irq_pcpu, cpu)->info = drv_info;

	drv_info->irq_pcpu = irq_pcpu;

	ret = request_percpu_irq(irq, irq_handler, "ARM-FFA", irq_pcpu);
	if (ret) {
		pr_err("Error registering notification IRQ %d: %d\n", irq, ret);
		return ret;
	}

	INIT_WORK(&drv_info->irq_work, ffa_sched_recv_irq_work_fn);
	INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
	drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
	if (!drv_info->notif_pcpu_wq)
		return -EINVAL;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ffa/pcpu-irq:starting",
				ffa_cpuhp_pcpu_irq_enable,
				ffa_cpuhp_pcpu_irq_disable);

	if (ret < 0)
		return ret;

	drv_info->cpuhp_state = ret;
	return 0;
}

static void ffa_notifications_cleanup(void)
{
	ffa_uninit_pcpu_irq();
	ffa_sched_recv_irq_unmap();

	if (drv_info->bitmap_created) {
		ffa_notification_bitmap_destroy();
		drv_info->bitmap_created = false;
	}
}
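
/*
 * ffa_notifications_setup() below follows this order: probe support via
 * FFA_FEATURES, create the notification bitmap, map and request the
 * per-CPU Schedule Receiver interrupt, then register the driver's own
 * scheduler receiver callback. Any failure after the bitmap is created
 * unwinds through ffa_notifications_cleanup(); missing notification
 * support is not treated as fatal.
 */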

static int ffa_notifications_setup(void)
{
	int ret, irq;

	ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
	if (ret) {
		pr_err("Notifications not supported, continuing without them\n");
		return 0;
	}

	ret = ffa_notification_bitmap_create();
	if (ret) {
		pr_err("notification_bitmap_create error %d\n", ret);
		return ret;
	}
	drv_info->bitmap_created = true;

	irq = ffa_sched_recv_irq_map();
	if (irq <= 0) {
		ret = irq;
		goto cleanup;
	}

	drv_info->sched_recv_irq = irq;

	ret = ffa_init_pcpu_irq(irq);
	if (ret)
		goto cleanup;

	hash_init(drv_info->notifier_hash);
	mutex_init(&drv_info->notify_lock);

	/* Register the internal scheduling callback */
	ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
				       drv_info, true);
	if (!ret)
		return ret;
cleanup:
	ffa_notifications_cleanup();
	return ret;
}

static int __init ffa_init(void)
{
	int ret;

	ret = ffa_transport_init(&invoke_ffa_fn);
	if (ret)
		return ret;

	ret = arm_ffa_bus_init();
	if (ret)
		return ret;

	drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
	if (!drv_info) {
		ret = -ENOMEM;
		goto ffa_bus_exit;
	}

	ret = ffa_version_check(&drv_info->version);
	if (ret)
		goto free_drv_info;

	if (ffa_id_get(&drv_info->vm_id)) {
		pr_err("failed to obtain VM id for self\n");
		ret = -ENODEV;
		goto free_drv_info;
	}

	drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
	if (!drv_info->rx_buffer) {
		ret = -ENOMEM;
		goto free_pages;
	}

	drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
	if (!drv_info->tx_buffer) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
			   virt_to_phys(drv_info->rx_buffer),
			   RXTX_BUFFER_SIZE / FFA_PAGE_SIZE);
	if (ret) {
		pr_err("failed to register FFA RxTx buffers\n");
		goto free_pages;
	}

	mutex_init(&drv_info->rx_lock);
	mutex_init(&drv_info->tx_lock);

	ffa_setup_partitions();

	ffa_set_up_mem_ops_native_flag();

	ret = ffa_notifications_setup();
	if (ret)
		goto partitions_cleanup;

	return 0;
partitions_cleanup:
	ffa_partitions_cleanup();
free_pages:
	if (drv_info->tx_buffer)
		free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
	free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
free_drv_info:
	kfree(drv_info);
ffa_bus_exit:
	arm_ffa_bus_exit();
	return ret;
}
subsys_initcall(ffa_init);

static void __exit ffa_exit(void)
{
	ffa_notifications_cleanup();
	ffa_partitions_cleanup();
	ffa_rxtx_unmap(drv_info->vm_id);
	free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
	free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
	xa_destroy(&drv_info->partition_info);
	kfree(drv_info);
	arm_ffa_bus_exit();
}
module_exit(ffa_exit);

MODULE_ALIAS("arm-ffa");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("Arm FF-A interface driver");
MODULE_LICENSE("GPL v2");