1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Arm Firmware Framework for ARMv8-A (FFA) interface driver
4 *
5 * The Arm FFA specification[1] describes a software architecture to
6 * leverage the virtualization extension to isolate software images
7 * provided by an ecosystem of vendors from each other and describes
8 * interfaces that standardize communication between the various software
9 * images including communication between images in the Secure world and
10 * Normal world. Any Hypervisor could use the FFA interfaces to enable
11 * communication between VMs it manages.
12 *
13 * The Hypervisor, a.k.a. the Partition manager in FFA terminology, can assign
14 * system resources (memory regions, devices, CPU cycles) to the partitions
15 * and manage isolation amongst them.
16 *
17 * [1] https://developer.arm.com/docs/den0077/latest
18 *
19 * Copyright (C) 2021 ARM Ltd.
20 */
21
22 #define DRIVER_NAME "ARM FF-A"
23 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
24
25 #include <linux/acpi.h>
26 #include <linux/arm_ffa.h>
27 #include <linux/bitfield.h>
28 #include <linux/cpuhotplug.h>
29 #include <linux/delay.h>
30 #include <linux/device.h>
31 #include <linux/hashtable.h>
32 #include <linux/interrupt.h>
33 #include <linux/io.h>
34 #include <linux/kernel.h>
35 #include <linux/module.h>
36 #include <linux/mm.h>
37 #include <linux/mutex.h>
38 #include <linux/of_irq.h>
39 #include <linux/scatterlist.h>
40 #include <linux/slab.h>
41 #include <linux/smp.h>
42 #include <linux/uuid.h>
43 #include <linux/xarray.h>
44
45 #include "common.h"
46
47 #define FFA_DRIVER_VERSION FFA_VERSION_1_2
48 #define FFA_MIN_VERSION FFA_VERSION_1_0
49
50 #define SENDER_ID_MASK GENMASK(31, 16)
51 #define RECEIVER_ID_MASK GENMASK(15, 0)
52 #define SENDER_ID(x) ((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
53 #define RECEIVER_ID(x) ((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
54 #define PACK_TARGET_INFO(s, r) \
55 (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))
56
57 #define RXTX_MAP_MIN_BUFSZ_MASK GENMASK(1, 0)
58 #define RXTX_MAP_MIN_BUFSZ(x) ((x) & RXTX_MAP_MIN_BUFSZ_MASK)
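/*
 * Minimum RX/TX buffer size encoding returned by FFA_FEATURES for
 * FFA_RXTX_MAP in the low two bits of the interface properties
 * (0: 4K, 1: 64K, 2: 16K, as per the FF-A specification).
 */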
59
60 #define FFA_MAX_NOTIFICATIONS 64
61
62 static ffa_fn *invoke_ffa_fn;
63
64 static const int ffa_linux_errmap[] = {
65 /* better than a switch case as long as the return values are contiguous */
66 0, /* FFA_RET_SUCCESS */
67 -EOPNOTSUPP, /* FFA_RET_NOT_SUPPORTED */
68 -EINVAL, /* FFA_RET_INVALID_PARAMETERS */
69 -ENOMEM, /* FFA_RET_NO_MEMORY */
70 -EBUSY, /* FFA_RET_BUSY */
71 -EINTR, /* FFA_RET_INTERRUPTED */
72 -EACCES, /* FFA_RET_DENIED */
73 -EAGAIN, /* FFA_RET_RETRY */
74 -ECANCELED, /* FFA_RET_ABORTED */
75 -ENODATA, /* FFA_RET_NO_DATA */
76 -EAGAIN, /* FFA_RET_NOT_READY */
77 };
78
79 static inline int ffa_to_linux_errno(int errno)
80 {
81 int err_idx = -errno;
82
83 if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
84 return ffa_linux_errmap[err_idx];
85 return -EINVAL;
86 }
87
88 struct ffa_pcpu_irq {
89 struct ffa_drv_info *info;
90 };
91
92 struct ffa_drv_info {
93 u32 version;
94 u16 vm_id;
95 struct mutex rx_lock; /* lock to protect Rx buffer */
96 struct mutex tx_lock; /* lock to protect Tx buffer */
97 void *rx_buffer;
98 void *tx_buffer;
99 size_t rxtx_bufsz;
100 bool mem_ops_native;
101 bool msg_direct_req2_supp;
102 bool bitmap_created;
103 bool notif_enabled;
104 unsigned int sched_recv_irq;
105 unsigned int notif_pend_irq;
106 unsigned int cpuhp_state;
107 struct ffa_pcpu_irq __percpu *irq_pcpu;
108 struct workqueue_struct *notif_pcpu_wq;
109 struct work_struct notif_pcpu_work;
110 struct work_struct sched_recv_irq_work;
111 struct xarray partition_info;
112 DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
113 rwlock_t notify_lock; /* lock to protect notifier hashtable */
114 };
115
116 static struct ffa_drv_info *drv_info;
117
118 /*
119 * The driver must be able to support all the versions from the earliest
120 * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
121 * The specification states that if firmware supports an FFA implementation
122 * that is incompatible with and at a greater version number than specified
123 * by the caller (FFA_DRIVER_VERSION passed as a parameter to FFA_VERSION),
124 * it must return the NOT_SUPPORTED error code.
125 */
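/*
 * For example, with FFA_DRIVER_VERSION at v1.2: if the firmware reports
 * v1.0 or v1.1, the reported version is used as-is; if it reports v1.3 or
 * any later v1.x, the driver downgrades to its own v1.2 (a newer major
 * version is rejected earlier by ffa_version_check()).
 */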
126 static u32 ffa_compatible_version_find(u32 version)
127 {
128 u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version);
129 u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION);
130 u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION);
131
132 if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
133 return version;
134
135 pr_info("Firmware version higher than driver version, downgrading\n");
136 return FFA_DRIVER_VERSION;
137 }
138
139 static int ffa_version_check(u32 *version)
140 {
141 ffa_value_t ver;
142
143 invoke_ffa_fn((ffa_value_t){
144 .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
145 }, &ver);
146
147 if ((s32)ver.a0 == FFA_RET_NOT_SUPPORTED) {
148 pr_info("FFA_VERSION returned not supported\n");
149 return -EOPNOTSUPP;
150 }
151
152 if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) {
153 pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n",
154 FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
155 FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
156 FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
157 return -EINVAL;
158 }
159
160 if (ver.a0 < FFA_MIN_VERSION) {
161 pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
162 FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
163 FFA_MAJOR_VERSION(FFA_MIN_VERSION),
164 FFA_MINOR_VERSION(FFA_MIN_VERSION));
165 return -EINVAL;
166 }
167
168 pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
169 FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
170 pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0),
171 FFA_MINOR_VERSION(ver.a0));
172 *version = ffa_compatible_version_find(ver.a0);
173
174 return 0;
175 }
176
177 static int ffa_rx_release(void)
178 {
179 ffa_value_t ret;
180
181 invoke_ffa_fn((ffa_value_t){
182 .a0 = FFA_RX_RELEASE,
183 }, &ret);
184
185 if (ret.a0 == FFA_ERROR)
186 return ffa_to_linux_errno((int)ret.a2);
187
188 /* check for ret.a0 == FFA_RX_RELEASE ? */
189
190 return 0;
191 }
192
193 static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
194 {
195 ffa_value_t ret;
196
197 invoke_ffa_fn((ffa_value_t){
198 .a0 = FFA_FN_NATIVE(RXTX_MAP),
199 .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
200 }, &ret);
201
202 if (ret.a0 == FFA_ERROR)
203 return ffa_to_linux_errno((int)ret.a2);
204
205 return 0;
206 }
207
208 static int ffa_rxtx_unmap(u16 vm_id)
209 {
210 ffa_value_t ret;
211
212 invoke_ffa_fn((ffa_value_t){
213 .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
214 }, &ret);
215
216 if (ret.a0 == FFA_ERROR)
217 return ffa_to_linux_errno((int)ret.a2);
218
219 return 0;
220 }
221
222 static int ffa_features(u32 func_feat_id, u32 input_props,
223 u32 *if_props_1, u32 *if_props_2)
224 {
225 ffa_value_t id;
226
227 if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
228 pr_err("%s: Invalid Parameters: %x, %x", __func__,
229 func_feat_id, input_props);
230 return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
231 }
232
233 invoke_ffa_fn((ffa_value_t){
234 .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
235 }, &id);
236
237 if (id.a0 == FFA_ERROR)
238 return ffa_to_linux_errno((int)id.a2);
239
240 if (if_props_1)
241 *if_props_1 = id.a2;
242 if (if_props_2)
243 *if_props_2 = id.a3;
244
245 return 0;
246 }
247
248 #define PARTITION_INFO_GET_RETURN_COUNT_ONLY BIT(0)
249
250 /* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
251 static int
252 __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
253 struct ffa_partition_info *buffer, int num_partitions)
254 {
255 int idx, count, flags = 0, sz, buf_sz;
256 ffa_value_t partition_info;
257
258 if (drv_info->version > FFA_VERSION_1_0 &&
259 (!buffer || !num_partitions)) /* Just get the count for now */
260 flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;
261
262 mutex_lock(&drv_info->rx_lock);
263 invoke_ffa_fn((ffa_value_t){
264 .a0 = FFA_PARTITION_INFO_GET,
265 .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
266 .a5 = flags,
267 }, &partition_info);
268
269 if (partition_info.a0 == FFA_ERROR) {
270 mutex_unlock(&drv_info->rx_lock);
271 return ffa_to_linux_errno((int)partition_info.a2);
272 }
273
274 count = partition_info.a2;
275
276 if (drv_info->version > FFA_VERSION_1_0) {
277 buf_sz = sz = partition_info.a3;
278 if (sz > sizeof(*buffer))
279 buf_sz = sizeof(*buffer);
280 } else {
281 /* FFA_VERSION_1_0 lacks size in the response */
282 buf_sz = sz = 8;
283 }
284
285 if (buffer && count <= num_partitions)
286 for (idx = 0; idx < count; idx++) {
287 struct ffa_partition_info_le {
288 __le16 id;
289 __le16 exec_ctxt;
290 __le32 properties;
291 uuid_t uuid;
292 } *rx_buf = drv_info->rx_buffer + idx * sz;
293 struct ffa_partition_info *buf = buffer + idx;
294
295 buf->id = le16_to_cpu(rx_buf->id);
296 buf->exec_ctxt = le16_to_cpu(rx_buf->exec_ctxt);
297 buf->properties = le32_to_cpu(rx_buf->properties);
298 if (buf_sz > 8)
299 import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid);
300 }
301
302 if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY))
303 ffa_rx_release();
304
305 mutex_unlock(&drv_info->rx_lock);
306
307 return count;
308 }
309
310 #define LAST_INDEX_MASK GENMASK(15, 0)
311 #define CURRENT_INDEX_MASK GENMASK(31, 16)
312 #define UUID_INFO_TAG_MASK GENMASK(47, 32)
313 #define PARTITION_INFO_SZ_MASK GENMASK(63, 48)
314 #define PARTITION_COUNT(x) ((u16)(FIELD_GET(LAST_INDEX_MASK, (x))) + 1)
315 #define CURRENT_INDEX(x) ((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x))))
316 #define UUID_INFO_TAG(x) ((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x))))
317 #define PARTITION_INFO_SZ(x) ((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x))))
318 #define PART_INFO_ID_MASK GENMASK(15, 0)
319 #define PART_INFO_EXEC_CXT_MASK GENMASK(31, 16)
320 #define PART_INFO_PROPS_MASK GENMASK(63, 32)
321 #define PART_INFO_ID(x) ((u16)(FIELD_GET(PART_INFO_ID_MASK, (x))))
322 #define PART_INFO_EXEC_CXT(x) ((u16)(FIELD_GET(PART_INFO_EXEC_CXT_MASK, (x))))
323 #define PART_INFO_PROPERTIES(x) ((u32)(FIELD_GET(PART_INFO_PROPS_MASK, (x))))
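/*
 * FFA_PARTITION_INFO_GET_REGS returns the descriptors packed in registers:
 * a2 carries the last/current index, the UUID info tag and the per-entry
 * size, while each partition entry occupies three 64-bit registers starting
 * at a3 - the first packs {id, exec context count, properties} as decoded
 * by the PART_INFO_* macros above, the next two hold the 128-bit UUID.
 */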
324 static int
325 __ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
326 struct ffa_partition_info *buffer, int num_parts)
327 {
328 u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0;
329 struct ffa_partition_info *buf = buffer;
330 ffa_value_t partition_info;
331
332 do {
333 __le64 *regs;
334 int idx;
335
336 start_idx = prev_idx ? prev_idx + 1 : 0;
337
338 invoke_ffa_fn((ffa_value_t){
339 .a0 = FFA_PARTITION_INFO_GET_REGS,
340 .a1 = (u64)uuid1 << 32 | uuid0,
341 .a2 = (u64)uuid3 << 32 | uuid2,
342 .a3 = start_idx | tag << 16,
343 }, &partition_info);
344
345 if (partition_info.a0 == FFA_ERROR)
346 return ffa_to_linux_errno((int)partition_info.a2);
347
348 if (!count)
349 count = PARTITION_COUNT(partition_info.a2);
350 if (!buffer || !num_parts) /* count only */
351 return count;
352
353 cur_idx = CURRENT_INDEX(partition_info.a2);
354 tag = UUID_INFO_TAG(partition_info.a2);
355 buf_sz = PARTITION_INFO_SZ(partition_info.a2);
356 if (buf_sz > sizeof(*buffer))
357 buf_sz = sizeof(*buffer);
358
359 regs = (void *)&partition_info.a3;
360 for (idx = 0; idx < cur_idx - start_idx + 1; idx++, buf++) {
361 union {
362 uuid_t uuid;
363 u64 regs[2];
364 } uuid_regs = {
365 .regs = {
366 le64_to_cpu(*(regs + 1)),
367 le64_to_cpu(*(regs + 2)),
368 }
369 };
370 u64 val = *(u64 *)regs;
371
372 buf->id = PART_INFO_ID(val);
373 buf->exec_ctxt = PART_INFO_EXEC_CXT(val);
374 buf->properties = PART_INFO_PROPERTIES(val);
375 uuid_copy(&buf->uuid, &uuid_regs.uuid);
376 regs += 3;
377 }
378 prev_idx = cur_idx;
379
380 } while (cur_idx < (count - 1));
381
382 return count;
383 }
384
385 /* buffer is allocated and the caller must free it if the returned count > 0 */
386 static int
387 ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
388 {
389 int count;
390 u32 uuid0_4[4];
391 bool reg_mode = false;
392 struct ffa_partition_info *pbuf;
393
394 if (!ffa_features(FFA_PARTITION_INFO_GET_REGS, 0, NULL, NULL))
395 reg_mode = true;
396
397 export_uuid((u8 *)uuid0_4, uuid);
398 if (reg_mode)
399 count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
400 uuid0_4[2], uuid0_4[3],
401 NULL, 0);
402 else
403 count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
404 uuid0_4[2], uuid0_4[3],
405 NULL, 0);
406 if (count <= 0)
407 return count;
408
409 pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
410 if (!pbuf)
411 return -ENOMEM;
412
413 if (reg_mode)
414 count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
415 uuid0_4[2], uuid0_4[3],
416 pbuf, count);
417 else
418 count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
419 uuid0_4[2], uuid0_4[3],
420 pbuf, count);
421 if (count <= 0)
422 kfree(pbuf);
423 else
424 *buffer = pbuf;
425
426 return count;
427 }
428
429 #define VM_ID_MASK GENMASK(15, 0)
430 static int ffa_id_get(u16 *vm_id)
431 {
432 ffa_value_t id;
433
434 invoke_ffa_fn((ffa_value_t){
435 .a0 = FFA_ID_GET,
436 }, &id);
437
438 if (id.a0 == FFA_ERROR)
439 return ffa_to_linux_errno((int)id.a2);
440
441 *vm_id = FIELD_GET(VM_ID_MASK, (id.a2));
442
443 return 0;
444 }
445
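/*
 * A direct request can complete with FFA_INTERRUPT (target pre-empted) or
 * FFA_YIELD (target voluntarily yielded); in both cases FFA_RUN with the
 * target information returned in a1 resumes it, sleeping roughly 1ms after
 * a yield before retrying.
 */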
446 static inline void ffa_msg_send_wait_for_completion(ffa_value_t *ret)
447 {
448 while (ret->a0 == FFA_INTERRUPT || ret->a0 == FFA_YIELD) {
449 if (ret->a0 == FFA_YIELD)
450 fsleep(1000);
451
452 invoke_ffa_fn((ffa_value_t){
453 .a0 = FFA_RUN, .a1 = ret->a1,
454 }, ret);
455 }
456 }
457
458 static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
459 struct ffa_send_direct_data *data)
460 {
461 u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
462 ffa_value_t ret;
463
464 if (mode_32bit) {
465 req_id = FFA_MSG_SEND_DIRECT_REQ;
466 resp_id = FFA_MSG_SEND_DIRECT_RESP;
467 } else {
468 req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
469 resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
470 }
471
472 invoke_ffa_fn((ffa_value_t){
473 .a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
474 .a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
475 .a6 = data->data3, .a7 = data->data4,
476 }, &ret);
477
478 ffa_msg_send_wait_for_completion(&ret);
479
480 if (ret.a0 == FFA_ERROR)
481 return ffa_to_linux_errno((int)ret.a2);
482
483 if (ret.a0 == resp_id) {
484 data->data0 = ret.a3;
485 data->data1 = ret.a4;
486 data->data2 = ret.a5;
487 data->data3 = ret.a6;
488 data->data4 = ret.a7;
489 return 0;
490 }
491
492 return -EINVAL;
493 }
494
495 static int ffa_msg_send2(struct ffa_device *dev, u16 src_id, void *buf, size_t sz)
496 {
497 u32 src_dst_ids = PACK_TARGET_INFO(src_id, dev->vm_id);
498 struct ffa_indirect_msg_hdr *msg;
499 ffa_value_t ret;
500 int retval = 0;
501
502 if (sz > (drv_info->rxtx_bufsz - sizeof(*msg)))
503 return -ERANGE;
504
505 mutex_lock(&drv_info->tx_lock);
506
507 msg = drv_info->tx_buffer;
508 msg->flags = 0;
509 msg->res0 = 0;
510 msg->offset = sizeof(*msg);
511 msg->send_recv_id = src_dst_ids;
512 msg->size = sz;
513 uuid_copy(&msg->uuid, &dev->uuid);
514 memcpy((u8 *)msg + msg->offset, buf, sz);
515
516 /* flags = 0, sender VMID = 0 works for both physical/virtual NS */
517 invoke_ffa_fn((ffa_value_t){
518 .a0 = FFA_MSG_SEND2, .a1 = 0, .a2 = 0
519 }, &ret);
520
521 if (ret.a0 == FFA_ERROR)
522 retval = ffa_to_linux_errno((int)ret.a2);
523
524 mutex_unlock(&drv_info->tx_lock);
525 return retval;
526 }
527
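/*
 * FFA_MSG_SEND_DIRECT_REQ2 carries the target service UUID in a2/a3 as two
 * little-endian 64-bit halves and copies the payload into the argument
 * registers from a4 onwards; the response payload is read back from the
 * same register range on FFA_MSG_SEND_DIRECT_RESP2.
 */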
528 static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid,
529 struct ffa_send_direct_data2 *data)
530 {
531 u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
532 union {
533 uuid_t uuid;
534 __le64 regs[2];
535 } uuid_regs = { .uuid = *uuid };
536 ffa_value_t ret, args = {
537 .a0 = FFA_MSG_SEND_DIRECT_REQ2,
538 .a1 = src_dst_ids,
539 .a2 = le64_to_cpu(uuid_regs.regs[0]),
540 .a3 = le64_to_cpu(uuid_regs.regs[1]),
541 };
542 memcpy((void *)&args + offsetof(ffa_value_t, a4), data, sizeof(*data));
543
544 invoke_ffa_fn(args, &ret);
545
546 ffa_msg_send_wait_for_completion(&ret);
547
548 if (ret.a0 == FFA_ERROR)
549 return ffa_to_linux_errno((int)ret.a2);
550
551 if (ret.a0 == FFA_MSG_SEND_DIRECT_RESP2) {
552 memcpy(data, (void *)&ret + offsetof(ffa_value_t, a4), sizeof(*data));
553 return 0;
554 }
555
556 return -EINVAL;
557 }
558
559 static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
560 u32 frag_len, u32 len, u64 *handle)
561 {
562 ffa_value_t ret;
563
564 invoke_ffa_fn((ffa_value_t){
565 .a0 = func_id, .a1 = len, .a2 = frag_len,
566 .a3 = buf, .a4 = buf_sz,
567 }, &ret);
568
569 while (ret.a0 == FFA_MEM_OP_PAUSE)
570 invoke_ffa_fn((ffa_value_t){
571 .a0 = FFA_MEM_OP_RESUME,
572 .a1 = ret.a1, .a2 = ret.a2,
573 }, &ret);
574
575 if (ret.a0 == FFA_ERROR)
576 return ffa_to_linux_errno((int)ret.a2);
577
578 if (ret.a0 == FFA_SUCCESS) {
579 if (handle)
580 *handle = PACK_HANDLE(ret.a2, ret.a3);
581 } else if (ret.a0 == FFA_MEM_FRAG_RX) {
582 if (handle)
583 *handle = PACK_HANDLE(ret.a1, ret.a2);
584 } else {
585 return -EOPNOTSUPP;
586 }
587
588 return frag_len;
589 }
590
591 static int ffa_mem_next_frag(u64 handle, u32 frag_len)
592 {
593 ffa_value_t ret;
594
595 invoke_ffa_fn((ffa_value_t){
596 .a0 = FFA_MEM_FRAG_TX,
597 .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
598 .a3 = frag_len,
599 }, &ret);
600
601 while (ret.a0 == FFA_MEM_OP_PAUSE)
602 invoke_ffa_fn((ffa_value_t){
603 .a0 = FFA_MEM_OP_RESUME,
604 .a1 = ret.a1, .a2 = ret.a2,
605 }, &ret);
606
607 if (ret.a0 == FFA_ERROR)
608 return ffa_to_linux_errno((int)ret.a2);
609
610 if (ret.a0 == FFA_MEM_FRAG_RX)
611 return ret.a3;
612 else if (ret.a0 == FFA_SUCCESS)
613 return 0;
614
615 return -EOPNOTSUPP;
616 }
617
618 static int
619 ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
620 u32 len, u64 *handle, bool first)
621 {
622 if (!first)
623 return ffa_mem_next_frag(*handle, frag_len);
624
625 return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
626 }
627
628 static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
629 {
630 u32 num_pages = 0;
631
632 do {
633 num_pages += sg->length / FFA_PAGE_SIZE;
634 } while ((sg = sg_next(sg)));
635
636 return num_pages;
637 }
638
639 static u16 ffa_memory_attributes_get(u32 func_id)
640 {
641 /*
642 * For the memory lend or donate operation, if the receiver is a PE or
643 * a proxy endpoint, the owner/sender must not specify the attributes
644 */
645 if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
646 func_id == FFA_MEM_LEND)
647 return 0;
648
649 return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
650 }
651
652 static void ffa_emad_impdef_value_init(u32 version, void *dst, void *src)
653 {
654 struct ffa_mem_region_attributes *ep_mem_access;
655
656 if (FFA_EMAD_HAS_IMPDEF_FIELD(version))
657 memcpy(dst, src, sizeof(ep_mem_access->impdef_val));
658 }
659
660 static void
661 ffa_mem_region_additional_setup(u32 version, struct ffa_mem_region *mem_region)
662 {
663 if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version)) {
664 mem_region->ep_mem_size = 0;
665 } else {
666 mem_region->ep_mem_size = ffa_emad_size_get(version);
667 mem_region->ep_mem_offset = sizeof(*mem_region);
668 memset(mem_region->reserved, 0, 12);
669 }
670 }
671
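/*
 * Builds the memory transaction descriptor in 'buffer': the ffa_mem_region
 * header, one memory access descriptor per receiver (all pointing at the
 * same composite offset), the composite descriptor and then the constituent
 * address ranges taken from the scatterlist. If the constituents do not fit
 * within max_fragsize, the descriptor is transmitted in fragments via
 * FFA_MEM_FRAG_TX.
 */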
672 static int
673 ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
674 struct ffa_mem_ops_args *args)
675 {
676 int rc = 0;
677 bool first = true;
678 u32 composite_offset;
679 phys_addr_t addr = 0;
680 struct ffa_mem_region *mem_region = buffer;
681 struct ffa_composite_mem_region *composite;
682 struct ffa_mem_region_addr_range *constituents;
683 struct ffa_mem_region_attributes *ep_mem_access;
684 u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);
685
686 mem_region->tag = args->tag;
687 mem_region->flags = args->flags;
688 mem_region->sender_id = drv_info->vm_id;
689 mem_region->attributes = ffa_memory_attributes_get(func_id);
690 composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
691 drv_info->version);
692
693 for (idx = 0; idx < args->nattrs; idx++) {
694 ep_mem_access = buffer +
695 ffa_mem_desc_offset(buffer, idx, drv_info->version);
696 ep_mem_access->receiver = args->attrs[idx].receiver;
697 ep_mem_access->attrs = args->attrs[idx].attrs;
698 ep_mem_access->composite_off = composite_offset;
699 ep_mem_access->flag = 0;
700 ep_mem_access->reserved = 0;
701 ffa_emad_impdef_value_init(drv_info->version,
702 ep_mem_access->impdef_val,
703 args->attrs[idx].impdef_val);
704 }
705 mem_region->handle = 0;
706 mem_region->ep_count = args->nattrs;
707 ffa_mem_region_additional_setup(drv_info->version, mem_region);
708
709 composite = buffer + composite_offset;
710 composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
711 composite->addr_range_cnt = num_entries;
712 composite->reserved = 0;
713
714 length = composite_offset + CONSTITUENTS_OFFSET(num_entries);
715 frag_len = composite_offset + CONSTITUENTS_OFFSET(0);
716 if (frag_len > max_fragsize)
717 return -ENXIO;
718
719 if (!args->use_txbuf) {
720 addr = virt_to_phys(buffer);
721 buf_sz = max_fragsize / FFA_PAGE_SIZE;
722 }
723
724 constituents = buffer + frag_len;
725 idx = 0;
726 do {
727 if (frag_len == max_fragsize) {
728 rc = ffa_transmit_fragment(func_id, addr, buf_sz,
729 frag_len, length,
730 &args->g_handle, first);
731 if (rc < 0)
732 return -ENXIO;
733
734 first = false;
735 idx = 0;
736 frag_len = 0;
737 constituents = buffer;
738 }
739
740 if ((void *)constituents - buffer > max_fragsize) {
741 pr_err("Memory Region Fragment > Tx Buffer size\n");
742 return -EFAULT;
743 }
744
745 constituents->address = sg_phys(args->sg);
746 constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
747 constituents->reserved = 0;
748 constituents++;
749 frag_len += sizeof(struct ffa_mem_region_addr_range);
750 } while ((args->sg = sg_next(args->sg)));
751
752 return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
753 length, &args->g_handle, first);
754 }
755
756 static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
757 {
758 int ret;
759 void *buffer;
760 size_t rxtx_bufsz = drv_info->rxtx_bufsz;
761
762 if (!args->use_txbuf) {
763 buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
764 if (!buffer)
765 return -ENOMEM;
766 } else {
767 buffer = drv_info->tx_buffer;
768 mutex_lock(&drv_info->tx_lock);
769 }
770
771 ret = ffa_setup_and_transmit(func_id, buffer, rxtx_bufsz, args);
772
773 if (args->use_txbuf)
774 mutex_unlock(&drv_info->tx_lock);
775 else
776 free_pages_exact(buffer, rxtx_bufsz);
777
778 return ret < 0 ? ret : 0;
779 }
780
781 static int ffa_memory_reclaim(u64 g_handle, u32 flags)
782 {
783 ffa_value_t ret;
784
785 invoke_ffa_fn((ffa_value_t){
786 .a0 = FFA_MEM_RECLAIM,
787 .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
788 .a3 = flags,
789 }, &ret);
790
791 if (ret.a0 == FFA_ERROR)
792 return ffa_to_linux_errno((int)ret.a2);
793
794 return 0;
795 }
796
797 static int ffa_notification_bitmap_create(void)
798 {
799 ffa_value_t ret;
800 u16 vcpu_count = nr_cpu_ids;
801
802 invoke_ffa_fn((ffa_value_t){
803 .a0 = FFA_NOTIFICATION_BITMAP_CREATE,
804 .a1 = drv_info->vm_id, .a2 = vcpu_count,
805 }, &ret);
806
807 if (ret.a0 == FFA_ERROR)
808 return ffa_to_linux_errno((int)ret.a2);
809
810 return 0;
811 }
812
813 static int ffa_notification_bitmap_destroy(void)
814 {
815 ffa_value_t ret;
816
817 invoke_ffa_fn((ffa_value_t){
818 .a0 = FFA_NOTIFICATION_BITMAP_DESTROY,
819 .a1 = drv_info->vm_id,
820 }, &ret);
821
822 if (ret.a0 == FFA_ERROR)
823 return ffa_to_linux_errno((int)ret.a2);
824
825 return 0;
826 }
827
828 enum notify_type {
829 SECURE_PARTITION,
830 NON_SECURE_VM,
831 SPM_FRAMEWORK,
832 NS_HYP_FRAMEWORK,
833 };
834
835 #define NOTIFICATION_LOW_MASK GENMASK(31, 0)
836 #define NOTIFICATION_HIGH_MASK GENMASK(63, 32)
837 #define NOTIFICATION_BITMAP_HIGH(x) \
838 ((u32)(FIELD_GET(NOTIFICATION_HIGH_MASK, (x))))
839 #define NOTIFICATION_BITMAP_LOW(x) \
840 ((u32)(FIELD_GET(NOTIFICATION_LOW_MASK, (x))))
841 #define PACK_NOTIFICATION_BITMAP(low, high) \
842 (FIELD_PREP(NOTIFICATION_LOW_MASK, (low)) | \
843 FIELD_PREP(NOTIFICATION_HIGH_MASK, (high)))
844
845 #define RECEIVER_VCPU_MASK GENMASK(31, 16)
846 #define PACK_NOTIFICATION_GET_RECEIVER_INFO(vcpu_r, r) \
847 (FIELD_PREP(RECEIVER_VCPU_MASK, (vcpu_r)) | \
848 FIELD_PREP(RECEIVER_ID_MASK, (r)))
849
850 #define NOTIFICATION_INFO_GET_MORE_PEND_MASK BIT(0)
851 #define NOTIFICATION_INFO_GET_ID_COUNT GENMASK(11, 7)
852 #define ID_LIST_MASK_64 GENMASK(51, 12)
853 #define ID_LIST_MASK_32 GENMASK(31, 12)
854 #define MAX_IDS_64 20
855 #define MAX_IDS_32 10
856
857 #define PER_VCPU_NOTIFICATION_FLAG BIT(0)
858 #define SECURE_PARTITION_BITMAP_ENABLE BIT(SECURE_PARTITION)
859 #define NON_SECURE_VM_BITMAP_ENABLE BIT(NON_SECURE_VM)
860 #define SPM_FRAMEWORK_BITMAP_ENABLE BIT(SPM_FRAMEWORK)
861 #define NS_HYP_FRAMEWORK_BITMAP_ENABLE BIT(NS_HYP_FRAMEWORK)
862 #define FFA_BITMAP_SECURE_ENABLE_MASK \
863 (SECURE_PARTITION_BITMAP_ENABLE | SPM_FRAMEWORK_BITMAP_ENABLE)
864 #define FFA_BITMAP_NS_ENABLE_MASK \
865 (NON_SECURE_VM_BITMAP_ENABLE | NS_HYP_FRAMEWORK_BITMAP_ENABLE)
866 #define FFA_BITMAP_ALL_ENABLE_MASK \
867 (FFA_BITMAP_SECURE_ENABLE_MASK | FFA_BITMAP_NS_ENABLE_MASK)
868
869 #define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
870
871 #define SPM_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_LOW(x)
872 #define NS_HYP_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_HIGH(x)
873 #define FRAMEWORK_NOTIFY_RX_BUFFER_FULL BIT(0)
874
875 static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
876 u32 flags, bool is_bind)
877 {
878 ffa_value_t ret;
879 u32 func, src_dst_ids = PACK_TARGET_INFO(dst_id, drv_info->vm_id);
880
881 func = is_bind ? FFA_NOTIFICATION_BIND : FFA_NOTIFICATION_UNBIND;
882
883 invoke_ffa_fn((ffa_value_t){
884 .a0 = func, .a1 = src_dst_ids, .a2 = flags,
885 .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
886 .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
887 }, &ret);
888
889 if (ret.a0 == FFA_ERROR)
890 return ffa_to_linux_errno((int)ret.a2);
891 else if (ret.a0 != FFA_SUCCESS)
892 return -EINVAL;
893
894 return 0;
895 }
896
897 static
898 int ffa_notification_set(u16 src_id, u16 dst_id, u32 flags, u64 bitmap)
899 {
900 ffa_value_t ret;
901 u32 src_dst_ids = PACK_TARGET_INFO(dst_id, src_id);
902
903 invoke_ffa_fn((ffa_value_t) {
904 .a0 = FFA_NOTIFICATION_SET, .a1 = src_dst_ids, .a2 = flags,
905 .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
906 .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
907 }, &ret);
908
909 if (ret.a0 == FFA_ERROR)
910 return ffa_to_linux_errno((int)ret.a2);
911 else if (ret.a0 != FFA_SUCCESS)
912 return -EINVAL;
913
914 return 0;
915 }
916
917 struct ffa_notify_bitmaps {
918 u64 sp_map;
919 u64 vm_map;
920 u64 arch_map;
921 };
922
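/*
 * FFA_NOTIFICATION_GET returns the pending bitmaps in the result registers:
 * the SP bitmap in a2/a3, the VM bitmap in a4/a5 and the framework bitmaps
 * (SPM in a6, hypervisor in a7) which are folded into arch_map below.
 */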
923 static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
924 {
925 ffa_value_t ret;
926 u16 src_id = drv_info->vm_id;
927 u16 cpu_id = smp_processor_id();
928 u32 rec_vcpu_ids = PACK_NOTIFICATION_GET_RECEIVER_INFO(cpu_id, src_id);
929
930 invoke_ffa_fn((ffa_value_t){
931 .a0 = FFA_NOTIFICATION_GET, .a1 = rec_vcpu_ids, .a2 = flags,
932 }, &ret);
933
934 if (ret.a0 == FFA_ERROR)
935 return ffa_to_linux_errno((int)ret.a2);
936 else if (ret.a0 != FFA_SUCCESS)
937 return -EINVAL; /* Something else went wrong. */
938
939 if (flags & SECURE_PARTITION_BITMAP_ENABLE)
940 notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
941 if (flags & NON_SECURE_VM_BITMAP_ENABLE)
942 notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
943 if (flags & SPM_FRAMEWORK_BITMAP_ENABLE)
944 notify->arch_map = SPM_FRAMEWORK_BITMAP(ret.a6);
945 if (flags & NS_HYP_FRAMEWORK_BITMAP_ENABLE)
946 notify->arch_map = PACK_NOTIFICATION_BITMAP(notify->arch_map,
947 ret.a7);
948
949 return 0;
950 }
951
952 struct ffa_dev_part_info {
953 ffa_sched_recv_cb callback;
954 void *cb_data;
955 rwlock_t rw_lock;
956 struct ffa_device *dev;
957 struct list_head node;
958 };
959
960 static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
961 {
962 struct ffa_dev_part_info *partition = NULL, *tmp;
963 ffa_sched_recv_cb callback;
964 struct list_head *phead;
965 void *cb_data;
966
967 phead = xa_load(&drv_info->partition_info, part_id);
968 if (!phead) {
969 pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
970 return;
971 }
972
973 list_for_each_entry_safe(partition, tmp, phead, node) {
974 read_lock(&partition->rw_lock);
975 callback = partition->callback;
976 cb_data = partition->cb_data;
977 read_unlock(&partition->rw_lock);
978
979 if (callback)
980 callback(vcpu, is_per_vcpu, cb_data);
981 }
982 }
983
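/*
 * FFA_NOTIFICATION_INFO_GET returns a packed list of endpoint/vCPU IDs with
 * pending notifications: a2 holds the list count and, for each list, a 2-bit
 * ID count; the IDs themselves are packed as 16-bit values from a3 onwards.
 * A list with a single ID denotes a global notification for that partition,
 * otherwise the partition ID is followed by the vCPU IDs with per-vCPU
 * notifications pending. The MORE_PENDING flag requests another iteration.
 */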
984 static void ffa_notification_info_get(void)
985 {
986 int idx, list, max_ids, lists_cnt, ids_processed, ids_count[MAX_IDS_64];
987 bool is_64b_resp;
988 ffa_value_t ret;
989 u64 id_list;
990
991 do {
992 invoke_ffa_fn((ffa_value_t){
993 .a0 = FFA_FN_NATIVE(NOTIFICATION_INFO_GET),
994 }, &ret);
995
996 if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
997 if ((s32)ret.a2 != FFA_RET_NO_DATA)
998 pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
999 ret.a0, ret.a2);
1000 return;
1001 }
1002
1003 is_64b_resp = (ret.a0 == FFA_FN64_SUCCESS);
1004
1005 ids_processed = 0;
1006 lists_cnt = FIELD_GET(NOTIFICATION_INFO_GET_ID_COUNT, ret.a2);
1007 if (is_64b_resp) {
1008 max_ids = MAX_IDS_64;
1009 id_list = FIELD_GET(ID_LIST_MASK_64, ret.a2);
1010 } else {
1011 max_ids = MAX_IDS_32;
1012 id_list = FIELD_GET(ID_LIST_MASK_32, ret.a2);
1013 }
1014
1015 for (idx = 0; idx < lists_cnt; idx++, id_list >>= 2)
1016 ids_count[idx] = (id_list & 0x3) + 1;
1017
1018 /* Process IDs */
1019 for (list = 0; list < lists_cnt; list++) {
1020 u16 vcpu_id, part_id, *packed_id_list = (u16 *)&ret.a3;
1021
1022 if (ids_processed >= max_ids - 1)
1023 break;
1024
1025 part_id = packed_id_list[ids_processed++];
1026
1027 if (ids_count[list] == 1) { /* Global Notification */
1028 __do_sched_recv_cb(part_id, 0, false);
1029 continue;
1030 }
1031
1032 /* Per vCPU Notification */
1033 for (idx = 1; idx < ids_count[list]; idx++) {
1034 if (ids_processed >= max_ids - 1)
1035 break;
1036
1037 vcpu_id = packed_id_list[ids_processed++];
1038
1039 __do_sched_recv_cb(part_id, vcpu_id, true);
1040 }
1041 }
1042 } while (ret.a2 & NOTIFICATION_INFO_GET_MORE_PEND_MASK);
1043 }
1044
1045 static int ffa_run(struct ffa_device *dev, u16 vcpu)
1046 {
1047 ffa_value_t ret;
1048 u32 target = dev->vm_id << 16 | vcpu;
1049
1050 invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = target, }, &ret);
1051
1052 while (ret.a0 == FFA_INTERRUPT)
1053 invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = ret.a1, },
1054 &ret);
1055
1056 if (ret.a0 == FFA_ERROR)
1057 return ffa_to_linux_errno((int)ret.a2);
1058
1059 return 0;
1060 }
1061
1062 static void ffa_drvinfo_flags_init(void)
1063 {
1064 if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
1065 !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
1066 drv_info->mem_ops_native = true;
1067
1068 if (!ffa_features(FFA_MSG_SEND_DIRECT_REQ2, 0, NULL, NULL) ||
1069 !ffa_features(FFA_MSG_SEND_DIRECT_RESP2, 0, NULL, NULL))
1070 drv_info->msg_direct_req2_supp = true;
1071 }
1072
1073 static u32 ffa_api_version_get(void)
1074 {
1075 return drv_info->version;
1076 }
1077
1078 static int ffa_partition_info_get(const char *uuid_str,
1079 struct ffa_partition_info *buffer)
1080 {
1081 int count;
1082 uuid_t uuid;
1083 struct ffa_partition_info *pbuf;
1084
1085 if (uuid_parse(uuid_str, &uuid)) {
1086 pr_err("invalid uuid (%s)\n", uuid_str);
1087 return -ENODEV;
1088 }
1089
1090 count = ffa_partition_probe(&uuid, &pbuf);
1091 if (count <= 0)
1092 return -ENOENT;
1093
1094 memcpy(buffer, pbuf, sizeof(*pbuf) * count);
1095 kfree(pbuf);
1096 return 0;
1097 }
1098
1099 static void ffa_mode_32bit_set(struct ffa_device *dev)
1100 {
1101 dev->mode_32bit = true;
1102 }
1103
1104 static int ffa_sync_send_receive(struct ffa_device *dev,
1105 struct ffa_send_direct_data *data)
1106 {
1107 return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
1108 dev->mode_32bit, data);
1109 }
1110
1111 static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
1112 {
1113 return ffa_msg_send2(dev, drv_info->vm_id, buf, sz);
1114 }
1115
1116 static int ffa_sync_send_receive2(struct ffa_device *dev,
1117 struct ffa_send_direct_data2 *data)
1118 {
1119 if (!drv_info->msg_direct_req2_supp)
1120 return -EOPNOTSUPP;
1121
1122 return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id,
1123 &dev->uuid, data);
1124 }
1125
1126 static int ffa_memory_share(struct ffa_mem_ops_args *args)
1127 {
1128 if (drv_info->mem_ops_native)
1129 return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
1130
1131 return ffa_memory_ops(FFA_MEM_SHARE, args);
1132 }
1133
1134 static int ffa_memory_lend(struct ffa_mem_ops_args *args)
1135 {
1136 /* Note that upon a successful MEM_LEND request the caller
1137 * must ensure that the memory region specified is not accessed
1138 * until a successful MEM_RECLAIM call has been made.
1139 * On systems with a hypervisor present this will be enforced,
1140 * however on systems without a hypervisor the responsibility
1141 * falls to the calling kernel driver to prevent access.
1142 */
1143 if (drv_info->mem_ops_native)
1144 return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
1145
1146 return ffa_memory_ops(FFA_MEM_LEND, args);
1147 }
1148
1149 #define ffa_notifications_disabled() (!drv_info->notif_enabled)
1150
1151 struct notifier_cb_info {
1152 struct hlist_node hnode;
1153 struct ffa_device *dev;
1154 ffa_fwk_notifier_cb fwk_cb;
1155 ffa_notifier_cb cb;
1156 void *cb_data;
1157 };
1158
1159 static int
1160 ffa_sched_recv_cb_update(struct ffa_device *dev, ffa_sched_recv_cb callback,
1161 void *cb_data, bool is_registration)
1162 {
1163 struct ffa_dev_part_info *partition = NULL, *tmp;
1164 struct list_head *phead;
1165 bool cb_valid;
1166
1167 if (ffa_notifications_disabled())
1168 return -EOPNOTSUPP;
1169
1170 phead = xa_load(&drv_info->partition_info, dev->vm_id);
1171 if (!phead) {
1172 pr_err("%s: Invalid partition ID 0x%x\n", __func__, dev->vm_id);
1173 return -EINVAL;
1174 }
1175
1176 list_for_each_entry_safe(partition, tmp, phead, node)
1177 if (partition->dev == dev)
1178 break;
1179
1180 if (!partition) {
1181 pr_err("%s: No such partition ID 0x%x\n", __func__, dev->vm_id);
1182 return -EINVAL;
1183 }
1184
1185 write_lock(&partition->rw_lock);
1186
1187 cb_valid = !!partition->callback;
1188 if (!(is_registration ^ cb_valid)) {
1189 write_unlock(&partition->rw_lock);
1190 return -EINVAL;
1191 }
1192
1193 partition->callback = callback;
1194 partition->cb_data = cb_data;
1195
1196 write_unlock(&partition->rw_lock);
1197 return 0;
1198 }
1199
1200 static int ffa_sched_recv_cb_register(struct ffa_device *dev,
1201 ffa_sched_recv_cb cb, void *cb_data)
1202 {
1203 return ffa_sched_recv_cb_update(dev, cb, cb_data, true);
1204 }
1205
1206 static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
1207 {
1208 return ffa_sched_recv_cb_update(dev, NULL, NULL, false);
1209 }
1210
1211 static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
1212 {
1213 return ffa_notification_bind_common(dst_id, bitmap, flags, true);
1214 }
1215
1216 static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
1217 {
1218 return ffa_notification_bind_common(dst_id, bitmap, 0, false);
1219 }
1220
1221 static enum notify_type ffa_notify_type_get(u16 vm_id)
1222 {
1223 if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
1224 return SECURE_PARTITION;
1225 else
1226 return NON_SECURE_VM;
1227 }
1228
1229 /* notifier_hnode_get* should be called with notify_lock held */
1230 static struct notifier_cb_info *
1231 notifier_hnode_get_by_vmid(u16 notify_id, int vmid)
1232 {
1233 struct notifier_cb_info *node;
1234
1235 hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
1236 if (node->fwk_cb && vmid == node->dev->vm_id)
1237 return node;
1238
1239 return NULL;
1240 }
1241
1242 static struct notifier_cb_info *
1243 notifier_hnode_get_by_vmid_uuid(u16 notify_id, int vmid, const uuid_t *uuid)
1244 {
1245 struct notifier_cb_info *node;
1246
1247 if (uuid_is_null(uuid))
1248 return notifier_hnode_get_by_vmid(notify_id, vmid);
1249
1250 hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
1251 if (node->fwk_cb && vmid == node->dev->vm_id &&
1252 uuid_equal(&node->dev->uuid, uuid))
1253 return node;
1254
1255 return NULL;
1256 }
1257
1258 static struct notifier_cb_info *
1259 notifier_hnode_get_by_type(u16 notify_id, enum notify_type type)
1260 {
1261 struct notifier_cb_info *node;
1262
1263 hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
1264 if (node->cb && type == ffa_notify_type_get(node->dev->vm_id))
1265 return node;
1266
1267 return NULL;
1268 }
1269
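/*
 * Registration passes a pre-allocated notifier_cb_info (cb != NULL) and must
 * not find an existing entry; deregistration passes cb == NULL and must find
 * one - hence the XOR check below. Callers hold notify_lock for writing.
 */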
1270 static int update_notifier_cb(struct ffa_device *dev, int notify_id,
1271 struct notifier_cb_info *cb, bool is_framework)
1272 {
1273 struct notifier_cb_info *cb_info = NULL;
1274 enum notify_type type = ffa_notify_type_get(dev->vm_id);
1275 bool cb_found, is_registration = !!cb;
1276
1277 if (is_framework)
1278 cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, dev->vm_id,
1279 &dev->uuid);
1280 else
1281 cb_info = notifier_hnode_get_by_type(notify_id, type);
1282
1283 cb_found = !!cb_info;
1284
1285 if (!(is_registration ^ cb_found))
1286 return -EINVAL;
1287
1288 if (is_registration) {
1289 hash_add(drv_info->notifier_hash, &cb->hnode, notify_id);
1290 } else {
1291 hash_del(&cb_info->hnode);
1292 kfree(cb_info);
1293 }
1294
1295 return 0;
1296 }
1297
1298 static int __ffa_notify_relinquish(struct ffa_device *dev, int notify_id,
1299 bool is_framework)
1300 {
1301 int rc;
1302
1303 if (ffa_notifications_disabled())
1304 return -EOPNOTSUPP;
1305
1306 if (notify_id >= FFA_MAX_NOTIFICATIONS)
1307 return -EINVAL;
1308
1309 write_lock(&drv_info->notify_lock);
1310
1311 rc = update_notifier_cb(dev, notify_id, NULL, is_framework);
1312 if (rc) {
1313 pr_err("Could not unregister notification callback\n");
1314 write_unlock(&drv_info->notify_lock);
1315 return rc;
1316 }
1317
1318 if (!is_framework)
1319 rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
1320
1321 write_unlock(&drv_info->notify_lock);
1322
1323 return rc;
1324 }
1325
1326 static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
1327 {
1328 return __ffa_notify_relinquish(dev, notify_id, false);
1329 }
1330
1331 static int ffa_fwk_notify_relinquish(struct ffa_device *dev, int notify_id)
1332 {
1333 return __ffa_notify_relinquish(dev, notify_id, true);
1334 }
1335
1336 static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
1337 void *cb, void *cb_data,
1338 int notify_id, bool is_framework)
1339 {
1340 int rc;
1341 u32 flags = 0;
1342 struct notifier_cb_info *cb_info = NULL;
1343
1344 if (ffa_notifications_disabled())
1345 return -EOPNOTSUPP;
1346
1347 if (notify_id >= FFA_MAX_NOTIFICATIONS)
1348 return -EINVAL;
1349
1350 cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
1351 if (!cb_info)
1352 return -ENOMEM;
1353
1354 cb_info->dev = dev;
1355 cb_info->cb_data = cb_data;
1356 if (is_framework)
1357 cb_info->fwk_cb = cb;
1358 else
1359 cb_info->cb = cb;
1360
1361 write_lock(&drv_info->notify_lock);
1362
1363 if (!is_framework) {
1364 if (is_per_vcpu)
1365 flags = PER_VCPU_NOTIFICATION_FLAG;
1366
1367 rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
1368 if (rc)
1369 goto out_unlock_free;
1370 }
1371
1372 rc = update_notifier_cb(dev, notify_id, cb_info, is_framework);
1373 if (rc) {
1374 pr_err("Failed to register callback for %d - %d\n",
1375 notify_id, rc);
1376 if (!is_framework)
1377 ffa_notification_unbind(dev->vm_id, BIT(notify_id));
1378 }
1379
1380 out_unlock_free:
1381 write_unlock(&drv_info->notify_lock);
1382 if (rc)
1383 kfree(cb_info);
1384
1385 return rc;
1386 }
1387
1388 static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
1389 ffa_notifier_cb cb, void *cb_data, int notify_id)
1390 {
1391 return __ffa_notify_request(dev, is_per_vcpu, cb, cb_data, notify_id,
1392 false);
1393 }
1394
1395 static int
1396 ffa_fwk_notify_request(struct ffa_device *dev, ffa_fwk_notifier_cb cb,
1397 void *cb_data, int notify_id)
1398 {
1399 return __ffa_notify_request(dev, false, cb, cb_data, notify_id, true);
1400 }
1401
1402 static int ffa_notify_send(struct ffa_device *dev, int notify_id,
1403 bool is_per_vcpu, u16 vcpu)
1404 {
1405 u32 flags = 0;
1406
1407 if (ffa_notifications_disabled())
1408 return -EOPNOTSUPP;
1409
1410 if (is_per_vcpu)
1411 flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16);
1412
1413 return ffa_notification_set(dev->vm_id, drv_info->vm_id, flags,
1414 BIT(notify_id));
1415 }
1416
1417 static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
1418 {
1419 int notify_id;
1420 struct notifier_cb_info *cb_info = NULL;
1421
1422 for (notify_id = 0; notify_id <= FFA_MAX_NOTIFICATIONS && bitmap;
1423 notify_id++, bitmap >>= 1) {
1424 if (!(bitmap & 1))
1425 continue;
1426
1427 read_lock(&drv_info->notify_lock);
1428 cb_info = notifier_hnode_get_by_type(notify_id, type);
1429 read_unlock(&drv_info->notify_lock);
1430
1431 if (cb_info && cb_info->cb)
1432 cb_info->cb(notify_id, cb_info->cb_data);
1433 }
1434 }
1435
1436 static void handle_fwk_notif_callbacks(u32 bitmap)
1437 {
1438 void *buf;
1439 uuid_t uuid;
1440 int notify_id = 0, target;
1441 struct ffa_indirect_msg_hdr *msg;
1442 struct notifier_cb_info *cb_info = NULL;
1443
1444 /* Only one framework notification defined and supported for now */
1445 if (!(bitmap & FRAMEWORK_NOTIFY_RX_BUFFER_FULL))
1446 return;
1447
1448 mutex_lock(&drv_info->rx_lock);
1449
1450 msg = drv_info->rx_buffer;
1451 buf = kmemdup((void *)msg + msg->offset, msg->size, GFP_KERNEL);
1452 if (!buf) {
1453 mutex_unlock(&drv_info->rx_lock);
1454 return;
1455 }
1456
1457 target = SENDER_ID(msg->send_recv_id);
1458 if (msg->offset >= sizeof(*msg))
1459 uuid_copy(&uuid, &msg->uuid);
1460 else
1461 uuid_copy(&uuid, &uuid_null);
1462
1463 mutex_unlock(&drv_info->rx_lock);
1464
1465 ffa_rx_release();
1466
1467 read_lock(&drv_info->notify_lock);
1468 cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, target, &uuid);
1469 read_unlock(&drv_info->notify_lock);
1470
1471 if (cb_info && cb_info->fwk_cb)
1472 cb_info->fwk_cb(notify_id, cb_info->cb_data, buf);
1473 kfree(buf);
1474 }
1475
1476 static void notif_get_and_handle(void *cb_data)
1477 {
1478 int rc;
1479 u32 flags;
1480 struct ffa_drv_info *info = cb_data;
1481 struct ffa_notify_bitmaps bitmaps = { 0 };
1482
1483 if (info->vm_id == 0) /* Non secure physical instance */
1484 flags = FFA_BITMAP_SECURE_ENABLE_MASK;
1485 else
1486 flags = FFA_BITMAP_ALL_ENABLE_MASK;
1487
1488 rc = ffa_notification_get(flags, &bitmaps);
1489 if (rc) {
1490 pr_err("Failed to retrieve notifications with %d!\n", rc);
1491 return;
1492 }
1493
1494 handle_fwk_notif_callbacks(SPM_FRAMEWORK_BITMAP(bitmaps.arch_map));
1495 handle_fwk_notif_callbacks(NS_HYP_FRAMEWORK_BITMAP(bitmaps.arch_map));
1496 handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
1497 handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
1498 }
1499
1500 static void
1501 ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
1502 {
1503 struct ffa_drv_info *info = cb_data;
1504
1505 if (!is_per_vcpu)
1506 notif_get_and_handle(info);
1507 else
1508 smp_call_function_single(vcpu, notif_get_and_handle, info, 0);
1509 }
1510
1511 static void notif_pcpu_irq_work_fn(struct work_struct *work)
1512 {
1513 struct ffa_drv_info *info = container_of(work, struct ffa_drv_info,
1514 notif_pcpu_work);
1515
1516 ffa_self_notif_handle(smp_processor_id(), true, info);
1517 }
1518
1519 static const struct ffa_info_ops ffa_drv_info_ops = {
1520 .api_version_get = ffa_api_version_get,
1521 .partition_info_get = ffa_partition_info_get,
1522 };
1523
1524 static const struct ffa_msg_ops ffa_drv_msg_ops = {
1525 .mode_32bit_set = ffa_mode_32bit_set,
1526 .sync_send_receive = ffa_sync_send_receive,
1527 .indirect_send = ffa_indirect_msg_send,
1528 .sync_send_receive2 = ffa_sync_send_receive2,
1529 };
1530
1531 static const struct ffa_mem_ops ffa_drv_mem_ops = {
1532 .memory_reclaim = ffa_memory_reclaim,
1533 .memory_share = ffa_memory_share,
1534 .memory_lend = ffa_memory_lend,
1535 };
1536
1537 static const struct ffa_cpu_ops ffa_drv_cpu_ops = {
1538 .run = ffa_run,
1539 };
1540
1541 static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
1542 .sched_recv_cb_register = ffa_sched_recv_cb_register,
1543 .sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
1544 .notify_request = ffa_notify_request,
1545 .notify_relinquish = ffa_notify_relinquish,
1546 .fwk_notify_request = ffa_fwk_notify_request,
1547 .fwk_notify_relinquish = ffa_fwk_notify_relinquish,
1548 .notify_send = ffa_notify_send,
1549 };
1550
1551 static const struct ffa_ops ffa_drv_ops = {
1552 .info_ops = &ffa_drv_info_ops,
1553 .msg_ops = &ffa_drv_msg_ops,
1554 .mem_ops = &ffa_drv_mem_ops,
1555 .cpu_ops = &ffa_drv_cpu_ops,
1556 .notifier_ops = &ffa_drv_notifier_ops,
1557 };
1558
1559 void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
1560 {
1561 int count, idx;
1562 struct ffa_partition_info *pbuf, *tpbuf;
1563
1564 count = ffa_partition_probe(uuid, &pbuf);
1565 if (count <= 0)
1566 return;
1567
1568 for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
1569 if (tpbuf->id == ffa_dev->vm_id)
1570 uuid_copy(&ffa_dev->uuid, uuid);
1571 kfree(pbuf);
1572 }
1573
1574 static int
1575 ffa_bus_notifier(struct notifier_block *nb, unsigned long action, void *data)
1576 {
1577 struct device *dev = data;
1578 struct ffa_device *fdev = to_ffa_dev(dev);
1579
1580 if (action == BUS_NOTIFY_BIND_DRIVER) {
1581 struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
1582 const struct ffa_device_id *id_table = ffa_drv->id_table;
1583
1584 /*
1585 * FF-A v1.1 provides the UUID for each partition as part of the
1586 * discovery API; the discovered UUID must be populated in the
1587 * device's UUID and there is no need to work around that by copying
1588 * the UUID from the driver's ID table.
1589 */
1590 if (uuid_is_null(&fdev->uuid))
1591 ffa_device_match_uuid(fdev, &id_table->uuid);
1592
1593 return NOTIFY_OK;
1594 }
1595
1596 return NOTIFY_DONE;
1597 }
1598
1599 static struct notifier_block ffa_bus_nb = {
1600 .notifier_call = ffa_bus_notifier,
1601 };
1602
1603 static int ffa_xa_add_partition_info(struct ffa_device *dev)
1604 {
1605 struct ffa_dev_part_info *info;
1606 struct list_head *head, *phead;
1607 int ret = -ENOMEM;
1608
1609 phead = xa_load(&drv_info->partition_info, dev->vm_id);
1610 if (phead) {
1611 head = phead;
1612 list_for_each_entry(info, head, node) {
1613 if (info->dev == dev) {
1614 pr_err("%s: duplicate dev %p part ID 0x%x\n",
1615 __func__, dev, dev->vm_id);
1616 return -EEXIST;
1617 }
1618 }
1619 }
1620
1621 info = kzalloc(sizeof(*info), GFP_KERNEL);
1622 if (!info)
1623 return ret;
1624
1625 rwlock_init(&info->rw_lock);
1626 info->dev = dev;
1627
1628 if (!phead) {
1629 phead = kzalloc(sizeof(*phead), GFP_KERNEL);
1630 if (!phead)
1631 goto free_out;
1632
1633 INIT_LIST_HEAD(phead);
1634
1635 ret = xa_insert(&drv_info->partition_info, dev->vm_id, phead,
1636 GFP_KERNEL);
1637 if (ret) {
1638 pr_err("%s: failed to save part ID 0x%x Ret:%d\n",
1639 __func__, dev->vm_id, ret);
1640 goto free_out;
1641 }
1642 }
1643 list_add(&info->node, phead);
1644 return 0;
1645
1646 free_out:
1647 kfree(phead);
1648 kfree(info);
1649 return ret;
1650 }
1651
1652 static int ffa_setup_host_partition(int vm_id)
1653 {
1654 struct ffa_partition_info buf = { 0 };
1655 struct ffa_device *ffa_dev;
1656 int ret;
1657
1658 buf.id = vm_id;
1659 ffa_dev = ffa_device_register(&buf, &ffa_drv_ops);
1660 if (!ffa_dev) {
1661 pr_err("%s: failed to register host partition ID 0x%x\n",
1662 __func__, vm_id);
1663 return -EINVAL;
1664 }
1665
1666 ret = ffa_xa_add_partition_info(ffa_dev);
1667 if (ret)
1668 return ret;
1669
1670 if (ffa_notifications_disabled())
1671 return 0;
1672
1673 ret = ffa_sched_recv_cb_update(ffa_dev, ffa_self_notif_handle,
1674 drv_info, true);
1675 if (ret)
1676 pr_info("Failed to register driver sched callback %d\n", ret);
1677
1678 return ret;
1679 }
1680
1681 static void ffa_partitions_cleanup(void)
1682 {
1683 struct list_head *phead;
1684 unsigned long idx;
1685
1686 /* Clean up/free all registered devices */
1687 ffa_devices_unregister();
1688
1689 xa_for_each(&drv_info->partition_info, idx, phead) {
1690 struct ffa_dev_part_info *info, *tmp;
1691
1692 xa_erase(&drv_info->partition_info, idx);
1693 list_for_each_entry_safe(info, tmp, phead, node) {
1694 list_del(&info->node);
1695 kfree(info);
1696 }
1697 kfree(phead);
1698 }
1699
1700 xa_destroy(&drv_info->partition_info);
1701 }
1702
1703 static int ffa_setup_partitions(void)
1704 {
1705 int count, idx, ret;
1706 struct ffa_device *ffa_dev;
1707 struct ffa_partition_info *pbuf, *tpbuf;
1708
1709 if (drv_info->version == FFA_VERSION_1_0) {
1710 ret = bus_register_notifier(&ffa_bus_type, &ffa_bus_nb);
1711 if (ret)
1712 pr_err("Failed to register FF-A bus notifiers\n");
1713 }
1714
1715 count = ffa_partition_probe(&uuid_null, &pbuf);
1716 if (count <= 0) {
1717 pr_info("%s: No partitions found, error %d\n", __func__, count);
1718 return -EINVAL;
1719 }
1720
1721 xa_init(&drv_info->partition_info);
1722 for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
1723 /* Note that if the UUID is uuid_null, ffa_bus_notifier() will have
1724 * to find the UUID of this partition ID with the help of
1725 * ffa_device_match_uuid(). FF-A v1.1 and above provides the UUID
1726 * here for each partition as part of the discovery API, and the
1727 * same is passed on.
1728 */
1729 ffa_dev = ffa_device_register(tpbuf, &ffa_drv_ops);
1730 if (!ffa_dev) {
1731 pr_err("%s: failed to register partition ID 0x%x\n",
1732 __func__, tpbuf->id);
1733 continue;
1734 }
1735
1736 if (drv_info->version > FFA_VERSION_1_0 &&
1737 !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
1738 ffa_mode_32bit_set(ffa_dev);
1739
1740 if (ffa_xa_add_partition_info(ffa_dev)) {
1741 ffa_device_unregister(ffa_dev);
1742 continue;
1743 }
1744 }
1745
1746 kfree(pbuf);
1747
1748 /*
1749 * Check if the host is already added as part of the partition info.
1750 * Multiple UUIDs are not possible for the host, so just checking
1751 * whether there is an entry will suffice.
1752 */
1753 if (xa_load(&drv_info->partition_info, drv_info->vm_id))
1754 return 0;
1755
1756 /* Allocate for the host */
1757 ret = ffa_setup_host_partition(drv_info->vm_id);
1758 if (ret)
1759 ffa_partitions_cleanup();
1760
1761 return ret;
1762 }
1763
1764 /* FFA FEATURE IDs */
1765 #define FFA_FEAT_NOTIFICATION_PENDING_INT (1)
1766 #define FFA_FEAT_SCHEDULE_RECEIVER_INT (2)
1767 #define FFA_FEAT_MANAGED_EXIT_INT (3)
1768
1769 static irqreturn_t ffa_sched_recv_irq_handler(int irq, void *irq_data)
1770 {
1771 struct ffa_pcpu_irq *pcpu = irq_data;
1772 struct ffa_drv_info *info = pcpu->info;
1773
1774 queue_work(info->notif_pcpu_wq, &info->sched_recv_irq_work);
1775
1776 return IRQ_HANDLED;
1777 }
1778
1779 static irqreturn_t notif_pend_irq_handler(int irq, void *irq_data)
1780 {
1781 struct ffa_pcpu_irq *pcpu = irq_data;
1782 struct ffa_drv_info *info = pcpu->info;
1783
1784 queue_work_on(smp_processor_id(), info->notif_pcpu_wq,
1785 &info->notif_pcpu_work);
1786
1787 return IRQ_HANDLED;
1788 }
1789
1790 static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
1791 {
1792 ffa_notification_info_get();
1793 }
1794
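/*
 * Maps the SGI number reported by FFA_FEATURES for the given feature ID to
 * a Linux IRQ, either via the GICv3 device-tree node or, on ACPI systems,
 * via acpi_register_gsi().
 */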
1795 static int ffa_irq_map(u32 id)
1796 {
1797 char *err_str;
1798 int ret, irq, intid;
1799
1800 if (id == FFA_FEAT_NOTIFICATION_PENDING_INT)
1801 err_str = "Notification Pending Interrupt";
1802 else if (id == FFA_FEAT_SCHEDULE_RECEIVER_INT)
1803 err_str = "Schedule Receiver Interrupt";
1804 else
1805 err_str = "Unknown ID";
1806
1807 /* The returned intid is assumed to be an SGI donated to the NS world */
1808 ret = ffa_features(id, 0, &intid, NULL);
1809 if (ret < 0) {
1810 if (ret != -EOPNOTSUPP)
1811 pr_err("Failed to retrieve FF-A %s %u\n", err_str, id);
1812 return ret;
1813 }
1814
1815 if (acpi_disabled) {
1816 struct of_phandle_args oirq = {};
1817 struct device_node *gic;
1818
1819 /* Only GICv3 supported currently with the device tree */
1820 gic = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
1821 if (!gic)
1822 return -ENXIO;
1823
1824 oirq.np = gic;
1825 oirq.args_count = 1;
1826 oirq.args[0] = intid;
1827 irq = irq_create_of_mapping(&oirq);
1828 of_node_put(gic);
1829 #ifdef CONFIG_ACPI
1830 } else {
1831 irq = acpi_register_gsi(NULL, intid, ACPI_EDGE_SENSITIVE,
1832 ACPI_ACTIVE_HIGH);
1833 #endif
1834 }
1835
1836 if (irq <= 0) {
1837 pr_err("Failed to create IRQ mapping!\n");
1838 return -ENODATA;
1839 }
1840
1841 return irq;
1842 }
1843
ffa_irq_unmap(unsigned int irq)1844 static void ffa_irq_unmap(unsigned int irq)
1845 {
1846 if (!irq)
1847 return;
1848 irq_dispose_mapping(irq);
1849 }
1850
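/* CPU hotplug callbacks to enable/disable the per-CPU SRI and NPI */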
static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
{
	if (drv_info->sched_recv_irq)
		enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
	if (drv_info->notif_pend_irq)
		enable_percpu_irq(drv_info->notif_pend_irq, IRQ_TYPE_NONE);
	return 0;
}

static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
{
	if (drv_info->sched_recv_irq)
		disable_percpu_irq(drv_info->sched_recv_irq);
	if (drv_info->notif_pend_irq)
		disable_percpu_irq(drv_info->notif_pend_irq);
	return 0;
}

static void ffa_uninit_pcpu_irq(void)
{
	if (drv_info->cpuhp_state) {
		cpuhp_remove_state(drv_info->cpuhp_state);
		drv_info->cpuhp_state = 0;
	}

	if (drv_info->notif_pcpu_wq) {
		destroy_workqueue(drv_info->notif_pcpu_wq);
		drv_info->notif_pcpu_wq = NULL;
	}

	if (drv_info->sched_recv_irq)
		free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);

	if (drv_info->notif_pend_irq)
		free_percpu_irq(drv_info->notif_pend_irq, drv_info->irq_pcpu);

	if (drv_info->irq_pcpu) {
		free_percpu(drv_info->irq_pcpu);
		drv_info->irq_pcpu = NULL;
	}
}

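/*
 * Allocate the per-CPU IRQ data, request the SRI and NPI as per-CPU
 * interrupts, create the notification workqueue and register the CPU
 * hotplug state that enables the interrupts on each online CPU.
 */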
static int ffa_init_pcpu_irq(void)
{
	struct ffa_pcpu_irq __percpu *irq_pcpu;
	int ret, cpu;

	irq_pcpu = alloc_percpu(struct ffa_pcpu_irq);
	if (!irq_pcpu)
		return -ENOMEM;

	for_each_present_cpu(cpu)
		per_cpu_ptr(irq_pcpu, cpu)->info = drv_info;

	drv_info->irq_pcpu = irq_pcpu;

	if (drv_info->sched_recv_irq) {
		ret = request_percpu_irq(drv_info->sched_recv_irq,
					 ffa_sched_recv_irq_handler,
					 "ARM-FFA-SRI", irq_pcpu);
		if (ret) {
			pr_err("Error registering percpu SRI nIRQ %d : %d\n",
			       drv_info->sched_recv_irq, ret);
			drv_info->sched_recv_irq = 0;
			return ret;
		}
	}

	if (drv_info->notif_pend_irq) {
		ret = request_percpu_irq(drv_info->notif_pend_irq,
					 notif_pend_irq_handler,
					 "ARM-FFA-NPI", irq_pcpu);
		if (ret) {
			pr_err("Error registering percpu NPI nIRQ %d : %d\n",
			       drv_info->notif_pend_irq, ret);
			drv_info->notif_pend_irq = 0;
			return ret;
		}
	}

	INIT_WORK(&drv_info->sched_recv_irq_work, ffa_sched_recv_irq_work_fn);
	INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
	drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
	if (!drv_info->notif_pcpu_wq)
		return -EINVAL;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ffa/pcpu-irq:starting",
				ffa_cpuhp_pcpu_irq_enable,
				ffa_cpuhp_pcpu_irq_disable);

	if (ret < 0)
		return ret;

	drv_info->cpuhp_state = ret;
	return 0;
}

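/* Tear down the per-CPU IRQs, the IRQ mappings and the notification bitmap */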
static void ffa_notifications_cleanup(void)
{
	ffa_uninit_pcpu_irq();
	ffa_irq_unmap(drv_info->sched_recv_irq);
	drv_info->sched_recv_irq = 0;
	ffa_irq_unmap(drv_info->notif_pend_irq);
	drv_info->notif_pend_irq = 0;

	if (drv_info->bitmap_created) {
		ffa_notification_bitmap_destroy();
		drv_info->bitmap_created = false;
	}
	drv_info->notif_enabled = false;
}

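/*
 * Create the notification bitmaps if supported, map the SRI and NPI and
 * set up the per-CPU IRQ handling for them. Notifications are enabled
 * only if at least one of the two interrupts could be set up; otherwise
 * everything is torn down again and notifications stay disabled.
 */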
static void ffa_notifications_setup(void)
{
	int ret;

	ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
	if (!ret) {
		ret = ffa_notification_bitmap_create();
		if (ret) {
			pr_err("Notification bitmap create error %d\n", ret);
			return;
		}

		drv_info->bitmap_created = true;
	}

	ret = ffa_irq_map(FFA_FEAT_SCHEDULE_RECEIVER_INT);
	if (ret > 0)
		drv_info->sched_recv_irq = ret;

	ret = ffa_irq_map(FFA_FEAT_NOTIFICATION_PENDING_INT);
	if (ret > 0)
		drv_info->notif_pend_irq = ret;

	if (!drv_info->sched_recv_irq && !drv_info->notif_pend_irq)
		goto cleanup;

	ret = ffa_init_pcpu_irq();
	if (ret)
		goto cleanup;

	hash_init(drv_info->notifier_hash);
	rwlock_init(&drv_info->notify_lock);

	drv_info->notif_enabled = true;
	return;
cleanup:
	pr_info("Notification setup failed %d, not enabled\n", ret);
	ffa_notifications_cleanup();
}

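/*
 * Driver entry point: initialise the transport, negotiate the FF-A
 * version, fetch our own partition ID, map the RxTx buffers with the
 * size advertised via FFA_FEATURES, then set up notifications and
 * discover and register all the partitions.
 */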
static int __init ffa_init(void)
{
	int ret;
	u32 buf_sz;
	size_t rxtx_bufsz = SZ_4K;

	ret = ffa_transport_init(&invoke_ffa_fn);
	if (ret)
		return ret;

	drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
	if (!drv_info)
		return -ENOMEM;

	ret = ffa_version_check(&drv_info->version);
	if (ret)
		goto free_drv_info;

	if (ffa_id_get(&drv_info->vm_id)) {
		pr_err("failed to obtain VM id for self\n");
		ret = -ENODEV;
		goto free_drv_info;
	}

	ret = ffa_features(FFA_FN_NATIVE(RXTX_MAP), 0, &buf_sz, NULL);
	if (!ret) {
		if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 1)
			rxtx_bufsz = SZ_64K;
		else if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 2)
			rxtx_bufsz = SZ_16K;
		else
			rxtx_bufsz = SZ_4K;
	}

	drv_info->rxtx_bufsz = rxtx_bufsz;
	drv_info->rx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
	if (!drv_info->rx_buffer) {
		/* No buffers allocated yet, so skip the buffer freeing path */
		ret = -ENOMEM;
		goto free_drv_info;
	}

	drv_info->tx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
	if (!drv_info->tx_buffer) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
			   virt_to_phys(drv_info->rx_buffer),
			   rxtx_bufsz / FFA_PAGE_SIZE);
	if (ret) {
		pr_err("failed to register FFA RxTx buffers\n");
		goto free_pages;
	}

	mutex_init(&drv_info->rx_lock);
	mutex_init(&drv_info->tx_lock);

	ffa_drvinfo_flags_init();

	ffa_notifications_setup();

	ret = ffa_setup_partitions();
	if (!ret)
		return ret;

	pr_err("failed to setup partitions\n");
	ffa_notifications_cleanup();
free_pages:
	if (drv_info->tx_buffer)
		free_pages_exact(drv_info->tx_buffer, rxtx_bufsz);
	free_pages_exact(drv_info->rx_buffer, rxtx_bufsz);
free_drv_info:
	kfree(drv_info);
	return ret;
}
rootfs_initcall(ffa_init);

static void __exit ffa_exit(void)
{
	ffa_notifications_cleanup();
	ffa_partitions_cleanup();
	ffa_rxtx_unmap(drv_info->vm_id);
	free_pages_exact(drv_info->tx_buffer, drv_info->rxtx_bufsz);
	free_pages_exact(drv_info->rx_buffer, drv_info->rxtx_bufsz);
	kfree(drv_info);
}
module_exit(ffa_exit);

MODULE_ALIAS("arm-ffa");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("Arm FF-A interface driver");
MODULE_LICENSE("GPL v2");