1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
3
4 #ifndef VCHIQ_CORE_H
5 #define VCHIQ_CORE_H
6
7 #include <linux/mutex.h>
8 #include <linux/completion.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/dev_printk.h>
11 #include <linux/kthread.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/seq_file.h>
15 #include <linux/spinlock_types.h>
16 #include <linux/wait.h>
17
18 #include "vchiq.h"
19 #include "vchiq_cfg.h"
20
21 /* Do this so that we can test-build the code on non-rpi systems */
22 #if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
23
24 #else
25
26 #ifndef dsb
27 #define dsb(a)
28 #endif
29
30 #endif /* IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) */
31
32 #define VCHIQ_SERVICE_HANDLE_INVALID 0
33
34 #define VCHIQ_SLOT_SIZE 4096
35 #define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(struct vchiq_header))
36
37 #define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
38 #define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
39 #define VCHIQ_SLOT_ZERO_SLOTS DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
40 VCHIQ_SLOT_SIZE)
41
/*
 * Helpers for bitsets stored as arrays of 32-bit words.
 * Arguments are fully parenthesised so that expression arguments
 * (e.g. "a | b") group correctly (CERT PRE01-C).
 */
#define BITSET_SIZE(b)        (((b) + 31) >> 5)	/* words needed to hold b bits */
#define BITSET_WORD(b)        ((b) >> 5)	/* index of the word holding bit b */
#define BITSET_BIT(b)         (1 << ((b) & 31))	/* mask for bit b within its word */
#define BITSET_IS_SET(bs, b)  ((bs)[BITSET_WORD(b)] & BITSET_BIT(b))
#define BITSET_SET(bs, b)     ((bs)[BITSET_WORD(b)] |= BITSET_BIT(b))
47
/*
 * Indices into the shared debug[] array (see struct vchiq_shared_state),
 * where each side records progress markers for post-mortem inspection.
 */
enum {
	DEBUG_ENTRIES,
#if VCHIQ_ENABLE_DEBUG
	DEBUG_SLOT_HANDLER_COUNT,
	DEBUG_SLOT_HANDLER_LINE,
	DEBUG_PARSE_LINE,
	DEBUG_PARSE_HEADER,
	DEBUG_PARSE_MSGID,
	DEBUG_AWAIT_COMPLETION_LINE,
	DEBUG_DEQUEUE_MESSAGE_LINE,
	DEBUG_SERVICE_CALLBACK_LINE,
	DEBUG_MSG_QUEUE_FULL_COUNT,
	DEBUG_COMPLETION_QUEUE_FULL_COUNT,
#endif
	DEBUG_MAX
};
64
#if VCHIQ_ENABLE_DEBUG

/* Declares the debug_ptr local used by the DEBUG_TRACE/VALUE/COUNT macros. */
#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug
/* Record the current source line; dsb() makes the write visible to the peer. */
#define DEBUG_TRACE(d) \
	do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0)
/* Record an arbitrary value in the shared debug array. */
#define DEBUG_VALUE(d, v) \
	do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0)
/* Increment a counter in the shared debug array. */
#define DEBUG_COUNT(d) \
	do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0)

#else /* VCHIQ_ENABLE_DEBUG */

/* Debugging disabled - the macros compile away to nothing. */
#define DEBUG_INITIALISE(local)
#define DEBUG_TRACE(d)
#define DEBUG_VALUE(d, v)
#define DEBUG_COUNT(d)

#endif /* VCHIQ_ENABLE_DEBUG */
83
/* Connection state of a vchiq_state instance (see conn_state field). */
enum vchiq_connstate {
	VCHIQ_CONNSTATE_DISCONNECTED,
	VCHIQ_CONNSTATE_CONNECTING,
	VCHIQ_CONNSTATE_CONNECTED,
	VCHIQ_CONNSTATE_PAUSING,
	VCHIQ_CONNSTATE_PAUSE_SENT,
	VCHIQ_CONNSTATE_PAUSED,
	VCHIQ_CONNSTATE_RESUMING,
	VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
	VCHIQ_CONNSTATE_RESUME_TIMEOUT
};
95
/* Service lifecycle states (srvstate field of struct vchiq_service). */
enum {
	VCHIQ_SRVSTATE_FREE,
	VCHIQ_SRVSTATE_HIDDEN,
	VCHIQ_SRVSTATE_LISTENING,
	VCHIQ_SRVSTATE_OPENING,
	VCHIQ_SRVSTATE_OPEN,
	VCHIQ_SRVSTATE_OPENSYNC,
	VCHIQ_SRVSTATE_CLOSESENT,
	VCHIQ_SRVSTATE_CLOSERECVD,
	VCHIQ_SRVSTATE_CLOSEWAIT,
	VCHIQ_SRVSTATE_CLOSED
};
108
/* Direction of a bulk transfer, from the local side's point of view. */
enum vchiq_bulk_dir {
	VCHIQ_BULK_TRANSMIT,
	VCHIQ_BULK_RECEIVE
};
113
/* Describes one queued bulk transfer on a service (see vchiq_bulk_queue). */
struct vchiq_bulk {
	short mode;		/* bulk completion mode - presumably a VCHIQ_BULK_MODE_* value from vchiq.h; confirm */
	short dir;		/* enum vchiq_bulk_dir value - TODO confirm narrowing to short */
	void *cb_data;		/* opaque data passed back on completion */
	void __user *cb_userdata;	/* user-space counterpart of cb_data */
	struct bulk_waiter *waiter;	/* set for blocking transfers (see struct bulk_waiter) */
	dma_addr_t dma_addr;	/* DMA address of the local buffer */
	int size;		/* requested transfer size in bytes */
	void *remote_data;	/* peer's handle/buffer for this transfer - confirm semantics */
	int remote_size;	/* size reported by the peer */
	int actual;		/* bytes actually transferred - NOTE(review): confirm in vchiq_core.c */
	void *offset;		/* kernel-space buffer pointer (mutually exclusive with uoffset) - confirm */
	void __user *uoffset;	/* user-space buffer pointer */
};
128
/*
 * Per-service circular queue of bulk transfers; the indices below select
 * entries in bulks[] (presumably modulo VCHIQ_NUM_SERVICE_BULKS - confirm).
 */
struct vchiq_bulk_queue {
	int local_insert;	/* Where to insert the next local bulk */
	int remote_insert;	/* Where to insert the next remote bulk (master) */
	int process;		/* Bulk to transfer next */
	int remote_notify;	/* Bulk to notify the remote client of next (mstr) */
	int remove;		/* Bulk to notify the local client of, and remove, next */
	struct vchiq_bulk bulks[VCHIQ_NUM_SERVICE_BULKS];
};
137
138 /*
139 * Remote events provide a way of presenting several virtual doorbells to a
140 * peer (ARM host to VPU) using only one physical doorbell. They can be thought
141 * of as a way for the peer to signal a semaphore, in this case implemented as
142 * a workqueue.
143 *
144 * Remote events remain signalled until acknowledged by the receiver, and they
145 * are non-counting. They are designed in such a way as to minimise the number
146 * of interrupts and avoid unnecessary waiting.
147 *
 * A remote_event is a small data structure that lives in shared memory. It
 * comprises two booleans - armed and fired:
150 *
151 * The sender sets fired when they signal the receiver.
152 * If fired is set, the receiver has been signalled and need not wait.
153 * The receiver sets the armed field before they begin to wait.
154 * If armed is set, the receiver is waiting and wishes to be woken by interrupt.
155 */
struct remote_event {
	int armed;	/* set by the receiver before waiting */
	int fired;	/* set by the sender when signalling */
	u32 __unused;	/* padding - layout is shared with the peer, do not remove */
};
161
162 struct opaque_platform_state;
163
/* One slot of shared message memory (VCHIQ_SLOT_SIZE = 4096 bytes). */
struct vchiq_slot {
	char data[VCHIQ_SLOT_SIZE];
};
167
struct vchiq_slot_info {
	/* Use two counters rather than one to avoid the need for a mutex. */
	short use_count;	/* incremented as messages in the slot are used */
	short release_count;	/* incremented as they are released; presumably the
				 * slot is reclaimable when the counts match - confirm
				 */
};
173
174 /*
175 * VCHIQ is a reliable connection-oriented datagram protocol.
176 *
177 * A VCHIQ service is equivalent to a TCP connection, except:
178 * + FOURCCs are used for the rendezvous, and port numbers are assigned at the
179 * time the connection is established.
180 * + There is less of a distinction between server and client sockets, the only
181 * difference being which end makes the first move.
182 * + For a multi-client server, the server creates new "listening" services as
183 * the existing one becomes connected - there is no need to specify the
184 * maximum number of clients up front.
185 * + Data transfer is reliable but packetized (messages have defined ends).
186 * + Messages can be either short (capable of fitting in a slot) and in-band,
187 * or copied between external buffers (bulk transfers).
188 */
struct vchiq_service {
	struct vchiq_service_base base;
	unsigned int handle;		/* handle clients use to refer to this service */
	struct kref ref_count;		/* see vchiq_service_get()/vchiq_service_put() */
	struct rcu_head rcu;		/* for RCU-deferred freeing (services[] is __rcu) */
	int srvstate;			/* one of VCHIQ_SRVSTATE_* */
	void (*userdata_term)(void *userdata);	/* called to release userdata - TODO confirm when */
	unsigned int localport;
	unsigned int remoteport;
	int public_fourcc;		/* FOURCC used for the rendezvous - confirm */
	int client_id;
	char auto_close;
	char sync;			/* non-zero for synchronous services - NOTE(review): confirm */
	char closing;
	char trace;
	atomic_t poll_flags;		/* pending poll requests (see request_poll()) */
	short version;
	short version_min;
	short peer_version;

	struct vchiq_state *state;	/* owning connection state */
	struct vchiq_instance *instance;

	int service_use_count;

	struct vchiq_bulk_queue bulk_tx;
	struct vchiq_bulk_queue bulk_rx;

	struct completion remove_event;
	struct completion bulk_remove_event;
	struct mutex bulk_mutex;

	/* Per-service statistics, for debug dumps. */
	struct service_stats_struct {
		int quota_stalls;
		int slot_stalls;
		int bulk_stalls;
		int error_count;
		int ctrl_tx_count;
		int ctrl_rx_count;
		int bulk_tx_count;
		int bulk_rx_count;
		int bulk_aborted_count;
		u64 ctrl_tx_bytes;
		u64 ctrl_rx_bytes;
		u64 bulk_tx_bytes;
		u64 bulk_rx_bytes;
	} stats;

	/* Queue of received message headers awaiting dequeue, with
	 * pop/push completions to block producers/consumers.
	 */
	int msg_queue_read;
	int msg_queue_write;
	struct completion msg_queue_pop;
	struct completion msg_queue_push;
	struct vchiq_header *msg_queue[VCHIQ_MAX_SLOTS];
};
243
244 /*
245 * The quota information is outside struct vchiq_service so that it can
246 * be statically allocated, since for accounting reasons a service's slot
247 * usage is carried over between users of the same port number.
248 */
struct vchiq_service_quota {
	unsigned short slot_quota;	/* max slots this service may occupy */
	unsigned short slot_use_count;	/* slots currently in use */
	unsigned short message_quota;	/* max outstanding messages */
	unsigned short message_use_count;	/* messages currently outstanding */
	struct completion quota_event;	/* signalled when quota becomes available */
	int previous_tx_index;
};
257
/* One side's half of the shared control block (see struct vchiq_slot_zero). */
struct vchiq_shared_state {
	/* A non-zero value here indicates that the content is valid. */
	int initialised;

	/* The first and last (inclusive) slots allocated to the owner. */
	int slot_first;
	int slot_last;

	/* The slot allocated to synchronous messages from the owner. */
	int slot_sync;

	/*
	 * Signalling this event indicates that the owner's slot handler
	 * thread should run.
	 */
	struct remote_event trigger;

	/*
	 * Indicates the byte position within the stream where the next message
	 * will be written. The least significant bits are an index into the
	 * slot. The next bits are the index of the slot in slot_queue.
	 */
	int tx_pos;

	/* This event should be signalled when a slot is recycled. */
	struct remote_event recycle;

	/* The slot_queue index where the next recycled slot will be written. */
	int slot_queue_recycle;

	/* This event should be signalled when a synchronous message is sent. */
	struct remote_event sync_trigger;

	/*
	 * This event should be signalled when a synchronous message has been
	 * released.
	 */
	struct remote_event sync_release;

	/* A circular buffer of slot indexes. */
	int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];

	/* Debugging state (indexed by the DEBUG_* enum above) */
	int debug[DEBUG_MAX];
};
303
304 /*
305 * vchiq_slot_zero describes the memory shared between the ARM host and the
306 * VideoCore VPU. The "master" and "slave" states are owned by the respective
307 * sides but visible to the other; the slots are shared, and the remaining
308 * fields are read-only.
309 *
310 * In the configuration used by this implementation, the memory is allocated
311 * by the host, the VPU is the master (the side which controls the DMA for bulk
312 * transfers), and the host is the slave.
313 *
314 * The ownership of slots changes with use:
315 * + When empty they are owned by the sender.
316 * + When partially filled they are shared with the receiver.
317 * + When completely full they are owned by the receiver.
318 * + When the receiver has finished processing the contents, they are recycled
319 * back to the sender.
320 */
/*
 * NOTE(review): this layout is shared with the VPU firmware - field order
 * and sizes must not change.
 */
struct vchiq_slot_zero {
	int magic;		/* presumably checked on connect to validate the region - confirm */
	short version;
	short version_min;
	int slot_zero_size;	/* sizes/limits published for cross-checking - confirm */
	int slot_size;
	int max_slots;
	int max_slots_per_side;
	int platform_data[2];
	struct vchiq_shared_state master;	/* owned by the VPU (master) side */
	struct vchiq_shared_state slave;	/* owned by the host (slave) side */
	struct vchiq_slot_info slots[VCHIQ_MAX_SLOTS];
};
334
335 /*
336 * This is the private runtime state used by each side. The same structure was
337 * originally used by both sides, but implementations have since diverged.
338 */
struct vchiq_state {
	struct device *dev;
	int id;
	int initialised;
	enum vchiq_connstate conn_state;
	short version_common;

	/* Pointers into the shared memory region (see struct vchiq_slot_zero) */
	struct vchiq_shared_state *local;
	struct vchiq_shared_state *remote;
	struct vchiq_slot *slot_data;

	unsigned short default_slot_quota;
	unsigned short default_message_quota;

	/* Event indicating connect message received */
	struct completion connect;

	/* Mutex protecting services */
	struct mutex mutex;
	struct vchiq_instance **instance;

	/* Processes all incoming messages which aren't synchronous */
	struct task_struct *slot_handler_thread;

	/*
	 * Slots which have been fully processed and released by the (peer)
	 * receiver are added to the receiver queue, which is asynchronously
	 * processed by the recycle thread.
	 */
	struct task_struct *recycle_thread;

	/*
	 * Processes incoming synchronous messages
	 *
	 * The synchronous message channel is shared between all synchronous
	 * services, and provides a way for urgent messages to bypass
	 * potentially long queues of asynchronous messages in the normal slots.
	 *
	 * There can be only one outstanding synchronous message in
	 * each direction, and as a precious shared resource synchronous
	 * services should be used sparingly.
	 */
	struct task_struct *sync_thread;

	/* Local implementation of the trigger remote event */
	wait_queue_head_t trigger_event;

	/* Local implementation of the recycle remote event */
	wait_queue_head_t recycle_event;

	/* Local implementation of the sync trigger remote event */
	wait_queue_head_t sync_trigger_event;

	/* Local implementation of the sync release remote event */
	wait_queue_head_t sync_release_event;

	char *tx_data;	/* data area of the current transmit slot - confirm */
	char *rx_data;	/* data area of the current receive slot - confirm */
	struct vchiq_slot_info *rx_info;

	struct mutex slot_mutex;

	struct mutex recycle_mutex;

	struct mutex sync_mutex;

	spinlock_t msg_queue_spinlock;

	spinlock_t bulk_waiter_spinlock;

	spinlock_t quota_spinlock;

	/*
	 * Indicates the byte position within the stream from where the next
	 * message will be read. The least significant bits are an index into
	 * the slot. The next bits are the index of the slot in
	 * remote->slot_queue.
	 */
	int rx_pos;

	/*
	 * A cached copy of local->tx_pos. Only write to local->tx_pos, and read
	 * from remote->tx_pos.
	 */
	int local_tx_pos;

	/* The slot_queue index of the slot to become available next. */
	int slot_queue_available;

	/* A flag to indicate if any poll has been requested */
	int poll_needed;

	/* The index of the previous slot used for data messages. */
	int previous_data_index;

	/* The number of slots occupied by data messages. */
	unsigned short data_use_count;

	/* The maximum number of slots to be occupied by data messages. */
	unsigned short data_quota;

	/* An array of bit sets indicating which services must be polled. */
	atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];

	/* The number of the first unused service */
	int unused_service;

	/* Signalled when a free slot becomes available. */
	struct completion slot_available_event;

	/* Signalled when a free data slot becomes available. */
	struct completion data_quota_event;

	/* Connection-wide statistics, for debug dumps. */
	struct state_stats_struct {
		int slot_stalls;
		int data_stalls;
		int ctrl_tx_count;
		int ctrl_rx_count;
		int error_count;
	} stats;

	struct vchiq_service __rcu *services[VCHIQ_MAX_SERVICES];
	struct vchiq_service_quota service_quotas[VCHIQ_MAX_SERVICES];
	struct vchiq_slot_info slot_info[VCHIQ_MAX_SLOTS];

	struct opaque_platform_state *platform_state;
};
466
/*
 * Page list passed to the VPU describing the pages of a bulk buffer.
 * NOTE(review): this layout is shared with the firmware; addrs[1] is
 * deliberately a one-element array with additional entries stored past
 * the end of the struct - do not convert to a C99 flexible array member
 * without confirming every sizeof(struct pagelist) use.
 */
struct pagelist {
	u32 length;	/* total transfer length in bytes - TODO confirm */
	u16 type;
	u16 offset;
	u32 addrs[1];	/* N.B. 12 LSBs hold the number
			 * of following pages at consecutive
			 * addresses.
			 */
};
476
/* Host-side bookkeeping for a mapped pagelist (not shared with the VPU). */
struct vchiq_pagelist_info {
	struct pagelist *pagelist;
	size_t pagelist_buffer_size;
	dma_addr_t dma_addr;
	enum dma_data_direction dma_dir;
	unsigned int num_pages;
	unsigned int pages_need_release;	/* non-zero if pages[] must be released - confirm */
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;	/* non-zero once the sg list is DMA-mapped - confirm */
};
488
vchiq_remote_initialised(const struct vchiq_state * state)489 static inline bool vchiq_remote_initialised(const struct vchiq_state *state)
490 {
491 return state->remote && state->remote->initialised;
492 }
493
/* Lets a blocking caller wait for a bulk transfer to complete. */
struct bulk_waiter {
	struct vchiq_bulk *bulk;	/* the transfer being waited on */
	struct completion event;	/* signalled on completion */
	int actual;			/* bytes actually transferred - confirm */
};
499
/* Connection parameters reported by vchiq_get_config(). */
struct vchiq_config {
	unsigned int max_msg_size;
	unsigned int bulk_threshold;	/* The message size above which it
					 * is better to use a bulk transfer
					 * (<= max_msg_size)
					 */
	unsigned int max_outstanding_bulks;
	unsigned int max_services;
	short version;			/* The version of VCHIQ */
	short version_min;		/* The minimum compatible version of VCHIQ */
};
511
extern spinlock_t bulk_waiter_spinlock;

/* Human-readable name of a connection state, for debug output. */
extern const char *
get_conn_state_name(enum vchiq_connstate conn_state);

/* --- State initialisation and connection management --- */

extern struct vchiq_slot_zero *
vchiq_init_slots(struct device *dev, void *mem_base, int mem_size);

extern int
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev);

extern int
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance);

/* --- Service lifecycle --- */

struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
			   const struct vchiq_service_params_kernel *params,
			   int srvstate, struct vchiq_instance *instance,
			   void (*userdata_term)(void *userdata));

extern int
vchiq_open_service_internal(struct vchiq_service *service, int client_id);

extern int
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd);

extern void
vchiq_terminate_service_internal(struct vchiq_service *service);

extern void
vchiq_free_service_internal(struct vchiq_service *service);

extern void
vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance);

extern void
remote_event_pollall(struct vchiq_state *state);

/* --- Bulk transfers --- */

extern int
vchiq_bulk_xfer_waiting(struct vchiq_instance *instance, unsigned int handle,
			struct bulk_waiter *userdata);

extern int
vchiq_bulk_xfer_blocking(struct vchiq_instance *instance, unsigned int handle,
			 struct vchiq_bulk *bulk);

extern int
vchiq_bulk_xfer_callback(struct vchiq_instance *instance, unsigned int handle,
			 struct vchiq_bulk *bulk);

extern void
vchiq_dump_state(struct seq_file *f, struct vchiq_state *state);

extern void
request_poll(struct vchiq_state *state, struct vchiq_service *service,
	     int poll_type);

/* --- Service lookup (handle/port to service) --- */

struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
find_service_by_handle(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
find_service_by_port(struct vchiq_state *state, unsigned int localport);

extern struct vchiq_service *
find_service_for_instance(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
__next_service_by_instance(struct vchiq_state *state,
			   struct vchiq_instance *instance,
			   int *pidx);

extern struct vchiq_service *
next_service_by_instance(struct vchiq_state *state,
			 struct vchiq_instance *instance,
			 int *pidx);

/* --- Reference counting (backed by service->ref_count) --- */

extern void
vchiq_service_get(struct vchiq_service *service);

extern void
vchiq_service_put(struct vchiq_service *service);

/* --- Messaging --- */

extern int
vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
		    ssize_t (*copy_callback)(void *context, void *dest,
					     size_t offset, size_t maxsize),
		    void *context,
		    size_t size);

/* --- Platform hooks and debug dumps --- */

void vchiq_dump_platform_state(struct seq_file *f);

void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f);

void vchiq_dump_platform_service_state(struct seq_file *f, struct vchiq_service *service);

int vchiq_use_service_internal(struct vchiq_service *service);

int vchiq_release_service_internal(struct vchiq_service *service);

void vchiq_on_remote_use(struct vchiq_state *state);

void vchiq_on_remote_release(struct vchiq_state *state);

int vchiq_platform_init_state(struct vchiq_state *state);

int vchiq_check_service(struct vchiq_service *service);

int vchiq_send_remote_use(struct vchiq_state *state);

int vchiq_send_remote_use_active(struct vchiq_state *state);

void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate);

void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);

void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr,
			const void *void_mem, size_t num_bytes);

int vchiq_remove_service(struct vchiq_instance *instance, unsigned int service);

int vchiq_get_client_id(struct vchiq_instance *instance, unsigned int service);

void vchiq_get_config(struct vchiq_config *config);

int vchiq_set_service_option(struct vchiq_instance *instance, unsigned int service,
			     enum vchiq_service_option option, int value);
645
646 #endif
647