1 /* SPDX-License-Identifier: GPL-2.0 */
2
3 #ifndef _LINUX_BINDER_INTERNAL_H
4 #define _LINUX_BINDER_INTERNAL_H
5
6 #include <linux/fs.h>
7 #include <linux/list.h>
8 #include <linux/miscdevice.h>
9 #include <linux/mutex.h>
10 #include <linux/refcount.h>
11 #include <linux/stddef.h>
12 #include <linux/types.h>
13 #include <linux/uidgid.h>
14 #include <uapi/linux/android/binderfs.h>
15 #include "binder_alloc.h"
16 #include "dbitmap.h"
17
/**
 * struct binder_context - binder context-manager bookkeeping
 * @binder_context_mgr_node: binder node registered as the context manager
 * @context_mgr_node_lock: mutex serializing context manager node updates
 * @binder_context_mgr_uid: uid associated with the context manager
 * @name: name of this binder context
 */
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;
	kuid_t binder_context_mgr_uid;
	const char *name;
};
24
/**
 * struct binder_device - information about a binder device node
 * @hlist: list of binder devices
 * @miscdev: information about a binder character device node
 * @context: binder context information
 * @binderfs_inode: This is the inode of the root dentry of the super block
 *                  belonging to a binderfs mount.
 * @ref: reference count managing the lifetime of this device
 */
struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
	struct inode *binderfs_inode;
	refcount_t ref;
};
40
/**
 * struct binderfs_mount_opts - mount options for binderfs
 * @max: maximum number of allocatable binderfs binder devices
 * @stats_mode: enable binder stats in binderfs.
 */
struct binderfs_mount_opts {
	int max;
	int stats_mode;
};
50
/**
 * struct binderfs_info - information about a binderfs mount
 * @ipc_ns: The ipc namespace the binderfs mount belongs to.
 * @control_dentry: This records the dentry of this binderfs mount
 *                  binder-control device.
 * @root_uid: uid that needs to be used when a new binder device is
 *            created.
 * @root_gid: gid that needs to be used when a new binder device is
 *            created.
 * @mount_opts: The mount options in use.
 * @device_count: The current number of allocated binder devices.
 * @proc_log_dir: Pointer to the directory dentry containing process-specific
 *                logs.
 */
struct binderfs_info {
	struct ipc_namespace *ipc_ns;
	struct dentry *control_dentry;
	kuid_t root_uid;
	kgid_t root_gid;
	struct binderfs_mount_opts mount_opts;
	int device_count;
	struct dentry *proc_log_dir;
};
74
75 extern const struct file_operations binder_fops;
76
77 extern char *binder_devices_param;
78
79 #ifdef CONFIG_ANDROID_BINDERFS
80 extern bool is_binderfs_device(const struct inode *inode);
81 extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
82 const struct file_operations *fops,
83 void *data);
84 #else
/* Without CONFIG_ANDROID_BINDERFS no inode can belong to a binderfs mount. */
static inline bool is_binderfs_device(const struct inode *inode)
{
	return false;
}
/*
 * Without CONFIG_ANDROID_BINDERFS there is no binderfs mount to create
 * files in; report no dentry.
 */
static inline struct dentry *binderfs_create_file(struct dentry *dir,
						  const char *name,
						  const struct file_operations *fops,
						  void *data)
{
	return NULL;
}
96 #endif
97
98 #ifdef CONFIG_ANDROID_BINDERFS
99 extern int __init init_binderfs(void);
100 #else
init_binderfs(void)101 static inline int __init init_binderfs(void)
102 {
103 return 0;
104 }
105 #endif
106
/**
 * struct binder_debugfs_entry - describes one binder debugfs file
 * @name: name of the debugfs file
 * @mode: permission bits for the file
 * @fops: file operations backing the file
 * @data: opaque pointer stored alongside the entry
 */
struct binder_debugfs_entry {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	void *data;
};

extern const struct binder_debugfs_entry binder_debugfs_entries[];

/* Iterate binder_debugfs_entries[]; the table is terminated by a NULL name. */
#define binder_for_each_debugfs_entry(entry)	\
	for ((entry) = binder_debugfs_entries;	\
	     (entry)->name;			\
	     (entry)++)
120
/* Object types tracked in binder_stats.obj_created/obj_deleted. */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_FREEZE,
	BINDER_STAT_COUNT	/* number of tracked types; must remain last */
};
132
/**
 * struct binder_stats - binder command and object statistics
 * @br: per-command counters for BR_* return commands, indexed by _IOC_NR
 * @bc: per-command counters for BC_* commands, indexed by _IOC_NR
 * @obj_created: number of objects created, per binder_stat_types entry
 * @obj_deleted: number of objects deleted, per binder_stat_types entry
 *
 * All counters are atomics and need no additional locking.
 */
struct binder_stats {
	atomic_t br[_IOC_NR(BR_CLEAR_FREEZE_NOTIFICATION_DONE) + 1];
	atomic_t bc[_IOC_NR(BC_FREEZE_NOTIFICATION_DONE) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};
139
/**
 * struct binder_work - work enqueued on a worklist
 * @entry: node enqueued on list
 * @type: type of work to be performed (one of the BINDER_WORK_* values)
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum binder_work_type {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_TRANSACTION_PENDING,
		BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
		BINDER_WORK_FROZEN_BINDER,
		BINDER_WORK_CLEAR_FREEZE_NOTIFICATION,
	} type;
};
164
/**
 * struct binder_error - error to be delivered via a worklist
 * @work: embedded work item so the error can be queued on a worklist
 * @cmd: command code describing the error
 */
struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
169
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * @proc->inner_lock if @proc and by @lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
267
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	/**
	 * @cookie: userspace cookie identifying this death notification
	 */
	binder_uintptr_t cookie;
};
277
/**
 * struct binder_ref_freeze - freeze notification bookkeeping
 * @work: worklist element for freeze notification work
 * @cookie: userspace cookie for this freeze notification
 * @is_frozen: frozen state recorded for the notification
 * @sent: whether a notification has been sent
 * @resend: whether the notification needs to be re-sent
 */
struct binder_ref_freeze {
	struct binder_work work;
	binder_uintptr_t cookie;
	bool is_frozen:1;
	bool sent:1;
	bool resend:1;
};
285
/**
 * struct binder_ref_data - binder_ref counts and ID
 * @debug_id: unique ID for the ref
 * @desc: unique userspace handle for ref
 * @strong: strong ref count (debugging only if not locked)
 * @weak: weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref ID information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
304
/**
 * struct binder_ref - structure to track references on nodes
 * @data: binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry: list entry for node->refs list in target node
 *              (protected by @node->lock)
 * @proc: binder_proc containing ref
 * @node: binder_node of target node. When cleaning up a
 *        ref for deletion in binder_cleanup_ref, a non-NULL
 *        @node indicates the node must be freed
 * @death: pointer to death notification (ref_death) if requested
 *         (protected by @node->lock)
 * @freeze: pointer to freeze notification (ref_freeze) if requested
 *          (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
	struct binder_ref_freeze *freeze;
};
338
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @cred:                 struct cred associated with the `struct file`
 *                        in binder_open()
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @outstanding_txns:     number of transactions to be transmitted before
 *                        processes in freeze_wait are woken up
 *                        (protected by @inner_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @is_frozen:            process is frozen and unable to service
 *                        binder transactions
 *                        (protected by @inner_lock)
 * @sync_recv:            process received sync transactions since last frozen
 *                        bit 0: received sync transaction after being frozen
 *                        bit 1: new pending sync transaction during freezing
 *                        (protected by @inner_lock)
 * @async_recv:           process received async transactions since last frozen
 *                        (protected by @inner_lock)
 * @freeze_wait:          waitqueue of processes waiting for all outstanding
 *                        transactions to be processed
 *                        (protected by @inner_lock)
 * @dmap:                 dbitmap to manage available reference descriptors
 *                        (protected by @outer_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notification
 *                        (protected by @inner_lock)
 * @delivered_freeze:     list of delivered freeze notification
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry:       process-specific binderfs log file
 * @oneway_spam_detection_enabled: process enabled oneway spam detection
 *                        or not
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	const struct cred *cred;
	struct hlist_node deferred_work_node;
	int deferred_work;
	int outstanding_txns;
	bool is_dead;
	bool is_frozen;
	bool sync_recv;
	bool async_recv;
	wait_queue_head_t freeze_wait;
	struct dbitmap dmap;
	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	struct list_head delivered_freeze;
	u32 max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
	bool oneway_spam_detection_enabled;
};
453
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @ee:                   extended error information from this thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper; /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	struct binder_extended_error ee;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};
510
/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry: list entry
 * @file: struct file to be associated with new fd
 * @offset: offset in buffer data to this fixup
 * @target_fd: fd the target will use to install @file
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
	int target_fd;
};
529
/*
 * struct binder_transaction - bookkeeping for one binder transaction.
 *
 * Links the sending thread/process (@from, @from_pid, @from_tid) to the
 * target (@to_proc, @to_thread), carries the payload @buffer and any
 * @fd_fixups, and is queued on worklists via @work. See @lock below for
 * the fields that may be cleared during thread teardown.
 */
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	pid_t from_pid;
	pid_t from_tid;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	ktime_t start_time;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
560
/**
 * struct binder_object - union of flat binder object types
 * @hdr: generic object header
 * @fbo: binder object (nodes and refs)
 * @fdo: file descriptor object
 * @bbo: binder buffer pointer
 * @fdao: file descriptor array
 *
 * Used for type-independent object copies.
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
580
/**
 * binder_add_device() - Add a binder device to binder_devices
 * @device: the new binder device to add to the global list
 */
void binder_add_device(struct binder_device *device);
586
/**
 * binder_remove_device() - Remove a binder device from binder_devices
 * @device: the binder device to remove from the global list
 */
void binder_remove_device(struct binder_device *device);
592
593 #endif /* _LINUX_BINDER_INTERNAL_H */
594