/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_BINDER_INTERNAL_H
#define _LINUX_BINDER_INTERNAL_H

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/uidgid.h>
#include <uapi/linux/android/binderfs.h>
#include "binder_alloc.h"
#include "dbitmap.h"

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;
	kuid_t binder_context_mgr_uid;
	const char *name;
};

/**
 * struct binder_device - information about a binder device node
 * @hlist: list of binder devices
 * @miscdev: information about a binder character device node
 * @context: binder context information
 * @binderfs_inode: This is the inode of the root dentry of the super block
 *                  belonging to a binderfs mount.
 * @ref: reference count for the device
 */
struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
	struct inode *binderfs_inode;
	refcount_t ref;
};

/**
 * struct binderfs_mount_opts - mount options for binderfs
 * @max: maximum number of allocatable binderfs binder devices
 * @stats_mode: enable binder stats in binderfs.
 */
struct binderfs_mount_opts {
	int max;
	int stats_mode;
};

/**
 * struct binderfs_info - information about a binderfs mount
 * @ipc_ns: The ipc namespace the binderfs mount belongs to.
 * @control_dentry: This records the dentry of this binderfs mount
 *                  binder-control device.
 * @root_uid: uid that needs to be used when a new binder device is
 *            created.
 * @root_gid: gid that needs to be used when a new binder device is
 *            created.
 * @mount_opts: The mount options in use.
 * @device_count: The current number of allocated binder devices.
 * @proc_log_dir: Pointer to the directory dentry containing process-specific
 *                logs.
 */
struct binderfs_info {
	struct ipc_namespace *ipc_ns;
	struct dentry *control_dentry;
	kuid_t root_uid;
	kgid_t root_gid;
	struct binderfs_mount_opts mount_opts;
	int device_count;
	struct dentry *proc_log_dir;
};

extern const struct file_operations binder_fops;

extern char *binder_devices_param;

#ifdef CONFIG_ANDROID_BINDERFS
extern bool is_binderfs_device(const struct inode *inode);
extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
					   const struct file_operations *fops,
					   void *data);
extern void binderfs_remove_file(struct dentry *dentry);
#else
static inline bool is_binderfs_device(const struct inode *inode)
{
	return false;
}
static inline struct dentry *binderfs_create_file(struct dentry *dir,
					const char *name,
					const struct file_operations *fops,
					void *data)
{
	return NULL;
}
static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif

#ifdef CONFIG_ANDROID_BINDERFS
extern int __init init_binderfs(void);
#else
static inline int __init init_binderfs(void)
{
	return 0;
}
#endif

struct binder_debugfs_entry {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	void *data;
};

extern const struct binder_debugfs_entry binder_debugfs_entries[];

#define binder_for_each_debugfs_entry(entry)	\
	for ((entry) = binder_debugfs_entries;	\
	     (entry)->name;			\
	     (entry)++)
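
/*
 * Usage sketch (illustrative only, not part of this header's API): the table
 * is NULL-terminated on @name, so a caller can populate a debugfs directory
 * by walking it. The "binder_debugfs_root" parent dentry below is an
 * assumption made for the example.
 *
 *	const struct binder_debugfs_entry *db_entry;
 *
 *	binder_for_each_debugfs_entry(db_entry)
 *		debugfs_create_file(db_entry->name, db_entry->mode,
 *				    binder_debugfs_root,
 *				    db_entry->data, db_entry->fops);
 */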

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_FREEZE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_CLEAR_FREEZE_NOTIFICATION_DONE) + 1];
	atomic_t bc[_IOC_NR(BC_FREEZE_NOTIFICATION_DONE) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};
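
/*
 * Accounting sketch (illustrative helper, not the driver's own): the counters
 * are plain atomics indexed by command number or object type, so an object
 * can be accounted for without taking any binder lock, e.g.:
 *
 *	static void binder_stats_created_example(struct binder_stats *stats,
 *						 enum binder_stat_types type)
 *	{
 *		atomic_inc(&stats->obj_created[type]);
 *	}
 */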

/**
 * struct binder_work - work enqueued on a worklist
 * @entry: node enqueued on list
 * @type: type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum binder_work_type {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_TRANSACTION_PENDING,
		BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
		BINDER_WORK_FROZEN_BINDER,
		BINDER_WORK_CLEAR_FREEZE_NOTIFICATION,
	} type;
};
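
/*
 * struct binder_work is always embedded in a larger object (a transaction,
 * node, error, death or freeze record). A sketch of the usual pattern for
 * recovering the container from a dequeued item, assuming a hypothetical
 * local "w" just taken off a worklist:
 *
 *	struct binder_transaction *t;
 *
 *	if (w->type == BINDER_WORK_TRANSACTION)
 *		t = container_of(w, struct binder_transaction, work);
 */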

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id: unique ID for debugging
 *            (invariant after initialized)
 * @lock: lock for node fields
 * @work: worklist element for node work
 *            (protected by @proc->inner_lock)
 * @rb_node: element for proc->nodes tree
 *            (protected by @proc->inner_lock)
 * @dead_node: element for binder_dead_nodes list
 *            (protected by binder_dead_nodes_lock)
 * @proc: binder_proc that owns this node
 *            (invariant after initialized)
 * @refs: list of references on this node
 *            (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *            initiating a transaction
 *            (protected by @proc->inner_lock if @proc
 *            and by @lock)
 * @local_weak_refs: weak user refs from local process
 *            (protected by @proc->inner_lock if @proc
 *            and by @lock)
 * @local_strong_refs: strong user refs from local process
 *            (protected by @proc->inner_lock if @proc
 *            and by @lock)
 * @tmp_refs: temporary kernel refs
 *            (protected by @proc->inner_lock while @proc
 *            is valid, and by binder_dead_nodes_lock
 *            if @proc is NULL. During inc/dec and node release
 *            it is also protected by @lock to provide safety
 *            as the node dies and @proc becomes NULL)
 * @ptr: userspace pointer for node
 *            (invariant, no lock needed)
 * @cookie: userspace cookie for node
 *            (invariant, no lock needed)
 * @has_strong_ref: userspace notified of strong ref
 *            (protected by @proc->inner_lock if @proc
 *            and by @lock)
 * @pending_strong_ref: userspace has acked notification of strong ref
 *            (protected by @proc->inner_lock if @proc
 *            and by @lock)
 * @has_weak_ref: userspace notified of weak ref
 *            (protected by @proc->inner_lock if @proc
 *            and by @lock)
 * @pending_weak_ref: userspace has acked notification of weak ref
 *            (protected by @proc->inner_lock if @proc
 *            and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *            (protected by @lock)
 * @accept_fds: file descriptor operations supported for node
 *            (invariant after initialized)
 * @min_priority: minimum scheduling priority
 *            (invariant after initialized)
 * @txn_security_ctx: require sender's security context
 *            (invariant after initialized)
 * @async_todo: list of async work items
 *            (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
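
/*
 * Locking sketch (illustrative only; the driver proper wraps these locks in
 * its own lock/unlock helpers): walking @refs needs only the node's @lock,
 * while fields shared with the owning process also need @proc->inner_lock.
 * "node" below is an assumed local variable.
 *
 *	struct binder_ref *ref;
 *
 *	spin_lock(&node->lock);
 *	hlist_for_each_entry(ref, &node->refs, node_entry)
 *		pr_info("ref %d -> node %d\n", ref->data.debug_id,
 *			node->debug_id);
 *	spin_unlock(&node->lock);
 */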

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

struct binder_ref_freeze {
	struct binder_work work;
	binder_uintptr_t cookie;
	bool is_frozen:1;
	bool sent:1;
	bool resend:1;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id: unique ID for the ref
 * @desc: unique userspace handle for ref
 * @strong: strong ref count (debugging only if not locked)
 * @weak: weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data: binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry: list entry for node->refs list in target node
 *            (protected by @node->lock)
 * @proc: binder_proc containing ref
 * @node: binder_node of target node. When cleaning up a
 *        ref for deletion in binder_cleanup_ref, a non-NULL
 *        @node indicates the node must be freed
 * @death: pointer to death notification (ref_death) if requested
 *            (protected by @node->lock)
 * @freeze: pointer to freeze notification (ref_freeze) if requested
 *            (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
	struct binder_ref_freeze *freeze;
};
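
/*
 * Lookup sketch (illustrative only): the "desc + proc => ref" case above is a
 * plain rbtree walk over proc->refs_by_desc, ordered by @data.desc. "desc" and
 * "proc" below are assumed locals, and the appropriate proc lock is assumed to
 * be held.
 *
 *	struct rb_node *n = proc->refs_by_desc.rb_node;
 *	struct binder_ref *ref;
 *
 *	while (n) {
 *		ref = rb_entry(n, struct binder_ref, rb_node_desc);
 *		if (desc < ref->data.desc)
 *			n = n->rb_left;
 *		else if (desc > ref->data.desc)
 *			n = n->rb_right;
 *		else
 *			return ref;
 *	}
 *	return NULL;
 */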

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node: element for binder_procs list
 * @threads: rbtree of binder_threads in this proc
 *            (protected by @inner_lock)
 * @nodes: rbtree of binder nodes associated with
 *            this proc ordered by node->ptr
 *            (protected by @inner_lock)
 * @refs_by_desc: rbtree of refs ordered by ref->desc
 *            (protected by @outer_lock)
 * @refs_by_node: rbtree of refs ordered by ref->node
 *            (protected by @outer_lock)
 * @waiting_threads: threads currently waiting for proc work
 *            (protected by @inner_lock)
 * @pid: PID of group_leader of process
 *            (invariant after initialized)
 * @tsk: task_struct for group_leader of process
 *            (invariant after initialized)
 * @cred: struct cred associated with the `struct file`
 *            in binder_open()
 *            (invariant after initialized)
 * @deferred_work_node: element for binder_deferred_list
 *            (protected by binder_deferred_lock)
 * @deferred_work: bitmap of deferred work to perform
 *            (protected by binder_deferred_lock)
 * @outstanding_txns: number of transactions to be transmitted before
 *            processes in freeze_wait are woken up
 *            (protected by @inner_lock)
 * @is_dead: process is dead and awaiting free
 *            when outstanding transactions are cleaned up
 *            (protected by @inner_lock)
 * @is_frozen: process is frozen and unable to service
 *            binder transactions
 *            (protected by @inner_lock)
 * @sync_recv: process received sync transactions since last frozen
 *            bit 0: received sync transaction after being frozen
 *            bit 1: new pending sync transaction during freezing
 *            (protected by @inner_lock)
 * @async_recv: process received async transactions since last frozen
 *            (protected by @inner_lock)
 * @freeze_wait: waitqueue of processes waiting for all outstanding
 *            transactions to be processed
 *            (protected by @inner_lock)
 * @dmap: dbitmap to manage available reference descriptors
 *            (protected by @outer_lock)
 * @todo: list of work for this process
 *            (protected by @inner_lock)
 * @stats: per-process binder statistics
 *            (atomics, no lock needed)
 * @delivered_death: list of delivered death notifications
 *            (protected by @inner_lock)
 * @delivered_freeze: list of delivered freeze notifications
 *            (protected by @inner_lock)
 * @max_threads: cap on number of binder threads
 *            (protected by @inner_lock)
 * @requested_threads: number of binder threads requested but not
 *            yet started. In current implementation, can
 *            only be 0 or 1.
 *            (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *            (protected by @inner_lock)
 * @tmp_ref: temporary reference to indicate proc is in use
 *            (protected by @inner_lock)
 * @default_priority: default scheduler priority
 *            (invariant after initialized)
 * @debugfs_entry: debugfs node
 * @alloc: binder allocator bookkeeping
 * @context: binder_context for this proc
 *            (invariant after initialized)
 * @inner_lock: can nest under outer_lock and/or node lock
 * @outer_lock: no nesting under inner or node lock
 *            Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry: process-specific binderfs log file
 * @oneway_spam_detection_enabled: process enabled oneway spam detection
 *            or not
 *
 * Bookkeeping structure for binder processes.
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	const struct cred *cred;
	struct hlist_node deferred_work_node;
	int deferred_work;
	int outstanding_txns;
	bool is_dead;
	bool is_frozen;
	bool sync_recv;
	bool async_recv;
	wait_queue_head_t freeze_wait;
	struct dbitmap dmap;
	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	struct list_head delivered_freeze;
	u32 max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
	bool oneway_spam_detection_enabled;
};
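
/*
 * Lock ordering sketch (illustrative only; the driver uses its own inner/outer
 * lock helpers, and "proc"/"node" below are assumed locals): when more than
 * one of these locks is needed they must be taken in the documented order,
 * outer first, then the node lock, then the inner lock:
 *
 *	spin_lock(&proc->outer_lock);
 *	spin_lock(&node->lock);
 *	spin_lock(&proc->inner_lock);
 *	...
 *	spin_unlock(&proc->inner_lock);
 *	spin_unlock(&node->lock);
 *	spin_unlock(&proc->outer_lock);
 */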

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc: binder process for this thread
 *            (invariant after initialization)
 * @rb_node: element for proc->threads rbtree
 *            (protected by @proc->inner_lock)
 * @waiting_thread_node: element for @proc->waiting_threads list
 *            (protected by @proc->inner_lock)
 * @pid: PID for this thread
 *            (invariant after initialization)
 * @looper: bitmap of looping state
 *            (only accessed by this thread)
 * @looper_need_return: looping thread needs to exit driver
 *            (no lock needed)
 * @transaction_stack: stack of in-progress transactions for this thread
 *            (protected by @proc->inner_lock)
 * @todo: list of work to do for this thread
 *            (protected by @proc->inner_lock)
 * @process_todo: whether work in @todo should be processed
 *            (protected by @proc->inner_lock)
 * @return_error: transaction errors reported by this thread
 *            (only accessed by this thread)
 * @reply_error: transaction errors reported by target thread
 *            (protected by @proc->inner_lock)
 * @ee: extended error information from this thread
 *            (protected by @proc->inner_lock)
 * @wait: wait queue for thread work
 * @stats: per-thread statistics
 *            (atomics, no lock needed)
 * @tmp_ref: temporary reference to indicate thread is in use
 *            (atomic since @proc->inner_lock cannot
 *            always be acquired)
 * @is_dead: thread is dead and awaiting free
 *            when outstanding transactions are cleaned up
 *            (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	struct binder_extended_error ee;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry: list entry
 * @file: struct file to be associated with new fd
 * @offset: offset in buffer data to this fixup
 * @target_fd: fd to use by the target to install @file
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
	int target_fd;
};
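
/*
 * Processing sketch (illustrative only, assuming a hypothetical transaction
 * "t" whose fixups are consumed in the target process context): each fixup is
 * turned into a file descriptor in the receiver and then dropped from the
 * list:
 *
 *	struct binder_txn_fd_fixup *fixup, *tmp;
 *
 *	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
 *		fd_install(fixup->target_fd, fixup->file);
 *		list_del(&fixup->fixup_entry);
 *		kfree(fixup);
 *	}
 */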

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	pid_t from_pid;
	pid_t from_tid;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	ktime_t start_time;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
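
/*
 * Access sketch (illustrative only, assuming a local "t"): because @from,
 * @to_proc and @to_thread can be cleared during teardown, they are only safe
 * to dereference while holding @lock:
 *
 *	spin_lock(&t->lock);
 *	if (t->to_proc)
 *		pr_info("transaction %d -> pid %d\n", t->debug_id,
 *			t->to_proc->pid);
 *	spin_unlock(&t->lock);
 */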

/**
 * struct binder_object - union of flat binder object types
 * @hdr: generic object header
 * @fbo: binder object (nodes and refs)
 * @fdo: file descriptor object
 * @bbo: binder buffer pointer
 * @fdao: file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
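
/*
 * Dispatch sketch (illustrative only, assuming a local "struct binder_object
 * obj" already copied in from a transaction buffer): @hdr.type selects which
 * union member is valid, e.g. to compute the object's size:
 *
 *	switch (obj.hdr.type) {
 *	case BINDER_TYPE_BINDER:
 *	case BINDER_TYPE_WEAK_BINDER:
 *	case BINDER_TYPE_HANDLE:
 *	case BINDER_TYPE_WEAK_HANDLE:
 *		return sizeof(obj.fbo);
 *	case BINDER_TYPE_FD:
 *		return sizeof(obj.fdo);
 *	case BINDER_TYPE_PTR:
 *		return sizeof(obj.bbo);
 *	case BINDER_TYPE_FDA:
 *		return sizeof(obj.fdao);
 *	default:
 *		return 0;
 *	}
 */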

/**
 * binder_add_device() - Add a binder device to binder_devices
 * @device: the new binder device to add to the global list
 */
void binder_add_device(struct binder_device *device);

/**
 * binder_remove_device() - Remove a binder device from binder_devices
 * @device: the binder device to remove from the global list
 */
void binder_remove_device(struct binder_device *device);

#endif /* _LINUX_BINDER_INTERNAL_H */