/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_BINDER_INTERNAL_H
#define _LINUX_BINDER_INTERNAL_H

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/uidgid.h>
#include <uapi/linux/android/binderfs.h>
#include "binder_alloc.h"
#include "dbitmap.h"

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;
	kuid_t binder_context_mgr_uid;
	const char *name;
};

/**
 * struct binder_device - information about a binder device node
 * @hlist:          list of binder devices
 * @miscdev:        information about a binder character device node
 * @context:        binder context information
 * @binderfs_inode: This is the inode of the root dentry of the super block
 *                  belonging to a binderfs mount.
 * @ref:            reference count used to manage the device's lifetime
 */
struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
	struct inode *binderfs_inode;
	refcount_t ref;
};

/**
 * struct binderfs_mount_opts - mount options for binderfs
 * @max:        maximum number of allocatable binderfs binder devices
 * @stats_mode: enable binder stats in binderfs.
 */
struct binderfs_mount_opts {
	int max;
	int stats_mode;
};
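
/*
 * Illustrative note (editor's addition, not driver code): these fields back
 * the "max=" and "stats=" binderfs mount options, so a binderfs instance is
 * typically mounted along the lines of:
 *
 *	mount -t binder -o max=4096,stats=global binder /dev/binderfs
 */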

/**
 * struct binderfs_info - information about a binderfs mount
 * @ipc_ns:         The ipc namespace the binderfs mount belongs to.
 * @control_dentry: This records the dentry of this binderfs mount
 *                  binder-control device.
 * @root_uid:       uid that needs to be used when a new binder device is
 *                  created.
 * @root_gid:       gid that needs to be used when a new binder device is
 *                  created.
 * @mount_opts:     The mount options in use.
 * @device_count:   The current number of allocated binder devices.
 * @proc_log_dir:   Pointer to the directory dentry containing process-specific
 *                  logs.
 */
struct binderfs_info {
	struct ipc_namespace *ipc_ns;
	struct dentry *control_dentry;
	kuid_t root_uid;
	kgid_t root_gid;
	struct binderfs_mount_opts mount_opts;
	int device_count;
	struct dentry *proc_log_dir;
};

extern const struct file_operations binder_fops;

extern char *binder_devices_param;

#ifdef CONFIG_ANDROID_BINDERFS
extern bool is_binderfs_device(const struct inode *inode);
extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
					   const struct file_operations *fops,
					   void *data);
extern void binderfs_remove_file(struct dentry *dentry);
#else
static inline bool is_binderfs_device(const struct inode *inode)
{
	return false;
}
static inline struct dentry *binderfs_create_file(struct dentry *dir,
						  const char *name,
						  const struct file_operations *fops,
						  void *data)
{
	return NULL;
}
static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif

#ifdef CONFIG_ANDROID_BINDERFS
extern int __init init_binderfs(void);
#else
static inline int __init init_binderfs(void)
{
	return 0;
}
#endif

struct binder_debugfs_entry {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	void *data;
};

extern const struct binder_debugfs_entry binder_debugfs_entries[];

#define binder_for_each_debugfs_entry(entry)	\
	for ((entry) = binder_debugfs_entries;	\
	     (entry)->name;			\
	     (entry)++)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_FREEZE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_CLEAR_FREEZE_NOTIFICATION_DONE) + 1];
	atomic_t bc[_IOC_NR(BC_FREEZE_NOTIFICATION_DONE) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};
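
/*
 * Illustrative sketch (editor's addition, not driver code): @bc and @br are
 * indexed by the _IOC_NR() of the BC_ and BR_ command codes, so accounting
 * for a processed command amounts to something like:
 *
 *	atomic_inc(&stats->bc[_IOC_NR(BC_TRANSACTION)]);
 *	atomic_inc(&stats->br[_IOC_NR(BR_REPLY)]);
 */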

/**
 * struct binder_work - work enqueued on a worklist
 * @entry: node enqueued on list
 * @type:  type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum binder_work_type {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_TRANSACTION_PENDING,
		BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
		BINDER_WORK_FROZEN_BINDER,
		BINDER_WORK_CLEAR_FREEZE_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
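
/*
 * Illustrative sketch (editor's addition, not driver code): work items are
 * embedded in their containing objects, so a consumer that dequeues a
 * struct binder_work recovers the container based on @type, e.g.:
 *
 *	struct binder_work *w = list_first_entry(list, struct binder_work, entry);
 *
 *	list_del_init(&w->entry);
 *	switch (w->type) {
 *	case BINDER_WORK_RETURN_ERROR: {
 *		struct binder_error *e = container_of(w, struct binder_error, work);
 *
 *		... handle e->cmd ...
 *		break;
 *	}
 *	...
 *	}
 */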

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

struct binder_ref_freeze {
	struct binder_work work;
	binder_uintptr_t cookie;
	bool is_frozen:1;
	bool sent:1;
	bool resend:1;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id: unique ID for the ref
 * @desc:     unique userspace handle for ref
 * @strong:   strong ref count (debugging only if not locked)
 * @weak:     weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 * @freeze:       pointer to freeze notification (ref_freeze) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
	struct binder_ref_freeze *freeze;
};
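
/*
 * Illustrative sketch (editor's addition, not driver code): the
 * "desc + proc => ref" lookup noted above walks @proc->refs_by_desc and is
 * roughly (with @proc->outer_lock held, per the locking notes above):
 *
 *	struct rb_node *n = proc->refs_by_desc.rb_node;
 *
 *	while (n) {
 *		struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc);
 *
 *		if (desc < ref->data.desc)
 *			n = n->rb_left;
 *		else if (desc > ref->data.desc)
 *			n = n->rb_right;
 *		else
 *			return ref;
 *	}
 *	return NULL;
 */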

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @cred:                 struct cred associated with the `struct file`
 *                        in binder_open()
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @outstanding_txns:     number of transactions to be transmitted before
 *                        processes in freeze_wait are woken up
 *                        (protected by @inner_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @is_frozen:            process is frozen and unable to service
 *                        binder transactions
 *                        (protected by @inner_lock)
 * @sync_recv:            process received sync transactions since last frozen
 *                        bit 0: received sync transaction after being frozen
 *                        bit 1: new pending sync transaction during freezing
 *                        (protected by @inner_lock)
 * @async_recv:           process received async transactions since last frozen
 *                        (protected by @inner_lock)
 * @freeze_wait:          waitqueue of processes waiting for all outstanding
 *                        transactions to be processed
 *                        (protected by @inner_lock)
 * @dmap:                 dbitmap to manage available reference descriptors
 *                        (protected by @outer_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @delivered_freeze:     list of delivered freeze notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry:       process-specific binderfs log file
 * @oneway_spam_detection_enabled: process enabled oneway spam detection
 *                        or not
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	const struct cred *cred;
	struct hlist_node deferred_work_node;
	int deferred_work;
	int outstanding_txns;
	bool is_dead;
	bool is_frozen;
	bool sync_recv;
	bool async_recv;
	wait_queue_head_t freeze_wait;
	struct dbitmap dmap;
	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	struct list_head delivered_freeze;
	u32 max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
	bool oneway_spam_detection_enabled;
};
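
/*
 * Illustrative sketch (editor's addition, not driver code): per the lock
 * ordering documented above (1) outer, 2) node, 3) inner), code that needs
 * more than one of these locks nests them in that order, e.g.:
 *
 *	spin_lock(&proc->outer_lock);
 *	spin_lock(&node->lock);
 *	spin_lock(&proc->inner_lock);
 *	...
 *	spin_unlock(&proc->inner_lock);
 *	spin_unlock(&node->lock);
 *	spin_unlock(&proc->outer_lock);
 */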

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @ee:                   extended error information from this thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	struct binder_extended_error ee;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry: list entry
 * @file:        struct file to be associated with new fd
 * @offset:      offset in buffer data to this fixup
 * @target_fd:   fd to use by the target to install @file
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
	int target_fd;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	pid_t from_pid;
	pid_t from_tid;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	ktime_t start_time;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * struct binder_object - union of flat binder object types
 * @hdr:  generic object header
 * @fbo:  binder object (nodes and refs)
 * @fdo:  file descriptor object
 * @bbo:  binder buffer pointer
 * @fdao: file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};

/**
 * binder_add_device - add a binder device to binder_devices
 * @device: the new binder device to add to the global list
 *
 * Not reentrant as the list is not protected by any locks
 */
void binder_add_device(struct binder_device *device);

#endif /* _LINUX_BINDER_INTERNAL_H */