xref: /linux/drivers/android/binder/deferred_close.rs (revision 4f38da1f027ea2c9f01bb71daa7a299c191b6940)
// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

//! Logic for closing files in a deferred manner.
//!
//! It could make sense to have this file in `kernel::fs`, but it was rejected for being too
//! Binder-specific.

use core::mem::MaybeUninit;
use kernel::{
    alloc::{AllocError, Flags},
    bindings,
    prelude::*,
};

/// Helper used for closing file descriptors in a way that is safe even if the file is currently
/// held using `fdget`.
///
/// Additional motivation can be found in commit 80cd795630d6 ("binder: fix use-after-free due to
/// ksys_close() during fdget()") and in the comments on `binder_do_fd_close`.
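///
/// # Examples
///
/// A minimal sketch of the intended use, assuming a hypothetical caller that runs in process
/// context and already has an `fd` it wants to close; the block is illustrative only and is not
/// compiled as a doctest.
///
/// ```ignore
/// // Allocate the helper first, while failure is still cheap to handle.
/// let closer = DeferredFdCloser::new(GFP_KERNEL)?;
/// // This removes `fd` from the current task's fd table right away, but the final `fput` on
/// // the underlying file is deferred until the task returns to userspace.
/// closer.close_fd(fd)?;
/// ```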
pub(crate) struct DeferredFdCloser {
    inner: KBox<DeferredFdCloserInner>,
}

// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
// moving it across threads.
unsafe impl Send for DeferredFdCloser {}
// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
// sharing references to it across threads.
unsafe impl Sync for DeferredFdCloser {}

/// # Invariants
///
/// If the `file` pointer is non-null, then it points at a `struct file` and owns a refcount to
/// that file.
#[repr(C)]
struct DeferredFdCloserInner {
    twork: MaybeUninit<bindings::callback_head>,
    file: *mut bindings::file,
}

impl DeferredFdCloser {
    /// Create a new [`DeferredFdCloser`].
    pub(crate) fn new(flags: Flags) -> Result<Self, AllocError> {
        Ok(Self {
            // INVARIANT: The `file` pointer is null, so the type invariant does not apply.
            inner: KBox::new(
                DeferredFdCloserInner {
                    twork: MaybeUninit::uninit(),
                    file: core::ptr::null_mut(),
                },
                flags,
            )?,
        })
    }

    /// Schedule a task work that closes the file descriptor when this task returns to userspace.
    ///
    /// Fails if this is called from a context where we cannot run work when returning to
    /// userspace. (E.g., from a kthread.)
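    ///
    /// The sketch below shows how a hypothetical caller might handle the two error cases; it is
    /// illustrative only and is not compiled as a doctest.
    ///
    /// ```ignore
    /// match closer.close_fd(fd) {
    ///     Ok(()) => {}
    ///     // Task works cannot be scheduled (e.g. we are on a kthread); the fd table has not
    ///     // been modified, and the caller may fall back to reporting `ESRCH`.
    ///     Err(DeferredFdCloseError::TaskWorkUnavailable) => return Err(ESRCH),
    ///     // The fd was not present in the fd table of the current task.
    ///     Err(DeferredFdCloseError::BadFd) => return Err(EBADF),
    /// }
    /// ```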
    pub(crate) fn close_fd(self, fd: u32) -> Result<(), DeferredFdCloseError> {
        use bindings::task_work_notify_mode_TWA_RESUME as TWA_RESUME;

        // In this method, we schedule the task work before closing the file. This is because
        // scheduling a task work is fallible, and we need to know whether it will fail before we
        // attempt to close the file.

        // Task works are not available on kthreads.
        let current = kernel::current!();

        // Check if this is a kthread.
        // SAFETY: Reading `flags` from a task is always okay.
        if unsafe { ((*current.as_ptr()).flags & bindings::PF_KTHREAD) != 0 } {
            return Err(DeferredFdCloseError::TaskWorkUnavailable);
        }

        // Transfer ownership of the box's allocation to a raw pointer. This disables the
        // destructor, so we must manually convert it back to a `KBox` to drop it.
        //
        // Until we convert it back to a `KBox`, there are no aliasing requirements on this
        // pointer.
        let inner = KBox::into_raw(self.inner);

        // The `callback_head` field is the first field of this `#[repr(C)]` struct, so it is at
        // offset zero and this cast correctly gives us a pointer to the field.
        let callback_head = inner.cast::<bindings::callback_head>();
        // SAFETY: This pointer offset operation does not go out-of-bounds.
        let file_field = unsafe { core::ptr::addr_of_mut!((*inner).file) };

        let current = current.as_ptr();

        // SAFETY: This function currently has exclusive access to the `DeferredFdCloserInner`, so
        // it is okay for us to perform unsynchronized writes to its `callback_head` field.
        unsafe { bindings::init_task_work(callback_head, Some(Self::do_close_fd)) };

        // SAFETY: This inserts the `DeferredFdCloserInner` into the task workqueue for the current
        // task. If this operation is successful, then this transfers exclusive ownership of the
        // `callback_head` field to the C side until it calls `do_close_fd`, and we don't touch or
        // invalidate the field during that time.
        //
        // When the C side calls `do_close_fd`, the safety requirements of that method are
        // satisfied because when a task work is executed, the callback is given ownership of the
        // pointer.
        //
        // The file pointer is currently null. If it is changed to be non-null before `do_close_fd`
        // is called, then that change happens due to the write at the end of this function, and
        // that write has a safety comment that explains why the refcount can be dropped when
        // `do_close_fd` runs.
        let res = unsafe { bindings::task_work_add(current, callback_head, TWA_RESUME) };

        if res != 0 {
            // SAFETY: Scheduling the task work failed, so we still have ownership of the box and
            // may destroy it.
            unsafe { drop(KBox::from_raw(inner)) };

            return Err(DeferredFdCloseError::TaskWorkUnavailable);
        }

        // This removes the fd from the fd table in `current`. The file is not fully closed until
        // `filp_close` is called. We are given ownership of one refcount to the file.
        //
        // SAFETY: This is safe no matter what `fd` is. If the `fd` is valid (that is, if the
        // pointer is non-null), then we call `filp_close` on the returned pointer as required by
        // `file_close_fd`.
        let file = unsafe { bindings::file_close_fd(fd) };
        if file.is_null() {
            // We don't clean up the task work since that might be expensive if the task work queue
            // is long. Just let it execute and clean up after itself.
            return Err(DeferredFdCloseError::BadFd);
        }

        // Acquire a second refcount to the file.
        //
        // SAFETY: The `file` pointer points at a file with a non-zero refcount.
        unsafe { bindings::get_file(file) };

        // This method closes the fd, consuming one of our two refcounts. There could be active
        // light refcounts created from that fd, so we must ensure that the file has a positive
        // refcount for the duration of those active light refcounts. We do that by holding on to
        // the second refcount until the current task returns to userspace.
        //
        // SAFETY: The `file` pointer is valid. Passing `current->files` as the file table to close
        // it in is correct, since we just got the `fd` from `file_close_fd` which also uses
        // `current->files`.
        //
        // Note: fl_owner_t is currently a void pointer.
        unsafe { bindings::filp_close(file, (*current).files as bindings::fl_owner_t) };

        // We update the file pointer that the task work is supposed to fput. This transfers
        // ownership of our last refcount.
        //
        // INVARIANT: This changes the `file` field of a `DeferredFdCloserInner` from null to
        // non-null. This doesn't break the type invariant for `DeferredFdCloserInner` because we
        // still own a refcount to the file, so we can pass ownership of that refcount to the
        // `DeferredFdCloserInner`.
        //
        // When `do_close_fd` runs, it must be safe for it to `fput` the refcount. This is the
        // case because all light refcounts associated with the fd we closed above must have been
        // dropped by the time `do_close_fd` runs, since light refcounts must be dropped before
        // returning to userspace.
        //
        // SAFETY: Task works are executed on the current thread right before we return to
        // userspace, so this write is guaranteed to happen before `do_close_fd` is called, which
        // means that a race is not possible here.
        unsafe { *file_field = file };

        Ok(())
    }

    /// # Safety
    ///
    /// The provided pointer must point at the `twork` field of a `DeferredFdCloserInner` stored in
    /// a `KBox`, and the caller must pass exclusive ownership of that `KBox`. Furthermore, if the
    /// file pointer is non-null, then it must be okay to release the refcount by calling `fput`.
    unsafe extern "C" fn do_close_fd(inner: *mut bindings::callback_head) {
        // SAFETY: The caller just passed us ownership of this box.
        let inner = unsafe { KBox::from_raw(inner.cast::<DeferredFdCloserInner>()) };
        if !inner.file.is_null() {
            // SAFETY: By the type invariants, we own a refcount to this file, and the caller
            // guarantees that dropping the refcount now is okay.
            unsafe { bindings::fput(inner.file) };
        }
        // The allocation is freed when `inner` goes out of scope.
    }
}

/// Represents a failure to close an fd in a deferred manner.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(crate) enum DeferredFdCloseError {
    /// Closing the fd failed because we were unable to schedule a task work.
    TaskWorkUnavailable,
    /// Closing the fd failed because the fd does not exist.
    BadFd,
}

impl From<DeferredFdCloseError> for Error {
    fn from(err: DeferredFdCloseError) -> Error {
        match err {
            DeferredFdCloseError::TaskWorkUnavailable => ESRCH,
            DeferredFdCloseError::BadFd => EBADF,
        }
    }
}