xref: /linux/rust/kernel/block/mq/operations.rs (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
// SPDX-License-Identifier: GPL-2.0

//! This module provides an interface for blk-mq drivers to implement.
//!
//! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)

use crate::{
    bindings,
    block::mq::request::RequestDataWrapper,
    block::mq::Request,
    error::{from_result, Result},
    types::ARef,
};
use core::{marker::PhantomData, sync::atomic::AtomicU64, sync::atomic::Ordering};

/// Implement this trait to interface a block device driver with blk-mq.
///
/// To implement a block device driver, implement this trait as described in the
/// [module level documentation]. The kernel will call the functions defined in
/// this trait to interface with a block device driver. Note: there is no need
/// for an `exit_request()` implementation, because the `drop` implementation of
/// the [`Request`] type will be invoked automatically by the C/Rust glue logic.
///
/// [module level documentation]: kernel::block::mq
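///
/// # Examples
///
/// A minimal sketch of an implementation, assuming a hypothetical driver type
/// `MyBlkDevice` that completes every request immediately (a real driver would
/// perform actual I/O before ending the request):
///
/// ```
/// use kernel::block::mq::{Operations, Request};
/// use kernel::prelude::*;
/// use kernel::types::ARef;
///
/// struct MyBlkDevice;
///
/// #[vtable]
/// impl Operations for MyBlkDevice {
///     fn queue_rq(rq: ARef<Request<Self>>, _is_last: bool) -> Result {
///         // A real driver would perform the I/O described by `rq` here. Once
///         // the request is done, hand it back to the block layer.
///         Request::end_ok(rq).map_err(|_rq| kernel::error::code::EIO)?;
///         Ok(())
///     }
///
///     fn commit_rqs() {}
/// }
/// ```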
#[macros::vtable]
pub trait Operations: Sized {
    /// Called by the kernel to queue a request with the driver. If `is_last` is
    /// `false`, the driver is allowed to defer committing the request.
    fn queue_rq(rq: ARef<Request<Self>>, is_last: bool) -> Result;

    /// Called by the kernel to indicate that queued requests should be submitted.
    fn commit_rqs();

    /// Called by the kernel to poll the device for completed requests. Only
    /// used for poll queues.
    fn poll() -> bool {
        crate::build_error(crate::error::VTABLE_DEFAULT_ERROR)
    }
}

/// A vtable for blk-mq to interact with a block device driver.
///
/// A `bindings::blk_mq_ops` vtable is constructed from pointers to the `extern
/// "C"` functions of this struct, exposed through `OperationsVTable::VTABLE`.
///
/// For general documentation of these methods, see the kernel source
/// documentation related to `struct blk_mq_ops` in
/// [`include/linux/blk-mq.h`].
///
/// [`include/linux/blk-mq.h`]: srctree/include/linux/blk-mq.h
pub(crate) struct OperationsVTable<T: Operations>(PhantomData<T>);

impl<T: Operations> OperationsVTable<T> {
    /// This function is called by the C kernel. A pointer to this function is
    /// installed in the `blk_mq_ops` vtable for the driver.
    ///
    /// # Safety
    ///
    /// - The caller of this function must ensure that the pointee of `bd` is
    ///   valid for reads for the duration of this function.
    /// - This function must be called for an initialized and live `hctx`. That
    ///   is, `Self::init_hctx_callback` was called and
    ///   `Self::exit_hctx_callback()` was not yet called.
    /// - `(*bd).rq` must point to an initialized and live `bindings::request`.
    ///   That is, `Self::init_request_callback` was called but
    ///   `Self::exit_request_callback` was not yet called for the request.
    /// - `(*bd).rq` must be owned by the driver. That is, the block layer must
    ///   promise to not access the request until the driver calls
    ///   `bindings::blk_mq_end_request` for the request.
    unsafe extern "C" fn queue_rq_callback(
        _hctx: *mut bindings::blk_mq_hw_ctx,
        bd: *const bindings::blk_mq_queue_data,
    ) -> bindings::blk_status_t {
        // SAFETY: `bd.rq` is valid as required by the safety requirement for
        // this function.
        let request = unsafe { &*(*bd).rq.cast::<Request<T>>() };

        // One refcount for the ARef, one for being in flight.
        request.wrapper_ref().refcount().store(2, Ordering::Relaxed);

        // SAFETY:
        //  - We own a refcount that we took above. We pass that to `ARef`.
        //  - By the safety requirements of this function, `request` is a valid
        //    `struct request` and the private data is properly initialized.
        //  - `rq` will be alive until `blk_mq_end_request` is called and is
        //    reference counted by `ARef` until then.
        let rq = unsafe { Request::aref_from_raw((*bd).rq) };

        // SAFETY: We have exclusive access and we just set the refcount above.
        unsafe { Request::start_unchecked(&rq) };

        let ret = T::queue_rq(
            rq,
            // SAFETY: `bd` is valid as required by the safety requirement for
            // this function.
            unsafe { (*bd).last },
        );

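        // Translate the driver's `Result` into the `blk_status_t` value that
        // the C blk-mq layer expects from `queue_rq`.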
        if let Err(e) = ret {
            e.to_blk_status()
        } else {
            bindings::BLK_STS_OK as _
        }
    }

    /// This function is called by the C kernel. A pointer to this function is
    /// installed in the `blk_mq_ops` vtable for the driver.
    ///
    /// # Safety
    ///
    /// This function may only be called by blk-mq C infrastructure.
    unsafe extern "C" fn commit_rqs_callback(_hctx: *mut bindings::blk_mq_hw_ctx) {
        T::commit_rqs()
    }

    /// This function is called by the C kernel. It is not currently
    /// implemented, and there is no way to exercise this code path.
    ///
    /// # Safety
    ///
    /// This function may only be called by blk-mq C infrastructure.
    unsafe extern "C" fn complete_callback(_rq: *mut bindings::request) {}

    /// This function is called by the C kernel. A pointer to this function is
    /// installed in the `blk_mq_ops` vtable for the driver.
    ///
    /// # Safety
    ///
    /// This function may only be called by blk-mq C infrastructure.
    unsafe extern "C" fn poll_callback(
        _hctx: *mut bindings::blk_mq_hw_ctx,
        _iob: *mut bindings::io_comp_batch,
    ) -> core::ffi::c_int {
        T::poll().into()
    }

    /// This function is called by the C kernel. A pointer to this function is
    /// installed in the `blk_mq_ops` vtable for the driver.
    ///
    /// # Safety
    ///
    /// This function may only be called by blk-mq C infrastructure. This
    /// function may only be called once before `exit_hctx_callback` is called
    /// for the same context.
    unsafe extern "C" fn init_hctx_callback(
        _hctx: *mut bindings::blk_mq_hw_ctx,
        _tagset_data: *mut core::ffi::c_void,
        _hctx_idx: core::ffi::c_uint,
    ) -> core::ffi::c_int {
        from_result(|| Ok(0))
    }

    /// This function is called by the C kernel. A pointer to this function is
    /// installed in the `blk_mq_ops` vtable for the driver.
    ///
    /// # Safety
    ///
    /// This function may only be called by blk-mq C infrastructure.
    unsafe extern "C" fn exit_hctx_callback(
        _hctx: *mut bindings::blk_mq_hw_ctx,
        _hctx_idx: core::ffi::c_uint,
    ) {
    }

    /// This function is called by the C kernel. A pointer to this function is
    /// installed in the `blk_mq_ops` vtable for the driver.
    ///
    /// # Safety
    ///
    /// - This function may only be called by blk-mq C infrastructure.
    /// - `_set` must point to an initialized `TagSet<T>`.
    /// - `rq` must point to an initialized `bindings::request`.
    /// - The allocation pointed to by `rq` must be at the size of `Request`
    ///   plus the size of `RequestDataWrapper`.
    unsafe extern "C" fn init_request_callback(
        _set: *mut bindings::blk_mq_tag_set,
        rq: *mut bindings::request,
        _hctx_idx: core::ffi::c_uint,
        _numa_node: core::ffi::c_uint,
    ) -> core::ffi::c_int {
        from_result(|| {
            // SAFETY: By the safety requirements of this function, `rq` points
            // to a valid allocation.
            let pdu = unsafe { Request::wrapper_ptr(rq.cast::<Request<T>>()) };

            // SAFETY: The refcount field is allocated but not initialized, so
            // it is valid for writes.
            unsafe { RequestDataWrapper::refcount_ptr(pdu.as_ptr()).write(AtomicU64::new(0)) };

            Ok(0)
        })
    }

    /// This function is called by the C kernel. A pointer to this function is
    /// installed in the `blk_mq_ops` vtable for the driver.
    ///
    /// # Safety
    ///
    /// - This function may only be called by blk-mq C infrastructure.
    /// - `_set` must point to an initialized `TagSet<T>`.
    /// - `rq` must point to an initialized and valid `Request`.
    unsafe extern "C" fn exit_request_callback(
        _set: *mut bindings::blk_mq_tag_set,
        rq: *mut bindings::request,
        _hctx_idx: core::ffi::c_uint,
    ) {
        // SAFETY: The tagset invariants guarantee that all requests are allocated with extra memory
        // for the request data.
        let pdu = unsafe { bindings::blk_mq_rq_to_pdu(rq) }.cast::<RequestDataWrapper>();

        // SAFETY: `pdu` is valid for read and write and is properly initialised.
        unsafe { core::ptr::drop_in_place(pdu) };
    }

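    /// The `bindings::blk_mq_ops` vtable built from the `extern "C"` callbacks
    /// above.
    ///
    /// `poll` is only installed when the driver provides an implementation
    /// (`T::HAS_POLL`, generated by `#[macros::vtable]`); callbacks that are
    /// not supported by these abstractions are left as `None`.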
    const VTABLE: bindings::blk_mq_ops = bindings::blk_mq_ops {
        queue_rq: Some(Self::queue_rq_callback),
        queue_rqs: None,
        commit_rqs: Some(Self::commit_rqs_callback),
        get_budget: None,
        put_budget: None,
        set_rq_budget_token: None,
        get_rq_budget_token: None,
        timeout: None,
        poll: if T::HAS_POLL {
            Some(Self::poll_callback)
        } else {
            None
        },
        complete: Some(Self::complete_callback),
        init_hctx: Some(Self::init_hctx_callback),
        exit_hctx: Some(Self::exit_hctx_callback),
        init_request: Some(Self::init_request_callback),
        exit_request: Some(Self::exit_request_callback),
        cleanup_rq: None,
        busy: None,
        map_queues: None,
        #[cfg(CONFIG_BLK_DEBUG_FS)]
        show_rq: None,
    };

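    /// Returns a reference to the statically built `bindings::blk_mq_ops`
    /// vtable for `T`.
    ///
    /// The returned reference is meant to be installed as the `ops` pointer of
    /// a `bindings::blk_mq_tag_set`, presumably by the tag set abstraction in
    /// this module, so that blk-mq can call back into the driver.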
    pub(crate) const fn build() -> &'static bindings::blk_mq_ops {
        &Self::VTABLE
    }
}