xref: /linux/drivers/gpu/nova-core/gsp/cmdq.rs (revision 4a57e0913e8c7fff407e97909f4ae48caa84d612)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 mod continuation;
4 
5 use core::mem;
6 
7 use kernel::{
8     device,
9     dma::{
10         Coherent,
11         DmaAddress, //
12     },
13     dma_write,
14     io::{
15         poll::read_poll_timeout,
16         Io, //
17     },
18     new_mutex,
19     prelude::*,
20     ptr,
21     sync::{
22         aref::ARef,
23         Mutex, //
24     },
25     time::Delta,
26     transmute::{
27         AsBytes,
28         FromBytes, //
29     },
30 };
31 
32 use continuation::{
33     ContinuationRecord,
34     SplitState, //
35 };
36 
37 use pin_init::pin_init_scope;
38 
39 use crate::{
40     driver::Bar0,
41     gsp::{
42         fw::{
43             GspMsgElement,
44             MsgFunction,
45             MsgqRxHeader,
46             MsgqTxHeader,
47             GSP_MSG_QUEUE_ELEMENT_SIZE_MAX, //
48         },
49         PteArray,
50         GSP_PAGE_SHIFT,
51         GSP_PAGE_SIZE, //
52     },
53     num,
54     regs,
55     sbuffer::SBufferIter, //
56 };
57 
/// Marker type representing the absence of a reply for a command. Commands using this as their
/// reply type are sent using [`Cmdq::send_command_no_wait`].
///
/// This is a pure type-level tag for [`CommandToGsp::Reply`]; it is never constructed or read at
/// runtime.
pub(crate) struct NoReply;
61 
/// Trait implemented by types representing a command to send to the GSP.
///
/// The main purpose of this trait is to provide [`Cmdq`] with the information it needs to send
/// a given command.
///
/// [`CommandToGsp::init`] in particular is responsible for initializing the command directly
/// into the space reserved for it in the command queue buffer.
///
/// Some commands may be followed by a variable-length payload. For these, the
/// [`CommandToGsp::variable_payload_len`] and [`CommandToGsp::init_variable_payload`] need to be
/// defined as well.
pub(crate) trait CommandToGsp {
    /// Function identifying this command to the GSP.
    const FUNCTION: MsgFunction;

    /// Type generated by [`CommandToGsp::init`], to be written into the command queue buffer.
    ///
    /// The `FromBytes + AsBytes` bounds allow the queue buffer to be reinterpreted as this type
    /// in place, without an intermediate copy.
    type Command: FromBytes + AsBytes;

    /// Type of the reply expected from the GSP, or [`NoReply`] for commands that don't
    /// have a reply.
    type Reply;

    /// Error type returned by [`CommandToGsp::init`].
    type InitError;

    /// In-place command initializer responsible for filling the command in the command queue
    /// buffer.
    fn init(&self) -> impl Init<Self::Command, Self::InitError>;

    /// Size of the variable-length payload following the command structure generated by
    /// [`CommandToGsp::init`].
    ///
    /// Most commands don't have a variable-length payload, so this is zero by default.
    fn variable_payload_len(&self) -> usize {
        0
    }

    /// Method initializing the variable-length payload.
    ///
    /// The command buffer is circular, which means that we may need to jump back to its beginning
    /// while in the middle of a command. For this reason, the variable-length payload is
    /// initialized using a [`SBufferIter`] spanning up to two discontiguous slices.
    ///
    /// This method will receive a buffer of the length returned by
    /// [`CommandToGsp::variable_payload_len`], and must write every single byte of it. Leaving
    /// unwritten space will lead to an error.
    ///
    /// Most commands don't have a variable-length payload, so this does nothing by default.
    fn init_variable_payload(
        &self,
        _dst: &mut SBufferIter<core::array::IntoIter<&mut [u8], 2>>,
    ) -> Result {
        Ok(())
    }

    /// Total size of the command (including its variable-length payload) without the
    /// [`GspMsgElement`] header.
    ///
    /// This value drives how much queue space is reserved for the command.
    fn size(&self) -> usize {
        size_of::<Self::Command>() + self.variable_payload_len()
    }
}
123 
/// Trait representing messages received from the GSP.
///
/// This trait tells [`Cmdq::receive_msg`] how it can receive a given type of message.
pub(crate) trait MessageFromGsp: Sized {
    /// Function identifying this message from the GSP.
    const FUNCTION: MsgFunction;

    /// Error type returned by [`MessageFromGsp::read`].
    type InitError;

    /// Type containing the raw message to be read from the message queue.
    type Message: FromBytes;

    /// Method reading the message from the message queue and returning it.
    ///
    /// From a `Self::Message` and a [`SBufferIter`], constructs an instance of `Self` and returns
    /// it.
    ///
    /// `sbuffer` covers the bytes following the fixed `Self::Message` prefix; it may span two
    /// slices when the message wraps around the circular queue.
    fn read(
        msg: &Self::Message,
        sbuffer: &mut SBufferIter<core::array::IntoIter<&[u8], 2>>,
    ) -> Result<Self, Self::InitError>;
}
146 
/// Number of GSP pages making the [`Msgq`] (`0x3f` == 63 pages).
pub(crate) const MSGQ_NUM_PAGES: u32 = 0x3f;

/// Circular buffer of a [`Msgq`].
///
/// This area of memory is to be shared between the driver and the GSP to exchange commands or
/// messages. Each entry is one GSP page; queue elements always occupy whole pages.
#[repr(C, align(0x1000))]
#[derive(Debug)]
struct MsgqData {
    data: [[u8; GSP_PAGE_SIZE]; num::u32_as_usize(MSGQ_NUM_PAGES)],
}

// Annoyingly we are forced to use a literal to specify the alignment of
// `MsgqData`, so check that it corresponds to the actual GSP page size here.
static_assert!(align_of::<MsgqData>() == GSP_PAGE_SIZE);
163 
/// Unidirectional message queue.
///
/// Contains the data for a message queue, that either the driver or GSP writes to.
///
/// Note that while the write pointer of `tx` corresponds to the `msgq` of the same instance, the
/// read pointer of `rx` actually refers to the `Msgq` owned by the other side.
/// This design ensures that only the driver or GSP ever writes to a given instance of this struct.
#[repr(C)]
// There is no struct defined for this in the open-gpu-kernel-source headers.
// Instead it is defined by code in `GspMsgQueuesInit()`.
// NOTE: field order and `repr(C)` are ABI with the GSP firmware — do not reorder.
// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
pub(super) struct Msgq {
    /// Header for sending messages, including the write pointer.
    pub(super) tx: MsgqTxHeader,
    /// Header for receiving messages, including the read pointer.
    pub(super) rx: MsgqRxHeader,
    /// The message queue proper.
    msgq: MsgqData,
}
183 
/// Structure shared between the driver and the GSP and containing the command and message queues.
#[repr(C)]
// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
pub(super) struct GspMem {
    /// Self-mapping page table entries.
    ptes: PteArray<{ Self::PTE_ARRAY_SIZE }>,
    /// CPU queue: the driver writes commands here, and the GSP reads them. It also contains the
    /// write and read pointers that the CPU updates. This means that the read pointer here is an
    /// index into the GSP queue.
    ///
    /// This member is read-only for the GSP.
    pub(super) cpuq: Msgq,
    /// GSP queue: the GSP writes messages here, and the driver reads them. It also contains the
    /// write and read pointers that the GSP updates. This means that the read pointer here is an
    /// index into the CPU queue.
    ///
    /// This member is read-only for the driver.
    pub(super) gspq: Msgq,
}

impl GspMem {
    /// Number of PTEs that fit into one GSP page (the PTE array occupies exactly one page).
    const PTE_ARRAY_SIZE: usize = GSP_PAGE_SIZE / size_of::<u64>();
}

// SAFETY: These structs don't meet the no-padding requirements of AsBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl AsBytes for GspMem {}

// SAFETY: These structs don't meet the no-padding requirements of FromBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl FromBytes for GspMem {}
215 
/// Wrapper around [`GspMem`] to share it with the GPU using a [`Coherent`] DMA allocation.
///
/// This provides the low-level functionality to communicate with the GSP, including allocation of
/// queue space to write messages to and management of read/write pointers.
///
/// This is shared with the GSP, with clear ownership rules regarding the command queues:
///
/// * The driver owns (i.e. can write to) the part of the CPU message queue between the CPU write
///   pointer and the GSP read pointer. This region is returned by [`Self::driver_write_area`].
/// * The driver owns (i.e. can read from) the part of the GSP message queue between the CPU read
///   pointer and the GSP write pointer. This region is returned by [`Self::driver_read_area`].
struct DmaGspMem(Coherent<GspMem>);
228 
impl DmaGspMem {
    /// Allocate a new instance and map it for `dev`.
    ///
    /// The backing memory is zero-initialized; only the PTE array and the CPU queue headers are
    /// written here. The GSP queue headers are left zeroed — presumably the GSP initializes its
    /// own side (TODO confirm against `GspMsgQueuesInit()`).
    fn new(dev: &device::Device<device::Bound>) -> Result<Self> {
        const MSGQ_SIZE: u32 = num::usize_into_u32::<{ size_of::<Msgq>() }>();
        const RX_HDR_OFF: u32 = num::usize_into_u32::<{ mem::offset_of!(Msgq, rx) }>();

        let gsp_mem = Coherent::<GspMem>::zeroed(dev, GFP_KERNEL)?;

        let start = gsp_mem.dma_handle();
        // Write values one by one to avoid an on-stack instance of `PteArray`.
        for i in 0..GspMem::PTE_ARRAY_SIZE {
            dma_write!(gsp_mem, .ptes.0[i], PteArray::<0>::entry(start, i)?);
        }

        dma_write!(
            gsp_mem,
            .cpuq.tx,
            MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES)
        );
        dma_write!(gsp_mem, .cpuq.rx, MsgqRxHeader::new());

        Ok(Self(gsp_mem))
    }

    /// Returns the region of the CPU message queue that the driver is currently allowed to write
    /// to.
    ///
    /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
    /// that case the second slice will have a non-zero length.
    ///
    /// Note: `&mut self` is essential for soundness — it prevents the write pointer from being
    /// advanced while the returned slices are alive.
    fn driver_write_area(&mut self) -> (&mut [[u8; GSP_PAGE_SIZE]], &mut [[u8; GSP_PAGE_SIZE]]) {
        let tx = self.cpu_write_ptr();
        let rx = self.gsp_read_ptr();

        // Pointer to the first entry of the CPU message queue.
        let data = ptr::project!(mut self.0.as_mut_ptr(), .cpuq.msgq.data[0]);

        // One slot is always kept unused so that a full queue (`tx` one behind `rx`) can be
        // distinguished from an empty one (`tx == rx`).
        let (tail_end, wrap_end) = if rx == 0 {
            // The write area is non-wrapping, and stops at the second-to-last entry of the command
            // queue (to leave the last one empty).
            (MSGQ_NUM_PAGES - 1, 0)
        } else if rx <= tx {
            // The write area wraps and continues until `rx - 1`.
            (MSGQ_NUM_PAGES, rx - 1)
        } else {
            // The write area doesn't wrap and stops at `rx - 1`.
            (rx - 1, 0)
        };

        // SAFETY:
        // - `data` was created from a valid pointer, and `rx` and `tx` are in the
        //   `0..MSGQ_NUM_PAGES` range per the invariants of `cpu_write_ptr` and `gsp_read_ptr`,
        //   thus the created slices are valid.
        // - The area starting at `tx` and ending at `rx - 2` modulo `MSGQ_NUM_PAGES`,
        //   inclusive, belongs to the driver for writing and is not accessed concurrently by
        //   the GSP.
        // - The caller holds a reference to `self` for as long as the returned slices are live,
        //   meaning the CPU write pointer cannot be advanced and thus that the returned area
        //   remains exclusive to the CPU for the duration of the slices.
        // - The created slices point to non-overlapping sub-ranges of `data` in all
        //   branches (in the `rx <= tx` case, the second slice ends at `rx - 1` which is strictly
        //   less than `tx` where the first slice starts; in the other cases the second slice is
        //   empty), so creating two `&mut` references from them does not violate aliasing rules.
        unsafe {
            (
                core::slice::from_raw_parts_mut(
                    data.add(num::u32_as_usize(tx)),
                    num::u32_as_usize(tail_end - tx),
                ),
                core::slice::from_raw_parts_mut(data, num::u32_as_usize(wrap_end)),
            )
        }
    }

    /// Returns the size of the region of the CPU message queue that the driver is currently allowed
    /// to write to, in bytes.
    ///
    /// This is the byte count of the two slices [`Self::driver_write_area`] would return, computed
    /// without creating the slices (and thus usable from a `&self` polling closure).
    fn driver_write_area_size(&self) -> usize {
        let tx = self.cpu_write_ptr();
        let rx = self.gsp_read_ptr();

        // `rx` and `tx` are both in `0..MSGQ_NUM_PAGES` per the invariants of `gsp_read_ptr` and
        // `cpu_write_ptr`. The minimum value case is where `rx == 0` and `tx == MSGQ_NUM_PAGES -
        // 1`, which gives `0 + MSGQ_NUM_PAGES - (MSGQ_NUM_PAGES - 1) - 1 == 0`.
        let slots = (rx + MSGQ_NUM_PAGES - tx - 1) % MSGQ_NUM_PAGES;
        num::u32_as_usize(slots) * GSP_PAGE_SIZE
    }

    /// Returns the region of the GSP message queue that the driver is currently allowed to read
    /// from.
    ///
    /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
    /// that case the second slice will have a non-zero length.
    fn driver_read_area(&self) -> (&[[u8; GSP_PAGE_SIZE]], &[[u8; GSP_PAGE_SIZE]]) {
        let tx = self.gsp_write_ptr();
        let rx = self.cpu_read_ptr();

        // Pointer to the first entry of the GSP message queue.
        let data = ptr::project!(self.0.as_ptr(), .gspq.msgq.data[0]);

        let (tail_end, wrap_end) = if rx <= tx {
            // Read area is non-wrapping and stops right before `tx`.
            (tx, 0)
        } else {
            // Read area is wrapping and stops right before `tx`.
            (MSGQ_NUM_PAGES, tx)
        };

        // SAFETY:
        // - `data` was created from a valid pointer, and `rx` and `tx` are in the
        //   `0..MSGQ_NUM_PAGES` range per the invariants of `gsp_write_ptr` and `cpu_read_ptr`,
        //   thus the created slices are valid.
        // - The area starting at `rx` and ending at `tx - 1` modulo `MSGQ_NUM_PAGES`,
        //   inclusive, belongs to the driver for reading and is not accessed concurrently by
        //   the GSP.
        // - The caller holds a reference to `self` for as long as the returned slices are live,
        //   meaning the CPU read pointer cannot be advanced and thus that the returned area
        //   remains exclusive to the CPU for the duration of the slices.
        unsafe {
            (
                core::slice::from_raw_parts(
                    data.add(num::u32_as_usize(rx)),
                    num::u32_as_usize(tail_end - rx),
                ),
                core::slice::from_raw_parts(data, num::u32_as_usize(wrap_end)),
            )
        }
    }

    /// Allocates a region on the command queue that is large enough to send a command of `size`
    /// bytes, waiting for space to become available based on the provided timeout.
    ///
    /// This returns a [`GspCommand`] ready to be written to by the caller.
    ///
    /// # Errors
    ///
    /// - `EMSGSIZE` if the command is larger than [`GSP_MSG_QUEUE_ELEMENT_SIZE_MAX`].
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the command header is not properly aligned.
    fn allocate_command(&mut self, size: usize, timeout: Delta) -> Result<GspCommand<'_>> {
        if size_of::<GspMsgElement>() + size > GSP_MSG_QUEUE_ELEMENT_SIZE_MAX {
            return Err(EMSGSIZE);
        }
        // Poll (at 1us intervals) until the GSP has consumed enough entries to leave room for
        // the header plus `size` bytes of command.
        read_poll_timeout(
            || Ok(self.driver_write_area_size()),
            |available_bytes| *available_bytes >= size_of::<GspMsgElement>() + size,
            Delta::from_micros(1),
            timeout,
        )?;

        // Get the current writable area as an array of bytes.
        let (slice_1, slice_2) = {
            let (slice_1, slice_2) = self.driver_write_area();

            (slice_1.as_flattened_mut(), slice_2.as_flattened_mut())
        };

        // Extract area for the `GspMsgElement`. The header always starts on a page boundary, so
        // it cannot straddle the wrap point.
        let (header, slice_1) = GspMsgElement::from_bytes_mut_prefix(slice_1).ok_or(EIO)?;

        // Create the contents area.
        let (slice_1, slice_2) = if slice_1.len() > size {
            // Contents fits entirely in `slice_1`.
            (&mut slice_1[..size], &mut slice_2[0..0])
        } else {
            // Need all of `slice_1` and some of `slice_2`.
            let slice_2_len = size - slice_1.len();
            (slice_1, &mut slice_2[..slice_2_len])
        };

        Ok(GspCommand {
            header,
            contents: (slice_1, slice_2),
        })
    }

    // Returns the index of the memory page the GSP will write the next message to.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn gsp_write_ptr(&self) -> u32 {
        super::fw::gsp_mem::gsp_write_ptr(&self.0)
    }

    // Returns the index of the memory page the GSP will read the next command from.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn gsp_read_ptr(&self) -> u32 {
        super::fw::gsp_mem::gsp_read_ptr(&self.0)
    }

    // Returns the index of the memory page the CPU can read the next message from.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn cpu_read_ptr(&self) -> u32 {
        super::fw::gsp_mem::cpu_read_ptr(&self.0)
    }

    // Informs the GSP that it can send `elem_count` new pages into the message queue.
    //
    // Takes `&mut self` (even though the inner call only needs `&self.0`) so that no slice
    // returned by `driver_read_area` can still be alive when the pointer moves.
    fn advance_cpu_read_ptr(&mut self, elem_count: u32) {
        super::fw::gsp_mem::advance_cpu_read_ptr(&self.0, elem_count)
    }

    // Returns the index of the memory page the CPU can write the next command to.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn cpu_write_ptr(&self) -> u32 {
        super::fw::gsp_mem::cpu_write_ptr(&self.0)
    }

    // Informs the GSP that it can process `elem_count` new pages from the command queue.
    //
    // Takes `&mut self` so that no slice returned by `driver_write_area` can still be alive
    // when the pointer moves.
    fn advance_cpu_write_ptr(&mut self, elem_count: u32) {
        super::fw::gsp_mem::advance_cpu_write_ptr(&self.0, elem_count)
    }
}
449 
/// A command ready to be sent on the command queue.
///
/// This is the type returned by [`DmaGspMem::allocate_command`].
struct GspCommand<'a> {
    // Writable reference to the header of the command.
    header: &'a mut GspMsgElement,
    // Writable slices to the contents of the command. The second slice is empty unless the
    // command wraps around the end of the circular command queue.
    contents: (&'a mut [u8], &'a mut [u8]),
}
460 
/// A message ready to be processed from the message queue.
///
/// This is the type returned by [`Cmdq::wait_for_msg`].
struct GspMessage<'a> {
    // Reference to the header of the message.
    header: &'a GspMsgElement,
    // Slices to the contents of the message. The second slice is empty unless the message wraps
    // around the end of the circular message queue.
    contents: (&'a [u8], &'a [u8]),
}
471 
/// GSP command queue.
///
/// Provides the ability to send commands and receive messages from the GSP using a shared memory
/// area.
#[pin_data]
pub(crate) struct Cmdq {
    /// Inner mutex-protected state.
    ///
    /// Serializes senders/receivers so a command and its reply cannot be interleaved with
    /// another command.
    #[pin]
    inner: Mutex<CmdqInner>,
    /// DMA handle of the command queue's shared memory region.
    pub(super) dma_handle: DmaAddress,
}
484 
485 impl Cmdq {
486     /// Offset of the data after the PTEs.
487     const POST_PTE_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq);
488 
489     /// Offset of command queue ring buffer.
490     pub(crate) const CMDQ_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq)
491         + core::mem::offset_of!(Msgq, msgq)
492         - Self::POST_PTE_OFFSET;
493 
494     /// Offset of message queue ring buffer.
495     pub(crate) const STATQ_OFFSET: usize = core::mem::offset_of!(GspMem, gspq)
496         + core::mem::offset_of!(Msgq, msgq)
497         - Self::POST_PTE_OFFSET;
498 
499     /// Number of page table entries for the GSP shared region.
500     pub(crate) const NUM_PTES: usize = size_of::<GspMem>() >> GSP_PAGE_SHIFT;
501 
502     /// Default timeout for receiving a message from the GSP.
503     pub(super) const RECEIVE_TIMEOUT: Delta = Delta::from_secs(5);
504 
505     /// Creates a new command queue for `dev`.
506     pub(crate) fn new(dev: &device::Device<device::Bound>) -> impl PinInit<Self, Error> + '_ {
507         pin_init_scope(move || {
508             let gsp_mem = DmaGspMem::new(dev)?;
509 
510             Ok(try_pin_init!(Self {
511                 dma_handle: gsp_mem.0.dma_handle(),
512                 inner <- new_mutex!(CmdqInner {
513                     dev: dev.into(),
514                     gsp_mem,
515                     seq: 0,
516                 }),
517             }))
518         })
519     }
520 
521     /// Computes the checksum for the message pointed to by `it`.
522     ///
523     /// A message is made of several parts, so `it` is an iterator over byte slices representing
524     /// these parts.
525     fn calculate_checksum<T: Iterator<Item = u8>>(it: T) -> u32 {
526         let sum64 = it
527             .enumerate()
528             .map(|(idx, byte)| (((idx % 8) * 8) as u32, byte))
529             .fold(0, |acc, (rol, byte)| acc ^ u64::from(byte).rotate_left(rol));
530 
531         ((sum64 >> 32) as u32) ^ (sum64 as u32)
532     }
533 
534     /// Notifies the GSP that we have updated the command queue pointers.
535     fn notify_gsp(bar: &Bar0) {
536         bar.write_reg(regs::NV_PGSP_QUEUE_HEAD::zeroed().with_address(0u32));
537     }
538 
539     /// Sends `command` to the GSP and waits for the reply.
540     ///
541     /// Messages with non-matching function codes are silently consumed until the expected reply
542     /// arrives.
543     ///
544     /// The queue is locked for the entire send+receive cycle to ensure that no other command can
545     /// be interleaved.
546     ///
547     /// # Errors
548     ///
549     /// - `ETIMEDOUT` if space does not become available to send the command, or if the reply is
550     ///   not received within the timeout.
551     /// - `EIO` if the variable payload requested by the command has not been entirely
552     ///   written to by its [`CommandToGsp::init_variable_payload`] method.
553     ///
554     /// Error codes returned by the command and reply initializers are propagated as-is.
555     pub(crate) fn send_command<M>(&self, bar: &Bar0, command: M) -> Result<M::Reply>
556     where
557         M: CommandToGsp,
558         M::Reply: MessageFromGsp,
559         Error: From<M::InitError>,
560         Error: From<<M::Reply as MessageFromGsp>::InitError>,
561     {
562         let mut inner = self.inner.lock();
563         inner.send_command(bar, command)?;
564 
565         loop {
566             match inner.receive_msg::<M::Reply>(Self::RECEIVE_TIMEOUT) {
567                 Ok(reply) => break Ok(reply),
568                 Err(ERANGE) => continue,
569                 Err(e) => break Err(e),
570             }
571         }
572     }
573 
574     /// Sends `command` to the GSP without waiting for a reply.
575     ///
576     /// # Errors
577     ///
578     /// - `ETIMEDOUT` if space does not become available within the timeout.
579     /// - `EIO` if the variable payload requested by the command has not been entirely
580     ///   written to by its [`CommandToGsp::init_variable_payload`] method.
581     ///
582     /// Error codes returned by the command initializers are propagated as-is.
583     pub(crate) fn send_command_no_wait<M>(&self, bar: &Bar0, command: M) -> Result
584     where
585         M: CommandToGsp<Reply = NoReply>,
586         Error: From<M::InitError>,
587     {
588         self.inner.lock().send_command(bar, command)
589     }
590 
591     /// Receive a message from the GSP.
592     ///
593     /// See [`CmdqInner::receive_msg`] for details.
594     pub(crate) fn receive_msg<M: MessageFromGsp>(&self, timeout: Delta) -> Result<M>
595     where
596         // This allows all error types, including `Infallible`, to be used for `M::InitError`.
597         Error: From<M::InitError>,
598     {
599         self.inner.lock().receive_msg(timeout)
600     }
601 }
602 
/// Inner mutex protected state of [`Cmdq`].
struct CmdqInner {
    /// Device this command queue belongs to. Used for logging.
    dev: ARef<device::Device>,
    /// Current command sequence number, incremented after each successfully sent command.
    seq: u32,
    /// Memory area shared with the GSP for communicating commands and messages.
    gsp_mem: DmaGspMem,
}
612 
impl CmdqInner {
    /// Timeout for waiting for space on the command queue.
    const ALLOCATE_TIMEOUT: Delta = Delta::from_secs(1);

    /// Sends `command` to the GSP, without splitting it.
    ///
    /// # Errors
    ///
    /// - `EMSGSIZE` if the command exceeds the maximum queue element size.
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    fn send_single_command<M>(&mut self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp,
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        let size_in_bytes = command.size();
        let dst = self
            .gsp_mem
            .allocate_command(size_in_bytes, Self::ALLOCATE_TIMEOUT)?;

        // Extract area for the command itself. The GSP message header and the command header
        // together are guaranteed to fit entirely into a single page, so it's ok to only look
        // at `dst.contents.0` here.
        let (cmd, payload_1) = M::Command::from_bytes_mut_prefix(dst.contents.0).ok_or(EIO)?;

        // Fill the header and command in-place.
        let msg_element = GspMsgElement::init(self.seq, size_in_bytes, M::FUNCTION);
        // SAFETY: `dst.header` and `cmd` are valid references, and are not touched again if the
        // initializer fails.
        unsafe {
            msg_element.__init(core::ptr::from_mut(dst.header))?;
            command.init().__init(core::ptr::from_mut(cmd))?;
        }

        // Fill the variable-length payload, which may be empty.
        let mut sbuffer = SBufferIter::new_writer([&mut payload_1[..], &mut dst.contents.1[..]]);
        command.init_variable_payload(&mut sbuffer)?;
        // Leftover space would send stale queue bytes as part of the command, so reject it.
        if !sbuffer.is_empty() {
            return Err(EIO);
        }
        drop(sbuffer);

        // Compute checksum now that the whole message is ready. This must happen after all
        // writes to the header and contents, and before the write pointer is advanced.
        dst.header
            .set_checksum(Cmdq::calculate_checksum(SBufferIter::new_reader([
                dst.header.as_bytes(),
                dst.contents.0,
                dst.contents.1,
            ])));

        dev_dbg!(
            &self.dev,
            "GSP RPC: send: seq# {}, function={:?}, length=0x{:x}\n",
            self.seq,
            M::FUNCTION,
            dst.header.length(),
        );

        // All set - update the write pointer and inform the GSP of the new command.
        let elem_count = dst.header.element_count();
        self.seq += 1;
        self.gsp_mem.advance_cpu_write_ptr(elem_count);
        Cmdq::notify_gsp(bar);

        Ok(())
    }

    /// Sends `command` to the GSP.
    ///
    /// The command may be split into multiple messages if it is large.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    fn send_command<M>(&mut self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp,
        Error: From<M::InitError>,
    {
        match SplitState::new(command)? {
            SplitState::Single(command) => self.send_single_command(bar, command),
            SplitState::Split(command, mut continuations) => {
                // First chunk goes out as the original command...
                self.send_single_command(bar, command)?;

                // ...followed by one continuation record per remaining chunk.
                // NOTE(review): `while let` rather than `for` suggests `continuations` is a
                // lending iterator (items borrow from it) — confirm in `continuation.rs`.
                while let Some(continuation) = continuations.next() {
                    // Turbofish needed because the compiler cannot infer M here.
                    self.send_single_command::<ContinuationRecord<'_>>(bar, continuation)?;
                }

                Ok(())
            }
        }
    }

    /// Wait for a message to become available on the message queue.
    ///
    /// This works purely at the transport layer and does not interpret or validate the message
    /// beyond the advertised length in its [`GspMsgElement`] and its checksum.
    ///
    /// This method returns:
    ///
    /// - A reference to the [`GspMsgElement`] of the message,
    /// - Two byte slices with the contents of the message. The second slice is empty unless the
    ///   message loops across the message queue.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised, or bad
    ///   checksum) on the message queue.
    ///
    /// Error codes returned by the message constructor are propagated as-is.
    fn wait_for_msg(&self, timeout: Delta) -> Result<GspMessage<'_>> {
        // Wait for a message to arrive from the GSP, polling at 1ms intervals.
        let (slice_1, slice_2) = read_poll_timeout(
            || Ok(self.gsp_mem.driver_read_area()),
            |driver_area| !driver_area.0.is_empty(),
            Delta::from_millis(1),
            timeout,
        )
        .map(|(slice_1, slice_2)| (slice_1.as_flattened(), slice_2.as_flattened()))?;

        // Extract the `GspMsgElement`. It starts on a page boundary, so it cannot straddle the
        // wrap point.
        let (header, slice_1) = GspMsgElement::from_bytes_prefix(slice_1).ok_or(EIO)?;

        dev_dbg!(
            &self.dev,
            "GSP RPC: receive: seq# {}, function={:?}, length=0x{:x}\n",
            header.sequence(),
            header.function(),
            header.length(),
        );

        let payload_length = header.payload_length();

        // Check that the driver read area is large enough for the message.
        if slice_1.len() + slice_2.len() < payload_length {
            return Err(EIO);
        }

        // Cut the message slices down to the actual length of the message.
        let (slice_1, slice_2) = if slice_1.len() > payload_length {
            // PANIC: the branch condition guarantees `slice_1` is longer than `payload_length`.
            (slice_1.split_at(payload_length).0, &slice_2[0..0])
        } else {
            (
                slice_1,
                // PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as
                // large as `payload_length`, so the split index is within `slice_2`.
                slice_2.split_at(payload_length - slice_1.len()).0,
            )
        };

        // Validate checksum. The header's stored checksum is part of the checksummed bytes, so a
        // valid message folds to zero.
        if Cmdq::calculate_checksum(SBufferIter::new_reader([
            header.as_bytes(),
            slice_1,
            slice_2,
        ])) != 0
        {
            dev_err!(
                &self.dev,
                "GSP RPC: receive: Call {} - bad checksum\n",
                header.sequence()
            );
            return Err(EIO);
        }

        Ok(GspMessage {
            header,
            contents: (slice_1, slice_2),
        })
    }

    /// Receive a message from the GSP.
    ///
    /// The expected message type is specified using the `M` generic parameter. If the pending
    /// message has a different function code, `ERANGE` is returned and the message is consumed.
    ///
    /// The read pointer is always advanced past the message, regardless of whether it matched.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
    ///   message queue.
    /// - `EINVAL` if the function code of the message was not recognized.
    /// - `ERANGE` if the message had a recognized but non-matching function code.
    ///
    /// Error codes returned by [`MessageFromGsp::read`] are propagated as-is.
    fn receive_msg<M: MessageFromGsp>(&mut self, timeout: Delta) -> Result<M>
    where
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        let message = self.wait_for_msg(timeout)?;
        let function = message.header.function().map_err(|_| EINVAL)?;

        // Extract the message. Store the result as we want to advance the read pointer even in
        // case of failure.
        // NOTE(review): the `?` on `from_bytes_prefix` below returns early on a truncated
        // message *without* advancing the read pointer, unlike the other failure paths —
        // confirm this early return is intended.
        let result = if function == M::FUNCTION {
            let (cmd, contents_1) = M::Message::from_bytes_prefix(message.contents.0).ok_or(EIO)?;
            let mut sbuffer = SBufferIter::new_reader([contents_1, message.contents.1]);

            M::read(cmd, &mut sbuffer)
                .map_err(|e| e.into())
                .inspect(|_| {
                    // The constructor succeeded but left bytes unread; warn as this hints at a
                    // mismatch between the message definition and the firmware.
                    if !sbuffer.is_empty() {
                        dev_warn!(
                            &self.dev,
                            "GSP message {:?} has unprocessed data\n",
                            function
                        );
                    }
                })
        } else {
            Err(ERANGE)
        };

        // Advance the read pointer past this message (lengths are rounded up to whole pages).
        self.gsp_mem.advance_cpu_read_ptr(u32::try_from(
            message.header.length().div_ceil(GSP_PAGE_SIZE),
        )?);

        result
    }
}
850