xref: /linux/drivers/gpu/nova-core/gsp/cmdq.rs (revision e64b9cc293ae710c815c2de1ec9dcaa0784a8017)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 mod continuation;
4 
5 use core::mem;
6 
7 use kernel::{
8     device,
9     dma::{
10         Coherent,
11         DmaAddress, //
12     },
13     dma_write,
14     io::poll::read_poll_timeout,
15     new_mutex,
16     prelude::*,
17     sync::{
18         aref::ARef,
19         Mutex, //
20     },
21     time::Delta,
22     transmute::{
23         AsBytes,
24         FromBytes, //
25     },
26 };
27 
28 use continuation::{
29     ContinuationRecord,
30     SplitState, //
31 };
32 
33 use pin_init::pin_init_scope;
34 
35 use crate::{
36     driver::Bar0,
37     gsp::{
38         fw::{
39             GspMsgElement,
40             MsgFunction,
41             MsgqRxHeader,
42             MsgqTxHeader,
43             GSP_MSG_QUEUE_ELEMENT_SIZE_MAX, //
44         },
45         PteArray,
46         GSP_PAGE_SHIFT,
47         GSP_PAGE_SIZE, //
48     },
49     num,
50     regs,
51     sbuffer::SBufferIter, //
52 };
53 
/// Marker type representing the absence of a reply for a command. Commands using this as their
/// reply type are sent using [`Cmdq::send_command_no_wait`].
///
/// This type is never instantiated; it exists only to be named as [`CommandToGsp::Reply`].
pub(crate) struct NoReply;
57 
/// Trait implemented by types representing a command to send to the GSP.
///
/// The main purpose of this trait is to provide [`Cmdq`] with the information it needs to send
/// a given command.
///
/// [`CommandToGsp::init`] in particular is responsible for initializing the command directly
/// into the space reserved for it in the command queue buffer.
///
/// Some commands may be followed by a variable-length payload. For these, the
/// [`CommandToGsp::variable_payload_len`] and [`CommandToGsp::init_variable_payload`] need to be
/// defined as well.
pub(crate) trait CommandToGsp {
    /// Function identifying this command to the GSP.
    const FUNCTION: MsgFunction;

    /// Type generated by [`CommandToGsp::init`], to be written into the command queue buffer.
    ///
    /// `FromBytes + AsBytes` allows the command to be materialized in, and read back from, the
    /// shared queue memory.
    type Command: FromBytes + AsBytes;

    /// Type of the reply expected from the GSP, or [`NoReply`] for commands that don't
    /// have a reply.
    type Reply;

    /// Error type returned by [`CommandToGsp::init`].
    type InitError;

    /// In-place command initializer responsible for filling the command in the command queue
    /// buffer.
    ///
    /// Returning an initializer (rather than a value) avoids building the command on the stack
    /// before copying it into the queue.
    fn init(&self) -> impl Init<Self::Command, Self::InitError>;

    /// Size of the variable-length payload following the command structure generated by
    /// [`CommandToGsp::init`].
    ///
    /// Most commands don't have a variable-length payload, so this is zero by default.
    fn variable_payload_len(&self) -> usize {
        0
    }

    /// Method initializing the variable-length payload.
    ///
    /// The command buffer is circular, which means that we may need to jump back to its beginning
    /// while in the middle of a command. For this reason, the variable-length payload is
    /// initialized using a [`SBufferIter`].
    ///
    /// This method will receive a buffer of the length returned by
    /// [`CommandToGsp::variable_payload_len`], and must write every single byte of it. Leaving
    /// unwritten space will lead to an error (`EIO`) when the command is sent.
    ///
    /// Most commands don't have a variable-length payload, so this does nothing by default.
    fn init_variable_payload(
        &self,
        _dst: &mut SBufferIter<core::array::IntoIter<&mut [u8], 2>>,
    ) -> Result {
        Ok(())
    }

    /// Total size of the command (including its variable-length payload) without the
    /// [`GspMsgElement`] header.
    fn size(&self) -> usize {
        size_of::<Self::Command>() + self.variable_payload_len()
    }
}
119 
/// Trait representing messages received from the GSP.
///
/// This trait tells [`Cmdq::receive_msg`] how it can receive a given type of message.
pub(crate) trait MessageFromGsp: Sized {
    /// Function identifying this message from the GSP.
    ///
    /// Incoming messages with a different (but recognized) function are reported as `ERANGE` by
    /// the receive path.
    const FUNCTION: MsgFunction;

    /// Error type returned by [`MessageFromGsp::read`].
    type InitError;

    /// Type containing the raw message to be read from the message queue.
    type Message: FromBytes;

    /// Method reading the message from the message queue and returning it.
    ///
    /// From a `Self::Message` and a [`SBufferIter`], constructs an instance of `Self` and returns
    /// it. The iterator covers the payload following `Self::Message`, which may be split in two
    /// slices when the message wraps around the circular queue.
    fn read(
        msg: &Self::Message,
        sbuffer: &mut SBufferIter<core::array::IntoIter<&[u8], 2>>,
    ) -> Result<Self, Self::InitError>;
}
142 
/// Number of GSP pages making the [`Msgq`].
///
/// NOTE(review): this counts only the [`MsgqData`] ring-buffer pages; the `tx`/`rx` headers of
/// [`Msgq`] occupy additional space before the page-aligned data — confirm against
/// `GspMsgQueuesInit()` in the firmware headers.
pub(crate) const MSGQ_NUM_PAGES: u32 = 0x3f;
145 
/// Circular buffer of a [`Msgq`].
///
/// This area of memory is to be shared between the driver and the GSP to exchange commands or
/// messages.
///
/// The queue is addressed at page granularity: each element of `data` is one GSP page, and the
/// read/write pointers exchanged with the GSP are indices into this array.
#[repr(C, align(0x1000))]
#[derive(Debug)]
struct MsgqData {
    data: [[u8; GSP_PAGE_SIZE]; num::u32_as_usize(MSGQ_NUM_PAGES)],
}

// Annoyingly we are forced to use a literal to specify the alignment of
// `MsgqData`, so check that it corresponds to the actual GSP page size here.
static_assert!(align_of::<MsgqData>() == GSP_PAGE_SIZE);
159 
/// Unidirectional message queue.
///
/// Contains the data for a message queue, that either the driver or GSP writes to.
///
/// Note that while the write pointer of `tx` corresponds to the `msgq` of the same instance, the
/// read pointer of `rx` actually refers to the `Msgq` owned by the other side.
/// This design ensures that only the driver or GSP ever writes to a given instance of this struct.
#[repr(C)]
// There is no struct defined for this in the open-gpu-kernel-source headers.
// Instead it is defined by code in `GspMsgQueuesInit()`.
// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
pub(super) struct Msgq {
    /// Header for sending messages, including the write pointer.
    pub(super) tx: MsgqTxHeader,
    /// Header for receiving messages, including the read pointer.
    pub(super) rx: MsgqRxHeader,
    /// The message queue proper. Page-aligned per `MsgqData`'s `align(0x1000)`.
    msgq: MsgqData,
}
179 
/// Structure shared between the driver and the GSP and containing the command and message queues.
///
/// The layout (PTEs first, then the two queues) is fixed by `#[repr(C)]`; the offset constants in
/// [`Cmdq`] are derived from it with `offset_of!`.
#[repr(C)]
// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
pub(super) struct GspMem {
    /// Self-mapping page table entries.
    ptes: PteArray<{ Self::PTE_ARRAY_SIZE }>,
    /// CPU queue: the driver writes commands here, and the GSP reads them. It also contains the
    /// write and read pointers that the CPU updates. This means that the read pointer here is an
    /// index into the GSP queue.
    ///
    /// This member is read-only for the GSP.
    pub(super) cpuq: Msgq,
    /// GSP queue: the GSP writes messages here, and the driver reads them. It also contains the
    /// write and read pointers that the GSP updates. This means that the read pointer here is an
    /// index into the CPU queue.
    ///
    /// This member is read-only for the driver.
    pub(super) gspq: Msgq,
}
199 
impl GspMem {
    /// Number of 64-bit page table entries that fit in one GSP page.
    const PTE_ARRAY_SIZE: usize = GSP_PAGE_SIZE / size_of::<u64>();
}
203 
// SAFETY: These structs don't meet the no-padding requirements of AsBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl AsBytes for GspMem {}

// SAFETY: These structs don't meet the no-padding requirements of FromBytes but
// that is not a problem because they are not used outside the kernel.
// These impls are what allows `GspMem` to be placed in a `Coherent` DMA allocation.
unsafe impl FromBytes for GspMem {}
211 
/// Wrapper around [`GspMem`] to share it with the GPU using a [`Coherent`].
///
/// This provides the low-level functionality to communicate with the GSP, including allocation of
/// queue space to write messages to and management of read/write pointers.
///
/// This is shared with the GSP, with clear ownership rules regarding the command queues:
///
/// * The driver owns (i.e. can write to) the part of the CPU message queue between the CPU write
///   pointer and the GSP read pointer. This region is returned by [`Self::driver_write_area`].
/// * The driver owns (i.e. can read from) the part of the GSP message queue between the CPU read
///   pointer and the GSP write pointer. This region is returned by [`Self::driver_read_area`].
struct DmaGspMem(Coherent<GspMem>);
224 
impl DmaGspMem {
    /// Allocate a new instance and map it for `dev`.
    ///
    /// The region is zero-initialized; only the self-mapping PTEs and the CPU-owned queue
    /// headers (`cpuq.tx`/`cpuq.rx`) are written here. `gspq` is left zeroed — presumably the
    /// GSP initializes its own side; confirm against `GspMsgQueuesInit()`.
    fn new(dev: &device::Device<device::Bound>) -> Result<Self> {
        const MSGQ_SIZE: u32 = num::usize_into_u32::<{ size_of::<Msgq>() }>();
        const RX_HDR_OFF: u32 = num::usize_into_u32::<{ mem::offset_of!(Msgq, rx) }>();

        let gsp_mem = Coherent::<GspMem>::zeroed(dev, GFP_KERNEL)?;

        let start = gsp_mem.dma_handle();
        // Write values one by one to avoid an on-stack instance of `PteArray`.
        for i in 0..GspMem::PTE_ARRAY_SIZE {
            dma_write!(gsp_mem, .ptes.0[i], PteArray::<0>::entry(start, i)?);
        }

        dma_write!(
            gsp_mem,
            .cpuq.tx,
            MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES)
        );
        dma_write!(gsp_mem, .cpuq.rx, MsgqRxHeader::new());

        Ok(Self(gsp_mem))
    }

    /// Returns the region of the CPU message queue that the driver is currently allowed to write
    /// to.
    ///
    /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
    /// that case the second slice will have a non-zero length.
    ///
    /// One page is always left unused between the write and read pointers so that a full queue
    /// can be told apart from an empty one.
    fn driver_write_area(&mut self) -> (&mut [[u8; GSP_PAGE_SIZE]], &mut [[u8; GSP_PAGE_SIZE]]) {
        let tx = self.cpu_write_ptr() as usize;
        let rx = self.gsp_read_ptr() as usize;

        // SAFETY:
        // - We will only access the driver-owned part of the shared memory.
        // - Per the safety statement of the function, no concurrent access will be performed.
        //   (`&mut self` additionally rules out concurrent CPU-side access for the borrow.)
        let gsp_mem = unsafe { &mut *self.0.as_mut() };
        // PANIC: per the invariant of `cpu_write_ptr`, `tx` is `< MSGQ_NUM_PAGES`.
        let (before_tx, after_tx) = gsp_mem.cpuq.msgq.data.split_at_mut(tx);

        // The area starting at `tx` and ending at `rx - 2` modulo MSGQ_NUM_PAGES, inclusive,
        // belongs to the driver for writing.

        if rx == 0 {
            // Since `rx` is zero, leave an empty slot at end of the buffer.
            // PANIC: `after_tx` is non-empty because `tx < MSGQ_NUM_PAGES`.
            let last = after_tx.len() - 1;
            (&mut after_tx[..last], &mut [])
        } else if rx <= tx {
            // The area is discontiguous and we leave an empty slot before `rx`.
            // PANIC:
            // - The index `rx - 1` is non-negative because `rx != 0` in this branch.
            // - The index does not exceed `before_tx.len()` (which equals `tx`) because
            //   `rx <= tx` in this branch.
            (after_tx, &mut before_tx[..(rx - 1)])
        } else {
            // The area is contiguous and we leave an empty slot before `rx`.
            // PANIC:
            // - The index `rx - tx - 1` is non-negative because `rx > tx` in this branch.
            // - The index does not exceed `after_tx.len()` (which is `MSGQ_NUM_PAGES - tx`)
            //   because `rx < MSGQ_NUM_PAGES` by the `gsp_read_ptr` invariant.
            (&mut after_tx[..(rx - tx - 1)], &mut [])
        }
    }

    /// Returns the size of the region of the CPU message queue that the driver is currently allowed
    /// to write to, in bytes.
    ///
    /// This is the byte count matching [`Self::driver_write_area`], computed without taking
    /// references into the shared memory.
    fn driver_write_area_size(&self) -> usize {
        let tx = self.cpu_write_ptr();
        let rx = self.gsp_read_ptr();

        // `rx` and `tx` are both in `0..MSGQ_NUM_PAGES` per the invariants of `gsp_read_ptr` and
        // `cpu_write_ptr`. The minimum value case is where `rx == 0` and `tx == MSGQ_NUM_PAGES -
        // 1`, which gives `0 + MSGQ_NUM_PAGES - (MSGQ_NUM_PAGES - 1) - 1 == 0`.
        let slots = (rx + MSGQ_NUM_PAGES - tx - 1) % MSGQ_NUM_PAGES;
        num::u32_as_usize(slots) * GSP_PAGE_SIZE
    }

    /// Returns the region of the GSP message queue that the driver is currently allowed to read
    /// from.
    ///
    /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
    /// that case the second slice will have a non-zero length.
    fn driver_read_area(&self) -> (&[[u8; GSP_PAGE_SIZE]], &[[u8; GSP_PAGE_SIZE]]) {
        let tx = self.gsp_write_ptr() as usize;
        let rx = self.cpu_read_ptr() as usize;

        // SAFETY:
        // - We will only access the driver-owned part of the shared memory.
        // - Per the safety statement of the function, no concurrent access will be performed.
        let gsp_mem = unsafe { &*self.0.as_ptr() };
        let data = &gsp_mem.gspq.msgq.data;

        // The area starting at `rx` and ending at `tx - 1` modulo MSGQ_NUM_PAGES, inclusive,
        // belongs to the driver for reading.
        // PANIC:
        // - per the invariant of `cpu_read_ptr`, `rx < MSGQ_NUM_PAGES`
        // - per the invariant of `gsp_write_ptr`, `tx < MSGQ_NUM_PAGES`
        if rx <= tx {
            // The area is contiguous (and empty when `rx == tx`).
            (&data[rx..tx], &[])
        } else {
            // The area is discontiguous.
            (&data[rx..], &data[..tx])
        }
    }

    /// Allocates a region on the command queue that is large enough to send a command of `size`
    /// bytes, waiting for space to become available based on the provided timeout.
    ///
    /// This returns a [`GspCommand`] ready to be written to by the caller.
    ///
    /// # Errors
    ///
    /// - `EMSGSIZE` if the command is larger than [`GSP_MSG_QUEUE_ELEMENT_SIZE_MAX`].
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the command header is not properly aligned.
    fn allocate_command(&mut self, size: usize, timeout: Delta) -> Result<GspCommand<'_>> {
        if size_of::<GspMsgElement>() + size > GSP_MSG_QUEUE_ELEMENT_SIZE_MAX {
            return Err(EMSGSIZE);
        }
        // Poll (with 1us granularity) until the GSP has consumed enough of the queue for the
        // header plus command to fit.
        read_poll_timeout(
            || Ok(self.driver_write_area_size()),
            |available_bytes| *available_bytes >= size_of::<GspMsgElement>() + size,
            Delta::from_micros(1),
            timeout,
        )?;

        // Get the current writable area as an array of bytes.
        let (slice_1, slice_2) = {
            let (slice_1, slice_2) = self.driver_write_area();

            #[allow(clippy::incompatible_msrv)]
            (slice_1.as_flattened_mut(), slice_2.as_flattened_mut())
        };

        // Extract area for the `GspMsgElement`.
        let (header, slice_1) = GspMsgElement::from_bytes_mut_prefix(slice_1).ok_or(EIO)?;

        // Create the contents area.
        let (slice_1, slice_2) = if slice_1.len() > size {
            // Contents fits entirely in `slice_1`.
            (&mut slice_1[..size], &mut slice_2[0..0])
        } else {
            // Need all of `slice_1` and some of `slice_2`.
            let slice_2_len = size - slice_1.len();
            (slice_1, &mut slice_2[..slice_2_len])
        };

        Ok(GspCommand {
            header,
            contents: (slice_1, slice_2),
        })
    }

    // Returns the index of the memory page the GSP will write the next message to.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn gsp_write_ptr(&self) -> u32 {
        super::fw::gsp_mem::gsp_write_ptr(&self.0)
    }

    // Returns the index of the memory page the GSP will read the next command from.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn gsp_read_ptr(&self) -> u32 {
        super::fw::gsp_mem::gsp_read_ptr(&self.0)
    }

    // Returns the index of the memory page the CPU can read the next message from.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn cpu_read_ptr(&self) -> u32 {
        super::fw::gsp_mem::cpu_read_ptr(&self.0)
    }

    // Informs the GSP that it can send `elem_count` new pages into the message queue.
    fn advance_cpu_read_ptr(&mut self, elem_count: u32) {
        super::fw::gsp_mem::advance_cpu_read_ptr(&self.0, elem_count)
    }

    // Returns the index of the memory page the CPU can write the next command to.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn cpu_write_ptr(&self) -> u32 {
        super::fw::gsp_mem::cpu_write_ptr(&self.0)
    }

    // Informs the GSP that it can process `elem_count` new pages from the command queue.
    fn advance_cpu_write_ptr(&mut self, elem_count: u32) {
        super::fw::gsp_mem::advance_cpu_write_ptr(&self.0, elem_count)
    }
}
425 
/// A command ready to be sent on the command queue.
///
/// This is the type returned by [`DmaGspMem::allocate_command`].
struct GspCommand<'a> {
    // Writable reference to the header of the command.
    header: &'a mut GspMsgElement,
    // Writable slices to the contents of the command. The second slice is empty unless the
    // command loops over the command queue.
    contents: (&'a mut [u8], &'a mut [u8]),
}
436 
/// A message ready to be processed from the message queue.
///
/// This is the type returned by [`Cmdq::wait_for_msg`].
struct GspMessage<'a> {
    // Reference to the header of the message.
    header: &'a GspMsgElement,
    // Slices to the contents of the message. The second slice is empty unless the message loops
    // over the message queue.
    contents: (&'a [u8], &'a [u8]),
}
447 
/// GSP command queue.
///
/// Provides the ability to send commands and receive messages from the GSP using a shared memory
/// area.
///
/// All mutable state lives behind the `inner` mutex so that a send+receive cycle can be
/// performed atomically with respect to other users.
#[pin_data]
pub(crate) struct Cmdq {
    /// Inner mutex-protected state.
    #[pin]
    inner: Mutex<CmdqInner>,
    /// DMA handle of the command queue's shared memory region.
    pub(super) dma_handle: DmaAddress,
}
460 
impl Cmdq {
    /// Offset of the data after the PTEs.
    const POST_PTE_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq);

    /// Offset of command queue ring buffer, relative to the end of the PTE array.
    pub(crate) const CMDQ_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq)
        + core::mem::offset_of!(Msgq, msgq)
        - Self::POST_PTE_OFFSET;

    /// Offset of message queue ring buffer, relative to the end of the PTE array.
    pub(crate) const STATQ_OFFSET: usize = core::mem::offset_of!(GspMem, gspq)
        + core::mem::offset_of!(Msgq, msgq)
        - Self::POST_PTE_OFFSET;

    /// Number of page table entries for the GSP shared region.
    pub(crate) const NUM_PTES: usize = size_of::<GspMem>() >> GSP_PAGE_SHIFT;

    /// Default timeout for receiving a message from the GSP.
    pub(super) const RECEIVE_TIMEOUT: Delta = Delta::from_secs(5);

    /// Creates a new command queue for `dev`.
    ///
    /// The shared memory is allocated eagerly inside the returned initializer so that
    /// `dma_handle` can be captured before `inner` is pin-initialized.
    pub(crate) fn new(dev: &device::Device<device::Bound>) -> impl PinInit<Self, Error> + '_ {
        pin_init_scope(move || {
            let gsp_mem = DmaGspMem::new(dev)?;

            Ok(try_pin_init!(Self {
                dma_handle: gsp_mem.0.dma_handle(),
                inner <- new_mutex!(CmdqInner {
                    dev: dev.into(),
                    gsp_mem,
                    seq: 0,
                }),
            }))
        })
    }

    /// Computes the checksum for the message pointed to by `it`.
    ///
    /// A message is made of several parts, so `it` is an iterator over byte slices representing
    /// these parts.
    ///
    /// Conceptually, the bytes are packed into consecutive little-endian `u64` words (byte `idx`
    /// is shifted to bit position `(idx % 8) * 8`), all words are XOR-folded together, and the
    /// two 32-bit halves of the result are XORed into the final value. A message whose stored
    /// checksum is included in `it` therefore sums to zero when intact.
    fn calculate_checksum<T: Iterator<Item = u8>>(it: T) -> u32 {
        let sum64 = it
            .enumerate()
            .map(|(idx, byte)| (((idx % 8) * 8) as u32, byte))
            .fold(0, |acc, (rol, byte)| acc ^ u64::from(byte).rotate_left(rol));

        ((sum64 >> 32) as u32) ^ (sum64 as u32)
    }

    /// Notifies the GSP that we have updated the command queue pointers.
    fn notify_gsp(bar: &Bar0) {
        // NOTE(review): `0` appears to select the (only) command queue — confirm against the
        // NV_PGSP_QUEUE_HEAD register documentation.
        regs::NV_PGSP_QUEUE_HEAD::default()
            .set_address(0)
            .write(bar);
    }

    /// Sends `command` to the GSP and waits for the reply.
    ///
    /// Messages with non-matching function codes are silently consumed until the expected reply
    /// arrives.
    ///
    /// The queue is locked for the entire send+receive cycle to ensure that no other command can
    /// be interleaved.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if space does not become available to send the command, or if the reply is
    ///   not received within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command and reply initializers are propagated as-is.
    pub(crate) fn send_command<M>(&self, bar: &Bar0, command: M) -> Result<M::Reply>
    where
        M: CommandToGsp,
        M::Reply: MessageFromGsp,
        Error: From<M::InitError>,
        Error: From<<M::Reply as MessageFromGsp>::InitError>,
    {
        let mut inner = self.inner.lock();
        inner.send_command(bar, command)?;

        loop {
            match inner.receive_msg::<M::Reply>(Self::RECEIVE_TIMEOUT) {
                Ok(reply) => break Ok(reply),
                // `ERANGE` means a recognized but non-matching message was consumed; keep
                // waiting for the actual reply. Each iteration gets a fresh timeout.
                Err(ERANGE) => continue,
                Err(e) => break Err(e),
            }
        }
    }

    /// Sends `command` to the GSP without waiting for a reply.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    pub(crate) fn send_command_no_wait<M>(&self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp<Reply = NoReply>,
        Error: From<M::InitError>,
    {
        self.inner.lock().send_command(bar, command)
    }

    /// Receive a message from the GSP.
    ///
    /// See [`CmdqInner::receive_msg`] for details.
    pub(crate) fn receive_msg<M: MessageFromGsp>(&self, timeout: Delta) -> Result<M>
    where
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        self.inner.lock().receive_msg(timeout)
    }
}
580 
/// Inner mutex protected state of [`Cmdq`].
struct CmdqInner {
    /// Device this command queue belongs to. Used for diagnostics logging.
    dev: ARef<device::Device>,
    /// Current command sequence number. Incremented after each command is sent.
    seq: u32,
    /// Memory area shared with the GSP for communicating commands and messages.
    gsp_mem: DmaGspMem,
}
590 
impl CmdqInner {
    /// Timeout for waiting for space on the command queue.
    const ALLOCATE_TIMEOUT: Delta = Delta::from_secs(1);

    /// Sends `command` to the GSP, without splitting it.
    ///
    /// # Errors
    ///
    /// - `EMSGSIZE` if the command exceeds the maximum queue element size.
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    fn send_single_command<M>(&mut self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp,
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        let size_in_bytes = command.size();
        let dst = self
            .gsp_mem
            .allocate_command(size_in_bytes, Self::ALLOCATE_TIMEOUT)?;

        // Extract area for the command itself. The GSP message header and the command header
        // together are guaranteed to fit entirely into a single page, so it's ok to only look
        // at `dst.contents.0` here.
        let (cmd, payload_1) = M::Command::from_bytes_mut_prefix(dst.contents.0).ok_or(EIO)?;

        // Fill the header and command in-place.
        let msg_element = GspMsgElement::init(self.seq, size_in_bytes, M::FUNCTION);
        // SAFETY: `msg_header` and `cmd` are valid references, and not touched if the initializer
        // fails.
        unsafe {
            msg_element.__init(core::ptr::from_mut(dst.header))?;
            command.init().__init(core::ptr::from_mut(cmd))?;
        }

        // Fill the variable-length payload, which may be empty.
        let mut sbuffer = SBufferIter::new_writer([&mut payload_1[..], &mut dst.contents.1[..]]);
        command.init_variable_payload(&mut sbuffer)?;

        // The command promised `variable_payload_len` bytes; leaving any unwritten is an error.
        if !sbuffer.is_empty() {
            return Err(EIO);
        }
        // Release the borrows on the payload slices so the checksum below can read
        // `dst.contents` again.
        drop(sbuffer);

        // Compute checksum now that the whole message is ready.
        dst.header
            .set_checksum(Cmdq::calculate_checksum(SBufferIter::new_reader([
                dst.header.as_bytes(),
                dst.contents.0,
                dst.contents.1,
            ])));

        dev_dbg!(
            &self.dev,
            "GSP RPC: send: seq# {}, function={:?}, length=0x{:x}\n",
            self.seq,
            M::FUNCTION,
            dst.header.length(),
        );

        // All set - update the write pointer and inform the GSP of the new command.
        let elem_count = dst.header.element_count();
        self.seq += 1;
        self.gsp_mem.advance_cpu_write_ptr(elem_count);
        Cmdq::notify_gsp(bar);

        Ok(())
    }

    /// Sends `command` to the GSP.
    ///
    /// The command may be split into multiple messages if it is large.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    fn send_command<M>(&mut self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp,
        Error: From<M::InitError>,
    {
        match SplitState::new(command)? {
            SplitState::Single(command) => self.send_single_command(bar, command),
            SplitState::Split(command, mut continuations) => {
                // Send the (truncated) first message, then each continuation record in turn.
                self.send_single_command(bar, command)?;

                // `while let` (instead of `for`) because `continuations` lends each record.
                while let Some(continuation) = continuations.next() {
                    // Turbofish needed because the compiler cannot infer M here.
                    self.send_single_command::<ContinuationRecord<'_>>(bar, continuation)?;
                }

                Ok(())
            }
        }
    }

    /// Wait for a message to become available on the message queue.
    ///
    /// This works purely at the transport layer and does not interpret or validate the message
    /// beyond the advertised length in its [`GspMsgElement`].
    ///
    /// This method returns:
    ///
    /// - A reference to the [`GspMsgElement`] of the message,
    /// - Two byte slices with the contents of the message. The second slice is empty unless the
    ///   message loops across the message queue.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
    ///   message queue.
    ///
    /// Error codes returned by the message constructor are propagated as-is.
    fn wait_for_msg(&self, timeout: Delta) -> Result<GspMessage<'_>> {
        // Wait for a message to arrive from the GSP.
        let (slice_1, slice_2) = read_poll_timeout(
            || Ok(self.gsp_mem.driver_read_area()),
            |driver_area| !driver_area.0.is_empty(),
            Delta::from_millis(1),
            timeout,
        )
        .map(|(slice_1, slice_2)| {
            #[allow(clippy::incompatible_msrv)]
            (slice_1.as_flattened(), slice_2.as_flattened())
        })?;

        // Extract the `GspMsgElement`.
        let (header, slice_1) = GspMsgElement::from_bytes_prefix(slice_1).ok_or(EIO)?;

        dev_dbg!(
            &self.dev,
            "GSP RPC: receive: seq# {}, function={:?}, length=0x{:x}\n",
            header.sequence(),
            header.function(),
            header.length(),
        );

        let payload_length = header.payload_length();

        // Check that the driver read area is large enough for the message.
        if slice_1.len() + slice_2.len() < payload_length {
            return Err(EIO);
        }

        // Cut the message slices down to the actual length of the message.
        let (slice_1, slice_2) = if slice_1.len() > payload_length {
            // PANIC: we checked above that `slice_1` is at least as long as `payload_length`.
            (slice_1.split_at(payload_length).0, &slice_2[0..0])
        } else {
            (
                slice_1,
                // PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as
                // large as `payload_length`.
                slice_2.split_at(payload_length - slice_1.len()).0,
            )
        };

        // Validate checksum. An intact message (whose stored checksum is covered by the header
        // bytes) folds to zero.
        if Cmdq::calculate_checksum(SBufferIter::new_reader([
            header.as_bytes(),
            slice_1,
            slice_2,
        ])) != 0
        {
            dev_err!(
                &self.dev,
                "GSP RPC: receive: Call {} - bad checksum\n",
                header.sequence()
            );
            return Err(EIO);
        }

        Ok(GspMessage {
            header,
            contents: (slice_1, slice_2),
        })
    }

    /// Receive a message from the GSP.
    ///
    /// The expected message type is specified using the `M` generic parameter. If the pending
    /// message has a different function code, `ERANGE` is returned and the message is consumed.
    ///
    /// The read pointer is always advanced past the message, regardless of whether it matched.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
    ///   message queue.
    /// - `EINVAL` if the function code of the message was not recognized.
    /// - `ERANGE` if the message had a recognized but non-matching function code.
    ///
    /// Error codes returned by [`MessageFromGsp::read`] are propagated as-is.
    fn receive_msg<M: MessageFromGsp>(&mut self, timeout: Delta) -> Result<M>
    where
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        let message = self.wait_for_msg(timeout)?;
        let function = message.header.function().map_err(|_| EINVAL)?;

        // Extract the message. Store the result as we want to advance the read pointer even in
        // case of failure.
        let result = if function == M::FUNCTION {
            let (cmd, contents_1) = M::Message::from_bytes_prefix(message.contents.0).ok_or(EIO)?;
            let mut sbuffer = SBufferIter::new_reader([contents_1, message.contents.1]);

            M::read(cmd, &mut sbuffer)
                .map_err(|e| e.into())
                // Warn (but don't fail) if the reader left payload bytes unconsumed.
                .inspect(|_| {
                    if !sbuffer.is_empty() {
                        dev_warn!(
                            &self.dev,
                            "GSP message {:?} has unprocessed data\n",
                            function
                        );
                    }
                })
        } else {
            Err(ERANGE)
        };

        // Advance the read pointer past this message (even on failure, so that a non-matching
        // or malformed message does not stall the queue).
        self.gsp_mem.advance_cpu_read_ptr(u32::try_from(
            message.header.length().div_ceil(GSP_PAGE_SIZE),
        )?);

        result
    }
}
831