xref: /linux/drivers/gpu/nova-core/gsp/cmdq.rs (revision 38f7e5450ebfc6f2e046a249a3f629ea7bec8c31)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 mod continuation;
4 
5 use core::mem;
6 
7 use kernel::{
8     device,
9     dma::{
10         Coherent,
11         DmaAddress, //
12     },
13     dma_write,
14     io::{
15         poll::read_poll_timeout,
16         Io, //
17     },
18     new_mutex,
19     prelude::*,
20     sync::{
21         aref::ARef,
22         Mutex, //
23     },
24     time::Delta,
25     transmute::{
26         AsBytes,
27         FromBytes, //
28     },
29 };
30 
31 use continuation::{
32     ContinuationRecord,
33     SplitState, //
34 };
35 
36 use pin_init::pin_init_scope;
37 
38 use crate::{
39     driver::Bar0,
40     gsp::{
41         fw::{
42             GspMsgElement,
43             MsgFunction,
44             MsgqRxHeader,
45             MsgqTxHeader,
46             GSP_MSG_QUEUE_ELEMENT_SIZE_MAX, //
47         },
48         PteArray,
49         GSP_PAGE_SHIFT,
50         GSP_PAGE_SIZE, //
51     },
52     num,
53     regs,
54     sbuffer::SBufferIter, //
55 };
56 
/// Marker type representing the absence of a reply for a command. Commands using this as their
/// reply type are sent using [`Cmdq::send_command_no_wait`].
///
/// This type is never instantiated; it exists only to be used as [`CommandToGsp::Reply`].
pub(crate) struct NoReply;
60 
/// Trait implemented by types representing a command to send to the GSP.
///
/// The main purpose of this trait is to provide [`Cmdq`] with the information it needs to send
/// a given command.
///
/// [`CommandToGsp::init`] in particular is responsible for initializing the command directly
/// into the space reserved for it in the command queue buffer.
///
/// Some commands may be followed by a variable-length payload. For these, the
/// [`CommandToGsp::variable_payload_len`] and [`CommandToGsp::init_variable_payload`] need to be
/// defined as well.
pub(crate) trait CommandToGsp {
    /// Function identifying this command to the GSP.
    const FUNCTION: MsgFunction;

    /// Type generated by [`CommandToGsp::init`], to be written into the command queue buffer.
    type Command: FromBytes + AsBytes;

    /// Type of the reply expected from the GSP, or [`NoReply`] for commands that don't
    /// have a reply.
    type Reply;

    /// Error type returned by [`CommandToGsp::init`].
    type InitError;

    /// In-place command initializer responsible for filling the command in the command queue
    /// buffer.
    ///
    /// Being an [`Init`], this writes the command directly into queue memory instead of going
    /// through an on-stack instance.
    fn init(&self) -> impl Init<Self::Command, Self::InitError>;

    /// Size of the variable-length payload following the command structure generated by
    /// [`CommandToGsp::init`].
    ///
    /// Most commands don't have a variable-length payload, so this is zero by default.
    fn variable_payload_len(&self) -> usize {
        0
    }

    /// Method initializing the variable-length payload.
    ///
    /// The command buffer is circular, which means that we may need to jump back to its beginning
    /// while in the middle of a command. For this reason, the variable-length payload is
    /// initialized using a [`SBufferIter`] spanning up to two discontiguous slices.
    ///
    /// This method will receive a buffer of the length returned by
    /// [`CommandToGsp::variable_payload_len`], and must write every single byte of it. Leaving
    /// unwritten space will lead to an error (`EIO` in the send path).
    ///
    /// Most commands don't have a variable-length payload, so this does nothing by default.
    fn init_variable_payload(
        &self,
        _dst: &mut SBufferIter<core::array::IntoIter<&mut [u8], 2>>,
    ) -> Result {
        Ok(())
    }

    /// Total size of the command (including its variable-length payload) without the
    /// [`GspMsgElement`] header.
    fn size(&self) -> usize {
        size_of::<Self::Command>() + self.variable_payload_len()
    }
}
122 
/// Trait representing messages received from the GSP.
///
/// This trait tells [`Cmdq::receive_msg`] how it can receive a given type of message.
pub(crate) trait MessageFromGsp: Sized {
    /// Function identifying this message from the GSP.
    const FUNCTION: MsgFunction;

    /// Error type returned by [`MessageFromGsp::read`].
    type InitError;

    /// Type containing the raw message to be read from the message queue.
    type Message: FromBytes;

    /// Method reading the message from the message queue and returning it.
    ///
    /// From a `Self::Message` and a [`SBufferIter`], constructs an instance of `Self` and returns
    /// it.
    ///
    /// `sbuffer` provides the part of the payload following the fixed-size `Self::Message`,
    /// possibly split into two slices when the message wraps around the queue.
    fn read(
        msg: &Self::Message,
        sbuffer: &mut SBufferIter<core::array::IntoIter<&[u8], 2>>,
    ) -> Result<Self, Self::InitError>;
}
145 
/// Number of GSP pages making the [`Msgq`].
///
/// NOTE(review): with `MsgqData` aligned to the page size, the TX/RX headers preceding it in
/// [`Msgq`] are padded out to a full page, presumably making the whole `Msgq` 0x40 pages —
/// confirm against `GspMsgQueuesInit()` in the open-gpu-kernel-modules sources.
pub(crate) const MSGQ_NUM_PAGES: u32 = 0x3f;

/// Circular buffer of a [`Msgq`].
///
/// This area of memory is to be shared between the driver and the GSP to exchange commands or
/// messages.
#[repr(C, align(0x1000))]
#[derive(Debug)]
struct MsgqData {
    /// Ring storage, addressed page by page.
    data: [[u8; GSP_PAGE_SIZE]; num::u32_as_usize(MSGQ_NUM_PAGES)],
}

// Annoyingly we are forced to use a literal to specify the alignment of
// `MsgqData`, so check that it corresponds to the actual GSP page size here.
static_assert!(align_of::<MsgqData>() == GSP_PAGE_SIZE);
162 
/// Unidirectional message queue.
///
/// Contains the data for a message queue, that either the driver or GSP writes to.
///
/// Note that while the write pointer of `tx` corresponds to the `msgq` of the same instance, the
/// read pointer of `rx` actually refers to the `Msgq` owned by the other side.
/// This design ensures that only the driver or GSP ever writes to a given instance of this struct.
#[repr(C)]
// There is no struct defined for this in the open-gpu-kernel-source headers.
// Instead it is defined by code in `GspMsgQueuesInit()`.
// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
pub(super) struct Msgq {
    /// Header for sending messages, including the write pointer (into this instance's `msgq`).
    pub(super) tx: MsgqTxHeader,
    /// Header for receiving messages, including the read pointer (into the peer's `msgq`).
    pub(super) rx: MsgqRxHeader,
    /// The message queue proper: [`MSGQ_NUM_PAGES`] pages of circular buffer.
    msgq: MsgqData,
}
182 
/// Structure shared between the driver and the GSP and containing the command and message queues.
///
/// Layout: one page worth of self-mapping PTEs, followed by the CPU-owned queue, then the
/// GSP-owned queue.
#[repr(C)]
// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
pub(super) struct GspMem {
    /// Self-mapping page table entries.
    ptes: PteArray<{ Self::PTE_ARRAY_SIZE }>,
    /// CPU queue: the driver writes commands here, and the GSP reads them. It also contains the
    /// write and read pointers that the CPU updates. This means that the read pointer here is an
    /// index into the GSP queue.
    ///
    /// This member is read-only for the GSP.
    pub(super) cpuq: Msgq,
    /// GSP queue: the GSP writes messages here, and the driver reads them. It also contains the
    /// write and read pointers that the GSP updates. This means that the read pointer here is an
    /// index into the CPU queue.
    ///
    /// This member is read-only for the driver.
    pub(super) gspq: Msgq,
}

impl GspMem {
    /// Number of self-mapping page table entries: exactly one GSP page of 64-bit PTEs.
    const PTE_ARRAY_SIZE: usize = GSP_PAGE_SIZE / size_of::<u64>();
}
206 
// `AsBytes`/`FromBytes` are needed so `GspMem` can be placed in a [`Coherent`] allocation and
// accessed through the `dma_write!` machinery (see `DmaGspMem`) — NOTE(review): presumed from
// usage below; confirm against the `kernel::dma` trait bounds.

// SAFETY: These structs don't meet the no-padding requirements of AsBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl AsBytes for GspMem {}

// SAFETY: These structs don't meet the no-padding requirements of FromBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl FromBytes for GspMem {}
214 
/// Wrapper around [`GspMem`] to share it with the GPU using a [`Coherent`].
///
/// This provides the low-level functionality to communicate with the GSP, including allocation of
/// queue space to write messages to and management of read/write pointers.
///
/// This is shared with the GSP, with clear ownership rules regarding the command queues:
///
/// * The driver owns (i.e. can write to) the part of the CPU message queue between the CPU write
///   pointer and the GSP read pointer. This region is returned by [`Self::driver_write_area`].
/// * The driver owns (i.e. can read from) the part of the GSP message queue between the CPU read
///   pointer and the GSP write pointer. This region is returned by [`Self::driver_read_area`].
struct DmaGspMem(Coherent<GspMem>);
227 
impl DmaGspMem {
    /// Allocate a new instance and map it for `dev`.
    ///
    /// The shared region is zero-allocated, the self-mapping PTEs are filled, and the CPU-side
    /// TX/RX headers are initialized so the CPU queue starts out empty. The GSP-side (`gspq`)
    /// headers are left as zeroed by the allocation.
    fn new(dev: &device::Device<device::Bound>) -> Result<Self> {
        // Size of one unidirectional queue, advertised to the GSP through the TX header.
        const MSGQ_SIZE: u32 = num::usize_into_u32::<{ size_of::<Msgq>() }>();
        // Offset of the RX header within a `Msgq`, also advertised through the TX header.
        const RX_HDR_OFF: u32 = num::usize_into_u32::<{ mem::offset_of!(Msgq, rx) }>();

        let gsp_mem = Coherent::<GspMem>::zeroed(dev, GFP_KERNEL)?;

        let start = gsp_mem.dma_handle();
        // Write values one by one to avoid an on-stack instance of `PteArray`.
        for i in 0..GspMem::PTE_ARRAY_SIZE {
            dma_write!(gsp_mem, .ptes.0[i], PteArray::<0>::entry(start, i)?);
        }

        dma_write!(
            gsp_mem,
            .cpuq.tx,
            MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES)
        );
        dma_write!(gsp_mem, .cpuq.rx, MsgqRxHeader::new());

        Ok(Self(gsp_mem))
    }

    /// Returns the region of the CPU message queue that the driver is currently allowed to write
    /// to.
    ///
    /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
    /// that case the second slice will have a non-zero length.
    ///
    /// One slot is always kept empty before the GSP read pointer, so a full queue can be
    /// distinguished from an empty one.
    fn driver_write_area(&mut self) -> (&mut [[u8; GSP_PAGE_SIZE]], &mut [[u8; GSP_PAGE_SIZE]]) {
        let tx = self.cpu_write_ptr() as usize;
        let rx = self.gsp_read_ptr() as usize;

        // SAFETY:
        // - We will only access the driver-owned part of the shared memory.
        // - The exclusive `&mut self` borrow ensures no concurrent driver-side access is
        //   performed, and the region between `tx` and `rx - 1` is not written by the GSP.
        let gsp_mem = unsafe { &mut *self.0.as_mut() };
        // PANIC: per the invariant of `cpu_write_ptr`, `tx` is `< MSGQ_NUM_PAGES`.
        let (before_tx, after_tx) = gsp_mem.cpuq.msgq.data.split_at_mut(tx);

        // The area starting at `tx` and ending at `rx - 2` modulo MSGQ_NUM_PAGES, inclusive,
        // belongs to the driver for writing.

        if rx == 0 {
            // Since `rx` is zero, leave an empty slot at end of the buffer.
            let last = after_tx.len() - 1;
            (&mut after_tx[..last], &mut [])
        } else if rx <= tx {
            // The area is discontiguous and we leave an empty slot before `rx`.
            // PANIC:
            // - The index `rx - 1` is non-negative because `rx != 0` in this branch.
            // - The index does not exceed `before_tx.len()` (which equals `tx`) because
            //   `rx <= tx` in this branch.
            (after_tx, &mut before_tx[..(rx - 1)])
        } else {
            // The area is contiguous and we leave an empty slot before `rx`.
            // PANIC:
            // - The index `rx - tx - 1` is non-negative because `rx > tx` in this branch.
            // - The index does not exceed `after_tx.len()` (which is `MSGQ_NUM_PAGES - tx`)
            //   because `rx < MSGQ_NUM_PAGES` by the `gsp_read_ptr` invariant.
            (&mut after_tx[..(rx - tx - 1)], &mut [])
        }
    }

    /// Returns the size of the region of the CPU message queue that the driver is currently allowed
    /// to write to, in bytes.
    ///
    /// This is the byte count of what [`Self::driver_write_area`] would return, computed without
    /// forming the slices (and thus without requiring `&mut self`).
    fn driver_write_area_size(&self) -> usize {
        let tx = self.cpu_write_ptr();
        let rx = self.gsp_read_ptr();

        // `rx` and `tx` are both in `0..MSGQ_NUM_PAGES` per the invariants of `gsp_read_ptr` and
        // `cpu_write_ptr`. The minimum value case is where `rx == 0` and `tx == MSGQ_NUM_PAGES -
        // 1`, which gives `0 + MSGQ_NUM_PAGES - (MSGQ_NUM_PAGES - 1) - 1 == 0`.
        let slots = (rx + MSGQ_NUM_PAGES - tx - 1) % MSGQ_NUM_PAGES;
        num::u32_as_usize(slots) * GSP_PAGE_SIZE
    }

    /// Returns the region of the GSP message queue that the driver is currently allowed to read
    /// from.
    ///
    /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
    /// that case the second slice will have a non-zero length.
    fn driver_read_area(&self) -> (&[[u8; GSP_PAGE_SIZE]], &[[u8; GSP_PAGE_SIZE]]) {
        let tx = self.gsp_write_ptr() as usize;
        let rx = self.cpu_read_ptr() as usize;

        // SAFETY:
        // - We will only access the driver-owned (readable) part of the shared memory, i.e. the
        //   region between `rx` and `tx - 1` which the GSP has finished writing.
        let gsp_mem = unsafe { &*self.0.as_ptr() };
        let data = &gsp_mem.gspq.msgq.data;

        // The area starting at `rx` and ending at `tx - 1` modulo MSGQ_NUM_PAGES, inclusive,
        // belongs to the driver for reading.
        // PANIC:
        // - per the invariant of `cpu_read_ptr`, `rx < MSGQ_NUM_PAGES`
        // - per the invariant of `gsp_write_ptr`, `tx < MSGQ_NUM_PAGES`
        if rx <= tx {
            // The area is contiguous.
            (&data[rx..tx], &[])
        } else {
            // The area is discontiguous.
            (&data[rx..], &data[..tx])
        }
    }

    /// Allocates a region on the command queue that is large enough to send a command of `size`
    /// bytes, waiting for space to become available based on the provided timeout.
    ///
    /// This returns a [`GspCommand`] ready to be written to by the caller.
    ///
    /// # Errors
    ///
    /// - `EMSGSIZE` if the command is larger than [`GSP_MSG_QUEUE_ELEMENT_SIZE_MAX`].
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the command header is not properly aligned.
    fn allocate_command(&mut self, size: usize, timeout: Delta) -> Result<GspCommand<'_>> {
        if size_of::<GspMsgElement>() + size > GSP_MSG_QUEUE_ELEMENT_SIZE_MAX {
            return Err(EMSGSIZE);
        }
        // Poll until the writable area can hold the header plus `size` bytes of command.
        read_poll_timeout(
            || Ok(self.driver_write_area_size()),
            |available_bytes| *available_bytes >= size_of::<GspMsgElement>() + size,
            Delta::from_micros(1),
            timeout,
        )?;

        // Get the current writable area as an array of bytes.
        let (slice_1, slice_2) = {
            let (slice_1, slice_2) = self.driver_write_area();

            #[allow(clippy::incompatible_msrv)]
            (slice_1.as_flattened_mut(), slice_2.as_flattened_mut())
        };

        // Extract area for the `GspMsgElement`.
        let (header, slice_1) = GspMsgElement::from_bytes_mut_prefix(slice_1).ok_or(EIO)?;

        // Create the contents area.
        let (slice_1, slice_2) = if slice_1.len() > size {
            // Contents fits entirely in `slice_1`.
            (&mut slice_1[..size], &mut slice_2[0..0])
        } else {
            // Need all of `slice_1` and some of `slice_2`.
            let slice_2_len = size - slice_1.len();
            (slice_1, &mut slice_2[..slice_2_len])
        };

        Ok(GspCommand {
            header,
            contents: (slice_1, slice_2),
        })
    }

    // Returns the index of the memory page the GSP will write the next message to.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn gsp_write_ptr(&self) -> u32 {
        super::fw::gsp_mem::gsp_write_ptr(&self.0)
    }

    // Returns the index of the memory page the GSP will read the next command from.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn gsp_read_ptr(&self) -> u32 {
        super::fw::gsp_mem::gsp_read_ptr(&self.0)
    }

    // Returns the index of the memory page the CPU can read the next message from.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn cpu_read_ptr(&self) -> u32 {
        super::fw::gsp_mem::cpu_read_ptr(&self.0)
    }

    // Informs the GSP that it can send `elem_count` new pages into the message queue.
    fn advance_cpu_read_ptr(&mut self, elem_count: u32) {
        super::fw::gsp_mem::advance_cpu_read_ptr(&self.0, elem_count)
    }

    // Returns the index of the memory page the CPU can write the next command to.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn cpu_write_ptr(&self) -> u32 {
        super::fw::gsp_mem::cpu_write_ptr(&self.0)
    }

    // Informs the GSP that it can process `elem_count` new pages from the command queue.
    fn advance_cpu_write_ptr(&mut self, elem_count: u32) {
        super::fw::gsp_mem::advance_cpu_write_ptr(&self.0, elem_count)
    }
}
428 
/// A command ready to be sent on the command queue.
///
/// This is the type returned by [`DmaGspMem::allocate_command`].
struct GspCommand<'a> {
    // Writable reference to the header of the command.
    header: &'a mut GspMsgElement,
    // Writable slices to the contents of the command. The second slice is empty unless the
    // command wraps around the end of the command queue.
    contents: (&'a mut [u8], &'a mut [u8]),
}
439 
/// A message ready to be processed from the message queue.
///
/// This is the type returned by [`CmdqInner::wait_for_msg`].
struct GspMessage<'a> {
    // Reference to the header of the message.
    header: &'a GspMsgElement,
    // Slices to the contents of the message. The second slice is empty unless the message wraps
    // around the end of the message queue.
    contents: (&'a [u8], &'a [u8]),
}
450 
/// GSP command queue.
///
/// Provides the ability to send commands and receive messages from the GSP using a shared memory
/// area.
#[pin_data]
pub(crate) struct Cmdq {
    /// Inner mutex-protected state.
    #[pin]
    inner: Mutex<CmdqInner>,
    /// DMA handle of the command queue's shared memory region. Copied out of the [`Coherent`]
    /// at construction time so it can be read without taking the lock.
    pub(super) dma_handle: DmaAddress,
}
463 
impl Cmdq {
    /// Offset of the data after the PTEs.
    const POST_PTE_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq);

    /// Offset of command queue ring buffer.
    ///
    /// Expressed relative to the end of the PTE area, hence the `POST_PTE_OFFSET` subtraction.
    pub(crate) const CMDQ_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq)
        + core::mem::offset_of!(Msgq, msgq)
        - Self::POST_PTE_OFFSET;

    /// Offset of message queue ring buffer.
    ///
    /// Expressed relative to the end of the PTE area, like [`Self::CMDQ_OFFSET`].
    pub(crate) const STATQ_OFFSET: usize = core::mem::offset_of!(GspMem, gspq)
        + core::mem::offset_of!(Msgq, msgq)
        - Self::POST_PTE_OFFSET;

    /// Number of page table entries for the GSP shared region: one per GSP page of [`GspMem`].
    pub(crate) const NUM_PTES: usize = size_of::<GspMem>() >> GSP_PAGE_SHIFT;

    /// Default timeout for receiving a message from the GSP.
    pub(super) const RECEIVE_TIMEOUT: Delta = Delta::from_secs(5);

    /// Creates a new command queue for `dev`.
    ///
    /// The shared memory is allocated and initialized first so its DMA handle can be stored in
    /// [`Self::dma_handle`] while the rest of the state is pin-initialized.
    pub(crate) fn new(dev: &device::Device<device::Bound>) -> impl PinInit<Self, Error> + '_ {
        pin_init_scope(move || {
            let gsp_mem = DmaGspMem::new(dev)?;

            Ok(try_pin_init!(Self {
                dma_handle: gsp_mem.0.dma_handle(),
                inner <- new_mutex!(CmdqInner {
                    dev: dev.into(),
                    gsp_mem,
                    seq: 0,
                }),
            }))
        })
    }

    /// Computes the checksum for the message pointed to by `it`.
    ///
    /// A message is made of several parts, so `it` is typically an [`SBufferIter`] chaining the
    /// byte slices of these parts into a single byte iterator.
    ///
    /// Each byte is rotated left in a 64-bit accumulator by `(index % 8) * 8` bits and XORed in —
    /// equivalent to XORing the message's little-endian 64-bit words together — and the two
    /// 32-bit halves of the accumulator are then folded with a final XOR. Consequently, a message
    /// whose checksum field is already filled in checksums to zero (see the receive path).
    fn calculate_checksum<T: Iterator<Item = u8>>(it: T) -> u32 {
        let sum64 = it
            .enumerate()
            .map(|(idx, byte)| (((idx % 8) * 8) as u32, byte))
            .fold(0, |acc, (rol, byte)| acc ^ u64::from(byte).rotate_left(rol));

        ((sum64 >> 32) as u32) ^ (sum64 as u32)
    }

    /// Notifies the GSP that we have updated the command queue pointers.
    ///
    /// NOTE(review): the doorbell register is written with an `address` field of zero —
    /// presumably the write itself is the notification rather than the value; confirm against
    /// the NV_PGSP_QUEUE_HEAD register definition.
    fn notify_gsp(bar: &Bar0) {
        bar.write_reg(regs::NV_PGSP_QUEUE_HEAD::zeroed().with_address(0u32));
    }

    /// Sends `command` to the GSP and waits for the reply.
    ///
    /// Messages with non-matching function codes are silently consumed until the expected reply
    /// arrives.
    ///
    /// The queue is locked for the entire send+receive cycle to ensure that no other command can
    /// be interleaved.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if space does not become available to send the command, or if the reply is
    ///   not received within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command and reply initializers are propagated as-is.
    pub(crate) fn send_command<M>(&self, bar: &Bar0, command: M) -> Result<M::Reply>
    where
        M: CommandToGsp,
        M::Reply: MessageFromGsp,
        Error: From<M::InitError>,
        Error: From<<M::Reply as MessageFromGsp>::InitError>,
    {
        let mut inner = self.inner.lock();
        inner.send_command(bar, command)?;

        loop {
            match inner.receive_msg::<M::Reply>(Self::RECEIVE_TIMEOUT) {
                Ok(reply) => break Ok(reply),
                // `ERANGE`: a recognized but non-matching message was consumed; keep waiting
                // for the actual reply.
                Err(ERANGE) => continue,
                Err(e) => break Err(e),
            }
        }
    }

    /// Sends `command` to the GSP without waiting for a reply.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    pub(crate) fn send_command_no_wait<M>(&self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp<Reply = NoReply>,
        Error: From<M::InitError>,
    {
        self.inner.lock().send_command(bar, command)
    }

    /// Receive a message from the GSP.
    ///
    /// See [`CmdqInner::receive_msg`] for details.
    pub(crate) fn receive_msg<M: MessageFromGsp>(&self, timeout: Delta) -> Result<M>
    where
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        self.inner.lock().receive_msg(timeout)
    }
}
581 
/// Inner mutex protected state of [`Cmdq`].
struct CmdqInner {
    /// Device this command queue belongs to. Kept for logging via `dev_dbg!`/`dev_err!`.
    dev: ARef<device::Device>,
    /// Current command sequence number. Incremented once per message sent to the GSP.
    seq: u32,
    /// Memory area shared with the GSP for communicating commands and messages.
    gsp_mem: DmaGspMem,
}
591 
impl CmdqInner {
    /// Timeout for waiting for space on the command queue.
    const ALLOCATE_TIMEOUT: Delta = Delta::from_secs(1);

    /// Sends `command` to the GSP, without splitting it.
    ///
    /// # Errors
    ///
    /// - `EMSGSIZE` if the command exceeds the maximum queue element size.
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    fn send_single_command<M>(&mut self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp,
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        let size_in_bytes = command.size();
        let dst = self
            .gsp_mem
            .allocate_command(size_in_bytes, Self::ALLOCATE_TIMEOUT)?;

        // Extract area for the command itself. The GSP message header and the command header
        // together are guaranteed to fit entirely into a single page, so it's ok to only look
        // at `dst.contents.0` here.
        let (cmd, payload_1) = M::Command::from_bytes_mut_prefix(dst.contents.0).ok_or(EIO)?;

        // Fill the header and command in-place.
        let msg_element = GspMsgElement::init(self.seq, size_in_bytes, M::FUNCTION);
        // SAFETY: `dst.header` and `cmd` are valid references, and not touched if the initializer
        // fails.
        unsafe {
            msg_element.__init(core::ptr::from_mut(dst.header))?;
            command.init().__init(core::ptr::from_mut(cmd))?;
        }

        // Fill the variable-length payload, which may be empty.
        let mut sbuffer = SBufferIter::new_writer([&mut payload_1[..], &mut dst.contents.1[..]]);
        command.init_variable_payload(&mut sbuffer)?;

        // Any space left unwritten means the command's advertised payload length and its
        // `init_variable_payload` disagree.
        if !sbuffer.is_empty() {
            return Err(EIO);
        }
        drop(sbuffer);

        // Compute checksum now that the whole message is ready. It covers the header (with a
        // zeroed checksum field) and both content slices.
        dst.header
            .set_checksum(Cmdq::calculate_checksum(SBufferIter::new_reader([
                dst.header.as_bytes(),
                dst.contents.0,
                dst.contents.1,
            ])));

        dev_dbg!(
            &self.dev,
            "GSP RPC: send: seq# {}, function={:?}, length=0x{:x}\n",
            self.seq,
            M::FUNCTION,
            dst.header.length(),
        );

        // All set - update the write pointer and inform the GSP of the new command.
        let elem_count = dst.header.element_count();
        self.seq += 1;
        self.gsp_mem.advance_cpu_write_ptr(elem_count);
        Cmdq::notify_gsp(bar);

        Ok(())
    }

    /// Sends `command` to the GSP.
    ///
    /// The command may be split into multiple messages if it is large.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    fn send_command<M>(&mut self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp,
        Error: From<M::InitError>,
    {
        match SplitState::new(command)? {
            SplitState::Single(command) => self.send_single_command(bar, command),
            SplitState::Split(command, mut continuations) => {
                // Send the head of the split command, then each continuation record in order.
                self.send_single_command(bar, command)?;

                while let Some(continuation) = continuations.next() {
                    // Turbofish needed because the compiler cannot infer M here.
                    self.send_single_command::<ContinuationRecord<'_>>(bar, continuation)?;
                }

                Ok(())
            }
        }
    }

    /// Wait for a message to become available on the message queue.
    ///
    /// This works purely at the transport layer and does not interpret or validate the message
    /// beyond the advertised length in its [`GspMsgElement`] and its checksum.
    ///
    /// This method returns:
    ///
    /// - A reference to the [`GspMsgElement`] of the message,
    /// - Two byte slices with the contents of the message. The second slice is empty unless the
    ///   message loops across the message queue.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised, or a bad
    ///   checksum) on the message queue.
    fn wait_for_msg(&self, timeout: Delta) -> Result<GspMessage<'_>> {
        // Wait for a message to arrive from the GSP.
        let (slice_1, slice_2) = read_poll_timeout(
            || Ok(self.gsp_mem.driver_read_area()),
            |driver_area| !driver_area.0.is_empty(),
            Delta::from_millis(1),
            timeout,
        )
        .map(|(slice_1, slice_2)| {
            #[allow(clippy::incompatible_msrv)]
            (slice_1.as_flattened(), slice_2.as_flattened())
        })?;

        // Extract the `GspMsgElement`.
        let (header, slice_1) = GspMsgElement::from_bytes_prefix(slice_1).ok_or(EIO)?;

        dev_dbg!(
            &self.dev,
            "GSP RPC: receive: seq# {}, function={:?}, length=0x{:x}\n",
            header.sequence(),
            header.function(),
            header.length(),
        );

        let payload_length = header.payload_length();

        // Check that the driver read area is large enough for the message.
        if slice_1.len() + slice_2.len() < payload_length {
            return Err(EIO);
        }

        // Cut the message slices down to the actual length of the message.
        let (slice_1, slice_2) = if slice_1.len() > payload_length {
            // PANIC: we checked above that `slice_1` is at least as long as `payload_length`.
            (slice_1.split_at(payload_length).0, &slice_2[0..0])
        } else {
            (
                slice_1,
                // PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as
                // large as `payload_length`.
                slice_2.split_at(payload_length - slice_1.len()).0,
            )
        };

        // Validate checksum. A correctly-checksummed message (header included) folds to zero.
        if Cmdq::calculate_checksum(SBufferIter::new_reader([
            header.as_bytes(),
            slice_1,
            slice_2,
        ])) != 0
        {
            dev_err!(
                &self.dev,
                "GSP RPC: receive: Call {} - bad checksum\n",
                header.sequence()
            );
            return Err(EIO);
        }

        Ok(GspMessage {
            header,
            contents: (slice_1, slice_2),
        })
    }

    /// Receive a message from the GSP.
    ///
    /// The expected message type is specified using the `M` generic parameter. If the pending
    /// message has a different function code, `ERANGE` is returned and the message is consumed.
    ///
    /// The read pointer is always advanced past the message, regardless of whether it matched.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
    ///   message queue.
    /// - `EINVAL` if the function code of the message was not recognized.
    /// - `ERANGE` if the message had a recognized but non-matching function code.
    ///
    /// Error codes returned by [`MessageFromGsp::read`] are propagated as-is.
    fn receive_msg<M: MessageFromGsp>(&mut self, timeout: Delta) -> Result<M>
    where
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        let message = self.wait_for_msg(timeout)?;
        let function = message.header.function().map_err(|_| EINVAL)?;

        // Extract the message. Store the result as we want to advance the read pointer even in
        // case of failure.
        let result = if function == M::FUNCTION {
            let (cmd, contents_1) = M::Message::from_bytes_prefix(message.contents.0).ok_or(EIO)?;
            let mut sbuffer = SBufferIter::new_reader([contents_1, message.contents.1]);

            M::read(cmd, &mut sbuffer)
                .map_err(|e| e.into())
                .inspect(|_| {
                    // A successful read that leaves bytes behind is tolerated, but flagged.
                    if !sbuffer.is_empty() {
                        dev_warn!(
                            &self.dev,
                            "GSP message {:?} has unprocessed data\n",
                            function
                        );
                    }
                })
        } else {
            Err(ERANGE)
        };

        // Advance the read pointer past this message.
        // NOTE(review): the page count is derived from `header.length()`, which is assumed to
        // cover the whole queue element (header included) — confirm against `GspMsgElement`.
        self.gsp_mem.advance_cpu_read_ptr(u32::try_from(
            message.header.length().div_ceil(GSP_PAGE_SIZE),
        )?);

        result
    }
}
832