xref: /linux/drivers/gpu/nova-core/gsp/cmdq.rs (revision f9f0b4a1f35d39a1a2a2f8ec46eb7b81efc70a63)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 mod continuation;
4 
5 use core::mem;
6 
7 use kernel::{
8     device,
9     dma::{
10         CoherentAllocation,
11         DmaAddress, //
12     },
13     dma_write,
14     io::poll::read_poll_timeout,
15     new_mutex,
16     prelude::*,
17     sync::{
18         aref::ARef,
19         Mutex, //
20     },
21     time::Delta,
22     transmute::{
23         AsBytes,
24         FromBytes, //
25     },
26 };
27 
28 use continuation::{
29     ContinuationRecord,
30     SplitState, //
31 };
32 
33 use crate::{
34     driver::Bar0,
35     gsp::{
36         fw::{
37             GspMsgElement,
38             MsgFunction,
39             MsgqRxHeader,
40             MsgqTxHeader,
41             GSP_MSG_QUEUE_ELEMENT_SIZE_MAX, //
42         },
43         PteArray,
44         GSP_PAGE_SHIFT,
45         GSP_PAGE_SIZE, //
46     },
47     num,
48     regs,
49     sbuffer::SBufferIter, //
50 };
51 
/// Marker type representing the absence of a reply for a command. Commands using this as their
/// reply type are sent using [`Cmdq::send_command_no_wait`].
///
/// This is a zero-sized unit type; no value of it is ever read from the message queue.
pub(crate) struct NoReply;
55 
/// Trait implemented by types representing a command to send to the GSP.
///
/// The main purpose of this trait is to provide [`Cmdq`] with the information it needs to send
/// a given command.
///
/// [`CommandToGsp::init`] in particular is responsible for initializing the command directly
/// into the space reserved for it in the command queue buffer.
///
/// Some commands may be followed by a variable-length payload. For these, the
/// [`CommandToGsp::variable_payload_len`] and [`CommandToGsp::init_variable_payload`] need to be
/// defined as well.
pub(crate) trait CommandToGsp {
    /// Function identifying this command to the GSP.
    const FUNCTION: MsgFunction;

    /// Type generated by [`CommandToGsp::init`], to be written into the command queue buffer.
    type Command: FromBytes + AsBytes;

    /// Type of the reply expected from the GSP, or [`NoReply`] for commands that don't
    /// have a reply.
    type Reply;

    /// Error type returned by [`CommandToGsp::init`].
    type InitError;

    /// In-place command initializer responsible for filling the command in the command queue
    /// buffer.
    fn init(&self) -> impl Init<Self::Command, Self::InitError>;

    /// Size of the variable-length payload following the command structure generated by
    /// [`CommandToGsp::init`].
    ///
    /// Most commands don't have a variable-length payload, so this is zero by default.
    fn variable_payload_len(&self) -> usize {
        0
    }

    /// Method initializing the variable-length payload.
    ///
    /// The command buffer is circular, which means that we may need to jump back to its beginning
    /// while in the middle of a command. For this reason, the variable-length payload is
    /// initialized using a [`SBufferIter`].
    ///
    /// This method will receive a buffer of the length returned by
    /// [`CommandToGsp::variable_payload_len`], and must write every single byte of it. Leaving
    /// unwritten space will lead to an error.
    ///
    /// Most commands don't have a variable-length payload, so this does nothing by default.
    fn init_variable_payload(
        &self,
        _dst: &mut SBufferIter<core::array::IntoIter<&mut [u8], 2>>,
    ) -> Result {
        Ok(())
    }

    /// Total size of the command (including its variable-length payload) without the
    /// [`GspMsgElement`] header.
    ///
    /// Callers that reserve queue space (see [`DmaGspMem::allocate_command`]) add the
    /// [`GspMsgElement`] header size on top of this value themselves.
    fn size(&self) -> usize {
        size_of::<Self::Command>() + self.variable_payload_len()
    }
}
117 
/// Trait representing messages received from the GSP.
///
/// This trait tells [`Cmdq::receive_msg`] how it can receive a given type of message.
pub(crate) trait MessageFromGsp: Sized {
    /// Function identifying this message from the GSP.
    ///
    /// A pending message with a different (but recognized) function code makes
    /// [`Cmdq::receive_msg`] return `ERANGE`.
    const FUNCTION: MsgFunction;

    /// Error type returned by [`MessageFromGsp::read`].
    type InitError;

    /// Type containing the raw message to be read from the message queue.
    type Message: FromBytes;

    /// Method reading the message from the message queue and returning it.
    ///
    /// From a `Self::Message` and a [`SBufferIter`], constructs an instance of `Self` and returns
    /// it.
    fn read(
        msg: &Self::Message,
        sbuffer: &mut SBufferIter<core::array::IntoIter<&[u8], 2>>,
    ) -> Result<Self, Self::InitError>;
}
140 
/// Number of GSP pages making the [`Msgq`].
///
/// This is the capacity of the circular buffer in [`MsgqData`]; read/write pointers are
/// indices into `0..MSGQ_NUM_PAGES`.
pub(crate) const MSGQ_NUM_PAGES: u32 = 0x3f;
143 
/// Circular buffer of a [`Msgq`].
///
/// This area of memory is to be shared between the driver and the GSP to exchange commands or
/// messages. It is organized as [`MSGQ_NUM_PAGES`] page-sized slots.
#[repr(C, align(0x1000))]
#[derive(Debug)]
struct MsgqData {
    data: [[u8; GSP_PAGE_SIZE]; num::u32_as_usize(MSGQ_NUM_PAGES)],
}

// Annoyingly we are forced to use a literal to specify the alignment of
// `MsgqData`, so check that it corresponds to the actual GSP page size here.
static_assert!(align_of::<MsgqData>() == GSP_PAGE_SIZE);
157 
/// Unidirectional message queue.
///
/// Contains the data for a message queue, that either the driver or GSP writes to.
///
/// Note that while the write pointer of `tx` corresponds to the `msgq` of the same instance, the
/// read pointer of `rx` actually refers to the `Msgq` owned by the other side.
/// This design ensures that only the driver or GSP ever writes to a given instance of this struct.
#[repr(C)]
// There is no struct defined for this in the open-gpu-kernel-source headers.
// Instead it is defined by code in `GspMsgQueuesInit()`.
// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
pub(super) struct Msgq {
    /// Header for sending messages, including the write pointer.
    pub(super) tx: MsgqTxHeader,
    /// Header for receiving messages, including the read pointer.
    pub(super) rx: MsgqRxHeader,
    /// The message queue proper.
    msgq: MsgqData,
}
177 
/// Structure shared between the driver and the GSP and containing the command and message queues.
#[repr(C)]
// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
pub(super) struct GspMem {
    /// Self-mapping page table entries.
    ///
    /// Filled in by [`DmaGspMem::new`] with one entry per page of this very structure.
    ptes: PteArray<{ Self::PTE_ARRAY_SIZE }>,
    /// CPU queue: the driver writes commands here, and the GSP reads them. It also contains the
    /// write and read pointers that the CPU updates. This means that the read pointer here is an
    /// index into the GSP queue.
    ///
    /// This member is read-only for the GSP.
    pub(super) cpuq: Msgq,
    /// GSP queue: the GSP writes messages here, and the driver reads them. It also contains the
    /// write and read pointers that the GSP updates. This means that the read pointer here is an
    /// index into the CPU queue.
    ///
    /// This member is read-only for the driver.
    pub(super) gspq: Msgq,
}
197 
impl GspMem {
    /// Number of entries in the self-mapping PTE array: one 64-bit entry per slot of the
    /// first GSP page.
    const PTE_ARRAY_SIZE: usize = GSP_PAGE_SIZE / size_of::<u64>();
}
201 
// SAFETY: These structs don't meet the no-padding requirements of AsBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl AsBytes for GspMem {}

// SAFETY: These structs don't meet the no-padding requirements of FromBytes but
// that is not a problem because they are not used outside the kernel.
// These impls are what allow `GspMem` to live in a `CoherentAllocation` and be
// accessed through `dma_write!` and the byte-slice helpers below.
unsafe impl FromBytes for GspMem {}
209 
/// Wrapper around [`GspMem`] to share it with the GPU using a [`CoherentAllocation`].
///
/// This provides the low-level functionality to communicate with the GSP, including allocation of
/// queue space to write messages to and management of read/write pointers.
///
/// This is shared with the GSP, with clear ownership rules regarding the command queues:
///
/// * The driver owns (i.e. can write to) the part of the CPU message queue between the CPU write
///   pointer and the GSP read pointer. This region is returned by [`Self::driver_write_area`].
/// * The driver owns (i.e. can read from) the part of the GSP message queue between the CPU read
///   pointer and the GSP write pointer. This region is returned by [`Self::driver_read_area`].
struct DmaGspMem(CoherentAllocation<GspMem>);
222 
impl DmaGspMem {
    /// Allocate a new instance and map it for `dev`.
    ///
    /// The allocation is zeroed, the self-mapping PTE array is filled in, and the CPU queue's
    /// TX/RX headers are initialized. The GSP-owned `gspq` half is left zeroed.
    fn new(dev: &device::Device<device::Bound>) -> Result<Self> {
        const MSGQ_SIZE: u32 = num::usize_into_u32::<{ size_of::<Msgq>() }>();
        const RX_HDR_OFF: u32 = num::usize_into_u32::<{ mem::offset_of!(Msgq, rx) }>();

        let gsp_mem =
            CoherentAllocation::<GspMem>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?;

        let start = gsp_mem.dma_handle();
        // Write values one by one to avoid an on-stack instance of `PteArray`.
        for i in 0..GspMem::PTE_ARRAY_SIZE {
            dma_write!(gsp_mem, [0]?.ptes.0[i], PteArray::<0>::entry(start, i)?);
        }

        dma_write!(
            gsp_mem,
            [0]?.cpuq.tx,
            MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES)
        );
        dma_write!(gsp_mem, [0]?.cpuq.rx, MsgqRxHeader::new());

        Ok(Self(gsp_mem))
    }

    /// Returns the region of the CPU message queue that the driver is currently allowed to write
    /// to.
    ///
    /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
    /// that case the second slice will have a non-zero length.
    fn driver_write_area(&mut self) -> (&mut [[u8; GSP_PAGE_SIZE]], &mut [[u8; GSP_PAGE_SIZE]]) {
        let tx = self.cpu_write_ptr() as usize;
        let rx = self.gsp_read_ptr() as usize;

        // SAFETY:
        // - The `CoherentAllocation` contains exactly one object.
        // - We will only access the driver-owned part of the shared memory.
        // - Per the safety statement of the function, no concurrent access will be performed.
        // NOTE(review): this fn is safe; exclusive access comes from `&mut self` (behind the
        // `CmdqInner` mutex). The "safety statement" wording above looks like a leftover from
        // an earlier unsafe signature — confirm.
        let gsp_mem = &mut unsafe { self.0.as_slice_mut(0, 1) }.unwrap()[0];
        // PANIC: per the invariant of `cpu_write_ptr`, `tx` is `< MSGQ_NUM_PAGES`.
        let (before_tx, after_tx) = gsp_mem.cpuq.msgq.data.split_at_mut(tx);

        // The area starting at `tx` and ending at `rx - 2` modulo MSGQ_NUM_PAGES, inclusive,
        // belongs to the driver for writing. One slot is always left unused, as shown by the
        // `- 1` in every branch below.

        if rx == 0 {
            // Since `rx` is zero, leave an empty slot at end of the buffer.
            let last = after_tx.len() - 1;
            (&mut after_tx[..last], &mut [])
        } else if rx <= tx {
            // The area is discontiguous and we leave an empty slot before `rx`.
            // PANIC:
            // - The index `rx - 1` is non-negative because `rx != 0` in this branch.
            // - The index does not exceed `before_tx.len()` (which equals `tx`) because
            //   `rx <= tx` in this branch.
            (after_tx, &mut before_tx[..(rx - 1)])
        } else {
            // The area is contiguous and we leave an empty slot before `rx`.
            // PANIC:
            // - The index `rx - tx - 1` is non-negative because `rx > tx` in this branch.
            // - The index does not exceed `after_tx.len()` (which is `MSGQ_NUM_PAGES - tx`)
            //   because `rx < MSGQ_NUM_PAGES` by the `gsp_read_ptr` invariant.
            (&mut after_tx[..(rx - tx - 1)], &mut [])
        }
    }

    /// Returns the size of the region of the CPU message queue that the driver is currently allowed
    /// to write to, in bytes.
    ///
    /// This is the byte equivalent of the slot count computed by [`Self::driver_write_area`].
    fn driver_write_area_size(&self) -> usize {
        let tx = self.cpu_write_ptr();
        let rx = self.gsp_read_ptr();

        // `rx` and `tx` are both in `0..MSGQ_NUM_PAGES` per the invariants of `gsp_read_ptr` and
        // `cpu_write_ptr`. The minimum value case is where `rx == 0` and `tx == MSGQ_NUM_PAGES -
        // 1`, which gives `0 + MSGQ_NUM_PAGES - (MSGQ_NUM_PAGES - 1) - 1 == 0`.
        let slots = (rx + MSGQ_NUM_PAGES - tx - 1) % MSGQ_NUM_PAGES;
        num::u32_as_usize(slots) * GSP_PAGE_SIZE
    }

    /// Returns the region of the GSP message queue that the driver is currently allowed to read
    /// from.
    ///
    /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
    /// that case the second slice will have a non-zero length.
    fn driver_read_area(&self) -> (&[[u8; GSP_PAGE_SIZE]], &[[u8; GSP_PAGE_SIZE]]) {
        let tx = self.gsp_write_ptr() as usize;
        let rx = self.cpu_read_ptr() as usize;

        // SAFETY:
        // - The `CoherentAllocation` contains exactly one object.
        // - We will only access the driver-owned part of the shared memory.
        // - Per the safety statement of the function, no concurrent access will be performed.
        // NOTE(review): see the matching note in `driver_write_area` — this fn is safe, the
        // comment above likely tracks an older signature.
        let gsp_mem = &unsafe { self.0.as_slice(0, 1) }.unwrap()[0];
        let data = &gsp_mem.gspq.msgq.data;

        // The area starting at `rx` and ending at `tx - 1` modulo MSGQ_NUM_PAGES, inclusive,
        // belongs to the driver for reading.
        // PANIC:
        // - per the invariant of `cpu_read_ptr`, `rx < MSGQ_NUM_PAGES`
        // - per the invariant of `gsp_write_ptr`, `tx < MSGQ_NUM_PAGES`
        if rx <= tx {
            // The area is contiguous (empty when `rx == tx`).
            (&data[rx..tx], &[])
        } else {
            // The area is discontiguous.
            (&data[rx..], &data[..tx])
        }
    }

    /// Allocates a region on the command queue that is large enough to send a command of `size`
    /// bytes, waiting for space to become available based on the provided timeout.
    ///
    /// This returns a [`GspCommand`] ready to be written to by the caller.
    ///
    /// # Errors
    ///
    /// - `EMSGSIZE` if the command is larger than [`GSP_MSG_QUEUE_ELEMENT_SIZE_MAX`].
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the command header is not properly aligned.
    fn allocate_command(&mut self, size: usize, timeout: Delta) -> Result<GspCommand<'_>> {
        if size_of::<GspMsgElement>() + size > GSP_MSG_QUEUE_ELEMENT_SIZE_MAX {
            return Err(EMSGSIZE);
        }
        // Poll until the GSP has consumed enough of the queue to fit header + command.
        read_poll_timeout(
            || Ok(self.driver_write_area_size()),
            |available_bytes| *available_bytes >= size_of::<GspMsgElement>() + size,
            Delta::from_micros(1),
            timeout,
        )?;

        // Get the current writable area as an array of bytes.
        let (slice_1, slice_2) = {
            let (slice_1, slice_2) = self.driver_write_area();

            #[allow(clippy::incompatible_msrv)]
            (slice_1.as_flattened_mut(), slice_2.as_flattened_mut())
        };

        // Extract area for the `GspMsgElement`.
        let (header, slice_1) = GspMsgElement::from_bytes_mut_prefix(slice_1).ok_or(EIO)?;

        // Create the contents area.
        let (slice_1, slice_2) = if slice_1.len() > size {
            // Contents fits entirely in `slice_1`.
            (&mut slice_1[..size], &mut slice_2[0..0])
        } else {
            // Need all of `slice_1` and some of `slice_2`.
            let slice_2_len = size - slice_1.len();
            (slice_1, &mut slice_2[..slice_2_len])
        };

        Ok(GspCommand {
            header,
            contents: (slice_1, slice_2),
        })
    }

    // Returns the index of the memory page the GSP will write the next message to.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn gsp_write_ptr(&self) -> u32 {
        super::fw::gsp_mem::gsp_write_ptr(&self.0)
    }

    // Returns the index of the memory page the GSP will read the next command from.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn gsp_read_ptr(&self) -> u32 {
        super::fw::gsp_mem::gsp_read_ptr(&self.0)
    }

    // Returns the index of the memory page the CPU can read the next message from.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn cpu_read_ptr(&self) -> u32 {
        super::fw::gsp_mem::cpu_read_ptr(&self.0)
    }

    // Informs the GSP that it can send `elem_count` new pages into the message queue.
    fn advance_cpu_read_ptr(&mut self, elem_count: u32) {
        super::fw::gsp_mem::advance_cpu_read_ptr(&self.0, elem_count)
    }

    // Returns the index of the memory page the CPU can write the next command to.
    //
    // # Invariants
    //
    // - The returned value is within `0..MSGQ_NUM_PAGES`.
    fn cpu_write_ptr(&self) -> u32 {
        super::fw::gsp_mem::cpu_write_ptr(&self.0)
    }

    // Informs the GSP that it can process `elem_count` new pages from the command queue.
    fn advance_cpu_write_ptr(&mut self, elem_count: u32) {
        super::fw::gsp_mem::advance_cpu_write_ptr(&self.0, elem_count)
    }
}
426 
/// A command ready to be sent on the command queue.
///
/// This is the type returned by [`DmaGspMem::allocate_command`].
struct GspCommand<'a> {
    // Writable reference to the header of the command.
    header: &'a mut GspMsgElement,
    // Writable slices to the contents of the command. The second slice is zero unless the command
    // loops over the command queue.
    contents: (&'a mut [u8], &'a mut [u8]),
}
437 
/// A message ready to be processed from the message queue.
///
/// This is the type returned by [`Cmdq::wait_for_msg`]. The read-only counterpart of
/// [`GspCommand`].
struct GspMessage<'a> {
    // Reference to the header of the message.
    header: &'a GspMsgElement,
    // Slices to the contents of the message. The second slice is zero unless the message loops
    // over the message queue.
    contents: (&'a [u8], &'a [u8]),
}
448 
/// GSP command queue.
///
/// Provides the ability to send commands and receive messages from the GSP using a shared memory
/// area.
#[pin_data]
pub(crate) struct Cmdq {
    /// Inner mutex-protected state.
    ///
    /// The mutex serializes whole send/receive cycles, not just individual accesses.
    #[pin]
    inner: Mutex<CmdqInner>,
}
459 
460 impl Cmdq {
461     /// Offset of the data after the PTEs.
462     const POST_PTE_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq);
463 
464     /// Offset of command queue ring buffer.
465     pub(crate) const CMDQ_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq)
466         + core::mem::offset_of!(Msgq, msgq)
467         - Self::POST_PTE_OFFSET;
468 
469     /// Offset of message queue ring buffer.
470     pub(crate) const STATQ_OFFSET: usize = core::mem::offset_of!(GspMem, gspq)
471         + core::mem::offset_of!(Msgq, msgq)
472         - Self::POST_PTE_OFFSET;
473 
474     /// Number of page table entries for the GSP shared region.
475     pub(crate) const NUM_PTES: usize = size_of::<GspMem>() >> GSP_PAGE_SHIFT;
476 
477     /// Default timeout for receiving a message from the GSP.
478     pub(super) const RECEIVE_TIMEOUT: Delta = Delta::from_secs(5);
479 
480     /// Creates a new command queue for `dev`.
481     pub(crate) fn new(dev: &device::Device<device::Bound>) -> impl PinInit<Self, Error> + '_ {
482         try_pin_init!(Self {
483             inner <- new_mutex!(CmdqInner {
484                 dev: dev.into(),
485                 gsp_mem: DmaGspMem::new(dev)?,
486                 seq: 0,
487             }),
488         })
489     }
490 
491     /// Computes the checksum for the message pointed to by `it`.
492     ///
493     /// A message is made of several parts, so `it` is an iterator over byte slices representing
494     /// these parts.
495     fn calculate_checksum<T: Iterator<Item = u8>>(it: T) -> u32 {
496         let sum64 = it
497             .enumerate()
498             .map(|(idx, byte)| (((idx % 8) * 8) as u32, byte))
499             .fold(0, |acc, (rol, byte)| acc ^ u64::from(byte).rotate_left(rol));
500 
501         ((sum64 >> 32) as u32) ^ (sum64 as u32)
502     }
503 
504     /// Notifies the GSP that we have updated the command queue pointers.
505     fn notify_gsp(bar: &Bar0) {
506         regs::NV_PGSP_QUEUE_HEAD::default()
507             .set_address(0)
508             .write(bar);
509     }
510 
511     /// Sends `command` to the GSP and waits for the reply.
512     ///
513     /// Messages with non-matching function codes are silently consumed until the expected reply
514     /// arrives.
515     ///
516     /// The queue is locked for the entire send+receive cycle to ensure that no other command can
517     /// be interleaved.
518     ///
519     /// # Errors
520     ///
521     /// - `ETIMEDOUT` if space does not become available to send the command, or if the reply is
522     ///   not received within the timeout.
523     /// - `EIO` if the variable payload requested by the command has not been entirely
524     ///   written to by its [`CommandToGsp::init_variable_payload`] method.
525     ///
526     /// Error codes returned by the command and reply initializers are propagated as-is.
527     pub(crate) fn send_command<M>(&self, bar: &Bar0, command: M) -> Result<M::Reply>
528     where
529         M: CommandToGsp,
530         M::Reply: MessageFromGsp,
531         Error: From<M::InitError>,
532         Error: From<<M::Reply as MessageFromGsp>::InitError>,
533     {
534         let mut inner = self.inner.lock();
535         inner.send_command(bar, command)?;
536 
537         loop {
538             match inner.receive_msg::<M::Reply>(Self::RECEIVE_TIMEOUT) {
539                 Ok(reply) => break Ok(reply),
540                 Err(ERANGE) => continue,
541                 Err(e) => break Err(e),
542             }
543         }
544     }
545 
546     /// Sends `command` to the GSP without waiting for a reply.
547     ///
548     /// # Errors
549     ///
550     /// - `ETIMEDOUT` if space does not become available within the timeout.
551     /// - `EIO` if the variable payload requested by the command has not been entirely
552     ///   written to by its [`CommandToGsp::init_variable_payload`] method.
553     ///
554     /// Error codes returned by the command initializers are propagated as-is.
555     pub(crate) fn send_command_no_wait<M>(&self, bar: &Bar0, command: M) -> Result
556     where
557         M: CommandToGsp<Reply = NoReply>,
558         Error: From<M::InitError>,
559     {
560         self.inner.lock().send_command(bar, command)
561     }
562 
563     /// Receive a message from the GSP.
564     ///
565     /// See [`CmdqInner::receive_msg`] for details.
566     pub(crate) fn receive_msg<M: MessageFromGsp>(&self, timeout: Delta) -> Result<M>
567     where
568         // This allows all error types, including `Infallible`, to be used for `M::InitError`.
569         Error: From<M::InitError>,
570     {
571         self.inner.lock().receive_msg(timeout)
572     }
573 
574     /// Returns the DMA handle of the command queue's shared memory region.
575     pub(crate) fn dma_handle(&self) -> DmaAddress {
576         self.inner.lock().gsp_mem.0.dma_handle()
577     }
578 }
579 
/// Inner mutex protected state of [`Cmdq`].
struct CmdqInner {
    /// Device this command queue belongs to.
    dev: ARef<device::Device>,
    /// Current command sequence number.
    ///
    /// Incremented by [`CmdqInner::send_single_command`] after each command is queued.
    seq: u32,
    /// Memory area shared with the GSP for communicating commands and messages.
    gsp_mem: DmaGspMem,
}
589 
impl CmdqInner {
    /// Timeout for waiting for space on the command queue.
    const ALLOCATE_TIMEOUT: Delta = Delta::from_secs(1);

    /// Sends `command` to the GSP, without splitting it.
    ///
    /// # Errors
    ///
    /// - `EMSGSIZE` if the command exceeds the maximum queue element size.
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    fn send_single_command<M>(&mut self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp,
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        let size_in_bytes = command.size();
        let dst = self
            .gsp_mem
            .allocate_command(size_in_bytes, Self::ALLOCATE_TIMEOUT)?;

        // Extract area for the command itself. The GSP message header and the command header
        // together are guaranteed to fit entirely into a single page, so it's ok to only look
        // at `dst.contents.0` here.
        let (cmd, payload_1) = M::Command::from_bytes_mut_prefix(dst.contents.0).ok_or(EIO)?;

        // Fill the header and command in-place.
        let msg_element = GspMsgElement::init(self.seq, size_in_bytes, M::FUNCTION);
        // SAFETY: `msg_header` and `cmd` are valid references, and not touched if the initializer
        // fails.
        unsafe {
            msg_element.__init(core::ptr::from_mut(dst.header))?;
            command.init().__init(core::ptr::from_mut(cmd))?;
        }

        // Fill the variable-length payload, which may be empty.
        let mut sbuffer = SBufferIter::new_writer([&mut payload_1[..], &mut dst.contents.1[..]]);
        command.init_variable_payload(&mut sbuffer)?;

        // The initializer must have written every byte it asked for through
        // `variable_payload_len`, otherwise the queue would carry uninitialized data.
        if !sbuffer.is_empty() {
            return Err(EIO);
        }
        drop(sbuffer);

        // Compute checksum now that the whole message is ready.
        dst.header
            .set_checksum(Cmdq::calculate_checksum(SBufferIter::new_reader([
                dst.header.as_bytes(),
                dst.contents.0,
                dst.contents.1,
            ])));

        dev_dbg!(
            &self.dev,
            "GSP RPC: send: seq# {}, function={:?}, length=0x{:x}\n",
            self.seq,
            M::FUNCTION,
            dst.header.length(),
        );

        // All set - update the write pointer and inform the GSP of the new command.
        let elem_count = dst.header.element_count();
        self.seq += 1;
        self.gsp_mem.advance_cpu_write_ptr(elem_count);
        Cmdq::notify_gsp(bar);

        Ok(())
    }

    /// Sends `command` to the GSP.
    ///
    /// The command may be split into multiple messages if it is large.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if space does not become available within the timeout.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    fn send_command<M>(&mut self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp,
        Error: From<M::InitError>,
    {
        match SplitState::new(command)? {
            SplitState::Single(command) => self.send_single_command(bar, command),
            SplitState::Split(command, mut continuations) => {
                // First record carries the original function code; the remainder is sent
                // as continuation records.
                self.send_single_command(bar, command)?;

                while let Some(continuation) = continuations.next() {
                    // Turbofish needed because the compiler cannot infer M here.
                    self.send_single_command::<ContinuationRecord<'_>>(bar, continuation)?;
                }

                Ok(())
            }
        }
    }

    /// Wait for a message to become available on the message queue.
    ///
    /// This works purely at the transport layer and does not interpret or validate the message
    /// beyond the advertised length in its [`GspMsgElement`].
    ///
    /// This method returns:
    ///
    /// - A reference to the [`GspMsgElement`] of the message,
    /// - Two byte slices with the contents of the message. The second slice is empty unless the
    ///   message loops across the message queue.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
    ///   message queue.
    ///
    /// Error codes returned by the message constructor are propagated as-is.
    fn wait_for_msg(&self, timeout: Delta) -> Result<GspMessage<'_>> {
        // Wait for a message to arrive from the GSP.
        let (slice_1, slice_2) = read_poll_timeout(
            || Ok(self.gsp_mem.driver_read_area()),
            |driver_area| !driver_area.0.is_empty(),
            Delta::from_millis(1),
            timeout,
        )
        .map(|(slice_1, slice_2)| {
            #[allow(clippy::incompatible_msrv)]
            (slice_1.as_flattened(), slice_2.as_flattened())
        })?;

        // Extract the `GspMsgElement`.
        let (header, slice_1) = GspMsgElement::from_bytes_prefix(slice_1).ok_or(EIO)?;

        dev_dbg!(
            &self.dev,
            "GSP RPC: receive: seq# {}, function={:?}, length=0x{:x}\n",
            header.sequence(),
            header.function(),
            header.length(),
        );

        let payload_length = header.payload_length();

        // Check that the driver read area is large enough for the message.
        if slice_1.len() + slice_2.len() < payload_length {
            return Err(EIO);
        }

        // Cut the message slices down to the actual length of the message.
        let (slice_1, slice_2) = if slice_1.len() > payload_length {
            // PANIC: we checked above that `slice_1` is at least as long as `payload_length`.
            (slice_1.split_at(payload_length).0, &slice_2[0..0])
        } else {
            (
                slice_1,
                // PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as
                // large as `payload_length`.
                slice_2.split_at(payload_length - slice_1.len()).0,
            )
        };

        // Validate checksum. The stored checksum is part of the checksummed header bytes, so a
        // valid message folds down to zero.
        if Cmdq::calculate_checksum(SBufferIter::new_reader([
            header.as_bytes(),
            slice_1,
            slice_2,
        ])) != 0
        {
            dev_err!(
                &self.dev,
                "GSP RPC: receive: Call {} - bad checksum\n",
                header.sequence()
            );
            return Err(EIO);
        }

        Ok(GspMessage {
            header,
            contents: (slice_1, slice_2),
        })
    }

    /// Receive a message from the GSP.
    ///
    /// The expected message type is specified using the `M` generic parameter. If the pending
    /// message has a different function code, `ERANGE` is returned and the message is consumed.
    ///
    /// The read pointer is always advanced past the message, regardless of whether it matched.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
    ///   message queue.
    /// - `EINVAL` if the function code of the message was not recognized.
    /// - `ERANGE` if the message had a recognized but non-matching function code.
    ///
    /// Error codes returned by [`MessageFromGsp::read`] are propagated as-is.
    fn receive_msg<M: MessageFromGsp>(&mut self, timeout: Delta) -> Result<M>
    where
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        let message = self.wait_for_msg(timeout)?;
        let function = message.header.function().map_err(|_| EINVAL)?;

        // Extract the message. Store the result as we want to advance the read pointer even in
        // case of failure.
        let result = if function == M::FUNCTION {
            let (cmd, contents_1) = M::Message::from_bytes_prefix(message.contents.0).ok_or(EIO)?;
            let mut sbuffer = SBufferIter::new_reader([contents_1, message.contents.1]);

            M::read(cmd, &mut sbuffer)
                .map_err(|e| e.into())
                .inspect(|_| {
                    // Leftover bytes mean the reader did not consume the whole payload;
                    // warn but do not fail, as the message itself was valid.
                    if !sbuffer.is_empty() {
                        dev_warn!(
                            &self.dev,
                            "GSP message {:?} has unprocessed data\n",
                            function
                        );
                    }
                })
        } else {
            Err(ERANGE)
        };

        // Advance the read pointer past this message.
        self.gsp_mem.advance_cpu_read_ptr(u32::try_from(
            message.header.length().div_ceil(GSP_PAGE_SIZE),
        )?);

        result
    }
}
830