xref: /linux/drivers/gpu/nova-core/gsp/cmdq.rs (revision 1c9982b4961334c1edb0745a04cabd34bc2de675)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 use core::{
4     cmp,
5     mem, //
6 };
7 
8 use kernel::{
9     device,
10     dma::{
11         CoherentAllocation,
12         DmaAddress, //
13     },
14     dma_write,
15     io::poll::read_poll_timeout,
16     prelude::*,
17     sync::aref::ARef,
18     time::Delta,
19     transmute::{
20         AsBytes,
21         FromBytes, //
22     },
23 };
24 
25 use crate::{
26     driver::Bar0,
27     gsp::{
28         fw::{
29             GspMsgElement,
30             MsgFunction,
31             MsgqRxHeader,
32             MsgqTxHeader, //
33         },
34         PteArray,
35         GSP_PAGE_SHIFT,
36         GSP_PAGE_SIZE, //
37     },
38     num,
39     regs,
40     sbuffer::SBufferIter, //
41 };
42 
/// Trait implemented by types representing a command to send to the GSP.
///
/// The main purpose of this trait is to provide [`Cmdq::send_command`] with the information it
/// needs to send a given command.
///
/// [`CommandToGsp::init`] in particular is responsible for initializing the command directly
/// into the space reserved for it in the command queue buffer.
///
/// Some commands may be followed by a variable-length payload. For these, the
/// [`CommandToGsp::variable_payload_len`] and [`CommandToGsp::init_variable_payload`] need to be
/// defined as well.
pub(crate) trait CommandToGsp {
    /// Function identifying this command to the GSP.
    const FUNCTION: MsgFunction;

    /// Type generated by [`CommandToGsp::init`], to be written into the command queue buffer.
    ///
    /// `FromBytes + AsBytes` is required because the command is built in-place in, and
    /// checksummed from, the raw bytes of the shared queue buffer.
    type Command: FromBytes + AsBytes;

    /// Error type returned by [`CommandToGsp::init`].
    type InitError;

    /// In-place command initializer responsible for filling the command in the command queue
    /// buffer.
    fn init(&self) -> impl Init<Self::Command, Self::InitError>;

    /// Size in bytes of the variable-length payload following the command structure generated by
    /// [`CommandToGsp::init`].
    ///
    /// Most commands don't have a variable-length payload, so this is zero by default.
    fn variable_payload_len(&self) -> usize {
        0
    }

    /// Method initializing the variable-length payload.
    ///
    /// The command buffer is circular, which means that we may need to jump back to its beginning
    /// while in the middle of a command. For this reason, the variable-length payload is
    /// initialized using a [`SBufferIter`] spanning up to two discontiguous slices.
    ///
    /// This method will receive a buffer of the length returned by
    /// [`CommandToGsp::variable_payload_len`], and must write every single byte of it. Leaving
    /// unwritten space will lead to an error.
    ///
    /// Most commands don't have a variable-length payload, so this does nothing by default.
    fn init_variable_payload(
        &self,
        _dst: &mut SBufferIter<core::array::IntoIter<&mut [u8], 2>>,
    ) -> Result {
        Ok(())
    }
}
94 
/// Trait representing messages received from the GSP.
///
/// This trait tells [`Cmdq::receive_msg`] how it can receive a given type of message.
pub(crate) trait MessageFromGsp: Sized {
    /// Function identifying this message from the GSP.
    const FUNCTION: MsgFunction;

    /// Error type returned by [`MessageFromGsp::read`].
    type InitError;

    /// Type containing the raw message to be read from the message queue.
    type Message: FromBytes;

    /// Method reading the message from the message queue and returning it.
    ///
    /// From a `Self::Message` and a [`SBufferIter`] covering the remaining (possibly
    /// discontiguous) payload bytes, constructs an instance of `Self` and returns it.
    fn read(
        msg: &Self::Message,
        sbuffer: &mut SBufferIter<core::array::IntoIter<&[u8], 2>>,
    ) -> Result<Self, Self::InitError>;
}
117 
/// Number of GSP pages making the [`Msgq`].
pub(crate) const MSGQ_NUM_PAGES: u32 = 0x3f;

/// Circular buffer of a [`Msgq`].
///
/// This area of memory is to be shared between the driver and the GSP to exchange commands or
/// messages.
#[repr(C, align(0x1000))]
#[derive(Debug)]
struct MsgqData {
    // Ring storage, addressed page by page: `MSGQ_NUM_PAGES` pages of `GSP_PAGE_SIZE` bytes.
    data: [[u8; GSP_PAGE_SIZE]; num::u32_as_usize(MSGQ_NUM_PAGES)],
}

// Annoyingly we are forced to use a literal to specify the alignment of
// `MsgqData`, so check that it corresponds to the actual GSP page size here.
static_assert!(align_of::<MsgqData>() == GSP_PAGE_SIZE);
134 
/// Unidirectional message queue.
///
/// Contains the data for a message queue, that either the driver or GSP writes to.
///
/// Note that while the write pointer of `tx` corresponds to the `msgq` of the same instance, the
/// read pointer of `rx` actually refers to the `Msgq` owned by the other side.
/// This design ensures that only the driver or GSP ever writes to a given instance of this struct.
#[repr(C)]
// There is no struct defined for this in the open-gpu-kernel-source headers.
// Instead it is defined by code in `GspMsgQueuesInit()`.
// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
pub(super) struct Msgq {
    /// Header for sending messages, including the write pointer.
    pub(super) tx: MsgqTxHeader,
    /// Header for receiving messages, including the read pointer.
    pub(super) rx: MsgqRxHeader,
    /// The message queue proper.
    msgq: MsgqData,
}
154 
/// Structure shared between the driver and the GSP and containing the command and message queues.
#[repr(C)]
// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
pub(super) struct GspMem {
    /// Self-mapping page table entries covering this very structure.
    ptes: PteArray<{ Self::PTE_ARRAY_SIZE }>,
    /// CPU queue: the driver writes commands here, and the GSP reads them. It also contains the
    /// write and read pointers that the CPU updates.
    ///
    /// This member is read-only for the GSP.
    pub(super) cpuq: Msgq,
    /// GSP queue: the GSP writes messages here, and the driver reads them. It also contains the
    /// write and read pointers that the GSP updates.
    ///
    /// This member is read-only for the driver.
    pub(super) gspq: Msgq,
}
172 
impl GspMem {
    /// Number of entries in [`Self::ptes`]: one GSP page worth of 64-bit entries.
    const PTE_ARRAY_SIZE: usize = GSP_PAGE_SIZE / size_of::<u64>();
}

// SAFETY: These structs don't meet the no-padding requirements of AsBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl AsBytes for GspMem {}

// SAFETY: These structs don't meet the no-padding requirements of FromBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl FromBytes for GspMem {}
184 
/// Wrapper around [`GspMem`] to share it with the GPU using a [`CoherentAllocation`].
///
/// This provides the low-level functionality to communicate with the GSP, including allocation of
/// queue space to write messages to and management of read/write pointers.
///
/// This is shared with the GSP, with clear ownership rules regarding the command queues:
///
/// * The driver owns (i.e. can write to) the part of the CPU message queue between the CPU write
///   pointer and the GSP read pointer. This region is returned by [`Self::driver_write_area`].
/// * The driver owns (i.e. can read from) the part of the GSP message queue between the CPU read
///   pointer and the GSP write pointer. This region is returned by [`Self::driver_read_area`].
struct DmaGspMem(CoherentAllocation<GspMem>);
197 
198 impl DmaGspMem {
199     /// Allocate a new instance and map it for `dev`.
new(dev: &device::Device<device::Bound>) -> Result<Self>200     fn new(dev: &device::Device<device::Bound>) -> Result<Self> {
201         const MSGQ_SIZE: u32 = num::usize_into_u32::<{ size_of::<Msgq>() }>();
202         const RX_HDR_OFF: u32 = num::usize_into_u32::<{ mem::offset_of!(Msgq, rx) }>();
203 
204         let gsp_mem =
205             CoherentAllocation::<GspMem>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?;
206 
207         let start = gsp_mem.dma_handle();
208         // Write values one by one to avoid an on-stack instance of `PteArray`.
209         for i in 0..GspMem::PTE_ARRAY_SIZE {
210             dma_write!(gsp_mem, [0]?.ptes.0[i], PteArray::<0>::entry(start, i)?);
211         }
212 
213         dma_write!(
214             gsp_mem,
215             [0]?.cpuq.tx,
216             MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES)
217         );
218         dma_write!(gsp_mem, [0]?.cpuq.rx, MsgqRxHeader::new());
219 
220         Ok(Self(gsp_mem))
221     }
222 
223     /// Returns the region of the CPU message queue that the driver is currently allowed to write
224     /// to.
225     ///
226     /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
227     /// that case the second slice will have a non-zero length.
driver_write_area(&mut self) -> (&mut [[u8; GSP_PAGE_SIZE]], &mut [[u8; GSP_PAGE_SIZE]])228     fn driver_write_area(&mut self) -> (&mut [[u8; GSP_PAGE_SIZE]], &mut [[u8; GSP_PAGE_SIZE]]) {
229         let tx = self.cpu_write_ptr() as usize;
230         let rx = self.gsp_read_ptr() as usize;
231 
232         // SAFETY:
233         // - The `CoherentAllocation` contains exactly one object.
234         // - We will only access the driver-owned part of the shared memory.
235         // - Per the safety statement of the function, no concurrent access will be performed.
236         let gsp_mem = &mut unsafe { self.0.as_slice_mut(0, 1) }.unwrap()[0];
237         // PANIC: per the invariant of `cpu_write_ptr`, `tx` is `<= MSGQ_NUM_PAGES`.
238         let (before_tx, after_tx) = gsp_mem.cpuq.msgq.data.split_at_mut(tx);
239 
240         if rx <= tx {
241             // The area from `tx` up to the end of the ring, and from the beginning of the ring up
242             // to `rx`, minus one unit, belongs to the driver.
243             if rx == 0 {
244                 let last = after_tx.len() - 1;
245                 (&mut after_tx[..last], &mut before_tx[0..0])
246             } else {
247                 (after_tx, &mut before_tx[..rx])
248             }
249         } else {
250             // The area from `tx` to `rx`, minus one unit, belongs to the driver.
251             //
252             // PANIC: per the invariants of `cpu_write_ptr` and `gsp_read_ptr`, `rx` and `tx` are
253             // `<= MSGQ_NUM_PAGES`, and the test above ensured that `rx > tx`.
254             (after_tx.split_at_mut(rx - tx).0, &mut before_tx[0..0])
255         }
256     }
257 
258     /// Returns the region of the GSP message queue that the driver is currently allowed to read
259     /// from.
260     ///
261     /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
262     /// that case the second slice will have a non-zero length.
driver_read_area(&self) -> (&[[u8; GSP_PAGE_SIZE]], &[[u8; GSP_PAGE_SIZE]])263     fn driver_read_area(&self) -> (&[[u8; GSP_PAGE_SIZE]], &[[u8; GSP_PAGE_SIZE]]) {
264         let tx = self.gsp_write_ptr() as usize;
265         let rx = self.cpu_read_ptr() as usize;
266 
267         // SAFETY:
268         // - The `CoherentAllocation` contains exactly one object.
269         // - We will only access the driver-owned part of the shared memory.
270         // - Per the safety statement of the function, no concurrent access will be performed.
271         let gsp_mem = &unsafe { self.0.as_slice(0, 1) }.unwrap()[0];
272         // PANIC: per the invariant of `cpu_read_ptr`, `xx` is `<= MSGQ_NUM_PAGES`.
273         let (before_rx, after_rx) = gsp_mem.gspq.msgq.data.split_at(rx);
274 
275         match tx.cmp(&rx) {
276             cmp::Ordering::Equal => (&after_rx[0..0], &after_rx[0..0]),
277             cmp::Ordering::Greater => (&after_rx[..tx], &before_rx[0..0]),
278             cmp::Ordering::Less => (after_rx, &before_rx[..tx]),
279         }
280     }
281 
282     /// Allocates a region on the command queue that is large enough to send a command of `size`
283     /// bytes.
284     ///
285     /// This returns a [`GspCommand`] ready to be written to by the caller.
286     ///
287     /// # Errors
288     ///
289     /// - `EAGAIN` if the driver area is too small to hold the requested command.
290     /// - `EIO` if the command header is not properly aligned.
allocate_command(&mut self, size: usize) -> Result<GspCommand<'_>>291     fn allocate_command(&mut self, size: usize) -> Result<GspCommand<'_>> {
292         // Get the current writable area as an array of bytes.
293         let (slice_1, slice_2) = {
294             let (slice_1, slice_2) = self.driver_write_area();
295 
296             #[allow(clippy::incompatible_msrv)]
297             (slice_1.as_flattened_mut(), slice_2.as_flattened_mut())
298         };
299 
300         // If the GSP is still processing previous messages the shared region
301         // may be full in which case we will have to retry once the GSP has
302         // processed the existing commands.
303         if size_of::<GspMsgElement>() + size > slice_1.len() + slice_2.len() {
304             return Err(EAGAIN);
305         }
306 
307         // Extract area for the `GspMsgElement`.
308         let (header, slice_1) = GspMsgElement::from_bytes_mut_prefix(slice_1).ok_or(EIO)?;
309 
310         // Create the contents area.
311         let (slice_1, slice_2) = if slice_1.len() > size {
312             // Contents fits entirely in `slice_1`.
313             (&mut slice_1[..size], &mut slice_2[0..0])
314         } else {
315             // Need all of `slice_1` and some of `slice_2`.
316             let slice_2_len = size - slice_1.len();
317             (slice_1, &mut slice_2[..slice_2_len])
318         };
319 
320         Ok(GspCommand {
321             header,
322             contents: (slice_1, slice_2),
323         })
324     }
325 
326     // Returns the index of the memory page the GSP will write the next message to.
327     //
328     // # Invariants
329     //
330     // - The returned value is between `0` and `MSGQ_NUM_PAGES`.
gsp_write_ptr(&self) -> u32331     fn gsp_write_ptr(&self) -> u32 {
332         super::fw::gsp_mem::gsp_write_ptr(&self.0)
333     }
334 
335     // Returns the index of the memory page the GSP will read the next command from.
336     //
337     // # Invariants
338     //
339     // - The returned value is between `0` and `MSGQ_NUM_PAGES`.
gsp_read_ptr(&self) -> u32340     fn gsp_read_ptr(&self) -> u32 {
341         super::fw::gsp_mem::gsp_read_ptr(&self.0)
342     }
343 
344     // Returns the index of the memory page the CPU can read the next message from.
345     //
346     // # Invariants
347     //
348     // - The returned value is between `0` and `MSGQ_NUM_PAGES`.
cpu_read_ptr(&self) -> u32349     fn cpu_read_ptr(&self) -> u32 {
350         super::fw::gsp_mem::cpu_read_ptr(&self.0)
351     }
352 
353     // Informs the GSP that it can send `elem_count` new pages into the message queue.
advance_cpu_read_ptr(&mut self, elem_count: u32)354     fn advance_cpu_read_ptr(&mut self, elem_count: u32) {
355         super::fw::gsp_mem::advance_cpu_read_ptr(&self.0, elem_count)
356     }
357 
358     // Returns the index of the memory page the CPU can write the next command to.
359     //
360     // # Invariants
361     //
362     // - The returned value is between `0` and `MSGQ_NUM_PAGES`.
cpu_write_ptr(&self) -> u32363     fn cpu_write_ptr(&self) -> u32 {
364         super::fw::gsp_mem::cpu_write_ptr(&self.0)
365     }
366 
367     // Informs the GSP that it can process `elem_count` new pages from the command queue.
advance_cpu_write_ptr(&mut self, elem_count: u32)368     fn advance_cpu_write_ptr(&mut self, elem_count: u32) {
369         super::fw::gsp_mem::advance_cpu_write_ptr(&self.0, elem_count)
370     }
371 }
372 
/// A command ready to be sent on the command queue.
///
/// This is the type returned by [`DmaGspMem::allocate_command`].
struct GspCommand<'a> {
    // Writable reference to the header of the command.
    header: &'a mut GspMsgElement,
    // Writable slices to the contents of the command. The second slice is empty unless the
    // command loops over the end of the circular command queue.
    contents: (&'a mut [u8], &'a mut [u8]),
}
383 
/// A message ready to be processed from the message queue.
///
/// This is the type returned by [`Cmdq::wait_for_msg`].
struct GspMessage<'a> {
    // Reference to the header of the message.
    header: &'a GspMsgElement,
    // Slices to the contents of the message. The second slice is empty unless the message loops
    // over the end of the circular message queue.
    contents: (&'a [u8], &'a [u8]),
}
394 
/// GSP command queue.
///
/// Provides the ability to send commands and receive messages from the GSP using a shared memory
/// area.
pub(crate) struct Cmdq {
    /// Device this command queue belongs to.
    dev: ARef<device::Device>,
    /// Current command sequence number, incremented after each command sent.
    seq: u32,
    /// Memory area shared with the GSP for communicating commands and messages.
    gsp_mem: DmaGspMem,
}
407 
impl Cmdq {
    /// Offset of the data after the PTEs.
    const POST_PTE_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq);

    /// Offset of command queue ring buffer, relative to the end of the PTE array.
    pub(crate) const CMDQ_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq)
        + core::mem::offset_of!(Msgq, msgq)
        - Self::POST_PTE_OFFSET;

    /// Offset of message queue ring buffer, relative to the end of the PTE array.
    pub(crate) const STATQ_OFFSET: usize = core::mem::offset_of!(GspMem, gspq)
        + core::mem::offset_of!(Msgq, msgq)
        - Self::POST_PTE_OFFSET;

    /// Number of page table entries for the GSP shared region.
    pub(crate) const NUM_PTES: usize = size_of::<GspMem>() >> GSP_PAGE_SHIFT;

    /// Creates a new command queue for `dev`, allocating and initializing its shared memory.
    pub(crate) fn new(dev: &device::Device<device::Bound>) -> Result<Cmdq> {
        let gsp_mem = DmaGspMem::new(dev)?;

        Ok(Cmdq {
            dev: dev.into(),
            seq: 0,
            gsp_mem,
        })
    }

    /// Computes the checksum for the message pointed to by `it`.
    ///
    /// A message is made of several parts, so `it` is an iterator over byte slices representing
    /// these parts.
    ///
    /// The checksum is a XOR-fold: each byte is rotated into one of eight lanes of a `u64`
    /// according to its position, and the two halves of the result are folded into a `u32`.
    fn calculate_checksum<T: Iterator<Item = u8>>(it: T) -> u32 {
        let sum64 = it
            .enumerate()
            .map(|(idx, byte)| (((idx % 8) * 8) as u32, byte))
            .fold(0, |acc, (rol, byte)| acc ^ u64::from(byte).rotate_left(rol));

        ((sum64 >> 32) as u32) ^ (sum64 as u32)
    }

    /// Notifies the GSP that we have updated the command queue pointers.
    fn notify_gsp(bar: &Bar0) {
        regs::NV_PGSP_QUEUE_HEAD::default()
            .set_address(0)
            .write(bar);
    }

    /// Sends `command` to the GSP.
    ///
    /// # Errors
    ///
    /// - `EAGAIN` if there was not enough space in the command queue to send the command.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    pub(crate) fn send_command<M>(&mut self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp,
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        let command_size = size_of::<M::Command>() + command.variable_payload_len();
        let dst = self.gsp_mem.allocate_command(command_size)?;

        // Extract area for the command itself.
        let (cmd, payload_1) = M::Command::from_bytes_mut_prefix(dst.contents.0).ok_or(EIO)?;

        // Fill the header and command in-place.
        let msg_element = GspMsgElement::init(self.seq, command_size, M::FUNCTION);
        // SAFETY: `msg_header` and `cmd` are valid references, and not touched if the initializer
        // fails.
        unsafe {
            msg_element.__init(core::ptr::from_mut(dst.header))?;
            command.init().__init(core::ptr::from_mut(cmd))?;
        }

        // Fill the variable-length payload.
        if command_size > size_of::<M::Command>() {
            let mut sbuffer =
                SBufferIter::new_writer([&mut payload_1[..], &mut dst.contents.1[..]]);
            command.init_variable_payload(&mut sbuffer)?;

            // The payload must be written in full; leftover space means the command lied about
            // its length.
            if !sbuffer.is_empty() {
                return Err(EIO);
            }
        }

        // Compute checksum now that the whole message is ready.
        dst.header
            .set_checksum(Cmdq::calculate_checksum(SBufferIter::new_reader([
                dst.header.as_bytes(),
                dst.contents.0,
                dst.contents.1,
            ])));

        dev_dbg!(
            &self.dev,
            "GSP RPC: send: seq# {}, function={}, length=0x{:x}\n",
            self.seq,
            M::FUNCTION,
            dst.header.length(),
        );

        // All set - update the write pointer and inform the GSP of the new command.
        let elem_count = dst.header.element_count();
        self.seq += 1;
        self.gsp_mem.advance_cpu_write_ptr(elem_count);
        Cmdq::notify_gsp(bar);

        Ok(())
    }

    /// Wait for a message to become available on the message queue.
    ///
    /// This works purely at the transport layer and does not interpret or validate the message
    /// beyond the advertised length in its [`GspMsgElement`].
    ///
    /// This method returns:
    ///
    /// - A reference to the [`GspMsgElement`] of the message,
    /// - Two byte slices with the contents of the message. The second slice is empty unless the
    ///   message loops across the message queue.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised, or bad
    ///   checksum) on the message queue.
    ///
    /// Error codes returned by the message constructor are propagated as-is.
    fn wait_for_msg(&self, timeout: Delta) -> Result<GspMessage<'_>> {
        // Wait for a message to arrive from the GSP, polling every millisecond.
        let (slice_1, slice_2) = read_poll_timeout(
            || Ok(self.gsp_mem.driver_read_area()),
            |driver_area| !driver_area.0.is_empty(),
            Delta::from_millis(1),
            timeout,
        )
        .map(|(slice_1, slice_2)| {
            #[allow(clippy::incompatible_msrv)]
            (slice_1.as_flattened(), slice_2.as_flattened())
        })?;

        // Extract the `GspMsgElement`.
        let (header, slice_1) = GspMsgElement::from_bytes_prefix(slice_1).ok_or(EIO)?;

        dev_dbg!(
            self.dev,
            "GSP RPC: receive: seq# {}, function={:?}, length=0x{:x}\n",
            header.sequence(),
            header.function(),
            header.length(),
        );

        let payload_length = header.payload_length();

        // Check that the driver read area is large enough for the message.
        if slice_1.len() + slice_2.len() < payload_length {
            return Err(EIO);
        }

        // Cut the message slices down to the actual length of the message.
        let (slice_1, slice_2) = if slice_1.len() > payload_length {
            // PANIC: we checked above that `slice_1` is at least as long as `payload_length`.
            (slice_1.split_at(payload_length).0, &slice_2[0..0])
        } else {
            (
                slice_1,
                // PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as
                // large as `payload_length`.
                slice_2.split_at(payload_length - slice_1.len()).0,
            )
        };

        // Validate checksum: the fold over header and payload must come out to zero for a
        // well-formed message.
        if Cmdq::calculate_checksum(SBufferIter::new_reader([
            header.as_bytes(),
            slice_1,
            slice_2,
        ])) != 0
        {
            dev_err!(
                self.dev,
                "GSP RPC: receive: Call {} - bad checksum\n",
                header.sequence()
            );
            return Err(EIO);
        }

        Ok(GspMessage {
            header,
            contents: (slice_1, slice_2),
        })
    }

    /// Receive a message from the GSP.
    ///
    /// `init` is a closure tasked with processing the message. It receives a reference to the
    /// message in the message queue, and a [`SBufferIter`] pointing to its variable-length
    /// payload, if any.
    ///
    /// The expected message is specified using the `M` generic parameter. If the pending message
    /// is different, `ERANGE` is returned and the unexpected message is dropped.
    ///
    /// This design is by no means final, but it is simple and will let us go through GSP
    /// initialization.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
    ///   message queue.
    /// - `EINVAL` if the function of the message was unrecognized.
    /// - `ERANGE` if the pending message's function does not match `M::FUNCTION`; the message is
    ///   consumed regardless.
    pub(crate) fn receive_msg<M: MessageFromGsp>(&mut self, timeout: Delta) -> Result<M>
    where
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        let message = self.wait_for_msg(timeout)?;
        let function = message.header.function().map_err(|_| EINVAL)?;

        // Extract the message. Store the result as we want to advance the read pointer even in
        // case of failure.
        let result = if function == M::FUNCTION {
            let (cmd, contents_1) = M::Message::from_bytes_prefix(message.contents.0).ok_or(EIO)?;
            let mut sbuffer = SBufferIter::new_reader([contents_1, message.contents.1]);

            M::read(cmd, &mut sbuffer).map_err(|e| e.into())
        } else {
            Err(ERANGE)
        };

        // Advance the read pointer past this message, rounding its length up to whole pages.
        self.gsp_mem.advance_cpu_read_ptr(u32::try_from(
            message.header.length().div_ceil(GSP_PAGE_SIZE),
        )?);

        result
    }

    /// Returns the DMA handle of the command queue's shared memory region.
    pub(crate) fn dma_handle(&self) -> DmaAddress {
        self.gsp_mem.0.dma_handle()
    }
}
655