// SPDX-License-Identifier: GPL-2.0

use core::{
    cmp,
    mem,
    sync::atomic::{
        fence,
        Ordering, //
    }, //
};

use kernel::{
    device,
    dma::{
        CoherentAllocation,
        DmaAddress, //
    },
    dma_write,
    io::poll::read_poll_timeout,
    prelude::*,
    sync::aref::ARef,
    time::Delta,
    transmute::{
        AsBytes,
        FromBytes, //
    },
};

use crate::{
    driver::Bar0,
    gsp::{
        fw::{
            GspMsgElement,
            MsgFunction,
            MsgqRxHeader,
            MsgqTxHeader, //
        },
        PteArray,
        GSP_PAGE_SHIFT,
        GSP_PAGE_SIZE, //
    },
    num,
    regs,
    sbuffer::SBufferIter, //
};

/// Trait implemented by types representing a command to send to the GSP.
///
/// The main purpose of this trait is to provide [`Cmdq::send_command`] with the information it
/// needs to send a given command.
///
/// [`CommandToGsp::init`] in particular is responsible for initializing the command directly
/// into the space reserved for it in the command queue buffer.
///
/// Some commands may be followed by a variable-length payload. For these, the
/// [`CommandToGsp::variable_payload_len`] and [`CommandToGsp::init_variable_payload`] need to be
/// defined as well.
pub(crate) trait CommandToGsp {
    /// Function identifying this command to the GSP.
    const FUNCTION: MsgFunction;

    /// Type generated by [`CommandToGsp::init`], to be written into the command queue buffer.
    ///
    /// It must be `FromBytes + AsBytes` because it is constructed in-place inside the shared
    /// queue buffer and later checksummed as raw bytes.
    type Command: FromBytes + AsBytes;

    /// Error type returned by [`CommandToGsp::init`].
    type InitError;

    /// In-place command initializer responsible for filling the command in the command queue
    /// buffer.
    fn init(&self) -> impl Init<Self::Command, Self::InitError>;

    /// Size of the variable-length payload following the command structure generated by
    /// [`CommandToGsp::init`].
    ///
    /// Most commands don't have a variable-length payload, so this is zero by default.
    fn variable_payload_len(&self) -> usize {
        0
    }

    /// Method initializing the variable-length payload.
    ///
    /// The command buffer is circular, which means that we may need to jump back to its beginning
    /// while in the middle of a command. For this reason, the variable-length payload is
    /// initialized using a [`SBufferIter`].
    ///
    /// This method will receive a buffer of the length returned by
    /// [`CommandToGsp::variable_payload_len`], and must write every single byte of it. Leaving
    /// unwritten space will lead to an error.
    ///
    /// Most commands don't have a variable-length payload, so this does nothing by default.
    fn init_variable_payload(
        &self,
        // The two slices of the array correspond to the parts of the command before and after
        // the circular buffer wraps around; the second one is empty if no wrap occurs.
        _dst: &mut SBufferIter<core::array::IntoIter<&mut [u8], 2>>,
    ) -> Result {
        Ok(())
    }
}

/// Trait representing messages received from the GSP.
///
/// This trait tells [`Cmdq::receive_msg`] how it can receive a given type of message.
pub(crate) trait MessageFromGsp: Sized {
    /// Function identifying this message from the GSP.
    const FUNCTION: MsgFunction;

    /// Error type returned by [`MessageFromGsp::read`].
    type InitError;

    /// Type containing the raw message to be read from the message queue.
    type Message: FromBytes;

    /// Method reading the message from the message queue and returning it.
    ///
    /// From a `Self::Message` and a [`SBufferIter`], constructs an instance of `Self` and returns
    /// it.
    fn read(
        msg: &Self::Message,
        sbuffer: &mut SBufferIter<core::array::IntoIter<&[u8], 2>>,
    ) -> Result<Self, Self::InitError>;
}

/// Number of GSP pages making the [`Msgq`].
///
/// Note that this is not a power of two, so ring indices are wrapped using modulo arithmetic,
/// not masking.
pub(crate) const MSGQ_NUM_PAGES: u32 = 0x3f;

/// Circular buffer of a [`Msgq`].
///
/// This area of memory is to be shared between the driver and the GSP to exchange commands or
/// messages.
#[repr(C, align(0x1000))]
#[derive(Debug)]
struct MsgqData {
    // One ring entry per GSP page.
    data: [[u8; GSP_PAGE_SIZE]; num::u32_as_usize(MSGQ_NUM_PAGES)],
}

// Annoyingly we are forced to use a literal to specify the alignment of
// `MsgqData`, so check that it corresponds to the actual GSP page size here.
static_assert!(align_of::<MsgqData>() == GSP_PAGE_SIZE);

/// Unidirectional message queue.
///
/// Contains the data for a message queue, that either the driver or GSP writes to.
///
/// Note that while the write pointer of `tx` corresponds to the `msgq` of the same instance, the
/// read pointer of `rx` actually refers to the `Msgq` owned by the other side.
/// This design ensures that only the driver or GSP ever writes to a given instance of this struct.
#[repr(C)]
// There is no struct defined for this in the open-gpu-kernel-source headers.
// Instead it is defined by code in `GspMsgQueuesInit()`.
struct Msgq {
    /// Header for sending messages, including the write pointer.
    tx: MsgqTxHeader,
    /// Header for receiving messages, including the read pointer.
    rx: MsgqRxHeader,
    /// The message queue proper.
    msgq: MsgqData,
}

/// Structure shared between the driver and the GSP and containing the command and message queues.
#[repr(C)]
struct GspMem {
    /// Self-mapping page table entries.
    ptes: PteArray<{ GSP_PAGE_SIZE / size_of::<u64>() }>,
    /// CPU queue: the driver writes commands here, and the GSP reads them. It also contains the
    /// write and read pointers that the CPU updates.
    ///
    /// This member is read-only for the GSP.
    cpuq: Msgq,
    /// GSP queue: the GSP writes messages here, and the driver reads them. It also contains the
    /// write and read pointers that the GSP updates.
    ///
    /// This member is read-only for the driver.
    gspq: Msgq,
}

// SAFETY: These structs don't meet the no-padding requirements of AsBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl AsBytes for GspMem {}

// SAFETY: These structs don't meet the no-padding requirements of FromBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl FromBytes for GspMem {}

/// Wrapper around [`GspMem`] to share it with the GPU using a [`CoherentAllocation`].
///
/// This provides the low-level functionality to communicate with the GSP, including allocation of
/// queue space to write messages to and management of read/write pointers.
///
/// This is shared with the GSP, with clear ownership rules regarding the command queues:
///
/// * The driver owns (i.e. can write to) the part of the CPU message queue between the CPU write
///   pointer and the GSP read pointer. This region is returned by [`Self::driver_write_area`].
/// * The driver owns (i.e. can read from) the part of the GSP message queue between the CPU read
///   pointer and the GSP write pointer. This region is returned by [`Self::driver_read_area`].
struct DmaGspMem(CoherentAllocation<GspMem>);

impl DmaGspMem {
    /// Allocate a new instance and map it for `dev`.
    ///
    /// The allocation is zero-initialized, and the self-mapping PTEs as well as the CPU queue
    /// headers are set up before returning.
198 fn new(dev: &device::Device<device::Bound>) -> Result<Self> { 199 const MSGQ_SIZE: u32 = num::usize_into_u32::<{ size_of::<Msgq>() }>(); 200 const RX_HDR_OFF: u32 = num::usize_into_u32::<{ mem::offset_of!(Msgq, rx) }>(); 201 202 let gsp_mem = 203 CoherentAllocation::<GspMem>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?; 204 dma_write!(gsp_mem[0].ptes = PteArray::new(gsp_mem.dma_handle())?)?; 205 dma_write!(gsp_mem[0].cpuq.tx = MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES))?; 206 dma_write!(gsp_mem[0].cpuq.rx = MsgqRxHeader::new())?; 207 208 Ok(Self(gsp_mem)) 209 } 210 211 /// Returns the region of the CPU message queue that the driver is currently allowed to write 212 /// to. 213 /// 214 /// As the message queue is a circular buffer, the region may be discontiguous in memory. In 215 /// that case the second slice will have a non-zero length. 216 fn driver_write_area(&mut self) -> (&mut [[u8; GSP_PAGE_SIZE]], &mut [[u8; GSP_PAGE_SIZE]]) { 217 let tx = self.cpu_write_ptr() as usize; 218 let rx = self.gsp_read_ptr() as usize; 219 220 // SAFETY: 221 // - The `CoherentAllocation` contains exactly one object. 222 // - We will only access the driver-owned part of the shared memory. 223 // - Per the safety statement of the function, no concurrent access will be performed. 224 let gsp_mem = &mut unsafe { self.0.as_slice_mut(0, 1) }.unwrap()[0]; 225 // PANIC: per the invariant of `cpu_write_ptr`, `tx` is `<= MSGQ_NUM_PAGES`. 226 let (before_tx, after_tx) = gsp_mem.cpuq.msgq.data.split_at_mut(tx); 227 228 if rx <= tx { 229 // The area from `tx` up to the end of the ring, and from the beginning of the ring up 230 // to `rx`, minus one unit, belongs to the driver. 231 if rx == 0 { 232 let last = after_tx.len() - 1; 233 (&mut after_tx[..last], &mut before_tx[0..0]) 234 } else { 235 (after_tx, &mut before_tx[..rx]) 236 } 237 } else { 238 // The area from `tx` to `rx`, minus one unit, belongs to the driver. 
239 // 240 // PANIC: per the invariants of `cpu_write_ptr` and `gsp_read_ptr`, `rx` and `tx` are 241 // `<= MSGQ_NUM_PAGES`, and the test above ensured that `rx > tx`. 242 (after_tx.split_at_mut(rx - tx).0, &mut before_tx[0..0]) 243 } 244 } 245 246 /// Returns the region of the GSP message queue that the driver is currently allowed to read 247 /// from. 248 /// 249 /// As the message queue is a circular buffer, the region may be discontiguous in memory. In 250 /// that case the second slice will have a non-zero length. 251 fn driver_read_area(&self) -> (&[[u8; GSP_PAGE_SIZE]], &[[u8; GSP_PAGE_SIZE]]) { 252 let tx = self.gsp_write_ptr() as usize; 253 let rx = self.cpu_read_ptr() as usize; 254 255 // SAFETY: 256 // - The `CoherentAllocation` contains exactly one object. 257 // - We will only access the driver-owned part of the shared memory. 258 // - Per the safety statement of the function, no concurrent access will be performed. 259 let gsp_mem = &unsafe { self.0.as_slice(0, 1) }.unwrap()[0]; 260 // PANIC: per the invariant of `cpu_read_ptr`, `xx` is `<= MSGQ_NUM_PAGES`. 261 let (before_rx, after_rx) = gsp_mem.gspq.msgq.data.split_at(rx); 262 263 match tx.cmp(&rx) { 264 cmp::Ordering::Equal => (&after_rx[0..0], &after_rx[0..0]), 265 cmp::Ordering::Greater => (&after_rx[..tx], &before_rx[0..0]), 266 cmp::Ordering::Less => (after_rx, &before_rx[..tx]), 267 } 268 } 269 270 /// Allocates a region on the command queue that is large enough to send a command of `size` 271 /// bytes. 272 /// 273 /// This returns a [`GspCommand`] ready to be written to by the caller. 274 /// 275 /// # Errors 276 /// 277 /// - `EAGAIN` if the driver area is too small to hold the requested command. 278 /// - `EIO` if the command header is not properly aligned. 279 fn allocate_command(&mut self, size: usize) -> Result<GspCommand<'_>> { 280 // Get the current writable area as an array of bytes. 
281 let (slice_1, slice_2) = { 282 let (slice_1, slice_2) = self.driver_write_area(); 283 284 (slice_1.as_flattened_mut(), slice_2.as_flattened_mut()) 285 }; 286 287 // If the GSP is still processing previous messages the shared region 288 // may be full in which case we will have to retry once the GSP has 289 // processed the existing commands. 290 if size_of::<GspMsgElement>() + size > slice_1.len() + slice_2.len() { 291 return Err(EAGAIN); 292 } 293 294 // Extract area for the `GspMsgElement`. 295 let (header, slice_1) = GspMsgElement::from_bytes_mut_prefix(slice_1).ok_or(EIO)?; 296 297 // Create the contents area. 298 let (slice_1, slice_2) = if slice_1.len() > size { 299 // Contents fits entirely in `slice_1`. 300 (&mut slice_1[..size], &mut slice_2[0..0]) 301 } else { 302 // Need all of `slice_1` and some of `slice_2`. 303 let slice_2_len = size - slice_1.len(); 304 (slice_1, &mut slice_2[..slice_2_len]) 305 }; 306 307 Ok(GspCommand { 308 header, 309 contents: (slice_1, slice_2), 310 }) 311 } 312 313 // Returns the index of the memory page the GSP will write the next message to. 314 // 315 // # Invariants 316 // 317 // - The returned value is between `0` and `MSGQ_NUM_PAGES`. 318 fn gsp_write_ptr(&self) -> u32 { 319 let gsp_mem = self.0.start_ptr(); 320 321 // SAFETY: 322 // - The 'CoherentAllocation' contains at least one object. 323 // - By the invariants of `CoherentAllocation` the pointer is valid. 324 (unsafe { (*gsp_mem).gspq.tx.write_ptr() } % MSGQ_NUM_PAGES) 325 } 326 327 // Returns the index of the memory page the GSP will read the next command from. 328 // 329 // # Invariants 330 // 331 // - The returned value is between `0` and `MSGQ_NUM_PAGES`. 332 fn gsp_read_ptr(&self) -> u32 { 333 let gsp_mem = self.0.start_ptr(); 334 335 // SAFETY: 336 // - The 'CoherentAllocation' contains at least one object. 337 // - By the invariants of `CoherentAllocation` the pointer is valid. 
338 (unsafe { (*gsp_mem).gspq.rx.read_ptr() } % MSGQ_NUM_PAGES) 339 } 340 341 // Returns the index of the memory page the CPU can read the next message from. 342 // 343 // # Invariants 344 // 345 // - The returned value is between `0` and `MSGQ_NUM_PAGES`. 346 fn cpu_read_ptr(&self) -> u32 { 347 let gsp_mem = self.0.start_ptr(); 348 349 // SAFETY: 350 // - The ['CoherentAllocation'] contains at least one object. 351 // - By the invariants of CoherentAllocation the pointer is valid. 352 (unsafe { (*gsp_mem).cpuq.rx.read_ptr() } % MSGQ_NUM_PAGES) 353 } 354 355 // Informs the GSP that it can send `elem_count` new pages into the message queue. 356 fn advance_cpu_read_ptr(&mut self, elem_count: u32) { 357 let rptr = self.cpu_read_ptr().wrapping_add(elem_count) % MSGQ_NUM_PAGES; 358 359 // Ensure read pointer is properly ordered. 360 fence(Ordering::SeqCst); 361 362 let gsp_mem = self.0.start_ptr_mut(); 363 364 // SAFETY: 365 // - The 'CoherentAllocation' contains at least one object. 366 // - By the invariants of `CoherentAllocation` the pointer is valid. 367 unsafe { (*gsp_mem).cpuq.rx.set_read_ptr(rptr) }; 368 } 369 370 // Returns the index of the memory page the CPU can write the next command to. 371 // 372 // # Invariants 373 // 374 // - The returned value is between `0` and `MSGQ_NUM_PAGES`. 375 fn cpu_write_ptr(&self) -> u32 { 376 let gsp_mem = self.0.start_ptr(); 377 378 // SAFETY: 379 // - The 'CoherentAllocation' contains at least one object. 380 // - By the invariants of `CoherentAllocation` the pointer is valid. 381 (unsafe { (*gsp_mem).cpuq.tx.write_ptr() } % MSGQ_NUM_PAGES) 382 } 383 384 // Informs the GSP that it can process `elem_count` new pages from the command queue. 385 fn advance_cpu_write_ptr(&mut self, elem_count: u32) { 386 let wptr = self.cpu_write_ptr().wrapping_add(elem_count) & MSGQ_NUM_PAGES; 387 let gsp_mem = self.0.start_ptr_mut(); 388 389 // SAFETY: 390 // - The 'CoherentAllocation' contains at least one object. 
391 // - By the invariants of `CoherentAllocation` the pointer is valid. 392 unsafe { (*gsp_mem).cpuq.tx.set_write_ptr(wptr) }; 393 394 // Ensure all command data is visible before triggering the GSP read. 395 fence(Ordering::SeqCst); 396 } 397 } 398 399 /// A command ready to be sent on the command queue. 400 /// 401 /// This is the type returned by [`DmaGspMem::allocate_command`]. 402 struct GspCommand<'a> { 403 // Writable reference to the header of the command. 404 header: &'a mut GspMsgElement, 405 // Writable slices to the contents of the command. The second slice is zero unless the command 406 // loops over the command queue. 407 contents: (&'a mut [u8], &'a mut [u8]), 408 } 409 410 /// A message ready to be processed from the message queue. 411 /// 412 /// This is the type returned by [`Cmdq::wait_for_msg`]. 413 struct GspMessage<'a> { 414 // Reference to the header of the message. 415 header: &'a GspMsgElement, 416 // Slices to the contents of the message. The second slice is zero unless the message loops 417 // over the message queue. 418 contents: (&'a [u8], &'a [u8]), 419 } 420 421 /// GSP command queue. 422 /// 423 /// Provides the ability to send commands and receive messages from the GSP using a shared memory 424 /// area. 425 pub(crate) struct Cmdq { 426 /// Device this command queue belongs to. 427 dev: ARef<device::Device>, 428 /// Current command sequence number. 429 seq: u32, 430 /// Memory area shared with the GSP for communicating commands and messages. 431 gsp_mem: DmaGspMem, 432 } 433 434 impl Cmdq { 435 /// Offset of the data after the PTEs. 436 const POST_PTE_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq); 437 438 /// Offset of command queue ring buffer. 439 pub(crate) const CMDQ_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq) 440 + core::mem::offset_of!(Msgq, msgq) 441 - Self::POST_PTE_OFFSET; 442 443 /// Offset of message queue ring buffer. 
444 pub(crate) const STATQ_OFFSET: usize = core::mem::offset_of!(GspMem, gspq) 445 + core::mem::offset_of!(Msgq, msgq) 446 - Self::POST_PTE_OFFSET; 447 448 /// Number of page table entries for the GSP shared region. 449 pub(crate) const NUM_PTES: usize = size_of::<GspMem>() >> GSP_PAGE_SHIFT; 450 451 /// Creates a new command queue for `dev`. 452 pub(crate) fn new(dev: &device::Device<device::Bound>) -> Result<Cmdq> { 453 let gsp_mem = DmaGspMem::new(dev)?; 454 455 Ok(Cmdq { 456 dev: dev.into(), 457 seq: 0, 458 gsp_mem, 459 }) 460 } 461 462 /// Computes the checksum for the message pointed to by `it`. 463 /// 464 /// A message is made of several parts, so `it` is an iterator over byte slices representing 465 /// these parts. 466 fn calculate_checksum<T: Iterator<Item = u8>>(it: T) -> u32 { 467 let sum64 = it 468 .enumerate() 469 .map(|(idx, byte)| (((idx % 8) * 8) as u32, byte)) 470 .fold(0, |acc, (rol, byte)| acc ^ u64::from(byte).rotate_left(rol)); 471 472 ((sum64 >> 32) as u32) ^ (sum64 as u32) 473 } 474 475 /// Notifies the GSP that we have updated the command queue pointers. 476 fn notify_gsp(bar: &Bar0) { 477 regs::NV_PGSP_QUEUE_HEAD::default() 478 .set_address(0) 479 .write(bar); 480 } 481 482 /// Sends `command` to the GSP. 483 /// 484 /// # Errors 485 /// 486 /// - `EAGAIN` if there was not enough space in the command queue to send the command. 487 /// - `EIO` if the variable payload requested by the command has not been entirely 488 /// written to by its [`CommandToGsp::init_variable_payload`] method. 489 /// 490 /// Error codes returned by the command initializers are propagated as-is. 491 pub(crate) fn send_command<M>(&mut self, bar: &Bar0, command: M) -> Result 492 where 493 M: CommandToGsp, 494 // This allows all error types, including `Infallible`, to be used for `M::InitError`. 
495 Error: From<M::InitError>, 496 { 497 let command_size = size_of::<M::Command>() + command.variable_payload_len(); 498 let dst = self.gsp_mem.allocate_command(command_size)?; 499 500 // Extract area for the command itself. 501 let (cmd, payload_1) = M::Command::from_bytes_mut_prefix(dst.contents.0).ok_or(EIO)?; 502 503 // Fill the header and command in-place. 504 let msg_element = GspMsgElement::init(self.seq, command_size, M::FUNCTION); 505 // SAFETY: `msg_header` and `cmd` are valid references, and not touched if the initializer 506 // fails. 507 unsafe { 508 msg_element.__init(core::ptr::from_mut(dst.header))?; 509 command.init().__init(core::ptr::from_mut(cmd))?; 510 } 511 512 // Fill the variable-length payload. 513 if command_size > size_of::<M::Command>() { 514 let mut sbuffer = 515 SBufferIter::new_writer([&mut payload_1[..], &mut dst.contents.1[..]]); 516 command.init_variable_payload(&mut sbuffer)?; 517 518 if !sbuffer.is_empty() { 519 return Err(EIO); 520 } 521 } 522 523 // Compute checksum now that the whole message is ready. 524 dst.header 525 .set_checksum(Cmdq::calculate_checksum(SBufferIter::new_reader([ 526 dst.header.as_bytes(), 527 dst.contents.0, 528 dst.contents.1, 529 ]))); 530 531 dev_dbg!( 532 &self.dev, 533 "GSP RPC: send: seq# {}, function={}, length=0x{:x}\n", 534 self.seq, 535 M::FUNCTION, 536 dst.header.length(), 537 ); 538 539 // All set - update the write pointer and inform the GSP of the new command. 540 let elem_count = dst.header.element_count(); 541 self.seq += 1; 542 self.gsp_mem.advance_cpu_write_ptr(elem_count); 543 Cmdq::notify_gsp(bar); 544 545 Ok(()) 546 } 547 548 /// Wait for a message to become available on the message queue. 549 /// 550 /// This works purely at the transport layer and does not interpret or validate the message 551 /// beyond the advertised length in its [`GspMsgElement`]. 
552 /// 553 /// This method returns: 554 /// 555 /// - A reference to the [`GspMsgElement`] of the message, 556 /// - Two byte slices with the contents of the message. The second slice is empty unless the 557 /// message loops across the message queue. 558 /// 559 /// # Errors 560 /// 561 /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available. 562 /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the 563 /// message queue. 564 /// 565 /// Error codes returned by the message constructor are propagated as-is. 566 fn wait_for_msg(&self, timeout: Delta) -> Result<GspMessage<'_>> { 567 // Wait for a message to arrive from the GSP. 568 let (slice_1, slice_2) = read_poll_timeout( 569 || Ok(self.gsp_mem.driver_read_area()), 570 |driver_area| !driver_area.0.is_empty(), 571 Delta::from_millis(1), 572 timeout, 573 ) 574 .map(|(slice_1, slice_2)| (slice_1.as_flattened(), slice_2.as_flattened()))?; 575 576 // Extract the `GspMsgElement`. 577 let (header, slice_1) = GspMsgElement::from_bytes_prefix(slice_1).ok_or(EIO)?; 578 579 dev_dbg!( 580 self.dev, 581 "GSP RPC: receive: seq# {}, function={:?}, length=0x{:x}\n", 582 header.sequence(), 583 header.function(), 584 header.length(), 585 ); 586 587 let payload_length = header.payload_length(); 588 589 // Check that the driver read area is large enough for the message. 590 if slice_1.len() + slice_2.len() < payload_length { 591 return Err(EIO); 592 } 593 594 // Cut the message slices down to the actual length of the message. 595 let (slice_1, slice_2) = if slice_1.len() > payload_length { 596 // PANIC: we checked above that `slice_1` is at least as long as `payload_length`. 597 (slice_1.split_at(payload_length).0, &slice_2[0..0]) 598 } else { 599 ( 600 slice_1, 601 // PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as 602 // large as `payload_length`. 603 slice_2.split_at(payload_length - slice_1.len()).0, 604 ) 605 }; 606 607 // Validate checksum. 
608 if Cmdq::calculate_checksum(SBufferIter::new_reader([ 609 header.as_bytes(), 610 slice_1, 611 slice_2, 612 ])) != 0 613 { 614 dev_err!( 615 self.dev, 616 "GSP RPC: receive: Call {} - bad checksum\n", 617 header.sequence() 618 ); 619 return Err(EIO); 620 } 621 622 Ok(GspMessage { 623 header, 624 contents: (slice_1, slice_2), 625 }) 626 } 627 628 /// Receive a message from the GSP. 629 /// 630 /// `init` is a closure tasked with processing the message. It receives a reference to the 631 /// message in the message queue, and a [`SBufferIter`] pointing to its variable-length 632 /// payload, if any. 633 /// 634 /// The expected message is specified using the `M` generic parameter. If the pending message 635 /// is different, `EAGAIN` is returned and the unexpected message is dropped. 636 /// 637 /// This design is by no means final, but it is simple and will let us go through GSP 638 /// initialization. 639 /// 640 /// # Errors 641 /// 642 /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available. 643 /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the 644 /// message queue. 645 /// - `EINVAL` if the function of the message was unrecognized. 646 pub(crate) fn receive_msg<M: MessageFromGsp>(&mut self, timeout: Delta) -> Result<M> 647 where 648 // This allows all error types, including `Infallible`, to be used for `M::InitError`. 649 Error: From<M::InitError>, 650 { 651 let message = self.wait_for_msg(timeout)?; 652 let function = message.header.function().map_err(|_| EINVAL)?; 653 654 // Extract the message. Store the result as we want to advance the read pointer even in 655 // case of failure. 
656 let result = if function == M::FUNCTION { 657 let (cmd, contents_1) = M::Message::from_bytes_prefix(message.contents.0).ok_or(EIO)?; 658 let mut sbuffer = SBufferIter::new_reader([contents_1, message.contents.1]); 659 660 M::read(cmd, &mut sbuffer).map_err(|e| e.into()) 661 } else { 662 Err(ERANGE) 663 }; 664 665 // Advance the read pointer past this message. 666 self.gsp_mem.advance_cpu_read_ptr(u32::try_from( 667 message.header.length().div_ceil(GSP_PAGE_SIZE), 668 )?); 669 670 result 671 } 672 673 /// Returns the DMA handle of the command queue's shared memory region. 674 pub(crate) fn dma_handle(&self) -> DmaAddress { 675 self.gsp_mem.0.dma_handle() 676 } 677 } 678