xref: /linux/drivers/gpu/nova-core/gsp/fw.rs (revision 1c9982b4961334c1edb0745a04cabd34bc2de675)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 pub(crate) mod commands;
4 mod r570_144;
5 
6 // Alias to avoid repeating the version number with every use.
7 use r570_144 as bindings;
8 
9 use core::ops::Range;
10 
11 use kernel::{
12     dma::CoherentAllocation,
13     fmt,
14     prelude::*,
15     ptr::{
16         Alignable,
17         Alignment, //
18     },
19     sizes::{
20         SZ_128K,
21         SZ_1M, //
22     },
23     transmute::{
24         AsBytes,
25         FromBytes, //
26     },
27 };
28 
29 use crate::{
30     fb::FbLayout,
31     firmware::gsp::GspFirmware,
32     gpu::Chipset,
33     gsp::{
34         cmdq::Cmdq, //
35         GSP_PAGE_SIZE,
36     },
37     num::{
38         self,
39         FromSafeCast, //
40     },
41 };
42 
// TODO: Replace with `IoView` projections once available; the `unwrap()` calls go away once we
// switch to the new `dma::Coherent` API.
pub(super) mod gsp_mem {
    //! Accessors for the message-queue read/write pointers stored in the DMA-coherent
    //! [`GspMem`] area shared between the CPU (`cpuq`) and the GSP (`gspq`).
    //!
    //! All pointers are page indices and are reduced modulo [`MSGQ_NUM_PAGES`] on both
    //! read and advance, so callers always observe an in-range value.

    use core::sync::atomic::{
        fence,
        Ordering, //
    };

    use kernel::{
        dma::CoherentAllocation,
        dma_read,
        dma_write,
        prelude::*, //
    };

    use crate::gsp::cmdq::{
        GspMem,
        MSGQ_NUM_PAGES, //
    };

    /// Returns the write pointer of the GSP-side (`gspq`) transmit queue, reduced modulo
    /// the number of queue pages.
    pub(in crate::gsp) fn gsp_write_ptr(qs: &CoherentAllocation<GspMem>) -> u32 {
        // PANIC: A `dma::CoherentAllocation` always contains at least one element.
        || -> Result<u32> { Ok(dma_read!(qs, [0]?.gspq.tx.0.writePtr) % MSGQ_NUM_PAGES) }().unwrap()
    }

    /// Returns the read pointer of the GSP-side (`gspq`) receive queue, reduced modulo
    /// the number of queue pages.
    pub(in crate::gsp) fn gsp_read_ptr(qs: &CoherentAllocation<GspMem>) -> u32 {
        // PANIC: A `dma::CoherentAllocation` always contains at least one element.
        || -> Result<u32> { Ok(dma_read!(qs, [0]?.gspq.rx.0.readPtr) % MSGQ_NUM_PAGES) }().unwrap()
    }

    /// Returns the read pointer of the CPU-side (`cpuq`) receive queue, reduced modulo
    /// the number of queue pages.
    pub(in crate::gsp) fn cpu_read_ptr(qs: &CoherentAllocation<GspMem>) -> u32 {
        // PANIC: A `dma::CoherentAllocation` always contains at least one element.
        || -> Result<u32> { Ok(dma_read!(qs, [0]?.cpuq.rx.0.readPtr) % MSGQ_NUM_PAGES) }().unwrap()
    }

    /// Advances the CPU-side receive queue read pointer by `count` pages, wrapping at
    /// [`MSGQ_NUM_PAGES`]. A full fence is issued before publishing the new pointer.
    pub(in crate::gsp) fn advance_cpu_read_ptr(qs: &CoherentAllocation<GspMem>, count: u32) {
        let rptr = cpu_read_ptr(qs).wrapping_add(count) % MSGQ_NUM_PAGES;

        // Ensure read pointer is properly ordered.
        fence(Ordering::SeqCst);

        // PANIC: A `dma::CoherentAllocation` always contains at least one element.
        || -> Result {
            dma_write!(qs, [0]?.cpuq.rx.0.readPtr, rptr);
            Ok(())
        }()
        .unwrap()
    }

    /// Returns the write pointer of the CPU-side (`cpuq`) transmit queue, reduced modulo
    /// the number of queue pages.
    pub(in crate::gsp) fn cpu_write_ptr(qs: &CoherentAllocation<GspMem>) -> u32 {
        // PANIC: A `dma::CoherentAllocation` always contains at least one element.
        || -> Result<u32> { Ok(dma_read!(qs, [0]?.cpuq.tx.0.writePtr) % MSGQ_NUM_PAGES) }().unwrap()
    }

    /// Advances the CPU-side transmit queue write pointer by `count` pages, wrapping at
    /// [`MSGQ_NUM_PAGES`], then issues a full fence so the queued command data is
    /// visible before the GSP observes the new pointer.
    pub(in crate::gsp) fn advance_cpu_write_ptr(qs: &CoherentAllocation<GspMem>, count: u32) {
        let wptr = cpu_write_ptr(qs).wrapping_add(count) % MSGQ_NUM_PAGES;

        // PANIC: A `dma::CoherentAllocation` always contains at least one element.
        || -> Result {
            dma_write!(qs, [0]?.cpuq.tx.0.writePtr, wptr);
            Ok(())
        }()
        .unwrap();

        // Ensure all command data is visible before triggering the GSP read.
        fence(Ordering::SeqCst);
    }
}
111 
/// Empty type to group methods related to heap parameters for running the GSP firmware.
///
/// Deliberately uninhabited (`enum` with no variants): it exists only as a namespace for
/// associated functions and can never be instantiated.
enum GspFwHeapParams {}

/// Minimum required alignment for the GSP heap (1 MiB).
const GSP_HEAP_ALIGNMENT: Alignment = Alignment::new::<{ 1 << 20 }>();
117 
118 impl GspFwHeapParams {
119     /// Returns the amount of GSP-RM heap memory used during GSP-RM boot and initialization (up to
120     /// and including the first client subdevice allocation).
base_rm_size(_chipset: Chipset) -> u64121     fn base_rm_size(_chipset: Chipset) -> u64 {
122         // TODO: this needs to be updated to return the correct value for Hopper+ once support for
123         // them is added:
124         // u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100)
125         u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X)
126     }
127 
128     /// Returns the amount of heap memory required to support a single channel allocation.
client_alloc_size() -> u64129     fn client_alloc_size() -> u64 {
130         u64::from(bindings::GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE)
131             .align_up(GSP_HEAP_ALIGNMENT)
132             .unwrap_or(u64::MAX)
133     }
134 
135     /// Returns the amount of memory to reserve for management purposes for a framebuffer of size
136     /// `fb_size`.
management_overhead(fb_size: u64) -> u64137     fn management_overhead(fb_size: u64) -> u64 {
138         let fb_size_gb = fb_size.div_ceil(u64::from_safe_cast(kernel::sizes::SZ_1G));
139 
140         u64::from(bindings::GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB)
141             .saturating_mul(fb_size_gb)
142             .align_up(GSP_HEAP_ALIGNMENT)
143             .unwrap_or(u64::MAX)
144     }
145 }
146 
/// Heap memory requirements and constraints for a given version of the GSP LIBOS.
pub(crate) struct LibosParams {
    /// The base amount of heap required by the GSP operating system, in bytes.
    carveout_size: u64,
    /// The minimum and maximum sizes allowed for the GSP FW heap, in bytes.
    // NOTE(review): stored as a half-open `Range`; `wpr_heap_size` clamps to `end - 1`, so the
    // exact `end` value is never produced — confirm that is intended.
    allowed_heap_size: Range<u64>,
}
154 
155 impl LibosParams {
156     /// Version 2 of the GSP LIBOS (Turing and GA100)
157     const LIBOS2: LibosParams = LibosParams {
158         carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2),
159         allowed_heap_size: num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB)
160             * num::usize_as_u64(SZ_1M)
161             ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MAX_MB)
162                 * num::usize_as_u64(SZ_1M),
163     };
164 
165     /// Version 3 of the GSP LIBOS (GA102+)
166     const LIBOS3: LibosParams = LibosParams {
167         carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL),
168         allowed_heap_size: num::u32_as_u64(
169             bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
170         ) * num::usize_as_u64(SZ_1M)
171             ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MAX_MB)
172                 * num::usize_as_u64(SZ_1M),
173     };
174 
175     /// Returns the libos parameters corresponding to `chipset`.
from_chipset(chipset: Chipset) -> &'static LibosParams176     pub(crate) fn from_chipset(chipset: Chipset) -> &'static LibosParams {
177         if chipset < Chipset::GA102 {
178             &Self::LIBOS2
179         } else {
180             &Self::LIBOS3
181         }
182     }
183 
184     /// Returns the amount of memory (in bytes) to allocate for the WPR heap for a framebuffer size
185     /// of `fb_size` (in bytes) for `chipset`.
wpr_heap_size(&self, chipset: Chipset, fb_size: u64) -> u64186     pub(crate) fn wpr_heap_size(&self, chipset: Chipset, fb_size: u64) -> u64 {
187         // The WPR heap will contain the following:
188         // LIBOS carveout,
189         self.carveout_size
190             // RM boot working memory,
191             .saturating_add(GspFwHeapParams::base_rm_size(chipset))
192             // One RM client,
193             .saturating_add(GspFwHeapParams::client_alloc_size())
194             // Overhead for memory management.
195             .saturating_add(GspFwHeapParams::management_overhead(fb_size))
196             // Clamp to the supported heap sizes.
197             .clamp(self.allowed_heap_size.start, self.allowed_heap_size.end - 1)
198     }
199 }
200 
/// Structure passed to the GSP bootloader, containing the framebuffer layout as well as the DMA
/// addresses of the GSP bootloader and firmware.
///
/// `#[repr(transparent)]` wrapper around the bindgen-generated struct so the byte-transmutation
/// traits can be implemented on it.
#[repr(transparent)]
pub(crate) struct GspFwWprMeta(bindings::GspFwWprMeta);

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for GspFwWprMeta {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspFwWprMeta {}

/// Boot/resume info union of [`GspFwWprMeta`] (bindgen anonymous type).
type GspFwWprMetaBootResumeInfo = bindings::GspFwWprMeta__bindgen_ty_1;
/// Boot info member of [`GspFwWprMetaBootResumeInfo`] (bindgen anonymous type).
type GspFwWprMetaBootInfo = bindings::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;
215 
216 impl GspFwWprMeta {
217     /// Fill in and return a `GspFwWprMeta` suitable for booting `gsp_firmware` using the
218     /// `fb_layout` layout.
new(gsp_firmware: &GspFirmware, fb_layout: &FbLayout) -> Self219     pub(crate) fn new(gsp_firmware: &GspFirmware, fb_layout: &FbLayout) -> Self {
220         Self(bindings::GspFwWprMeta {
221             // CAST: we want to store the bits of `GSP_FW_WPR_META_MAGIC` unmodified.
222             magic: bindings::GSP_FW_WPR_META_MAGIC as u64,
223             revision: u64::from(bindings::GSP_FW_WPR_META_REVISION),
224             sysmemAddrOfRadix3Elf: gsp_firmware.radix3_dma_handle(),
225             sizeOfRadix3Elf: u64::from_safe_cast(gsp_firmware.size),
226             sysmemAddrOfBootloader: gsp_firmware.bootloader.ucode.dma_handle(),
227             sizeOfBootloader: u64::from_safe_cast(gsp_firmware.bootloader.ucode.size()),
228             bootloaderCodeOffset: u64::from(gsp_firmware.bootloader.code_offset),
229             bootloaderDataOffset: u64::from(gsp_firmware.bootloader.data_offset),
230             bootloaderManifestOffset: u64::from(gsp_firmware.bootloader.manifest_offset),
231             __bindgen_anon_1: GspFwWprMetaBootResumeInfo {
232                 __bindgen_anon_1: GspFwWprMetaBootInfo {
233                     sysmemAddrOfSignature: gsp_firmware.signatures.dma_handle(),
234                     sizeOfSignature: u64::from_safe_cast(gsp_firmware.signatures.size()),
235                 },
236             },
237             gspFwRsvdStart: fb_layout.heap.start,
238             nonWprHeapOffset: fb_layout.heap.start,
239             nonWprHeapSize: fb_layout.heap.end - fb_layout.heap.start,
240             gspFwWprStart: fb_layout.wpr2.start,
241             gspFwHeapOffset: fb_layout.wpr2_heap.start,
242             gspFwHeapSize: fb_layout.wpr2_heap.end - fb_layout.wpr2_heap.start,
243             gspFwOffset: fb_layout.elf.start,
244             bootBinOffset: fb_layout.boot.start,
245             frtsOffset: fb_layout.frts.start,
246             frtsSize: fb_layout.frts.end - fb_layout.frts.start,
247             gspFwWprEnd: fb_layout
248                 .vga_workspace
249                 .start
250                 .align_down(Alignment::new::<SZ_128K>()),
251             gspFwHeapVfPartitionCount: fb_layout.vf_partition_count,
252             fbSize: fb_layout.fb.end - fb_layout.fb.start,
253             vgaWorkspaceOffset: fb_layout.vga_workspace.start,
254             vgaWorkspaceSize: fb_layout.vga_workspace.end - fb_layout.vga_workspace.start,
255             ..Default::default()
256         })
257     }
258 }
259 
/// RPC message function and event codes exchanged with the GSP, mirroring the
/// `NV_VGPU_MSG_FUNCTION_*` and `NV_VGPU_MSG_EVENT_*` binding constants.
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(u32)]
pub(crate) enum MsgFunction {
    // Common function codes
    Nop = bindings::NV_VGPU_MSG_FUNCTION_NOP,
    SetGuestSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO,
    AllocRoot = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT,
    AllocDevice = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE,
    AllocMemory = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
    AllocCtxDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA,
    AllocChannelDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA,
    MapMemory = bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY,
    BindCtxDma = bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA,
    AllocObject = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT,
    Free = bindings::NV_VGPU_MSG_FUNCTION_FREE,
    Log = bindings::NV_VGPU_MSG_FUNCTION_LOG,
    GetGspStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO,
    SetRegistry = bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY,
    GspSetSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO,
    GspInitPostObjGpu = bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU,
    GspRmControl = bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
    GetStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO,

    // Event codes (messages initiated by the GSP)
    GspInitDone = bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE,
    GspRunCpuSequencer = bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
    PostEvent = bindings::NV_VGPU_MSG_EVENT_POST_EVENT,
    RcTriggered = bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED,
    MmuFaultQueued = bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
    OsErrorLog = bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG,
    GspPostNoCat = bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD,
    GspLockdownNotice = bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE,
    UcodeLibOsPrint = bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT,
}
294 
295 impl fmt::Display for MsgFunction {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result296     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
297         match self {
298             // Common function codes
299             MsgFunction::Nop => write!(f, "NOP"),
300             MsgFunction::SetGuestSystemInfo => write!(f, "SET_GUEST_SYSTEM_INFO"),
301             MsgFunction::AllocRoot => write!(f, "ALLOC_ROOT"),
302             MsgFunction::AllocDevice => write!(f, "ALLOC_DEVICE"),
303             MsgFunction::AllocMemory => write!(f, "ALLOC_MEMORY"),
304             MsgFunction::AllocCtxDma => write!(f, "ALLOC_CTX_DMA"),
305             MsgFunction::AllocChannelDma => write!(f, "ALLOC_CHANNEL_DMA"),
306             MsgFunction::MapMemory => write!(f, "MAP_MEMORY"),
307             MsgFunction::BindCtxDma => write!(f, "BIND_CTX_DMA"),
308             MsgFunction::AllocObject => write!(f, "ALLOC_OBJECT"),
309             MsgFunction::Free => write!(f, "FREE"),
310             MsgFunction::Log => write!(f, "LOG"),
311             MsgFunction::GetGspStaticInfo => write!(f, "GET_GSP_STATIC_INFO"),
312             MsgFunction::SetRegistry => write!(f, "SET_REGISTRY"),
313             MsgFunction::GspSetSystemInfo => write!(f, "GSP_SET_SYSTEM_INFO"),
314             MsgFunction::GspInitPostObjGpu => write!(f, "GSP_INIT_POST_OBJGPU"),
315             MsgFunction::GspRmControl => write!(f, "GSP_RM_CONTROL"),
316             MsgFunction::GetStaticInfo => write!(f, "GET_STATIC_INFO"),
317 
318             // Event codes
319             MsgFunction::GspInitDone => write!(f, "INIT_DONE"),
320             MsgFunction::GspRunCpuSequencer => write!(f, "RUN_CPU_SEQUENCER"),
321             MsgFunction::PostEvent => write!(f, "POST_EVENT"),
322             MsgFunction::RcTriggered => write!(f, "RC_TRIGGERED"),
323             MsgFunction::MmuFaultQueued => write!(f, "MMU_FAULT_QUEUED"),
324             MsgFunction::OsErrorLog => write!(f, "OS_ERROR_LOG"),
325             MsgFunction::GspPostNoCat => write!(f, "NOCAT"),
326             MsgFunction::GspLockdownNotice => write!(f, "LOCKDOWN_NOTICE"),
327             MsgFunction::UcodeLibOsPrint => write!(f, "LIBOS_PRINT"),
328         }
329     }
330 }
331 
332 impl TryFrom<u32> for MsgFunction {
333     type Error = kernel::error::Error;
334 
try_from(value: u32) -> Result<MsgFunction>335     fn try_from(value: u32) -> Result<MsgFunction> {
336         match value {
337             bindings::NV_VGPU_MSG_FUNCTION_NOP => Ok(MsgFunction::Nop),
338             bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO => {
339                 Ok(MsgFunction::SetGuestSystemInfo)
340             }
341             bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT => Ok(MsgFunction::AllocRoot),
342             bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE => Ok(MsgFunction::AllocDevice),
343             bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY => Ok(MsgFunction::AllocMemory),
344             bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA => Ok(MsgFunction::AllocCtxDma),
345             bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA => Ok(MsgFunction::AllocChannelDma),
346             bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY => Ok(MsgFunction::MapMemory),
347             bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA => Ok(MsgFunction::BindCtxDma),
348             bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT => Ok(MsgFunction::AllocObject),
349             bindings::NV_VGPU_MSG_FUNCTION_FREE => Ok(MsgFunction::Free),
350             bindings::NV_VGPU_MSG_FUNCTION_LOG => Ok(MsgFunction::Log),
351             bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO => Ok(MsgFunction::GetGspStaticInfo),
352             bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY => Ok(MsgFunction::SetRegistry),
353             bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO => Ok(MsgFunction::GspSetSystemInfo),
354             bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU => {
355                 Ok(MsgFunction::GspInitPostObjGpu)
356             }
357             bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL => Ok(MsgFunction::GspRmControl),
358             bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO => Ok(MsgFunction::GetStaticInfo),
359             bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE => Ok(MsgFunction::GspInitDone),
360             bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER => {
361                 Ok(MsgFunction::GspRunCpuSequencer)
362             }
363             bindings::NV_VGPU_MSG_EVENT_POST_EVENT => Ok(MsgFunction::PostEvent),
364             bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED => Ok(MsgFunction::RcTriggered),
365             bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED => Ok(MsgFunction::MmuFaultQueued),
366             bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG => Ok(MsgFunction::OsErrorLog),
367             bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD => Ok(MsgFunction::GspPostNoCat),
368             bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE => Ok(MsgFunction::GspLockdownNotice),
369             bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT => Ok(MsgFunction::UcodeLibOsPrint),
370             _ => Err(EINVAL),
371         }
372     }
373 }
374 
impl From<MsgFunction> for u32 {
    /// Converts a [`MsgFunction`] back into its raw `u32` message code.
    fn from(value: MsgFunction) -> Self {
        // CAST: `MsgFunction` is `repr(u32)` and can thus be cast losslessly.
        value as u32
    }
}
381 
/// Sequencer buffer opcode for GSP sequencer commands.
///
/// Mirrors the `GSP_SEQ_BUF_OPCODE_*` binding constants; the opcode selects which member of the
/// command's payload union is valid.
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(u32)]
pub(crate) enum SeqBufOpcode {
    // Core operation opcodes
    CoreReset = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
    CoreResume = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
    CoreStart = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
    CoreWaitForHalt = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,

    // Delay opcode
    DelayUs = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,

    // Register operation opcodes
    RegModify = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
    RegPoll = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
    RegStore = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
    RegWrite = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
}
401 
402 impl fmt::Display for SeqBufOpcode {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result403     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
404         match self {
405             SeqBufOpcode::CoreReset => write!(f, "CORE_RESET"),
406             SeqBufOpcode::CoreResume => write!(f, "CORE_RESUME"),
407             SeqBufOpcode::CoreStart => write!(f, "CORE_START"),
408             SeqBufOpcode::CoreWaitForHalt => write!(f, "CORE_WAIT_FOR_HALT"),
409             SeqBufOpcode::DelayUs => write!(f, "DELAY_US"),
410             SeqBufOpcode::RegModify => write!(f, "REG_MODIFY"),
411             SeqBufOpcode::RegPoll => write!(f, "REG_POLL"),
412             SeqBufOpcode::RegStore => write!(f, "REG_STORE"),
413             SeqBufOpcode::RegWrite => write!(f, "REG_WRITE"),
414         }
415     }
416 }
417 
418 impl TryFrom<u32> for SeqBufOpcode {
419     type Error = kernel::error::Error;
420 
try_from(value: u32) -> Result<SeqBufOpcode>421     fn try_from(value: u32) -> Result<SeqBufOpcode> {
422         match value {
423             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
424                 Ok(SeqBufOpcode::CoreReset)
425             }
426             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
427                 Ok(SeqBufOpcode::CoreResume)
428             }
429             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
430                 Ok(SeqBufOpcode::CoreStart)
431             }
432             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
433                 Ok(SeqBufOpcode::CoreWaitForHalt)
434             }
435             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
436             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
437                 Ok(SeqBufOpcode::RegModify)
438             }
439             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
440             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
441             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
442             _ => Err(EINVAL),
443         }
444     }
445 }
446 
impl From<SeqBufOpcode> for u32 {
    /// Converts a [`SeqBufOpcode`] back into its raw `u32` opcode value.
    fn from(value: SeqBufOpcode) -> Self {
        // CAST: `SeqBufOpcode` is `repr(u32)` and can thus be cast losslessly.
        value as u32
    }
}
453 
/// Wrapper for GSP sequencer register write payload.
///
/// `#[repr(transparent)]` wrapper around the bindgen-generated payload struct so the
/// byte-transmutation traits can be implemented on it.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegWritePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);

impl RegWritePayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the value to write.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegWritePayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegWritePayload {}
476 
/// Wrapper for GSP sequencer register modify payload.
///
/// `#[repr(transparent)]` wrapper around the bindgen-generated payload struct so the
/// byte-transmutation traits can be implemented on it.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegModifyPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);

impl RegModifyPayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the mask to apply.
    pub(crate) fn mask(&self) -> u32 {
        self.0.mask
    }

    /// Returns the value to write.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegModifyPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegModifyPayload {}
504 
/// Wrapper for GSP sequencer register poll payload.
///
/// `#[repr(transparent)]` wrapper around the bindgen-generated payload struct so the
/// byte-transmutation traits can be implemented on it.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegPollPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_POLL);

impl RegPollPayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the mask to apply.
    pub(crate) fn mask(&self) -> u32 {
        self.0.mask
    }

    /// Returns the expected value.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }

    /// Returns the timeout in microseconds.
    pub(crate) fn timeout(&self) -> u32 {
        self.0.timeout
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegPollPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegPollPayload {}
537 
/// Wrapper for GSP sequencer delay payload.
///
/// `#[repr(transparent)]` wrapper around the bindgen-generated payload struct so the
/// byte-transmutation traits can be implemented on it.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct DelayUsPayload(bindings::GSP_SEQ_BUF_PAYLOAD_DELAY_US);

impl DelayUsPayload {
    /// Returns the delay value in microseconds.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for DelayUsPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for DelayUsPayload {}
555 
/// Wrapper for GSP sequencer register store payload.
///
/// `#[repr(transparent)]` wrapper around the bindgen-generated payload struct so the
/// byte-transmutation traits can be implemented on it.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegStorePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_STORE);

impl RegStorePayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the storage index.
    // Currently unreferenced; kept for parity with the binding's fields.
    #[allow(unused)]
    pub(crate) fn index(&self) -> u32 {
        self.0.index
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegStorePayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegStorePayload {}
579 
580 /// Wrapper for GSP sequencer buffer command.
581 #[repr(transparent)]
582 pub(crate) struct SequencerBufferCmd(bindings::GSP_SEQUENCER_BUFFER_CMD);
583 
584 impl SequencerBufferCmd {
585     /// Returns the opcode as a `SeqBufOpcode` enum, or error if invalid.
opcode(&self) -> Result<SeqBufOpcode>586     pub(crate) fn opcode(&self) -> Result<SeqBufOpcode> {
587         self.0.opCode.try_into()
588     }
589 
590     /// Returns the register write payload by value.
591     ///
592     /// Returns an error if the opcode is not `SeqBufOpcode::RegWrite`.
reg_write_payload(&self) -> Result<RegWritePayload>593     pub(crate) fn reg_write_payload(&self) -> Result<RegWritePayload> {
594         if self.opcode()? != SeqBufOpcode::RegWrite {
595             return Err(EINVAL);
596         }
597         // SAFETY: Opcode is verified to be `RegWrite`, so union contains valid `RegWritePayload`.
598         let payload_bytes = unsafe {
599             core::slice::from_raw_parts(
600                 core::ptr::addr_of!(self.0.payload.regWrite).cast::<u8>(),
601                 core::mem::size_of::<RegWritePayload>(),
602             )
603         };
604         Ok(*RegWritePayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
605     }
606 
607     /// Returns the register modify payload by value.
608     ///
609     /// Returns an error if the opcode is not `SeqBufOpcode::RegModify`.
reg_modify_payload(&self) -> Result<RegModifyPayload>610     pub(crate) fn reg_modify_payload(&self) -> Result<RegModifyPayload> {
611         if self.opcode()? != SeqBufOpcode::RegModify {
612             return Err(EINVAL);
613         }
614         // SAFETY: Opcode is verified to be `RegModify`, so union contains valid `RegModifyPayload`.
615         let payload_bytes = unsafe {
616             core::slice::from_raw_parts(
617                 core::ptr::addr_of!(self.0.payload.regModify).cast::<u8>(),
618                 core::mem::size_of::<RegModifyPayload>(),
619             )
620         };
621         Ok(*RegModifyPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
622     }
623 
624     /// Returns the register poll payload by value.
625     ///
626     /// Returns an error if the opcode is not `SeqBufOpcode::RegPoll`.
reg_poll_payload(&self) -> Result<RegPollPayload>627     pub(crate) fn reg_poll_payload(&self) -> Result<RegPollPayload> {
628         if self.opcode()? != SeqBufOpcode::RegPoll {
629             return Err(EINVAL);
630         }
631         // SAFETY: Opcode is verified to be `RegPoll`, so union contains valid `RegPollPayload`.
632         let payload_bytes = unsafe {
633             core::slice::from_raw_parts(
634                 core::ptr::addr_of!(self.0.payload.regPoll).cast::<u8>(),
635                 core::mem::size_of::<RegPollPayload>(),
636             )
637         };
638         Ok(*RegPollPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
639     }
640 
641     /// Returns the delay payload by value.
642     ///
643     /// Returns an error if the opcode is not `SeqBufOpcode::DelayUs`.
delay_us_payload(&self) -> Result<DelayUsPayload>644     pub(crate) fn delay_us_payload(&self) -> Result<DelayUsPayload> {
645         if self.opcode()? != SeqBufOpcode::DelayUs {
646             return Err(EINVAL);
647         }
648         // SAFETY: Opcode is verified to be `DelayUs`, so union contains valid `DelayUsPayload`.
649         let payload_bytes = unsafe {
650             core::slice::from_raw_parts(
651                 core::ptr::addr_of!(self.0.payload.delayUs).cast::<u8>(),
652                 core::mem::size_of::<DelayUsPayload>(),
653             )
654         };
655         Ok(*DelayUsPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
656     }
657 
658     /// Returns the register store payload by value.
659     ///
660     /// Returns an error if the opcode is not `SeqBufOpcode::RegStore`.
reg_store_payload(&self) -> Result<RegStorePayload>661     pub(crate) fn reg_store_payload(&self) -> Result<RegStorePayload> {
662         if self.opcode()? != SeqBufOpcode::RegStore {
663             return Err(EINVAL);
664         }
665         // SAFETY: Opcode is verified to be `RegStore`, so union contains valid `RegStorePayload`.
666         let payload_bytes = unsafe {
667             core::slice::from_raw_parts(
668                 core::ptr::addr_of!(self.0.payload.regStore).cast::<u8>(),
669                 core::mem::size_of::<RegStorePayload>(),
670             )
671         };
672         Ok(*RegStorePayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
673     }
674 }
675 
676 // SAFETY: This struct only contains integer types for which all bit patterns are valid.
677 unsafe impl FromBytes for SequencerBufferCmd {}
678 
679 // SAFETY: Padding is explicit and will not contain uninitialized data.
680 unsafe impl AsBytes for SequencerBufferCmd {}
681 
682 /// Wrapper for GSP run CPU sequencer RPC.
683 #[repr(transparent)]
684 pub(crate) struct RunCpuSequencer(bindings::rpc_run_cpu_sequencer_v17_00);
685 
686 impl RunCpuSequencer {
687     /// Returns the command index.
cmd_index(&self) -> u32688     pub(crate) fn cmd_index(&self) -> u32 {
689         self.0.cmdIndex
690     }
691 }
692 
// SAFETY: This struct only contains integer types for which all bit patterns are valid, so any
// properly-sized byte sequence is a valid value.
unsafe impl FromBytes for RunCpuSequencer {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RunCpuSequencer {}
698 
/// Struct containing the arguments required to pass a memory buffer to the GSP
/// for use during initialisation.
///
/// The GSP only understands 4K pages (`GSP_PAGE_SIZE`), so even if the kernel is
/// configured for a larger page size (e.g. 64K pages), we need to give
/// the GSP an array of 4K pages. Since we only create physically contiguous
/// buffers the math to calculate the addresses is simple.
///
/// The buffers must be a multiple of `GSP_PAGE_SIZE`. GSP-RM also currently
/// ignores the `kind` field for LOGINIT, LOGINTR, and LOGRM, but expects the
/// buffers to be physically contiguous anyway.
///
/// The memory allocated for the arguments must remain until the GSP sends the
/// `init_done` RPC.
#[repr(transparent)]
pub(crate) struct LibosMemoryRegionInitArgument(bindings::LibosMemoryRegionInitArgument);

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for LibosMemoryRegionInitArgument {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for LibosMemoryRegionInitArgument {}
722 
723 impl LibosMemoryRegionInitArgument {
new<A: AsBytes + FromBytes>( name: &'static str, obj: &CoherentAllocation<A>, ) -> Self724     pub(crate) fn new<A: AsBytes + FromBytes>(
725         name: &'static str,
726         obj: &CoherentAllocation<A>,
727     ) -> Self {
728         /// Generates the `ID8` identifier required for some GSP objects.
729         fn id8(name: &str) -> u64 {
730             let mut bytes = [0u8; core::mem::size_of::<u64>()];
731 
732             for (c, b) in name.bytes().rev().zip(&mut bytes) {
733                 *b = c;
734             }
735 
736             u64::from_ne_bytes(bytes)
737         }
738 
739         Self(bindings::LibosMemoryRegionInitArgument {
740             id8: id8(name),
741             pa: obj.dma_handle(),
742             size: num::usize_as_u64(obj.size()),
743             kind: num::u32_into_u8::<
744                 { bindings::LibosMemoryRegionKind_LIBOS_MEMORY_REGION_CONTIGUOUS },
745             >(),
746             loc: num::u32_into_u8::<
747                 { bindings::LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM },
748             >(),
749             ..Default::default()
750         })
751     }
752 }
753 
/// TX header for setting up a message queue with the GSP.
///
/// Wraps the raw `msgqTxHeader` binding so trait impls can be attached to it.
#[repr(transparent)]
pub(crate) struct MsgqTxHeader(bindings::msgqTxHeader);
757 
758 impl MsgqTxHeader {
759     /// Create a new TX queue header.
760     ///
761     /// # Arguments
762     ///
763     /// * `msgq_size` - Total size of the message queue structure, in bytes.
764     /// * `rx_hdr_offset` - Offset, in bytes, of the start of the RX header in the message queue
765     ///   structure.
766     /// * `msg_count` - Number of messages that can be sent, i.e. the number of memory pages
767     ///   allocated for the message queue in the message queue structure.
new(msgq_size: u32, rx_hdr_offset: u32, msg_count: u32) -> Self768     pub(crate) fn new(msgq_size: u32, rx_hdr_offset: u32, msg_count: u32) -> Self {
769         Self(bindings::msgqTxHeader {
770             version: 0,
771             size: msgq_size,
772             msgSize: num::usize_into_u32::<GSP_PAGE_SIZE>(),
773             msgCount: msg_count,
774             writePtr: 0,
775             flags: 1,
776             rxHdrOff: rx_hdr_offset,
777             entryOff: num::usize_into_u32::<GSP_PAGE_SIZE>(),
778         })
779     }
780 }
781 
// SAFETY: Padding is explicit and does not contain uninitialized data.
// Note: only `AsBytes` is implemented; this header is written to the GSP, never read back as-is.
unsafe impl AsBytes for MsgqTxHeader {}
784 
/// RX header for setting up a message queue with the GSP.
///
/// Wraps the raw `msgqRxHeader` binding so trait impls can be attached to it.
#[repr(transparent)]
pub(crate) struct MsgqRxHeader(bindings::msgqRxHeader);
788 
789 /// Header for the message RX queue.
790 impl MsgqRxHeader {
791     /// Creates a new RX queue header.
new() -> Self792     pub(crate) fn new() -> Self {
793         Self(Default::default())
794     }
795 }
796 
// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for MsgqRxHeader {}
799 
// Packed RPC message-header version: major version in bits 31:24, minor version in bits 23:16
// of a `u32`.
bitfield! {
    struct MsgHeaderVersion(u32) {
        31:24 major as u8;
        23:16 minor as u8;
    }
}
806 
807 impl MsgHeaderVersion {
808     const MAJOR_TOT: u8 = 3;
809     const MINOR_TOT: u8 = 0;
810 
new() -> Self811     fn new() -> Self {
812         Self::default()
813             .set_major(Self::MAJOR_TOT)
814             .set_minor(Self::MINOR_TOT)
815     }
816 }
817 
impl bindings::rpc_message_header_v {
    /// Returns an in-place initializer for an RPC message header carrying `function`, followed
    /// by `cmd_size` bytes of command payload.
    fn init(cmd_size: usize, function: MsgFunction) -> impl Init<Self, Error> {
        type RpcMessageHeader = bindings::rpc_message_header_v;

        try_init!(RpcMessageHeader {
            header_version: MsgHeaderVersion::new().into(),
            signature: bindings::NV_VGPU_MSG_SIGNATURE_VALID,
            function: function.into(),
            // `length` covers this header plus the command payload that follows it.
            length: size_of::<Self>()
                .checked_add(cmd_size)
                .ok_or(EOVERFLOW)
                .and_then(|v| v.try_into().map_err(|_| EINVAL))?,
            // Result fields are pre-filled with an all-ones sentinel; presumably the GSP
            // overwrites them in its reply -- TODO confirm.
            rpc_result: 0xffffffff,
            rpc_result_private: 0xffffffff,
            ..Zeroable::init_zeroed()
        })
    }
}
836 
/// GSP Message Element.
///
/// This is essentially a message header expected to be followed by the message data.
#[repr(transparent)]
pub(crate) struct GspMsgElement {
    // Raw `GSP_MSG_QUEUE_ELEMENT` binding; accessed only through the methods below.
    inner: bindings::GSP_MSG_QUEUE_ELEMENT,
}
844 
impl GspMsgElement {
    /// Creates a new message element.
    ///
    /// # Arguments
    ///
    /// * `sequence` - Sequence number of the message.
    /// * `cmd_size` - Size of the command (not including the message element), in bytes.
    /// * `function` - Function of the message.
    // Presumably allowed for the C-style field names used in the initializer -- TODO confirm.
    #[allow(non_snake_case)]
    pub(crate) fn init(
        sequence: u32,
        cmd_size: usize,
        function: MsgFunction,
    ) -> impl Init<Self, Error> {
        type RpcMessageHeader = bindings::rpc_message_header_v;
        type InnerGspMsgElement = bindings::GSP_MSG_QUEUE_ELEMENT;
        let init_inner = try_init!(InnerGspMsgElement {
            seqNum: sequence,
            // Number of GSP pages covered by the element header plus the command,
            // rounded up to whole pages.
            elemCount: size_of::<Self>()
                .checked_add(cmd_size)
                .ok_or(EOVERFLOW)?
                .div_ceil(GSP_PAGE_SIZE)
                .try_into()
                .map_err(|_| EOVERFLOW)?,
            rpc <- RpcMessageHeader::init(cmd_size, function),
            ..Zeroable::init_zeroed()
        });

        try_init!(GspMsgElement {
            inner <- init_inner,
        })
    }

    /// Sets the checksum of this message.
    ///
    /// Since the header is also part of the checksum, this is usually called after the whole
    /// message has been written to the shared memory area.
    pub(crate) fn set_checksum(&mut self, checksum: u32) {
        self.inner.checkSum = checksum;
    }

    /// Returns the length of the message's payload.
    pub(crate) fn payload_length(&self) -> usize {
        // `rpc.length` includes the length of the RPC message header.
        num::u32_as_usize(self.inner.rpc.length)
            .saturating_sub(size_of::<bindings::rpc_message_header_v>())
    }

    /// Returns the total length of the message, message and RPC headers included.
    pub(crate) fn length(&self) -> usize {
        size_of::<Self>() + self.payload_length()
    }

    /// Returns the sequence number of the message.
    pub(crate) fn sequence(&self) -> u32 {
        self.inner.rpc.sequence
    }

    /// Returns the function of the message, if it is valid, or the invalid function number as an
    /// error.
    pub(crate) fn function(&self) -> Result<MsgFunction, u32> {
        self.inner
            .rpc
            .function
            .try_into()
            .map_err(|_| self.inner.rpc.function)
    }

    /// Returns the number of elements (i.e. memory pages) used by this message.
    pub(crate) fn element_count(&self) -> u32 {
        self.inner.elemCount
    }
}
918 
// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for GspMsgElement {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspMsgElement {}
925 
/// Arguments for GSP startup.
///
/// Wraps the raw `GSP_ARGUMENTS_CACHED` binding so trait impls can be attached to it.
#[repr(transparent)]
pub(crate) struct GspArgumentsCached(bindings::GSP_ARGUMENTS_CACHED);
929 
930 impl GspArgumentsCached {
931     /// Creates the arguments for starting the GSP up using `cmdq` as its command queue.
new(cmdq: &Cmdq) -> Self932     pub(crate) fn new(cmdq: &Cmdq) -> Self {
933         Self(bindings::GSP_ARGUMENTS_CACHED {
934             messageQueueInitArguments: MessageQueueInitArguments::new(cmdq).0,
935             bDmemStack: 1,
936             ..Default::default()
937         })
938     }
939 }
940 
// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for GspArgumentsCached {}
943 
/// On Turing and GA100, the entries in the `LibosMemoryRegionInitArgument`
/// must all be a multiple of GSP_PAGE_SIZE in size, so add padding to force it
/// to that size.
#[repr(C)]
pub(crate) struct GspArgumentsPadded {
    pub(crate) inner: GspArgumentsCached,
    // Pads the struct up to exactly one GSP page. The const subtraction in the array length
    // fails the build if the cached arguments ever outgrow a page.
    _padding: [u8; GSP_PAGE_SIZE - core::mem::size_of::<bindings::GSP_ARGUMENTS_CACHED>()],
}
952 
// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for GspArgumentsPadded {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspArgumentsPadded {}
959 
/// Init arguments for the message queue.
///
/// Wraps the raw `MESSAGE_QUEUE_INIT_ARGUMENTS` binding; embedded into `GspArgumentsCached`.
#[repr(transparent)]
struct MessageQueueInitArguments(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS);
963 
964 impl MessageQueueInitArguments {
965     /// Creates a new init arguments structure for `cmdq`.
new(cmdq: &Cmdq) -> Self966     fn new(cmdq: &Cmdq) -> Self {
967         Self(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS {
968             sharedMemPhysAddr: cmdq.dma_handle(),
969             pageTableEntryCount: num::usize_into_u32::<{ Cmdq::NUM_PTES }>(),
970             cmdQueueOffset: num::usize_as_u64(Cmdq::CMDQ_OFFSET),
971             statQueueOffset: num::usize_as_u64(Cmdq::STATQ_OFFSET),
972             ..Default::default()
973         })
974     }
975 }
976