xref: /linux/drivers/gpu/nova-core/gsp/fw.rs (revision cbd4480cfac54dd4e9f7fb9ac2e0226ea38fecbb)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 pub(crate) mod commands;
4 mod r570_144;
5 
6 // Alias to avoid repeating the version number with every use.
7 use r570_144 as bindings;
8 
9 use core::ops::Range;
10 
11 use kernel::{
12     dma::CoherentAllocation,
13     fmt,
14     prelude::*,
15     ptr::{
16         Alignable,
17         Alignment, //
18     },
19     sizes::{
20         SZ_128K,
21         SZ_1M, //
22     },
23     transmute::{
24         AsBytes,
25         FromBytes, //
26     },
27 };
28 
29 use crate::{
30     fb::FbLayout,
31     firmware::gsp::GspFirmware,
32     gpu::Chipset,
33     gsp::{
34         cmdq::Cmdq, //
35         GSP_PAGE_SIZE,
36     },
37     num::{
38         self,
39         FromSafeCast, //
40     },
41 };
42 
43 /// Empty type to group methods related to heap parameters for running the GSP firmware.
44 enum GspFwHeapParams {}
45 
46 /// Minimum required alignment for the GSP heap (1 MiB).
47 const GSP_HEAP_ALIGNMENT: Alignment = Alignment::new::<{ 1 << 20 }>();
48 
49 impl GspFwHeapParams {
50     /// Returns the amount of GSP-RM heap memory used during GSP-RM boot and initialization (up to
51     /// and including the first client subdevice allocation).
52     fn base_rm_size(_chipset: Chipset) -> u64 {
53         // TODO: this needs to be updated to return the correct value for Hopper+ once support for
54         // them is added:
55         // u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100)
56         u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X)
57     }
58 
59     /// Returns the amount of heap memory required to support a single channel allocation.
60     fn client_alloc_size() -> u64 {
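           // `align_up` is assumed to return `None` if rounding up to `GSP_HEAP_ALIGNMENT`
           // overflows; in that case the size saturates to `u64::MAX`.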
61         u64::from(bindings::GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE)
62             .align_up(GSP_HEAP_ALIGNMENT)
63             .unwrap_or(u64::MAX)
64     }
65 
66     /// Returns the amount of memory to reserve for management purposes for a framebuffer of size
67     /// `fb_size`.
68     fn management_overhead(fb_size: u64) -> u64 {
69         let fb_size_gb = fb_size.div_ceil(u64::from_safe_cast(kernel::sizes::SZ_1G));
70 
71         u64::from(bindings::GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB)
72             .saturating_mul(fb_size_gb)
73             .align_up(GSP_HEAP_ALIGNMENT)
74             .unwrap_or(u64::MAX)
75     }
76 }
77 
78 /// Heap memory requirements and constraints for a given version of the GSP LIBOS.
79 pub(crate) struct LibosParams {
80     /// The base amount of heap required by the GSP operating system, in bytes.
81     carveout_size: u64,
82     /// The minimum and maximum sizes allowed for the GSP FW heap, in bytes.
83     allowed_heap_size: Range<u64>,
84 }
85 
86 impl LibosParams {
87     /// Version 2 of the GSP LIBOS (Turing and GA100)
88     const LIBOS2: LibosParams = LibosParams {
89         carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2),
90         allowed_heap_size: num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB)
91             * num::usize_as_u64(SZ_1M)
92             ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MAX_MB)
93                 * num::usize_as_u64(SZ_1M),
94     };
95 
96     /// Version 3 of the GSP LIBOS (GA102+)
97     const LIBOS3: LibosParams = LibosParams {
98         carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL),
99         allowed_heap_size: num::u32_as_u64(
100             bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
101         ) * num::usize_as_u64(SZ_1M)
102             ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MAX_MB)
103                 * num::usize_as_u64(SZ_1M),
104     };
105 
106     /// Returns the LIBOS parameters corresponding to `chipset`.
107     pub(crate) fn from_chipset(chipset: Chipset) -> &'static LibosParams {
108         if chipset < Chipset::GA102 {
109             &Self::LIBOS2
110         } else {
111             &Self::LIBOS3
112         }
113     }
114 
115     /// Returns the amount of memory (in bytes) to allocate for the WPR heap on `chipset` for a
116     /// framebuffer of `fb_size` bytes.
117     pub(crate) fn wpr_heap_size(&self, chipset: Chipset, fb_size: u64) -> u64 {
118         // The WPR heap will contain the following:
119         // LIBOS carveout,
120         self.carveout_size
121             // RM boot working memory,
122             .saturating_add(GspFwHeapParams::base_rm_size(chipset))
123             // One RM client,
124             .saturating_add(GspFwHeapParams::client_alloc_size())
125             // Overhead for memory management.
126             .saturating_add(GspFwHeapParams::management_overhead(fb_size))
127             // Clamp to the supported heap sizes (the range is half-open, hence `end - 1`).
128             .clamp(self.allowed_heap_size.start, self.allowed_heap_size.end - 1)
129     }
130 }
131 
132 /// Structure passed to the GSP bootloader, containing the framebuffer layout as well as the DMA
133 /// addresses of the GSP bootloader and firmware.
134 #[repr(transparent)]
135 pub(crate) struct GspFwWprMeta(bindings::GspFwWprMeta);
136 
137 // SAFETY: Padding is explicit and does not contain uninitialized data.
138 unsafe impl AsBytes for GspFwWprMeta {}
139 
140 // SAFETY: This struct only contains integer types for which all bit patterns
141 // are valid.
142 unsafe impl FromBytes for GspFwWprMeta {}
143 
144 type GspFwWprMetaBootResumeInfo = bindings::GspFwWprMeta__bindgen_ty_1;
145 type GspFwWprMetaBootInfo = bindings::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;
146 
147 impl GspFwWprMeta {
148     /// Fill in and return a `GspFwWprMeta` suitable for booting `gsp_firmware` using the
149     /// `fb_layout` layout.
150     pub(crate) fn new(gsp_firmware: &GspFirmware, fb_layout: &FbLayout) -> Self {
151         Self(bindings::GspFwWprMeta {
152             // CAST: we want to store the bits of `GSP_FW_WPR_META_MAGIC` unmodified.
153             magic: bindings::GSP_FW_WPR_META_MAGIC as u64,
154             revision: u64::from(bindings::GSP_FW_WPR_META_REVISION),
155             sysmemAddrOfRadix3Elf: gsp_firmware.radix3_dma_handle(),
156             sizeOfRadix3Elf: u64::from_safe_cast(gsp_firmware.size),
157             sysmemAddrOfBootloader: gsp_firmware.bootloader.ucode.dma_handle(),
158             sizeOfBootloader: u64::from_safe_cast(gsp_firmware.bootloader.ucode.size()),
159             bootloaderCodeOffset: u64::from(gsp_firmware.bootloader.code_offset),
160             bootloaderDataOffset: u64::from(gsp_firmware.bootloader.data_offset),
161             bootloaderManifestOffset: u64::from(gsp_firmware.bootloader.manifest_offset),
162             __bindgen_anon_1: GspFwWprMetaBootResumeInfo {
163                 __bindgen_anon_1: GspFwWprMetaBootInfo {
164                     sysmemAddrOfSignature: gsp_firmware.signatures.dma_handle(),
165                     sizeOfSignature: u64::from_safe_cast(gsp_firmware.signatures.size()),
166                 },
167             },
168             gspFwRsvdStart: fb_layout.heap.start,
169             nonWprHeapOffset: fb_layout.heap.start,
170             nonWprHeapSize: fb_layout.heap.end - fb_layout.heap.start,
171             gspFwWprStart: fb_layout.wpr2.start,
172             gspFwHeapOffset: fb_layout.wpr2_heap.start,
173             gspFwHeapSize: fb_layout.wpr2_heap.end - fb_layout.wpr2_heap.start,
174             gspFwOffset: fb_layout.elf.start,
175             bootBinOffset: fb_layout.boot.start,
176             frtsOffset: fb_layout.frts.start,
177             frtsSize: fb_layout.frts.end - fb_layout.frts.start,
178             gspFwWprEnd: fb_layout
179                 .vga_workspace
180                 .start
181                 .align_down(Alignment::new::<SZ_128K>()),
182             gspFwHeapVfPartitionCount: fb_layout.vf_partition_count,
183             fbSize: fb_layout.fb.end - fb_layout.fb.start,
184             vgaWorkspaceOffset: fb_layout.vga_workspace.start,
185             vgaWorkspaceSize: fb_layout.vga_workspace.end - fb_layout.vga_workspace.start,
186             ..Default::default()
187         })
188     }
189 }
190 
191 #[derive(Copy, Clone, Debug, PartialEq)]
192 #[repr(u32)]
193 pub(crate) enum MsgFunction {
194     // Common function codes
195     Nop = bindings::NV_VGPU_MSG_FUNCTION_NOP,
196     SetGuestSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO,
197     AllocRoot = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT,
198     AllocDevice = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE,
199     AllocMemory = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
200     AllocCtxDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA,
201     AllocChannelDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA,
202     MapMemory = bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY,
203     BindCtxDma = bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA,
204     AllocObject = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT,
205     Free = bindings::NV_VGPU_MSG_FUNCTION_FREE,
206     Log = bindings::NV_VGPU_MSG_FUNCTION_LOG,
207     GetGspStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO,
208     SetRegistry = bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY,
209     GspSetSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO,
210     GspInitPostObjGpu = bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU,
211     GspRmControl = bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
212     GetStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO,
213 
214     // Event codes
215     GspInitDone = bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE,
216     GspRunCpuSequencer = bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
217     PostEvent = bindings::NV_VGPU_MSG_EVENT_POST_EVENT,
218     RcTriggered = bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED,
219     MmuFaultQueued = bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
220     OsErrorLog = bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG,
221     GspPostNoCat = bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD,
222     GspLockdownNotice = bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE,
223     UcodeLibOsPrint = bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT,
224 }
225 
226 impl fmt::Display for MsgFunction {
227     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
228         match self {
229             // Common function codes
230             MsgFunction::Nop => write!(f, "NOP"),
231             MsgFunction::SetGuestSystemInfo => write!(f, "SET_GUEST_SYSTEM_INFO"),
232             MsgFunction::AllocRoot => write!(f, "ALLOC_ROOT"),
233             MsgFunction::AllocDevice => write!(f, "ALLOC_DEVICE"),
234             MsgFunction::AllocMemory => write!(f, "ALLOC_MEMORY"),
235             MsgFunction::AllocCtxDma => write!(f, "ALLOC_CTX_DMA"),
236             MsgFunction::AllocChannelDma => write!(f, "ALLOC_CHANNEL_DMA"),
237             MsgFunction::MapMemory => write!(f, "MAP_MEMORY"),
238             MsgFunction::BindCtxDma => write!(f, "BIND_CTX_DMA"),
239             MsgFunction::AllocObject => write!(f, "ALLOC_OBJECT"),
240             MsgFunction::Free => write!(f, "FREE"),
241             MsgFunction::Log => write!(f, "LOG"),
242             MsgFunction::GetGspStaticInfo => write!(f, "GET_GSP_STATIC_INFO"),
243             MsgFunction::SetRegistry => write!(f, "SET_REGISTRY"),
244             MsgFunction::GspSetSystemInfo => write!(f, "GSP_SET_SYSTEM_INFO"),
245             MsgFunction::GspInitPostObjGpu => write!(f, "GSP_INIT_POST_OBJGPU"),
246             MsgFunction::GspRmControl => write!(f, "GSP_RM_CONTROL"),
247             MsgFunction::GetStaticInfo => write!(f, "GET_STATIC_INFO"),
248 
249             // Event codes
250             MsgFunction::GspInitDone => write!(f, "INIT_DONE"),
251             MsgFunction::GspRunCpuSequencer => write!(f, "RUN_CPU_SEQUENCER"),
252             MsgFunction::PostEvent => write!(f, "POST_EVENT"),
253             MsgFunction::RcTriggered => write!(f, "RC_TRIGGERED"),
254             MsgFunction::MmuFaultQueued => write!(f, "MMU_FAULT_QUEUED"),
255             MsgFunction::OsErrorLog => write!(f, "OS_ERROR_LOG"),
256             MsgFunction::GspPostNoCat => write!(f, "NOCAT"),
257             MsgFunction::GspLockdownNotice => write!(f, "LOCKDOWN_NOTICE"),
258             MsgFunction::UcodeLibOsPrint => write!(f, "LIBOS_PRINT"),
259         }
260     }
261 }
262 
263 impl TryFrom<u32> for MsgFunction {
264     type Error = kernel::error::Error;
265 
266     fn try_from(value: u32) -> Result<MsgFunction> {
267         match value {
268             bindings::NV_VGPU_MSG_FUNCTION_NOP => Ok(MsgFunction::Nop),
269             bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO => {
270                 Ok(MsgFunction::SetGuestSystemInfo)
271             }
272             bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT => Ok(MsgFunction::AllocRoot),
273             bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE => Ok(MsgFunction::AllocDevice),
274             bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY => Ok(MsgFunction::AllocMemory),
275             bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA => Ok(MsgFunction::AllocCtxDma),
276             bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA => Ok(MsgFunction::AllocChannelDma),
277             bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY => Ok(MsgFunction::MapMemory),
278             bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA => Ok(MsgFunction::BindCtxDma),
279             bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT => Ok(MsgFunction::AllocObject),
280             bindings::NV_VGPU_MSG_FUNCTION_FREE => Ok(MsgFunction::Free),
281             bindings::NV_VGPU_MSG_FUNCTION_LOG => Ok(MsgFunction::Log),
282             bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO => Ok(MsgFunction::GetGspStaticInfo),
283             bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY => Ok(MsgFunction::SetRegistry),
284             bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO => Ok(MsgFunction::GspSetSystemInfo),
285             bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU => {
286                 Ok(MsgFunction::GspInitPostObjGpu)
287             }
288             bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL => Ok(MsgFunction::GspRmControl),
289             bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO => Ok(MsgFunction::GetStaticInfo),
290             bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE => Ok(MsgFunction::GspInitDone),
291             bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER => {
292                 Ok(MsgFunction::GspRunCpuSequencer)
293             }
294             bindings::NV_VGPU_MSG_EVENT_POST_EVENT => Ok(MsgFunction::PostEvent),
295             bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED => Ok(MsgFunction::RcTriggered),
296             bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED => Ok(MsgFunction::MmuFaultQueued),
297             bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG => Ok(MsgFunction::OsErrorLog),
298             bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD => Ok(MsgFunction::GspPostNoCat),
299             bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE => Ok(MsgFunction::GspLockdownNotice),
300             bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT => Ok(MsgFunction::UcodeLibOsPrint),
301             _ => Err(EINVAL),
302         }
303     }
304 }
305 
306 impl From<MsgFunction> for u32 {
307     fn from(value: MsgFunction) -> Self {
308         // CAST: `MsgFunction` is `repr(u32)` and can thus be cast losslessly.
309         value as u32
310     }
311 }
312 
313 /// Sequencer buffer opcode for GSP sequencer commands.
314 #[derive(Copy, Clone, Debug, PartialEq)]
315 #[repr(u32)]
316 pub(crate) enum SeqBufOpcode {
317     // Core operation opcodes
318     CoreReset = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
319     CoreResume = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
320     CoreStart = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
321     CoreWaitForHalt = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
322 
323     // Delay opcode
324     DelayUs = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,
325 
326     // Register operation opcodes
327     RegModify = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
328     RegPoll = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
329     RegStore = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
330     RegWrite = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
331 }
332 
333 impl fmt::Display for SeqBufOpcode {
334     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
335         match self {
336             SeqBufOpcode::CoreReset => write!(f, "CORE_RESET"),
337             SeqBufOpcode::CoreResume => write!(f, "CORE_RESUME"),
338             SeqBufOpcode::CoreStart => write!(f, "CORE_START"),
339             SeqBufOpcode::CoreWaitForHalt => write!(f, "CORE_WAIT_FOR_HALT"),
340             SeqBufOpcode::DelayUs => write!(f, "DELAY_US"),
341             SeqBufOpcode::RegModify => write!(f, "REG_MODIFY"),
342             SeqBufOpcode::RegPoll => write!(f, "REG_POLL"),
343             SeqBufOpcode::RegStore => write!(f, "REG_STORE"),
344             SeqBufOpcode::RegWrite => write!(f, "REG_WRITE"),
345         }
346     }
347 }
348 
349 impl TryFrom<u32> for SeqBufOpcode {
350     type Error = kernel::error::Error;
351 
352     fn try_from(value: u32) -> Result<SeqBufOpcode> {
353         match value {
354             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
355                 Ok(SeqBufOpcode::CoreReset)
356             }
357             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
358                 Ok(SeqBufOpcode::CoreResume)
359             }
360             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
361                 Ok(SeqBufOpcode::CoreStart)
362             }
363             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
364                 Ok(SeqBufOpcode::CoreWaitForHalt)
365             }
366             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
367             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
368                 Ok(SeqBufOpcode::RegModify)
369             }
370             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
371             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
372             bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
373             _ => Err(EINVAL),
374         }
375     }
376 }
377 
378 impl From<SeqBufOpcode> for u32 {
379     fn from(value: SeqBufOpcode) -> Self {
380         // CAST: `SeqBufOpcode` is `repr(u32)` and can thus be cast losslessly.
381         value as u32
382     }
383 }
384 
385 /// Wrapper for GSP sequencer register write payload.
386 #[repr(transparent)]
387 #[derive(Copy, Clone)]
388 pub(crate) struct RegWritePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);
389 
390 impl RegWritePayload {
391     /// Returns the register address.
392     pub(crate) fn addr(&self) -> u32 {
393         self.0.addr
394     }
395 
396     /// Returns the value to write.
397     pub(crate) fn val(&self) -> u32 {
398         self.0.val
399     }
400 }
401 
402 // SAFETY: This struct only contains integer types for which all bit patterns are valid.
403 unsafe impl FromBytes for RegWritePayload {}
404 
405 // SAFETY: Padding is explicit and will not contain uninitialized data.
406 unsafe impl AsBytes for RegWritePayload {}
407 
408 /// Wrapper for GSP sequencer register modify payload.
409 #[repr(transparent)]
410 #[derive(Copy, Clone)]
411 pub(crate) struct RegModifyPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);
412 
413 impl RegModifyPayload {
414     /// Returns the register address.
415     pub(crate) fn addr(&self) -> u32 {
416         self.0.addr
417     }
418 
419     /// Returns the mask to apply.
420     pub(crate) fn mask(&self) -> u32 {
421         self.0.mask
422     }
423 
424     /// Returns the value to write.
425     pub(crate) fn val(&self) -> u32 {
426         self.0.val
427     }
428 }
429 
430 // SAFETY: This struct only contains integer types for which all bit patterns are valid.
431 unsafe impl FromBytes for RegModifyPayload {}
432 
433 // SAFETY: Padding is explicit and will not contain uninitialized data.
434 unsafe impl AsBytes for RegModifyPayload {}
435 
436 /// Wrapper for GSP sequencer register poll payload.
437 #[repr(transparent)]
438 #[derive(Copy, Clone)]
439 pub(crate) struct RegPollPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_POLL);
440 
441 impl RegPollPayload {
442     /// Returns the register address.
443     pub(crate) fn addr(&self) -> u32 {
444         self.0.addr
445     }
446 
447     /// Returns the mask to apply.
448     pub(crate) fn mask(&self) -> u32 {
449         self.0.mask
450     }
451 
452     /// Returns the expected value.
453     pub(crate) fn val(&self) -> u32 {
454         self.0.val
455     }
456 
457     /// Returns the timeout in microseconds.
458     pub(crate) fn timeout(&self) -> u32 {
459         self.0.timeout
460     }
461 }
462 
463 // SAFETY: This struct only contains integer types for which all bit patterns are valid.
464 unsafe impl FromBytes for RegPollPayload {}
465 
466 // SAFETY: Padding is explicit and will not contain uninitialized data.
467 unsafe impl AsBytes for RegPollPayload {}
468 
469 /// Wrapper for GSP sequencer delay payload.
470 #[repr(transparent)]
471 #[derive(Copy, Clone)]
472 pub(crate) struct DelayUsPayload(bindings::GSP_SEQ_BUF_PAYLOAD_DELAY_US);
473 
474 impl DelayUsPayload {
475     /// Returns the delay value in microseconds.
476     pub(crate) fn val(&self) -> u32 {
477         self.0.val
478     }
479 }
480 
481 // SAFETY: This struct only contains integer types for which all bit patterns are valid.
482 unsafe impl FromBytes for DelayUsPayload {}
483 
484 // SAFETY: Padding is explicit and will not contain uninitialized data.
485 unsafe impl AsBytes for DelayUsPayload {}
486 
487 /// Wrapper for GSP sequencer register store payload.
488 #[repr(transparent)]
489 #[derive(Copy, Clone)]
490 pub(crate) struct RegStorePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_STORE);
491 
492 impl RegStorePayload {
493     /// Returns the register address.
494     pub(crate) fn addr(&self) -> u32 {
495         self.0.addr
496     }
497 
498     /// Returns the storage index.
499     #[allow(unused)]
500     pub(crate) fn index(&self) -> u32 {
501         self.0.index
502     }
503 }
504 
505 // SAFETY: This struct only contains integer types for which all bit patterns are valid.
506 unsafe impl FromBytes for RegStorePayload {}
507 
508 // SAFETY: Padding is explicit and will not contain uninitialized data.
509 unsafe impl AsBytes for RegStorePayload {}
510 
511 /// Wrapper for GSP sequencer buffer command.
512 #[repr(transparent)]
513 pub(crate) struct SequencerBufferCmd(bindings::GSP_SEQUENCER_BUFFER_CMD);
514 
515 impl SequencerBufferCmd {
516     /// Returns the opcode as a `SeqBufOpcode` enum, or an error if it is invalid.
517     pub(crate) fn opcode(&self) -> Result<SeqBufOpcode> {
518         self.0.opCode.try_into()
519     }
520 
521     /// Returns the register write payload by value.
522     ///
523     /// Returns an error if the opcode is not `SeqBufOpcode::RegWrite`.
524     pub(crate) fn reg_write_payload(&self) -> Result<RegWritePayload> {
525         if self.opcode()? != SeqBufOpcode::RegWrite {
526             return Err(EINVAL);
527         }
528         // SAFETY: Opcode is verified to be `RegWrite`, so union contains valid `RegWritePayload`.
529         let payload_bytes = unsafe {
530             core::slice::from_raw_parts(
531                 core::ptr::addr_of!(self.0.payload.regWrite).cast::<u8>(),
532                 core::mem::size_of::<RegWritePayload>(),
533             )
534         };
535         Ok(*RegWritePayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
536     }
537 
538     /// Returns the register modify payload by value.
539     ///
540     /// Returns an error if the opcode is not `SeqBufOpcode::RegModify`.
541     pub(crate) fn reg_modify_payload(&self) -> Result<RegModifyPayload> {
542         if self.opcode()? != SeqBufOpcode::RegModify {
543             return Err(EINVAL);
544         }
545         // SAFETY: Opcode is verified to be `RegModify`, so union contains valid `RegModifyPayload`.
546         let payload_bytes = unsafe {
547             core::slice::from_raw_parts(
548                 core::ptr::addr_of!(self.0.payload.regModify).cast::<u8>(),
549                 core::mem::size_of::<RegModifyPayload>(),
550             )
551         };
552         Ok(*RegModifyPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
553     }
554 
555     /// Returns the register poll payload by value.
556     ///
557     /// Returns an error if the opcode is not `SeqBufOpcode::RegPoll`.
558     pub(crate) fn reg_poll_payload(&self) -> Result<RegPollPayload> {
559         if self.opcode()? != SeqBufOpcode::RegPoll {
560             return Err(EINVAL);
561         }
562         // SAFETY: Opcode is verified to be `RegPoll`, so union contains valid `RegPollPayload`.
563         let payload_bytes = unsafe {
564             core::slice::from_raw_parts(
565                 core::ptr::addr_of!(self.0.payload.regPoll).cast::<u8>(),
566                 core::mem::size_of::<RegPollPayload>(),
567             )
568         };
569         Ok(*RegPollPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
570     }
571 
572     /// Returns the delay payload by value.
573     ///
574     /// Returns an error if the opcode is not `SeqBufOpcode::DelayUs`.
575     pub(crate) fn delay_us_payload(&self) -> Result<DelayUsPayload> {
576         if self.opcode()? != SeqBufOpcode::DelayUs {
577             return Err(EINVAL);
578         }
579         // SAFETY: Opcode is verified to be `DelayUs`, so union contains valid `DelayUsPayload`.
580         let payload_bytes = unsafe {
581             core::slice::from_raw_parts(
582                 core::ptr::addr_of!(self.0.payload.delayUs).cast::<u8>(),
583                 core::mem::size_of::<DelayUsPayload>(),
584             )
585         };
586         Ok(*DelayUsPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
587     }
588 
589     /// Returns the register store payload by value.
590     ///
591     /// Returns an error if the opcode is not `SeqBufOpcode::RegStore`.
592     pub(crate) fn reg_store_payload(&self) -> Result<RegStorePayload> {
593         if self.opcode()? != SeqBufOpcode::RegStore {
594             return Err(EINVAL);
595         }
596         // SAFETY: Opcode is verified to be `RegStore`, so union contains valid `RegStorePayload`.
597         let payload_bytes = unsafe {
598             core::slice::from_raw_parts(
599                 core::ptr::addr_of!(self.0.payload.regStore).cast::<u8>(),
600                 core::mem::size_of::<RegStorePayload>(),
601             )
602         };
603         Ok(*RegStorePayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
604     }
605 }
606 
607 // SAFETY: This struct only contains integer types for which all bit patterns are valid.
608 unsafe impl FromBytes for SequencerBufferCmd {}
609 
610 // SAFETY: Padding is explicit and will not contain uninitialized data.
611 unsafe impl AsBytes for SequencerBufferCmd {}
612 
613 /// Wrapper for GSP run CPU sequencer RPC.
614 #[repr(transparent)]
615 pub(crate) struct RunCpuSequencer(bindings::rpc_run_cpu_sequencer_v17_00);
616 
617 impl RunCpuSequencer {
618     /// Returns the command index.
619     pub(crate) fn cmd_index(&self) -> u32 {
620         self.0.cmdIndex
621     }
622 }
623 
624 // SAFETY: This struct only contains integer types for which all bit patterns are valid.
625 unsafe impl FromBytes for RunCpuSequencer {}
626 
627 // SAFETY: Padding is explicit and will not contain uninitialized data.
628 unsafe impl AsBytes for RunCpuSequencer {}
629 
630 /// Struct containing the arguments required to pass a memory buffer to the GSP
631 /// for use during initialisation.
632 ///
633 /// The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
634 /// configured for a larger page size (e.g. 64K pages), we need to give
635 /// the GSP an array of 4K pages. Since we only create physically contiguous
636 /// buffers, the math to calculate the addresses is simple.
637 ///
638 /// The buffer sizes must be multiples of GSP_PAGE_SIZE. GSP-RM also currently
639 /// ignores the `kind` field for LOGINIT, LOGINTR, and LOGRM, but expects the
640 /// buffers to be physically contiguous anyway.
641 ///
642 /// The memory allocated for the arguments must remain until the GSP sends the
643 /// init_done RPC.
644 #[repr(transparent)]
645 pub(crate) struct LibosMemoryRegionInitArgument(bindings::LibosMemoryRegionInitArgument);
646 
647 // SAFETY: Padding is explicit and does not contain uninitialized data.
648 unsafe impl AsBytes for LibosMemoryRegionInitArgument {}
649 
650 // SAFETY: This struct only contains integer types for which all bit patterns
651 // are valid.
652 unsafe impl FromBytes for LibosMemoryRegionInitArgument {}
653 
654 impl LibosMemoryRegionInitArgument {
655     pub(crate) fn new<A: AsBytes + FromBytes>(
656         name: &'static str,
657         obj: &CoherentAllocation<A>,
658     ) -> Self {
659         /// Generates the `ID8` identifier required for some GSP objects.
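            ///
            /// For example, on a little-endian machine `id8("LOGINIT")` packs the name's bytes in
            /// reverse order and evaluates to `0x004c_4f47_494e_4954`.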
660         fn id8(name: &str) -> u64 {
661             let mut bytes = [0u8; core::mem::size_of::<u64>()];
662 
663             for (c, b) in name.bytes().rev().zip(&mut bytes) {
664                 *b = c;
665             }
666 
667             u64::from_ne_bytes(bytes)
668         }
669 
670         Self(bindings::LibosMemoryRegionInitArgument {
671             id8: id8(name),
672             pa: obj.dma_handle(),
673             size: num::usize_as_u64(obj.size()),
674             kind: num::u32_into_u8::<
675                 { bindings::LibosMemoryRegionKind_LIBOS_MEMORY_REGION_CONTIGUOUS },
676             >(),
677             loc: num::u32_into_u8::<
678                 { bindings::LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM },
679             >(),
680             ..Default::default()
681         })
682     }
683 }
684 
685 /// TX header for setting up a message queue with the GSP.
686 #[repr(transparent)]
687 pub(crate) struct MsgqTxHeader(bindings::msgqTxHeader);
688 
689 impl MsgqTxHeader {
690     /// Create a new TX queue header.
691     ///
692     /// # Arguments
693     ///
694     /// * `msgq_size` - Total size of the message queue structure, in bytes.
695     /// * `rx_hdr_offset` - Offset, in bytes, of the start of the RX header in the message queue
696     ///   structure.
697     /// * `msg_count` - Number of messages that can be sent, i.e. the number of memory pages
698     ///   allocated for the message queue in the message queue structure.
699     pub(crate) fn new(msgq_size: u32, rx_hdr_offset: u32, msg_count: u32) -> Self {
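            // Message entries start one GSP page into the queue (`entryOff`), each spanning one
            // page (`msgSize`); the first page presumably holds the TX/RX headers.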
700         Self(bindings::msgqTxHeader {
701             version: 0,
702             size: msgq_size,
703             msgSize: num::usize_into_u32::<GSP_PAGE_SIZE>(),
704             msgCount: msg_count,
705             writePtr: 0,
706             flags: 1,
707             rxHdrOff: rx_hdr_offset,
708             entryOff: num::usize_into_u32::<GSP_PAGE_SIZE>(),
709         })
710     }
711 
712     /// Returns the value of the write pointer for this queue.
713     pub(crate) fn write_ptr(&self) -> u32 {
714         let ptr = core::ptr::from_ref(&self.0.writePtr);
715 
716         // SAFETY: `ptr` is a valid pointer to a `u32`.
717         unsafe { ptr.read_volatile() }
718     }
719 
720     /// Sets the value of the write pointer for this queue.
721     pub(crate) fn set_write_ptr(&mut self, val: u32) {
722         let ptr = core::ptr::from_mut(&mut self.0.writePtr);
723 
724         // SAFETY: `ptr` is a valid pointer to a `u32`.
725         unsafe { ptr.write_volatile(val) }
726     }
727 }
728 
729 // SAFETY: Padding is explicit and does not contain uninitialized data.
730 unsafe impl AsBytes for MsgqTxHeader {}
731 
732 /// RX header for setting up a message queue with the GSP.
733 #[repr(transparent)]
734 pub(crate) struct MsgqRxHeader(bindings::msgqRxHeader);
735 
736 /// Header for the message RX queue.
737 impl MsgqRxHeader {
738     /// Creates a new RX queue header.
739     pub(crate) fn new() -> Self {
740         Self(Default::default())
741     }
742 
743     /// Returns the value of the read pointer for this queue.
744     pub(crate) fn read_ptr(&self) -> u32 {
745         let ptr = core::ptr::from_ref(&self.0.readPtr);
746 
747         // SAFETY: `ptr` is a valid pointer to a `u32`.
748         unsafe { ptr.read_volatile() }
749     }
750 
751     /// Sets the value of the read pointer for this queue.
752     pub(crate) fn set_read_ptr(&mut self, val: u32) {
753         let ptr = core::ptr::from_mut(&mut self.0.readPtr);
754 
755         // SAFETY: `ptr` is a valid pointer to a `u32`.
756         unsafe { ptr.write_volatile(val) }
757     }
758 }
759 
760 // SAFETY: Padding is explicit and does not contain uninitialized data.
761 unsafe impl AsBytes for MsgqRxHeader {}
762 
763 bitfield! {
764     struct MsgHeaderVersion(u32) {
765         31:24 major as u8;
766         23:16 minor as u8;
767     }
768 }
769 
770 impl MsgHeaderVersion {
771     const MAJOR_TOT: u8 = 3;
772     const MINOR_TOT: u8 = 0;
773 
774     fn new() -> Self {
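            // Encodes header version 3.0, i.e. 0x0300_0000 given the 31:24 (major) / 23:16 (minor)
            // field layout declared above.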
775         Self::default()
776             .set_major(Self::MAJOR_TOT)
777             .set_minor(Self::MINOR_TOT)
778     }
779 }
780 
781 impl bindings::rpc_message_header_v {
782     fn init(cmd_size: usize, function: MsgFunction) -> impl Init<Self, Error> {
783         type RpcMessageHeader = bindings::rpc_message_header_v;
784 
785         try_init!(RpcMessageHeader {
786             header_version: MsgHeaderVersion::new().into(),
787             signature: bindings::NV_VGPU_MSG_SIGNATURE_VALID,
788             function: function.into(),
789             length: size_of::<Self>()
790                 .checked_add(cmd_size)
791                 .ok_or(EOVERFLOW)
792                 .and_then(|v| v.try_into().map_err(|_| EINVAL))?,
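                // The result fields start as all-ones, presumably a "not yet completed" sentinel
                // that the GSP overwrites once it has processed the RPC.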
793             rpc_result: 0xffffffff,
794             rpc_result_private: 0xffffffff,
795             ..Zeroable::init_zeroed()
796         })
797     }
798 }
799 
800 /// GSP Message Element.
801 ///
802 /// This is essentially a message header expected to be followed by the message data.
803 #[repr(transparent)]
804 pub(crate) struct GspMsgElement {
805     inner: bindings::GSP_MSG_QUEUE_ELEMENT,
806 }
807 
808 impl GspMsgElement {
809     /// Creates a new message element.
810     ///
811     /// # Arguments
812     ///
813     /// * `sequence` - Sequence number of the message.
814     /// * `cmd_size` - Size of the command (not including the message element), in bytes.
815     /// * `function` - Function of the message.
816     #[allow(non_snake_case)]
817     pub(crate) fn init(
818         sequence: u32,
819         cmd_size: usize,
820         function: MsgFunction,
821     ) -> impl Init<Self, Error> {
822         type RpcMessageHeader = bindings::rpc_message_header_v;
823         type InnerGspMsgElement = bindings::GSP_MSG_QUEUE_ELEMENT;
824         let init_inner = try_init!(InnerGspMsgElement {
825             seqNum: sequence,
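                // Number of GSP pages needed to hold this element header plus `cmd_size` bytes of
                // command payload.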
826             elemCount: size_of::<Self>()
827                 .checked_add(cmd_size)
828                 .ok_or(EOVERFLOW)?
829                 .div_ceil(GSP_PAGE_SIZE)
830                 .try_into()
831                 .map_err(|_| EOVERFLOW)?,
832             rpc <- RpcMessageHeader::init(cmd_size, function),
833             ..Zeroable::init_zeroed()
834         });
835 
836         try_init!(GspMsgElement {
837             inner <- init_inner,
838         })
839     }
840 
841     /// Sets the checksum of this message.
842     ///
843     /// Since the header is also part of the checksum, this is usually called after the whole
844     /// message has been written to the shared memory area.
845     pub(crate) fn set_checksum(&mut self, checksum: u32) {
846         self.inner.checkSum = checksum;
847     }
848 
849     /// Returns the length of the message's payload.
850     pub(crate) fn payload_length(&self) -> usize {
851         // `rpc.length` includes the length of the RPC message header.
852         num::u32_as_usize(self.inner.rpc.length)
853             .saturating_sub(size_of::<bindings::rpc_message_header_v>())
854     }
855 
856     /// Returns the total length of the message, message and RPC headers included.
857     pub(crate) fn length(&self) -> usize {
858         size_of::<Self>() + self.payload_length()
859     }
860 
861     /// Returns the sequence number of the message.
862     pub(crate) fn sequence(&self) -> u32 {
863         self.inner.rpc.sequence
864     }
865 
866     /// Returns the function of the message, if it is valid, or the invalid function number as an
867     /// error.
868     pub(crate) fn function(&self) -> Result<MsgFunction, u32> {
869         self.inner
870             .rpc
871             .function
872             .try_into()
873             .map_err(|_| self.inner.rpc.function)
874     }
875 
876     /// Returns the number of elements (i.e. memory pages) used by this message.
877     pub(crate) fn element_count(&self) -> u32 {
878         self.inner.elemCount
879     }
880 }
881 
882 // SAFETY: Padding is explicit and does not contain uninitialized data.
883 unsafe impl AsBytes for GspMsgElement {}
884 
885 // SAFETY: This struct only contains integer types for which all bit patterns
886 // are valid.
887 unsafe impl FromBytes for GspMsgElement {}
888 
889 /// Arguments for GSP startup.
890 #[repr(transparent)]
891 pub(crate) struct GspArgumentsCached(bindings::GSP_ARGUMENTS_CACHED);
892 
893 impl GspArgumentsCached {
894     /// Creates the arguments for starting the GSP up using `cmdq` as its command queue.
895     pub(crate) fn new(cmdq: &Cmdq) -> Self {
896         Self(bindings::GSP_ARGUMENTS_CACHED {
897             messageQueueInitArguments: MessageQueueInitArguments::new(cmdq).0,
898             bDmemStack: 1,
899             ..Default::default()
900         })
901     }
902 }
903 
904 // SAFETY: Padding is explicit and will not contain uninitialized data.
905 unsafe impl AsBytes for GspArgumentsCached {}
906 
907 // SAFETY: This struct only contains integer types for which all bit patterns
908 // are valid.
909 unsafe impl FromBytes for GspArgumentsCached {}
910 
911 /// Init arguments for the message queue.
912 #[repr(transparent)]
913 struct MessageQueueInitArguments(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS);
914 
915 impl MessageQueueInitArguments {
916     /// Creates a new init arguments structure for `cmdq`.
917     fn new(cmdq: &Cmdq) -> Self {
918         Self(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS {
919             sharedMemPhysAddr: cmdq.dma_handle(),
920             pageTableEntryCount: num::usize_into_u32::<{ Cmdq::NUM_PTES }>(),
921             cmdQueueOffset: num::usize_as_u64(Cmdq::CMDQ_OFFSET),
922             statQueueOffset: num::usize_as_u64(Cmdq::STATQ_OFFSET),
923             ..Default::default()
924         })
925     }
926 }
927