// SPDX-License-Identifier: GPL-2.0

pub(crate) mod commands;
mod r570_144;

// Alias to avoid repeating the version number with every use.
use r570_144 as bindings;

use core::{
    fmt,
    ops::Range, //
};

use kernel::{
    dma::CoherentAllocation,
    prelude::*,
    ptr::{
        Alignable,
        Alignment, //
    },
    sizes::{
        SZ_128K,
        SZ_1M, //
    },
    transmute::{
        AsBytes,
        FromBytes, //
    },
};

use crate::{
    fb::FbLayout,
    firmware::gsp::GspFirmware,
    gpu::Chipset,
    gsp::{
        cmdq::Cmdq, //
        GSP_PAGE_SIZE,
    },
    num::{
        self,
        FromSafeCast, //
    },
};

/// Empty type to group methods related to heap parameters for running the GSP firmware.
enum GspFwHeapParams {}

/// Minimum required alignment for the GSP heap.
const GSP_HEAP_ALIGNMENT: Alignment = Alignment::new::<{ 1 << 20 }>();

impl GspFwHeapParams {
    /// Returns the amount of GSP-RM heap memory used during GSP-RM boot and initialization (up to
    /// and including the first client subdevice allocation).
    fn base_rm_size(_chipset: Chipset) -> u64 {
        // TODO: this needs to be updated to return the correct value for Hopper+ once support for
        // them is added:
        // u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100)
        u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X)
    }

    /// Returns the amount of heap memory required to support a single channel allocation.
    ///
    /// The result is rounded up to [`GSP_HEAP_ALIGNMENT`].
    fn client_alloc_size() -> u64 {
        u64::from(bindings::GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE)
            .align_up(GSP_HEAP_ALIGNMENT)
            // `align_up` returns `None` on overflow; saturate in that case.
            .unwrap_or(u64::MAX)
    }

    /// Returns the amount of memory to reserve for management purposes for a framebuffer of size
    /// `fb_size`.
    ///
    /// The overhead scales per gigabyte of framebuffer and is rounded up to
    /// [`GSP_HEAP_ALIGNMENT`].
    fn management_overhead(fb_size: u64) -> u64 {
        // Framebuffer size in gigabytes, rounded up to the next whole GB.
        let fb_size_gb = fb_size.div_ceil(u64::from_safe_cast(kernel::sizes::SZ_1G));

        u64::from(bindings::GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB)
            .saturating_mul(fb_size_gb)
            .align_up(GSP_HEAP_ALIGNMENT)
            // `align_up` returns `None` on overflow; saturate in that case.
            .unwrap_or(u64::MAX)
    }
}

/// Heap memory requirements and constraints for a given version of the GSP LIBOS.
pub(crate) struct LibosParams {
    /// The base amount of heap required by the GSP operating system, in bytes.
    carveout_size: u64,
    /// The minimum and maximum sizes allowed for the GSP FW heap, in bytes.
    ///
    /// Note that as a [`Range`], the `end` bound is exclusive.
    allowed_heap_size: Range<u64>,
}

impl LibosParams {
    /// Version 2 of the GSP LIBOS (Turing and GA100)
    const LIBOS2: LibosParams = LibosParams {
        carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2),
        allowed_heap_size: num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB)
            * num::usize_as_u64(SZ_1M)
            ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MAX_MB)
                * num::usize_as_u64(SZ_1M),
    };

    /// Version 3 of the GSP LIBOS (GA102+)
    const LIBOS3: LibosParams = LibosParams {
        carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL),
        allowed_heap_size: num::u32_as_u64(
            bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
        ) * num::usize_as_u64(SZ_1M)
            ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MAX_MB)
                * num::usize_as_u64(SZ_1M),
    };

    /// Returns the libos parameters corresponding to `chipset`.
    pub(crate) fn from_chipset(chipset: Chipset) -> &'static LibosParams {
        if chipset < Chipset::GA102 {
            &Self::LIBOS2
        } else {
            &Self::LIBOS3
        }
    }

    /// Returns the amount of memory (in bytes) to allocate for the WPR heap for a framebuffer size
    /// of `fb_size` (in bytes) for `chipset`.
    pub(crate) fn wpr_heap_size(&self, chipset: Chipset, fb_size: u64) -> u64 {
        // The WPR heap will contain the following:
        // LIBOS carveout,
        self.carveout_size
            // RM boot working memory,
            .saturating_add(GspFwHeapParams::base_rm_size(chipset))
            // One RM client,
            .saturating_add(GspFwHeapParams::client_alloc_size())
            // Overhead for memory management.
            .saturating_add(GspFwHeapParams::management_overhead(fb_size))
            // Clamp to the supported heap sizes. `allowed_heap_size.end` is exclusive, hence
            // the `- 1` for the upper bound.
            .clamp(self.allowed_heap_size.start, self.allowed_heap_size.end - 1)
    }
}

/// Structure passed to the GSP bootloader, containing the framebuffer layout as well as the DMA
/// addresses of the GSP bootloader and firmware.
#[repr(transparent)]
pub(crate) struct GspFwWprMeta(bindings::GspFwWprMeta);

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for GspFwWprMeta {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspFwWprMeta {}

// Aliases for the anonymous bindgen types nested inside `GspFwWprMeta`.
type GspFwWprMetaBootResumeInfo = r570_144::GspFwWprMeta__bindgen_ty_1;
type GspFwWprMetaBootInfo = r570_144::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;

impl GspFwWprMeta {
    /// Fill in and return a `GspFwWprMeta` suitable for booting `gsp_firmware` using the
    /// `fb_layout` layout.
    pub(crate) fn new(gsp_firmware: &GspFirmware, fb_layout: &FbLayout) -> Self {
        Self(bindings::GspFwWprMeta {
            // CAST: we want to store the bits of `GSP_FW_WPR_META_MAGIC` unmodified.
            magic: r570_144::GSP_FW_WPR_META_MAGIC as u64,
            revision: u64::from(r570_144::GSP_FW_WPR_META_REVISION),
            sysmemAddrOfRadix3Elf: gsp_firmware.radix3_dma_handle(),
            sizeOfRadix3Elf: u64::from_safe_cast(gsp_firmware.size),
            sysmemAddrOfBootloader: gsp_firmware.bootloader.ucode.dma_handle(),
            sizeOfBootloader: u64::from_safe_cast(gsp_firmware.bootloader.ucode.size()),
            bootloaderCodeOffset: u64::from(gsp_firmware.bootloader.code_offset),
            bootloaderDataOffset: u64::from(gsp_firmware.bootloader.data_offset),
            bootloaderManifestOffset: u64::from(gsp_firmware.bootloader.manifest_offset),
            __bindgen_anon_1: GspFwWprMetaBootResumeInfo {
                __bindgen_anon_1: GspFwWprMetaBootInfo {
                    sysmemAddrOfSignature: gsp_firmware.signatures.dma_handle(),
                    sizeOfSignature: u64::from_safe_cast(gsp_firmware.signatures.size()),
                },
            },
            gspFwRsvdStart: fb_layout.heap.start,
            nonWprHeapOffset: fb_layout.heap.start,
            nonWprHeapSize: fb_layout.heap.end - fb_layout.heap.start,
            gspFwWprStart: fb_layout.wpr2.start,
            gspFwHeapOffset: fb_layout.wpr2_heap.start,
            gspFwHeapSize: fb_layout.wpr2_heap.end - fb_layout.wpr2_heap.start,
            gspFwOffset: fb_layout.elf.start,
            bootBinOffset: fb_layout.boot.start,
            frtsOffset: fb_layout.frts.start,
            frtsSize: fb_layout.frts.end - fb_layout.frts.start,
            gspFwWprEnd: fb_layout
                .vga_workspace
                .start
                .align_down(Alignment::new::<SZ_128K>()),
            gspFwHeapVfPartitionCount: fb_layout.vf_partition_count,
            fbSize: fb_layout.fb.end - fb_layout.fb.start,
            vgaWorkspaceOffset: fb_layout.vga_workspace.start,
            vgaWorkspaceSize: fb_layout.vga_workspace.end - fb_layout.vga_workspace.start,
            // All remaining fields are zero/default-initialized.
            ..Default::default()
        })
    }
}

/// RPC message function codes exchanged with the GSP.
///
/// Discriminants are fixed to the raw binding values so the enum can be cast to `u32` losslessly.
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(u32)]
pub(crate) enum MsgFunction {
    // Common function codes
    Nop = bindings::NV_VGPU_MSG_FUNCTION_NOP,
    SetGuestSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO,
    AllocRoot = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT,
    AllocDevice = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE,
    AllocMemory = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
    AllocCtxDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA,
    AllocChannelDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA,
    MapMemory = bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY,
    BindCtxDma = bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA,
    AllocObject = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT,
    Free = bindings::NV_VGPU_MSG_FUNCTION_FREE,
    Log = bindings::NV_VGPU_MSG_FUNCTION_LOG,
    GetGspStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO,
    SetRegistry = bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY,
    GspSetSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO,
    GspInitPostObjGpu = bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU,
    GspRmControl = bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
    GetStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO,

    // Event codes
    GspInitDone = bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE,
    GspRunCpuSequencer = bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
    PostEvent = bindings::NV_VGPU_MSG_EVENT_POST_EVENT,
    RcTriggered = bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED,
    MmuFaultQueued = bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
    OsErrorLog = bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG,
    GspPostNoCat = bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD,
    GspLockdownNotice = bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE,
    UcodeLibOsPrint = bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT,
}

impl fmt::Display for MsgFunction {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // Common function codes
            MsgFunction::Nop => write!(f, "NOP"),
            MsgFunction::SetGuestSystemInfo => write!(f, "SET_GUEST_SYSTEM_INFO"),
            MsgFunction::AllocRoot => write!(f, "ALLOC_ROOT"),
            MsgFunction::AllocDevice => write!(f, "ALLOC_DEVICE"),
            MsgFunction::AllocMemory => write!(f, "ALLOC_MEMORY"),
            MsgFunction::AllocCtxDma => write!(f, "ALLOC_CTX_DMA"),
            MsgFunction::AllocChannelDma => write!(f, "ALLOC_CHANNEL_DMA"),
            MsgFunction::MapMemory => write!(f, "MAP_MEMORY"),
            MsgFunction::BindCtxDma => write!(f, "BIND_CTX_DMA"),
            MsgFunction::AllocObject => write!(f, "ALLOC_OBJECT"),
            MsgFunction::Free => write!(f, "FREE"),
            MsgFunction::Log => write!(f, "LOG"),
            MsgFunction::GetGspStaticInfo => write!(f, "GET_GSP_STATIC_INFO"),
            MsgFunction::SetRegistry => write!(f, "SET_REGISTRY"),
            MsgFunction::GspSetSystemInfo => write!(f, "GSP_SET_SYSTEM_INFO"),
            MsgFunction::GspInitPostObjGpu => write!(f, "GSP_INIT_POST_OBJGPU"),
            MsgFunction::GspRmControl => write!(f, "GSP_RM_CONTROL"),
            MsgFunction::GetStaticInfo => write!(f, "GET_STATIC_INFO"),

            // Event codes
            MsgFunction::GspInitDone => write!(f, "INIT_DONE"),
            MsgFunction::GspRunCpuSequencer => write!(f, "RUN_CPU_SEQUENCER"),
            MsgFunction::PostEvent => write!(f, "POST_EVENT"),
            MsgFunction::RcTriggered => write!(f, "RC_TRIGGERED"),
            MsgFunction::MmuFaultQueued => write!(f, "MMU_FAULT_QUEUED"),
            MsgFunction::OsErrorLog => write!(f, "OS_ERROR_LOG"),
            MsgFunction::GspPostNoCat => write!(f, "NOCAT"),
            MsgFunction::GspLockdownNotice => write!(f, "LOCKDOWN_NOTICE"),
            MsgFunction::UcodeLibOsPrint => write!(f, "LIBOS_PRINT"),
        }
    }
}

impl TryFrom<u32> for MsgFunction {
    type Error = kernel::error::Error;

    /// Converts a raw function code into a [`MsgFunction`], returning `EINVAL` for any value
    /// that does not match a known function or event code.
    fn try_from(value: u32) -> Result<MsgFunction> {
        match value {
            bindings::NV_VGPU_MSG_FUNCTION_NOP => Ok(MsgFunction::Nop),
            bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO => {
                Ok(MsgFunction::SetGuestSystemInfo)
            }
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT => Ok(MsgFunction::AllocRoot),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE => Ok(MsgFunction::AllocDevice),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY => Ok(MsgFunction::AllocMemory),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA => Ok(MsgFunction::AllocCtxDma),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA => Ok(MsgFunction::AllocChannelDma),
            bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY => Ok(MsgFunction::MapMemory),
            bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA => Ok(MsgFunction::BindCtxDma),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT => Ok(MsgFunction::AllocObject),
            bindings::NV_VGPU_MSG_FUNCTION_FREE => Ok(MsgFunction::Free),
            bindings::NV_VGPU_MSG_FUNCTION_LOG => Ok(MsgFunction::Log),
            bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO => Ok(MsgFunction::GetGspStaticInfo),
            bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY => Ok(MsgFunction::SetRegistry),
            bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO => Ok(MsgFunction::GspSetSystemInfo),
            bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU => {
                Ok(MsgFunction::GspInitPostObjGpu)
            }
            bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL => Ok(MsgFunction::GspRmControl),
            bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO => Ok(MsgFunction::GetStaticInfo),
            bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE => Ok(MsgFunction::GspInitDone),
            bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER => {
                Ok(MsgFunction::GspRunCpuSequencer)
            }
            bindings::NV_VGPU_MSG_EVENT_POST_EVENT => Ok(MsgFunction::PostEvent),
            bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED => Ok(MsgFunction::RcTriggered),
            bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED => Ok(MsgFunction::MmuFaultQueued),
            bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG => Ok(MsgFunction::OsErrorLog),
            bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD => Ok(MsgFunction::GspPostNoCat),
            bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE => Ok(MsgFunction::GspLockdownNotice),
            bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT => Ok(MsgFunction::UcodeLibOsPrint),
            _ => Err(EINVAL),
        }
    }
}
impl From<MsgFunction> for u32 {
    fn from(value: MsgFunction) -> Self {
        // CAST: `MsgFunction` is `repr(u32)` and can thus be cast losslessly.
        value as u32
    }
}

/// Sequencer buffer opcode for GSP sequencer commands.
///
/// Discriminants are fixed to the raw binding values so the enum can be cast to `u32` losslessly.
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(u32)]
pub(crate) enum SeqBufOpcode {
    // Core operation opcodes
    CoreReset = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
    CoreResume = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
    CoreStart = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
    CoreWaitForHalt = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,

    // Delay opcode
    DelayUs = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,

    // Register operation opcodes
    RegModify = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
    RegPoll = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
    RegStore = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
    RegWrite = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
}

impl fmt::Display for SeqBufOpcode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            SeqBufOpcode::CoreReset => write!(f, "CORE_RESET"),
            SeqBufOpcode::CoreResume => write!(f, "CORE_RESUME"),
            SeqBufOpcode::CoreStart => write!(f, "CORE_START"),
            SeqBufOpcode::CoreWaitForHalt => write!(f, "CORE_WAIT_FOR_HALT"),
            SeqBufOpcode::DelayUs => write!(f, "DELAY_US"),
            SeqBufOpcode::RegModify => write!(f, "REG_MODIFY"),
            SeqBufOpcode::RegPoll => write!(f, "REG_POLL"),
            SeqBufOpcode::RegStore => write!(f, "REG_STORE"),
            SeqBufOpcode::RegWrite => write!(f, "REG_WRITE"),
        }
    }
}

impl TryFrom<u32> for SeqBufOpcode {
    type Error = kernel::error::Error;

    /// Converts a raw opcode into a [`SeqBufOpcode`], returning `EINVAL` for any value that does
    /// not match a known opcode.
    fn try_from(value: u32) -> Result<SeqBufOpcode> {
        match value {
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
                Ok(SeqBufOpcode::CoreReset)
            }
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
                Ok(SeqBufOpcode::CoreResume)
            }
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
                Ok(SeqBufOpcode::CoreStart)
            }
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
                Ok(SeqBufOpcode::CoreWaitForHalt)
            }
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
                Ok(SeqBufOpcode::RegModify)
            }
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
            _ => Err(EINVAL),
        }
    }
}

impl From<SeqBufOpcode> for u32 {
    fn from(value: SeqBufOpcode) -> Self {
        // CAST: `SeqBufOpcode` is `repr(u32)` and can thus be cast losslessly.
        value as u32
    }
}

/// Wrapper for GSP sequencer register write payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegWritePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);

#[expect(unused)]
impl RegWritePayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the value to write.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegWritePayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegWritePayload {}

/// Wrapper for GSP sequencer register modify payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegModifyPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);

#[expect(unused)]
impl RegModifyPayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the mask to apply.
    pub(crate) fn mask(&self) -> u32 {
        self.0.mask
    }

    /// Returns the value to write.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegModifyPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegModifyPayload {}

/// Wrapper for GSP sequencer register poll payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegPollPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_POLL);

#[expect(unused)]
impl RegPollPayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the mask to apply.
    pub(crate) fn mask(&self) -> u32 {
        self.0.mask
    }

    /// Returns the expected value.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }

    /// Returns the timeout in microseconds.
    pub(crate) fn timeout(&self) -> u32 {
        self.0.timeout
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegPollPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegPollPayload {}

/// Wrapper for GSP sequencer delay payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct DelayUsPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_DELAY_US);

#[expect(unused)]
impl DelayUsPayload {
    /// Returns the delay value in microseconds.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for DelayUsPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for DelayUsPayload {}

/// Wrapper for GSP sequencer register store payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegStorePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_STORE);

#[expect(unused)]
impl RegStorePayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the storage index.
    pub(crate) fn index(&self) -> u32 {
        self.0.index
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegStorePayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegStorePayload {}

/// Wrapper for GSP sequencer buffer command.
///
/// The wrapped binding holds the opcode plus a union of the possible payloads; the accessors
/// below check the opcode before reading the corresponding union field.
#[repr(transparent)]
pub(crate) struct SequencerBufferCmd(r570_144::GSP_SEQUENCER_BUFFER_CMD);

#[expect(unused)]
impl SequencerBufferCmd {
    /// Returns the opcode as a `SeqBufOpcode` enum, or error if invalid.
    pub(crate) fn opcode(&self) -> Result<SeqBufOpcode> {
        self.0.opCode.try_into()
    }

    /// Returns the register write payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegWrite`.
    pub(crate) fn reg_write_payload(&self) -> Result<RegWritePayload> {
        if self.opcode()? != SeqBufOpcode::RegWrite {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegWrite`, so union contains valid `RegWritePayload`.
        // `addr_of!` takes the union field's address without creating a reference to it.
        let payload_bytes = unsafe {
            core::slice::from_raw_parts(
                core::ptr::addr_of!(self.0.payload.regWrite).cast::<u8>(),
                core::mem::size_of::<RegWritePayload>(),
            )
        };
        Ok(*RegWritePayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
    }

    /// Returns the register modify payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegModify`.
    pub(crate) fn reg_modify_payload(&self) -> Result<RegModifyPayload> {
        if self.opcode()? != SeqBufOpcode::RegModify {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegModify`, so union contains valid `RegModifyPayload`.
        // `addr_of!` takes the union field's address without creating a reference to it.
        let payload_bytes = unsafe {
            core::slice::from_raw_parts(
                core::ptr::addr_of!(self.0.payload.regModify).cast::<u8>(),
                core::mem::size_of::<RegModifyPayload>(),
            )
        };
        Ok(*RegModifyPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
    }

    /// Returns the register poll payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegPoll`.
    pub(crate) fn reg_poll_payload(&self) -> Result<RegPollPayload> {
        if self.opcode()? != SeqBufOpcode::RegPoll {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegPoll`, so union contains valid `RegPollPayload`.
        // `addr_of!` takes the union field's address without creating a reference to it.
        let payload_bytes = unsafe {
            core::slice::from_raw_parts(
                core::ptr::addr_of!(self.0.payload.regPoll).cast::<u8>(),
                core::mem::size_of::<RegPollPayload>(),
            )
        };
        Ok(*RegPollPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
    }

    /// Returns the delay payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::DelayUs`.
    pub(crate) fn delay_us_payload(&self) -> Result<DelayUsPayload> {
        if self.opcode()? != SeqBufOpcode::DelayUs {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `DelayUs`, so union contains valid `DelayUsPayload`.
        // `addr_of!` takes the union field's address without creating a reference to it.
        let payload_bytes = unsafe {
            core::slice::from_raw_parts(
                core::ptr::addr_of!(self.0.payload.delayUs).cast::<u8>(),
                core::mem::size_of::<DelayUsPayload>(),
            )
        };
        Ok(*DelayUsPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
    }

    /// Returns the register store payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegStore`.
    pub(crate) fn reg_store_payload(&self) -> Result<RegStorePayload> {
        if self.opcode()? != SeqBufOpcode::RegStore {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegStore`, so union contains valid `RegStorePayload`.
        // `addr_of!` takes the union field's address without creating a reference to it.
        let payload_bytes = unsafe {
            core::slice::from_raw_parts(
                core::ptr::addr_of!(self.0.payload.regStore).cast::<u8>(),
                core::mem::size_of::<RegStorePayload>(),
            )
        };
        Ok(*RegStorePayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for SequencerBufferCmd {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for SequencerBufferCmd {}

/// Wrapper for GSP run CPU sequencer RPC.
#[repr(transparent)]
pub(crate) struct RunCpuSequencer(r570_144::rpc_run_cpu_sequencer_v17_00);

#[expect(unused)]
impl RunCpuSequencer {
    /// Returns the command index.
    pub(crate) fn cmd_index(&self) -> u32 {
        self.0.cmdIndex
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RunCpuSequencer {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RunCpuSequencer {}

/// Struct containing the arguments required to pass a memory buffer to the GSP
/// for use during initialisation.
///
/// The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
/// configured for a larger page size (e.g. 64K pages), we need to give
/// the GSP an array of 4K pages. Since we only create physically contiguous
/// buffers the math to calculate the addresses is simple.
///
/// The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently
/// ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
/// buffers to be physically contiguous anyway.
///
/// The memory allocated for the arguments must remain until the GSP sends the
/// init_done RPC.
#[repr(transparent)]
pub(crate) struct LibosMemoryRegionInitArgument(bindings::LibosMemoryRegionInitArgument);

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for LibosMemoryRegionInitArgument {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for LibosMemoryRegionInitArgument {}

impl LibosMemoryRegionInitArgument {
    /// Creates an init argument describing `obj` under the identifier `name`.
    pub(crate) fn new<A: AsBytes + FromBytes>(
        name: &'static str,
        obj: &CoherentAllocation<A>,
    ) -> Self {
        /// Generates the `ID8` identifier required for some GSP objects.
        ///
        /// Packs the bytes of `name` into a `u64`, starting from the last byte of `name`;
        /// names longer than 8 bytes are truncated to their last 8 bytes.
        fn id8(name: &str) -> u64 {
            let mut bytes = [0u8; core::mem::size_of::<u64>()];

            for (c, b) in name.bytes().rev().zip(&mut bytes) {
                *b = c;
            }

            u64::from_ne_bytes(bytes)
        }

        Self(bindings::LibosMemoryRegionInitArgument {
            id8: id8(name),
            pa: obj.dma_handle(),
            size: num::usize_as_u64(obj.size()),
            kind: num::u32_into_u8::<
                { bindings::LibosMemoryRegionKind_LIBOS_MEMORY_REGION_CONTIGUOUS },
            >(),
            loc: num::u32_into_u8::<
                { bindings::LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM },
            >(),
            ..Default::default()
        })
    }
}

/// TX header for setting up a message queue with the GSP.
#[repr(transparent)]
pub(crate) struct MsgqTxHeader(bindings::msgqTxHeader);

impl MsgqTxHeader {
    /// Create a new TX queue header.
    ///
    /// # Arguments
    ///
    /// * `msgq_size` - Total size of the message queue structure, in bytes.
    /// * `rx_hdr_offset` - Offset, in bytes, of the start of the RX header in the message queue
    ///   structure.
    /// * `msg_count` - Number of messages that can be sent, i.e. the number of memory pages
    ///   allocated for the message queue in the message queue structure.
    pub(crate) fn new(msgq_size: u32, rx_hdr_offset: u32, msg_count: u32) -> Self {
        Self(bindings::msgqTxHeader {
            version: 0,
            size: msgq_size,
            msgSize: num::usize_into_u32::<GSP_PAGE_SIZE>(),
            msgCount: msg_count,
            writePtr: 0,
            flags: 1,
            rxHdrOff: rx_hdr_offset,
            entryOff: num::usize_into_u32::<GSP_PAGE_SIZE>(),
        })
    }

    /// Returns the value of the write pointer for this queue.
    ///
    /// NOTE(review): volatile access — presumably this header lives in memory shared with the
    /// GSP, which may update it concurrently; confirm against the queue setup code.
    pub(crate) fn write_ptr(&self) -> u32 {
        let ptr = core::ptr::from_ref(&self.0.writePtr);

        // SAFETY: `ptr` is a valid pointer to a `u32`.
        unsafe { ptr.read_volatile() }
    }

    /// Sets the value of the write pointer for this queue.
    ///
    /// NOTE(review): volatile access — presumably this header lives in memory shared with the
    /// GSP; confirm against the queue setup code.
    pub(crate) fn set_write_ptr(&mut self, val: u32) {
        let ptr = core::ptr::from_mut(&mut self.0.writePtr);

        // SAFETY: `ptr` is a valid pointer to a `u32`.
        unsafe { ptr.write_volatile(val) }
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for MsgqTxHeader {}

/// RX header for setting up a message queue with the GSP.
#[repr(transparent)]
pub(crate) struct MsgqRxHeader(bindings::msgqRxHeader);

/// Header for the message RX queue.
impl MsgqRxHeader {
    /// Creates a new RX queue header, with all fields zero-initialized.
    pub(crate) fn new() -> Self {
        Self(Default::default())
    }

    /// Returns the value of the read pointer for this queue.
    ///
    /// NOTE(review): volatile access — presumably this header lives in memory shared with the
    /// GSP, which may update it concurrently; confirm against the queue setup code.
    pub(crate) fn read_ptr(&self) -> u32 {
        let ptr = core::ptr::from_ref(&self.0.readPtr);

        // SAFETY: `ptr` is a valid pointer to a `u32`.
        unsafe { ptr.read_volatile() }
    }

    /// Sets the value of the read pointer for this queue.
    ///
    /// NOTE(review): volatile access — presumably this header lives in memory shared with the
    /// GSP; confirm against the queue setup code.
    pub(crate) fn set_read_ptr(&mut self, val: u32) {
        let ptr = core::ptr::from_mut(&mut self.0.readPtr);

        // SAFETY: `ptr` is a valid pointer to a `u32`.
        unsafe { ptr.write_volatile(val) }
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for MsgqRxHeader {}

bitfield! {
    /// Version field of an RPC message header: major in bits 31:24, minor in bits 23:16.
    struct MsgHeaderVersion(u32) {
        31:24 major as u8;
        23:16 minor as u8;
    }
}

impl MsgHeaderVersion {
    /// Major version of the message header format used by this driver.
    const MAJOR_TOT: u8 = 3;
    /// Minor version of the message header format used by this driver.
    const MINOR_TOT: u8 = 0;

    /// Creates a header version value set to `MAJOR_TOT.MINOR_TOT`.
    fn new() -> Self {
        Self::default()
            .set_major(Self::MAJOR_TOT)
            .set_minor(Self::MINOR_TOT)
    }
}

impl bindings::rpc_message_header_v {
    /// Returns an in-place initializer for an RPC message header carrying `function` with a
    /// command payload of `cmd_size` bytes.
    ///
    /// Fails with `EOVERFLOW`/`EINVAL` if the total length does not fit the `length` field.
    fn init(cmd_size: usize, function: MsgFunction) -> impl Init<Self, Error> {
        type RpcMessageHeader = bindings::rpc_message_header_v;

        try_init!(RpcMessageHeader {
            header_version: MsgHeaderVersion::new().into(),
            signature: bindings::NV_VGPU_MSG_SIGNATURE_VALID,
            function: function.into(),
            // `length` covers this header plus the command payload.
            length: size_of::<Self>()
                .checked_add(cmd_size)
                .ok_or(EOVERFLOW)
                .and_then(|v| v.try_into().map_err(|_| EINVAL))?,
            rpc_result: 0xffffffff,
            rpc_result_private: 0xffffffff,
            ..Zeroable::init_zeroed()
        })
    }
}

// SAFETY: We can't derive the Zeroable trait for this binding because the
// procedural macro doesn't support the syntax used by bindgen to create the
// __IncompleteArrayField types. So instead we implement it here, which is safe
// because these are explicitly padded structures only containing types for
// which any bit pattern, including all zeros, is valid.
unsafe impl Zeroable for bindings::rpc_message_header_v {}

/// GSP Message Element.
///
/// This is essentially a message header expected to be followed by the message data.
#[repr(transparent)]
pub(crate) struct GspMsgElement {
    inner: bindings::GSP_MSG_QUEUE_ELEMENT,
}

impl GspMsgElement {
    /// Creates a new message element.
    ///
    /// # Arguments
    ///
    /// * `sequence` - Sequence number of the message.
    /// * `cmd_size` - Size of the command (not including the message element), in bytes.
    /// * `function` - Function of the message.
    #[allow(non_snake_case)]
    pub(crate) fn init(
        sequence: u32,
        cmd_size: usize,
        function: MsgFunction,
    ) -> impl Init<Self, Error> {
        type RpcMessageHeader = bindings::rpc_message_header_v;
        type InnerGspMsgElement = bindings::GSP_MSG_QUEUE_ELEMENT;
        let init_inner = try_init!(InnerGspMsgElement {
            seqNum: sequence,
            // Number of GSP pages occupied by the element plus the command, rounded up.
            elemCount: size_of::<Self>()
                .checked_add(cmd_size)
                .ok_or(EOVERFLOW)?
                .div_ceil(GSP_PAGE_SIZE)
                .try_into()
                .map_err(|_| EOVERFLOW)?,
            rpc <- RpcMessageHeader::init(cmd_size, function),
            ..Zeroable::init_zeroed()
        });

        try_init!(GspMsgElement {
            inner <- init_inner,
        })
    }

    /// Sets the checksum of this message.
    ///
    /// Since the header is also part of the checksum, this is usually called after the whole
    /// message has been written to the shared memory area.
    pub(crate) fn set_checksum(&mut self, checksum: u32) {
        self.inner.checkSum = checksum;
    }

    /// Returns the total length of the message.
    pub(crate) fn length(&self) -> usize {
        // `rpc.length` includes the length of the GspRpcHeader but not the message header.
        size_of::<Self>() - size_of::<bindings::rpc_message_header_v>()
            + num::u32_as_usize(self.inner.rpc.length)
    }

    /// Returns the sequence number of the message.
    pub(crate) fn sequence(&self) -> u32 {
        self.inner.rpc.sequence
    }

    /// Returns the function of the message, if it is valid, or the invalid function number as an
    /// error.
    pub(crate) fn function(&self) -> Result<MsgFunction, u32> {
        self.inner
            .rpc
            .function
            .try_into()
            .map_err(|_| self.inner.rpc.function)
    }

    /// Returns the number of elements (i.e. memory pages) used by this message.
    pub(crate) fn element_count(&self) -> u32 {
        self.inner.elemCount
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for GspMsgElement {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspMsgElement {}

/// Arguments for GSP startup.
#[repr(transparent)]
pub(crate) struct GspArgumentsCached(bindings::GSP_ARGUMENTS_CACHED);

impl GspArgumentsCached {
    /// Creates the arguments for starting the GSP up using `cmdq` as its command queue.
    pub(crate) fn new(cmdq: &Cmdq) -> Self {
        Self(bindings::GSP_ARGUMENTS_CACHED {
            messageQueueInitArguments: MessageQueueInitArguments::new(cmdq).0,
            bDmemStack: 1,
            ..Default::default()
        })
    }
}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for GspArgumentsCached {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspArgumentsCached {}

/// Init arguments for the message queue.
#[repr(transparent)]
struct MessageQueueInitArguments(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS);

impl MessageQueueInitArguments {
    /// Creates a new init arguments structure for `cmdq`.
    fn new(cmdq: &Cmdq) -> Self {
        Self(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS {
            sharedMemPhysAddr: cmdq.dma_handle(),
            pageTableEntryCount: num::usize_into_u32::<{ Cmdq::NUM_PTES }>(),
            cmdQueueOffset: num::usize_as_u64(Cmdq::CMDQ_OFFSET),
            statQueueOffset: num::usize_as_u64(Cmdq::STATQ_OFFSET),
            ..Default::default()
        })
    }
}