1 // SPDX-License-Identifier: GPL-2.0 2 3 pub(crate) mod commands; 4 mod r570_144; 5 6 // Alias to avoid repeating the version number with every use. 7 use r570_144 as bindings; 8 9 use core::{ 10 fmt, 11 ops::Range, // 12 }; 13 14 use kernel::{ 15 dma::CoherentAllocation, 16 prelude::*, 17 ptr::{ 18 Alignable, 19 Alignment, // 20 }, 21 sizes::{ 22 SZ_128K, 23 SZ_1M, // 24 }, 25 transmute::{ 26 AsBytes, 27 FromBytes, // 28 }, 29 }; 30 31 use crate::{ 32 fb::FbLayout, 33 firmware::gsp::GspFirmware, 34 gpu::Chipset, 35 gsp::{ 36 cmdq::Cmdq, // 37 GSP_PAGE_SIZE, 38 }, 39 num::{ 40 self, 41 FromSafeCast, // 42 }, 43 }; 44 45 /// Empty type to group methods related to heap parameters for running the GSP firmware. 46 enum GspFwHeapParams {} 47 48 /// Minimum required alignment for the GSP heap. 49 const GSP_HEAP_ALIGNMENT: Alignment = Alignment::new::<{ 1 << 20 }>(); 50 51 impl GspFwHeapParams { 52 /// Returns the amount of GSP-RM heap memory used during GSP-RM boot and initialization (up to 53 /// and including the first client subdevice allocation). 54 fn base_rm_size(_chipset: Chipset) -> u64 { 55 // TODO: this needs to be updated to return the correct value for Hopper+ once support for 56 // them is added: 57 // u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100) 58 u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X) 59 } 60 61 /// Returns the amount of heap memory required to support a single channel allocation. 62 fn client_alloc_size() -> u64 { 63 u64::from(bindings::GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE) 64 .align_up(GSP_HEAP_ALIGNMENT) 65 .unwrap_or(u64::MAX) 66 } 67 68 /// Returns the amount of memory to reserve for management purposes for a framebuffer of size 69 /// `fb_size`. 
70 fn management_overhead(fb_size: u64) -> u64 { 71 let fb_size_gb = fb_size.div_ceil(u64::from_safe_cast(kernel::sizes::SZ_1G)); 72 73 u64::from(bindings::GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB) 74 .saturating_mul(fb_size_gb) 75 .align_up(GSP_HEAP_ALIGNMENT) 76 .unwrap_or(u64::MAX) 77 } 78 } 79 80 /// Heap memory requirements and constraints for a given version of the GSP LIBOS. 81 pub(crate) struct LibosParams { 82 /// The base amount of heap required by the GSP operating system, in bytes. 83 carveout_size: u64, 84 /// The minimum and maximum sizes allowed for the GSP FW heap, in bytes. 85 allowed_heap_size: Range<u64>, 86 } 87 88 impl LibosParams { 89 /// Version 2 of the GSP LIBOS (Turing and GA100) 90 const LIBOS2: LibosParams = LibosParams { 91 carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2), 92 allowed_heap_size: num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB) 93 * num::usize_as_u64(SZ_1M) 94 ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MAX_MB) 95 * num::usize_as_u64(SZ_1M), 96 }; 97 98 /// Version 3 of the GSP LIBOS (GA102+) 99 const LIBOS3: LibosParams = LibosParams { 100 carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL), 101 allowed_heap_size: num::u32_as_u64( 102 bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB, 103 ) * num::usize_as_u64(SZ_1M) 104 ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MAX_MB) 105 * num::usize_as_u64(SZ_1M), 106 }; 107 108 /// Returns the libos parameters corresponding to `chipset`. 109 pub(crate) fn from_chipset(chipset: Chipset) -> &'static LibosParams { 110 if chipset < Chipset::GA102 { 111 &Self::LIBOS2 112 } else { 113 &Self::LIBOS3 114 } 115 } 116 117 /// Returns the amount of memory (in bytes) to allocate for the WPR heap for a framebuffer size 118 /// of `fb_size` (in bytes) for `chipset`. 
119 pub(crate) fn wpr_heap_size(&self, chipset: Chipset, fb_size: u64) -> u64 { 120 // The WPR heap will contain the following: 121 // LIBOS carveout, 122 self.carveout_size 123 // RM boot working memory, 124 .saturating_add(GspFwHeapParams::base_rm_size(chipset)) 125 // One RM client, 126 .saturating_add(GspFwHeapParams::client_alloc_size()) 127 // Overhead for memory management. 128 .saturating_add(GspFwHeapParams::management_overhead(fb_size)) 129 // Clamp to the supported heap sizes. 130 .clamp(self.allowed_heap_size.start, self.allowed_heap_size.end - 1) 131 } 132 } 133 134 /// Structure passed to the GSP bootloader, containing the framebuffer layout as well as the DMA 135 /// addresses of the GSP bootloader and firmware. 136 #[repr(transparent)] 137 pub(crate) struct GspFwWprMeta(bindings::GspFwWprMeta); 138 139 // SAFETY: Padding is explicit and does not contain uninitialized data. 140 unsafe impl AsBytes for GspFwWprMeta {} 141 142 // SAFETY: This struct only contains integer types for which all bit patterns 143 // are valid. 144 unsafe impl FromBytes for GspFwWprMeta {} 145 146 type GspFwWprMetaBootResumeInfo = r570_144::GspFwWprMeta__bindgen_ty_1; 147 type GspFwWprMetaBootInfo = r570_144::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1; 148 149 impl GspFwWprMeta { 150 /// Fill in and return a `GspFwWprMeta` suitable for booting `gsp_firmware` using the 151 /// `fb_layout` layout. 152 pub(crate) fn new(gsp_firmware: &GspFirmware, fb_layout: &FbLayout) -> Self { 153 Self(bindings::GspFwWprMeta { 154 // CAST: we want to store the bits of `GSP_FW_WPR_META_MAGIC` unmodified. 
155 magic: r570_144::GSP_FW_WPR_META_MAGIC as u64, 156 revision: u64::from(r570_144::GSP_FW_WPR_META_REVISION), 157 sysmemAddrOfRadix3Elf: gsp_firmware.radix3_dma_handle(), 158 sizeOfRadix3Elf: u64::from_safe_cast(gsp_firmware.size), 159 sysmemAddrOfBootloader: gsp_firmware.bootloader.ucode.dma_handle(), 160 sizeOfBootloader: u64::from_safe_cast(gsp_firmware.bootloader.ucode.size()), 161 bootloaderCodeOffset: u64::from(gsp_firmware.bootloader.code_offset), 162 bootloaderDataOffset: u64::from(gsp_firmware.bootloader.data_offset), 163 bootloaderManifestOffset: u64::from(gsp_firmware.bootloader.manifest_offset), 164 __bindgen_anon_1: GspFwWprMetaBootResumeInfo { 165 __bindgen_anon_1: GspFwWprMetaBootInfo { 166 sysmemAddrOfSignature: gsp_firmware.signatures.dma_handle(), 167 sizeOfSignature: u64::from_safe_cast(gsp_firmware.signatures.size()), 168 }, 169 }, 170 gspFwRsvdStart: fb_layout.heap.start, 171 nonWprHeapOffset: fb_layout.heap.start, 172 nonWprHeapSize: fb_layout.heap.end - fb_layout.heap.start, 173 gspFwWprStart: fb_layout.wpr2.start, 174 gspFwHeapOffset: fb_layout.wpr2_heap.start, 175 gspFwHeapSize: fb_layout.wpr2_heap.end - fb_layout.wpr2_heap.start, 176 gspFwOffset: fb_layout.elf.start, 177 bootBinOffset: fb_layout.boot.start, 178 frtsOffset: fb_layout.frts.start, 179 frtsSize: fb_layout.frts.end - fb_layout.frts.start, 180 gspFwWprEnd: fb_layout 181 .vga_workspace 182 .start 183 .align_down(Alignment::new::<SZ_128K>()), 184 gspFwHeapVfPartitionCount: fb_layout.vf_partition_count, 185 fbSize: fb_layout.fb.end - fb_layout.fb.start, 186 vgaWorkspaceOffset: fb_layout.vga_workspace.start, 187 vgaWorkspaceSize: fb_layout.vga_workspace.end - fb_layout.vga_workspace.start, 188 ..Default::default() 189 }) 190 } 191 } 192 193 #[derive(Copy, Clone, Debug, PartialEq)] 194 #[repr(u32)] 195 pub(crate) enum MsgFunction { 196 // Common function codes 197 Nop = bindings::NV_VGPU_MSG_FUNCTION_NOP, 198 SetGuestSystemInfo = 
bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO, 199 AllocRoot = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT, 200 AllocDevice = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE, 201 AllocMemory = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY, 202 AllocCtxDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA, 203 AllocChannelDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA, 204 MapMemory = bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY, 205 BindCtxDma = bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA, 206 AllocObject = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT, 207 Free = bindings::NV_VGPU_MSG_FUNCTION_FREE, 208 Log = bindings::NV_VGPU_MSG_FUNCTION_LOG, 209 GetGspStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, 210 SetRegistry = bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY, 211 GspSetSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, 212 GspInitPostObjGpu = bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU, 213 GspRmControl = bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, 214 GetStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO, 215 216 // Event codes 217 GspInitDone = bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE, 218 GspRunCpuSequencer = bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER, 219 PostEvent = bindings::NV_VGPU_MSG_EVENT_POST_EVENT, 220 RcTriggered = bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED, 221 MmuFaultQueued = bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED, 222 OsErrorLog = bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG, 223 GspPostNoCat = bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD, 224 GspLockdownNotice = bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE, 225 UcodeLibOsPrint = bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, 226 } 227 228 impl fmt::Display for MsgFunction { 229 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 230 match self { 231 // Common function codes 232 MsgFunction::Nop => write!(f, "NOP"), 233 MsgFunction::SetGuestSystemInfo => write!(f, "SET_GUEST_SYSTEM_INFO"), 234 MsgFunction::AllocRoot => 
write!(f, "ALLOC_ROOT"), 235 MsgFunction::AllocDevice => write!(f, "ALLOC_DEVICE"), 236 MsgFunction::AllocMemory => write!(f, "ALLOC_MEMORY"), 237 MsgFunction::AllocCtxDma => write!(f, "ALLOC_CTX_DMA"), 238 MsgFunction::AllocChannelDma => write!(f, "ALLOC_CHANNEL_DMA"), 239 MsgFunction::MapMemory => write!(f, "MAP_MEMORY"), 240 MsgFunction::BindCtxDma => write!(f, "BIND_CTX_DMA"), 241 MsgFunction::AllocObject => write!(f, "ALLOC_OBJECT"), 242 MsgFunction::Free => write!(f, "FREE"), 243 MsgFunction::Log => write!(f, "LOG"), 244 MsgFunction::GetGspStaticInfo => write!(f, "GET_GSP_STATIC_INFO"), 245 MsgFunction::SetRegistry => write!(f, "SET_REGISTRY"), 246 MsgFunction::GspSetSystemInfo => write!(f, "GSP_SET_SYSTEM_INFO"), 247 MsgFunction::GspInitPostObjGpu => write!(f, "GSP_INIT_POST_OBJGPU"), 248 MsgFunction::GspRmControl => write!(f, "GSP_RM_CONTROL"), 249 MsgFunction::GetStaticInfo => write!(f, "GET_STATIC_INFO"), 250 251 // Event codes 252 MsgFunction::GspInitDone => write!(f, "INIT_DONE"), 253 MsgFunction::GspRunCpuSequencer => write!(f, "RUN_CPU_SEQUENCER"), 254 MsgFunction::PostEvent => write!(f, "POST_EVENT"), 255 MsgFunction::RcTriggered => write!(f, "RC_TRIGGERED"), 256 MsgFunction::MmuFaultQueued => write!(f, "MMU_FAULT_QUEUED"), 257 MsgFunction::OsErrorLog => write!(f, "OS_ERROR_LOG"), 258 MsgFunction::GspPostNoCat => write!(f, "NOCAT"), 259 MsgFunction::GspLockdownNotice => write!(f, "LOCKDOWN_NOTICE"), 260 MsgFunction::UcodeLibOsPrint => write!(f, "LIBOS_PRINT"), 261 } 262 } 263 } 264 265 impl TryFrom<u32> for MsgFunction { 266 type Error = kernel::error::Error; 267 268 fn try_from(value: u32) -> Result<MsgFunction> { 269 match value { 270 bindings::NV_VGPU_MSG_FUNCTION_NOP => Ok(MsgFunction::Nop), 271 bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO => { 272 Ok(MsgFunction::SetGuestSystemInfo) 273 } 274 bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT => Ok(MsgFunction::AllocRoot), 275 bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE => 
Ok(MsgFunction::AllocDevice), 276 bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY => Ok(MsgFunction::AllocMemory), 277 bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA => Ok(MsgFunction::AllocCtxDma), 278 bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA => Ok(MsgFunction::AllocChannelDma), 279 bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY => Ok(MsgFunction::MapMemory), 280 bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA => Ok(MsgFunction::BindCtxDma), 281 bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT => Ok(MsgFunction::AllocObject), 282 bindings::NV_VGPU_MSG_FUNCTION_FREE => Ok(MsgFunction::Free), 283 bindings::NV_VGPU_MSG_FUNCTION_LOG => Ok(MsgFunction::Log), 284 bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO => Ok(MsgFunction::GetGspStaticInfo), 285 bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY => Ok(MsgFunction::SetRegistry), 286 bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO => Ok(MsgFunction::GspSetSystemInfo), 287 bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU => { 288 Ok(MsgFunction::GspInitPostObjGpu) 289 } 290 bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL => Ok(MsgFunction::GspRmControl), 291 bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO => Ok(MsgFunction::GetStaticInfo), 292 bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE => Ok(MsgFunction::GspInitDone), 293 bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER => { 294 Ok(MsgFunction::GspRunCpuSequencer) 295 } 296 bindings::NV_VGPU_MSG_EVENT_POST_EVENT => Ok(MsgFunction::PostEvent), 297 bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED => Ok(MsgFunction::RcTriggered), 298 bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED => Ok(MsgFunction::MmuFaultQueued), 299 bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG => Ok(MsgFunction::OsErrorLog), 300 bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD => Ok(MsgFunction::GspPostNoCat), 301 bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE => Ok(MsgFunction::GspLockdownNotice), 302 bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT => Ok(MsgFunction::UcodeLibOsPrint), 303 _ => Err(EINVAL), 304 } 305 } 306 } 
307 308 impl From<MsgFunction> for u32 { 309 fn from(value: MsgFunction) -> Self { 310 // CAST: `MsgFunction` is `repr(u32)` and can thus be cast losslessly. 311 value as u32 312 } 313 } 314 315 /// Sequencer buffer opcode for GSP sequencer commands. 316 #[derive(Copy, Clone, Debug, PartialEq)] 317 #[repr(u32)] 318 pub(crate) enum SeqBufOpcode { 319 // Core operation opcodes 320 CoreReset = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET, 321 CoreResume = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME, 322 CoreStart = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START, 323 CoreWaitForHalt = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT, 324 325 // Delay opcode 326 DelayUs = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US, 327 328 // Register operation opcodes 329 RegModify = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY, 330 RegPoll = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL, 331 RegStore = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE, 332 RegWrite = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE, 333 } 334 335 impl fmt::Display for SeqBufOpcode { 336 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 337 match self { 338 SeqBufOpcode::CoreReset => write!(f, "CORE_RESET"), 339 SeqBufOpcode::CoreResume => write!(f, "CORE_RESUME"), 340 SeqBufOpcode::CoreStart => write!(f, "CORE_START"), 341 SeqBufOpcode::CoreWaitForHalt => write!(f, "CORE_WAIT_FOR_HALT"), 342 SeqBufOpcode::DelayUs => write!(f, "DELAY_US"), 343 SeqBufOpcode::RegModify => write!(f, "REG_MODIFY"), 344 SeqBufOpcode::RegPoll => write!(f, "REG_POLL"), 345 SeqBufOpcode::RegStore => write!(f, "REG_STORE"), 346 SeqBufOpcode::RegWrite => write!(f, "REG_WRITE"), 347 } 348 } 349 } 350 351 impl TryFrom<u32> for SeqBufOpcode { 352 type Error = kernel::error::Error; 353 354 fn try_from(value: u32) -> Result<SeqBufOpcode> { 355 match value { 356 
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => { 357 Ok(SeqBufOpcode::CoreReset) 358 } 359 r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => { 360 Ok(SeqBufOpcode::CoreResume) 361 } 362 r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => { 363 Ok(SeqBufOpcode::CoreStart) 364 } 365 r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => { 366 Ok(SeqBufOpcode::CoreWaitForHalt) 367 } 368 r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs), 369 r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => { 370 Ok(SeqBufOpcode::RegModify) 371 } 372 r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll), 373 r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore), 374 r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite), 375 _ => Err(EINVAL), 376 } 377 } 378 } 379 380 impl From<SeqBufOpcode> for u32 { 381 fn from(value: SeqBufOpcode) -> Self { 382 // CAST: `SeqBufOpcode` is `repr(u32)` and can thus be cast losslessly. 383 value as u32 384 } 385 } 386 387 /// Wrapper for GSP sequencer register write payload. 388 #[repr(transparent)] 389 #[derive(Copy, Clone)] 390 pub(crate) struct RegWritePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_WRITE); 391 392 impl RegWritePayload { 393 /// Returns the register address. 394 pub(crate) fn addr(&self) -> u32 { 395 self.0.addr 396 } 397 398 /// Returns the value to write. 399 pub(crate) fn val(&self) -> u32 { 400 self.0.val 401 } 402 } 403 404 // SAFETY: This struct only contains integer types for which all bit patterns are valid. 405 unsafe impl FromBytes for RegWritePayload {} 406 407 // SAFETY: Padding is explicit and will not contain uninitialized data. 408 unsafe impl AsBytes for RegWritePayload {} 409 410 /// Wrapper for GSP sequencer register modify payload. 
411 #[repr(transparent)] 412 #[derive(Copy, Clone)] 413 pub(crate) struct RegModifyPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY); 414 415 impl RegModifyPayload { 416 /// Returns the register address. 417 pub(crate) fn addr(&self) -> u32 { 418 self.0.addr 419 } 420 421 /// Returns the mask to apply. 422 pub(crate) fn mask(&self) -> u32 { 423 self.0.mask 424 } 425 426 /// Returns the value to write. 427 pub(crate) fn val(&self) -> u32 { 428 self.0.val 429 } 430 } 431 432 // SAFETY: This struct only contains integer types for which all bit patterns are valid. 433 unsafe impl FromBytes for RegModifyPayload {} 434 435 // SAFETY: Padding is explicit and will not contain uninitialized data. 436 unsafe impl AsBytes for RegModifyPayload {} 437 438 /// Wrapper for GSP sequencer register poll payload. 439 #[repr(transparent)] 440 #[derive(Copy, Clone)] 441 pub(crate) struct RegPollPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_POLL); 442 443 impl RegPollPayload { 444 /// Returns the register address. 445 pub(crate) fn addr(&self) -> u32 { 446 self.0.addr 447 } 448 449 /// Returns the mask to apply. 450 pub(crate) fn mask(&self) -> u32 { 451 self.0.mask 452 } 453 454 /// Returns the expected value. 455 pub(crate) fn val(&self) -> u32 { 456 self.0.val 457 } 458 459 /// Returns the timeout in microseconds. 460 pub(crate) fn timeout(&self) -> u32 { 461 self.0.timeout 462 } 463 } 464 465 // SAFETY: This struct only contains integer types for which all bit patterns are valid. 466 unsafe impl FromBytes for RegPollPayload {} 467 468 // SAFETY: Padding is explicit and will not contain uninitialized data. 469 unsafe impl AsBytes for RegPollPayload {} 470 471 /// Wrapper for GSP sequencer delay payload. 472 #[repr(transparent)] 473 #[derive(Copy, Clone)] 474 pub(crate) struct DelayUsPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_DELAY_US); 475 476 impl DelayUsPayload { 477 /// Returns the delay value in microseconds. 
478 pub(crate) fn val(&self) -> u32 { 479 self.0.val 480 } 481 } 482 483 // SAFETY: This struct only contains integer types for which all bit patterns are valid. 484 unsafe impl FromBytes for DelayUsPayload {} 485 486 // SAFETY: Padding is explicit and will not contain uninitialized data. 487 unsafe impl AsBytes for DelayUsPayload {} 488 489 /// Wrapper for GSP sequencer register store payload. 490 #[repr(transparent)] 491 #[derive(Copy, Clone)] 492 pub(crate) struct RegStorePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_STORE); 493 494 impl RegStorePayload { 495 /// Returns the register address. 496 pub(crate) fn addr(&self) -> u32 { 497 self.0.addr 498 } 499 500 /// Returns the storage index. 501 #[allow(unused)] 502 pub(crate) fn index(&self) -> u32 { 503 self.0.index 504 } 505 } 506 507 // SAFETY: This struct only contains integer types for which all bit patterns are valid. 508 unsafe impl FromBytes for RegStorePayload {} 509 510 // SAFETY: Padding is explicit and will not contain uninitialized data. 511 unsafe impl AsBytes for RegStorePayload {} 512 513 /// Wrapper for GSP sequencer buffer command. 514 #[repr(transparent)] 515 pub(crate) struct SequencerBufferCmd(r570_144::GSP_SEQUENCER_BUFFER_CMD); 516 517 impl SequencerBufferCmd { 518 /// Returns the opcode as a `SeqBufOpcode` enum, or error if invalid. 519 pub(crate) fn opcode(&self) -> Result<SeqBufOpcode> { 520 self.0.opCode.try_into() 521 } 522 523 /// Returns the register write payload by value. 524 /// 525 /// Returns an error if the opcode is not `SeqBufOpcode::RegWrite`. 526 pub(crate) fn reg_write_payload(&self) -> Result<RegWritePayload> { 527 if self.opcode()? != SeqBufOpcode::RegWrite { 528 return Err(EINVAL); 529 } 530 // SAFETY: Opcode is verified to be `RegWrite`, so union contains valid `RegWritePayload`. 
531 let payload_bytes = unsafe { 532 core::slice::from_raw_parts( 533 core::ptr::addr_of!(self.0.payload.regWrite).cast::<u8>(), 534 core::mem::size_of::<RegWritePayload>(), 535 ) 536 }; 537 Ok(*RegWritePayload::from_bytes(payload_bytes).ok_or(EINVAL)?) 538 } 539 540 /// Returns the register modify payload by value. 541 /// 542 /// Returns an error if the opcode is not `SeqBufOpcode::RegModify`. 543 pub(crate) fn reg_modify_payload(&self) -> Result<RegModifyPayload> { 544 if self.opcode()? != SeqBufOpcode::RegModify { 545 return Err(EINVAL); 546 } 547 // SAFETY: Opcode is verified to be `RegModify`, so union contains valid `RegModifyPayload`. 548 let payload_bytes = unsafe { 549 core::slice::from_raw_parts( 550 core::ptr::addr_of!(self.0.payload.regModify).cast::<u8>(), 551 core::mem::size_of::<RegModifyPayload>(), 552 ) 553 }; 554 Ok(*RegModifyPayload::from_bytes(payload_bytes).ok_or(EINVAL)?) 555 } 556 557 /// Returns the register poll payload by value. 558 /// 559 /// Returns an error if the opcode is not `SeqBufOpcode::RegPoll`. 560 pub(crate) fn reg_poll_payload(&self) -> Result<RegPollPayload> { 561 if self.opcode()? != SeqBufOpcode::RegPoll { 562 return Err(EINVAL); 563 } 564 // SAFETY: Opcode is verified to be `RegPoll`, so union contains valid `RegPollPayload`. 565 let payload_bytes = unsafe { 566 core::slice::from_raw_parts( 567 core::ptr::addr_of!(self.0.payload.regPoll).cast::<u8>(), 568 core::mem::size_of::<RegPollPayload>(), 569 ) 570 }; 571 Ok(*RegPollPayload::from_bytes(payload_bytes).ok_or(EINVAL)?) 572 } 573 574 /// Returns the delay payload by value. 575 /// 576 /// Returns an error if the opcode is not `SeqBufOpcode::DelayUs`. 577 pub(crate) fn delay_us_payload(&self) -> Result<DelayUsPayload> { 578 if self.opcode()? != SeqBufOpcode::DelayUs { 579 return Err(EINVAL); 580 } 581 // SAFETY: Opcode is verified to be `DelayUs`, so union contains valid `DelayUsPayload`. 
582 let payload_bytes = unsafe { 583 core::slice::from_raw_parts( 584 core::ptr::addr_of!(self.0.payload.delayUs).cast::<u8>(), 585 core::mem::size_of::<DelayUsPayload>(), 586 ) 587 }; 588 Ok(*DelayUsPayload::from_bytes(payload_bytes).ok_or(EINVAL)?) 589 } 590 591 /// Returns the register store payload by value. 592 /// 593 /// Returns an error if the opcode is not `SeqBufOpcode::RegStore`. 594 pub(crate) fn reg_store_payload(&self) -> Result<RegStorePayload> { 595 if self.opcode()? != SeqBufOpcode::RegStore { 596 return Err(EINVAL); 597 } 598 // SAFETY: Opcode is verified to be `RegStore`, so union contains valid `RegStorePayload`. 599 let payload_bytes = unsafe { 600 core::slice::from_raw_parts( 601 core::ptr::addr_of!(self.0.payload.regStore).cast::<u8>(), 602 core::mem::size_of::<RegStorePayload>(), 603 ) 604 }; 605 Ok(*RegStorePayload::from_bytes(payload_bytes).ok_or(EINVAL)?) 606 } 607 } 608 609 // SAFETY: This struct only contains integer types for which all bit patterns are valid. 610 unsafe impl FromBytes for SequencerBufferCmd {} 611 612 // SAFETY: Padding is explicit and will not contain uninitialized data. 613 unsafe impl AsBytes for SequencerBufferCmd {} 614 615 /// Wrapper for GSP run CPU sequencer RPC. 616 #[repr(transparent)] 617 pub(crate) struct RunCpuSequencer(r570_144::rpc_run_cpu_sequencer_v17_00); 618 619 impl RunCpuSequencer { 620 /// Returns the command index. 621 pub(crate) fn cmd_index(&self) -> u32 { 622 self.0.cmdIndex 623 } 624 } 625 626 // SAFETY: This struct only contains integer types for which all bit patterns are valid. 627 unsafe impl FromBytes for RunCpuSequencer {} 628 629 // SAFETY: Padding is explicit and will not contain uninitialized data. 630 unsafe impl AsBytes for RunCpuSequencer {} 631 632 /// Struct containing the arguments required to pass a memory buffer to the GSP 633 /// for use during initialisation. 
///
/// The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
/// configured for a larger page size (e.g. 64K pages), we need to give
/// the GSP an array of 4K pages. Since we only create physically contiguous
/// buffers the math to calculate the addresses is simple.
///
/// The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently
/// ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
/// buffers to be physically contiguous anyway.
///
/// The memory allocated for the arguments must remain until the GSP sends the
/// init_done RPC.
#[repr(transparent)]
pub(crate) struct LibosMemoryRegionInitArgument(bindings::LibosMemoryRegionInitArgument);

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for LibosMemoryRegionInitArgument {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for LibosMemoryRegionInitArgument {}

impl LibosMemoryRegionInitArgument {
    /// Creates the init argument describing the coherent allocation `obj`, identified by `name`.
    pub(crate) fn new<A: AsBytes + FromBytes>(
        name: &'static str,
        obj: &CoherentAllocation<A>,
    ) -> Self {
        /// Generates the `ID8` identifier required for some GSP objects.
        fn id8(name: &str) -> u64 {
            let mut bytes = [0u8; core::mem::size_of::<u64>()];

            // Walk `name` backwards so its last byte lands in `bytes[0]`; names longer than
            // 8 bytes have their leading bytes dropped by `zip` stopping at the array's end.
            for (c, b) in name.bytes().rev().zip(&mut bytes) {
                *b = c;
            }

            u64::from_ne_bytes(bytes)
        }

        Self(bindings::LibosMemoryRegionInitArgument {
            id8: id8(name),
            pa: obj.dma_handle(),
            size: num::usize_as_u64(obj.size()),
            // Our buffers are always physically contiguous (see struct documentation).
            kind: num::u32_into_u8::<
                { bindings::LibosMemoryRegionKind_LIBOS_MEMORY_REGION_CONTIGUOUS },
            >(),
            loc: num::u32_into_u8::<
                { bindings::LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM },
            >(),
            ..Default::default()
        })
    }
}

/// TX header for setting up a message queue with the GSP.
#[repr(transparent)]
pub(crate) struct MsgqTxHeader(bindings::msgqTxHeader);

impl MsgqTxHeader {
    /// Create a new TX queue header.
    ///
    /// # Arguments
    ///
    /// * `msgq_size` - Total size of the message queue structure, in bytes.
    /// * `rx_hdr_offset` - Offset, in bytes, of the start of the RX header in the message queue
    ///   structure.
    /// * `msg_count` - Number of messages that can be sent, i.e. the number of memory pages
    ///   allocated for the message queue in the message queue structure.
    pub(crate) fn new(msgq_size: u32, rx_hdr_offset: u32, msg_count: u32) -> Self {
        Self(bindings::msgqTxHeader {
            version: 0,
            size: msgq_size,
            // Each message slot is exactly one GSP page.
            msgSize: num::usize_into_u32::<GSP_PAGE_SIZE>(),
            msgCount: msg_count,
            writePtr: 0,
            // NOTE(review): flag value `1` is taken as-is from the msgq setup; its exact
            // meaning is not visible from this file — confirm against the GSP-RM msgq headers.
            flags: 1,
            rxHdrOff: rx_hdr_offset,
            // Message entries begin one GSP page after the start of the queue structure.
            entryOff: num::usize_into_u32::<GSP_PAGE_SIZE>(),
        })
    }

    /// Returns the value of the write pointer for this queue.
    ///
    /// The access is volatile so the compiler re-reads the value on every call instead of
    /// caching it; presumably this field lives in memory shared with the GSP — TODO confirm.
    pub(crate) fn write_ptr(&self) -> u32 {
        let ptr = core::ptr::from_ref(&self.0.writePtr);

        // SAFETY: `ptr` is a valid pointer to a `u32`.
        unsafe { ptr.read_volatile() }
    }

    /// Sets the value of the write pointer for this queue.
    pub(crate) fn set_write_ptr(&mut self, val: u32) {
        let ptr = core::ptr::from_mut(&mut self.0.writePtr);

        // SAFETY: `ptr` is a valid pointer to a `u32`.
        unsafe { ptr.write_volatile(val) }
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for MsgqTxHeader {}

/// RX header for setting up a message queue with the GSP.
#[repr(transparent)]
pub(crate) struct MsgqRxHeader(bindings::msgqRxHeader);

/// Header for the message RX queue.
impl MsgqRxHeader {
    /// Creates a new RX queue header, with all fields zeroed.
    pub(crate) fn new() -> Self {
        Self(Default::default())
    }

    /// Returns the value of the read pointer for this queue.
    ///
    /// Volatile for the same reason as [`MsgqTxHeader::write_ptr`].
    pub(crate) fn read_ptr(&self) -> u32 {
        let ptr = core::ptr::from_ref(&self.0.readPtr);

        // SAFETY: `ptr` is a valid pointer to a `u32`.
        unsafe { ptr.read_volatile() }
    }

    /// Sets the value of the read pointer for this queue.
    pub(crate) fn set_read_ptr(&mut self, val: u32) {
        let ptr = core::ptr::from_mut(&mut self.0.readPtr);

        // SAFETY: `ptr` is a valid pointer to a `u32`.
        unsafe { ptr.write_volatile(val) }
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for MsgqRxHeader {}

// RPC message header version field: major version in bits 31:24, minor in bits 23:16.
bitfield! {
    struct MsgHeaderVersion(u32) {
        31:24 major as u8;
        23:16 minor as u8;
    }
}

impl MsgHeaderVersion {
    // Version (major.minor = 3.0) stamped into outgoing RPC headers.
    const MAJOR_TOT: u8 = 3;
    const MINOR_TOT: u8 = 0;

    /// Returns the header version value used for all RPC messages sent by this driver.
    fn new() -> Self {
        Self::default()
            .set_major(Self::MAJOR_TOT)
            .set_minor(Self::MINOR_TOT)
    }
}

impl bindings::rpc_message_header_v {
    /// Returns an initializer for an RPC message header carrying `function`, with a command
    /// payload of `cmd_size` bytes.
    ///
    /// Fails with `EOVERFLOW` if `size_of::<Self>() + cmd_size` overflows `usize`, or `EINVAL`
    /// if the total does not fit into the header's `length` field.
    fn init(cmd_size: usize, function: MsgFunction) -> impl Init<Self, Error> {
        type RpcMessageHeader = bindings::rpc_message_header_v;

        try_init!(RpcMessageHeader {
            header_version: MsgHeaderVersion::new().into(),
            signature: bindings::NV_VGPU_MSG_SIGNATURE_VALID,
            function: function.into(),
            // Total message length: this header plus the command payload.
            length: size_of::<Self>()
                .checked_add(cmd_size)
                .ok_or(EOVERFLOW)
                .and_then(|v| v.try_into().map_err(|_| EINVAL))?,
            // NOTE(review): result fields pre-set to all-ones, presumably to be overwritten by
            // the GSP when it replies — confirm against the RPC protocol definition.
            rpc_result: 0xffffffff,
            rpc_result_private: 0xffffffff,
            ..Zeroable::init_zeroed()
        })
    }
}

// SAFETY: We can't derive the Zeroable trait for this binding because the
// procedural macro doesn't support the syntax used by bindgen to create the
// __IncompleteArrayField types. So instead we implement it here, which is safe
// because these are explicitly padded structures only containing types for
// which any bit pattern, including all zeros, is valid.
807 unsafe impl Zeroable for bindings::rpc_message_header_v {} 808 809 /// GSP Message Element. 810 /// 811 /// This is essentially a message header expected to be followed by the message data. 812 #[repr(transparent)] 813 pub(crate) struct GspMsgElement { 814 inner: bindings::GSP_MSG_QUEUE_ELEMENT, 815 } 816 817 impl GspMsgElement { 818 /// Creates a new message element. 819 /// 820 /// # Arguments 821 /// 822 /// * `sequence` - Sequence number of the message. 823 /// * `cmd_size` - Size of the command (not including the message element), in bytes. 824 /// * `function` - Function of the message. 825 #[allow(non_snake_case)] 826 pub(crate) fn init( 827 sequence: u32, 828 cmd_size: usize, 829 function: MsgFunction, 830 ) -> impl Init<Self, Error> { 831 type RpcMessageHeader = bindings::rpc_message_header_v; 832 type InnerGspMsgElement = bindings::GSP_MSG_QUEUE_ELEMENT; 833 let init_inner = try_init!(InnerGspMsgElement { 834 seqNum: sequence, 835 elemCount: size_of::<Self>() 836 .checked_add(cmd_size) 837 .ok_or(EOVERFLOW)? 838 .div_ceil(GSP_PAGE_SIZE) 839 .try_into() 840 .map_err(|_| EOVERFLOW)?, 841 rpc <- RpcMessageHeader::init(cmd_size, function), 842 ..Zeroable::init_zeroed() 843 }); 844 845 try_init!(GspMsgElement { 846 inner <- init_inner, 847 }) 848 } 849 850 /// Sets the checksum of this message. 851 /// 852 /// Since the header is also part of the checksum, this is usually called after the whole 853 /// message has been written to the shared memory area. 854 pub(crate) fn set_checksum(&mut self, checksum: u32) { 855 self.inner.checkSum = checksum; 856 } 857 858 /// Returns the total length of the message. 859 pub(crate) fn length(&self) -> usize { 860 // `rpc.length` includes the length of the GspRpcHeader but not the message header. 861 size_of::<Self>() - size_of::<bindings::rpc_message_header_v>() 862 + num::u32_as_usize(self.inner.rpc.length) 863 } 864 865 // Returns the sequence number of the message. 
866 pub(crate) fn sequence(&self) -> u32 { 867 self.inner.rpc.sequence 868 } 869 870 // Returns the function of the message, if it is valid, or the invalid function number as an 871 // error. 872 pub(crate) fn function(&self) -> Result<MsgFunction, u32> { 873 self.inner 874 .rpc 875 .function 876 .try_into() 877 .map_err(|_| self.inner.rpc.function) 878 } 879 880 // Returns the number of elements (i.e. memory pages) used by this message. 881 pub(crate) fn element_count(&self) -> u32 { 882 self.inner.elemCount 883 } 884 } 885 886 // SAFETY: Padding is explicit and does not contain uninitialized data. 887 unsafe impl AsBytes for GspMsgElement {} 888 889 // SAFETY: This struct only contains integer types for which all bit patterns 890 // are valid. 891 unsafe impl FromBytes for GspMsgElement {} 892 893 /// Arguments for GSP startup. 894 #[repr(transparent)] 895 pub(crate) struct GspArgumentsCached(bindings::GSP_ARGUMENTS_CACHED); 896 897 impl GspArgumentsCached { 898 /// Creates the arguments for starting the GSP up using `cmdq` as its command queue. 899 pub(crate) fn new(cmdq: &Cmdq) -> Self { 900 Self(bindings::GSP_ARGUMENTS_CACHED { 901 messageQueueInitArguments: MessageQueueInitArguments::new(cmdq).0, 902 bDmemStack: 1, 903 ..Default::default() 904 }) 905 } 906 } 907 908 // SAFETY: Padding is explicit and will not contain uninitialized data. 909 unsafe impl AsBytes for GspArgumentsCached {} 910 911 // SAFETY: This struct only contains integer types for which all bit patterns 912 // are valid. 913 unsafe impl FromBytes for GspArgumentsCached {} 914 915 /// Init arguments for the message queue. 916 #[repr(transparent)] 917 struct MessageQueueInitArguments(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS); 918 919 impl MessageQueueInitArguments { 920 /// Creates a new init arguments structure for `cmdq`. 
921 fn new(cmdq: &Cmdq) -> Self { 922 Self(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS { 923 sharedMemPhysAddr: cmdq.dma_handle(), 924 pageTableEntryCount: num::usize_into_u32::<{ Cmdq::NUM_PTES }>(), 925 cmdQueueOffset: num::usize_as_u64(Cmdq::CMDQ_OFFSET), 926 statQueueOffset: num::usize_as_u64(Cmdq::STATQ_OFFSET), 927 ..Default::default() 928 }) 929 } 930 } 931