// SPDX-License-Identifier: GPL-2.0

pub(crate) mod commands;
mod r570_144;

// Alias to avoid repeating the version number with every use.
use r570_144 as bindings;

use core::ops::Range;

use kernel::{
    dma::CoherentAllocation,
    fmt,
    prelude::*,
    ptr::{
        Alignable,
        Alignment, //
    },
    sizes::{
        SZ_128K,
        SZ_1M, //
    },
    transmute::{
        AsBytes,
        FromBytes, //
    },
};

use crate::{
    fb::FbLayout,
    firmware::gsp::GspFirmware,
    gpu::Chipset,
    gsp::{
        cmdq::Cmdq, //
        GSP_PAGE_SIZE,
    },
    num::{
        self,
        FromSafeCast, //
    },
};

/// Empty type to group methods related to heap parameters for running the GSP firmware.
enum GspFwHeapParams {}

/// Minimum required alignment for the GSP heap.
const GSP_HEAP_ALIGNMENT: Alignment = Alignment::new::<{ 1 << 20 }>();

impl GspFwHeapParams {
    /// Returns the amount of GSP-RM heap memory used during GSP-RM boot and initialization (up to
    /// and including the first client subdevice allocation).
    fn base_rm_size(_chipset: Chipset) -> u64 {
        // TODO: this needs to be updated to return the correct value for Hopper+ once support for
        // them is added:
        // u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100)
        u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X)
    }

    /// Returns the amount of heap memory required to support a single channel allocation.
    ///
    /// The binding constant is rounded up to [`GSP_HEAP_ALIGNMENT`]; on (practically impossible)
    /// alignment overflow the result saturates to `u64::MAX`.
    fn client_alloc_size() -> u64 {
        u64::from(bindings::GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE)
            .align_up(GSP_HEAP_ALIGNMENT)
            .unwrap_or(u64::MAX)
    }

    /// Returns the amount of memory to reserve for management purposes for a framebuffer of size
    /// `fb_size`.
    fn management_overhead(fb_size: u64) -> u64 {
        // Per-GB overhead, with a partial final GB counted as a whole one.
        let fb_size_gb = fb_size.div_ceil(u64::from_safe_cast(kernel::sizes::SZ_1G));

        u64::from(bindings::GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB)
            .saturating_mul(fb_size_gb)
            .align_up(GSP_HEAP_ALIGNMENT)
            .unwrap_or(u64::MAX)
    }
}

/// Heap memory requirements and constraints for a given version of the GSP LIBOS.
pub(crate) struct LibosParams {
    /// The base amount of heap required by the GSP operating system, in bytes.
    carveout_size: u64,
    /// The minimum and maximum sizes allowed for the GSP FW heap, in bytes.
    allowed_heap_size: Range<u64>,
}

impl LibosParams {
    /// Version 2 of the GSP LIBOS (Turing and GA100)
    const LIBOS2: LibosParams = LibosParams {
        carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2),
        allowed_heap_size: num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB)
            * num::usize_as_u64(SZ_1M)
            ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MAX_MB)
                * num::usize_as_u64(SZ_1M),
    };

    /// Version 3 of the GSP LIBOS (GA102+)
    const LIBOS3: LibosParams = LibosParams {
        carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL),
        allowed_heap_size: num::u32_as_u64(
            bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
        ) * num::usize_as_u64(SZ_1M)
            ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MAX_MB)
                * num::usize_as_u64(SZ_1M),
    };

    /// Returns the libos parameters corresponding to `chipset`.
    pub(crate) fn from_chipset(chipset: Chipset) -> &'static LibosParams {
        if chipset < Chipset::GA102 {
            &Self::LIBOS2
        } else {
            &Self::LIBOS3
        }
    }

    /// Returns the amount of memory (in bytes) to allocate for the WPR heap for a framebuffer size
    /// of `fb_size` (in bytes) for `chipset`.
    pub(crate) fn wpr_heap_size(&self, chipset: Chipset, fb_size: u64) -> u64 {
        // The WPR heap will contain the following:
        // LIBOS carveout,
        self.carveout_size
            // RM boot working memory,
            .saturating_add(GspFwHeapParams::base_rm_size(chipset))
            // One RM client,
            .saturating_add(GspFwHeapParams::client_alloc_size())
            // Overhead for memory management.
            .saturating_add(GspFwHeapParams::management_overhead(fb_size))
            // Clamp to the supported heap sizes. `allowed_heap_size.end` is exclusive, hence
            // the `- 1` to obtain the largest allowed value.
            .clamp(self.allowed_heap_size.start, self.allowed_heap_size.end - 1)
    }
}

/// Structure passed to the GSP bootloader, containing the framebuffer layout as well as the DMA
/// addresses of the GSP bootloader and firmware.
#[repr(transparent)]
pub(crate) struct GspFwWprMeta(bindings::GspFwWprMeta);

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for GspFwWprMeta {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspFwWprMeta {}

// Short aliases for the anonymous union/struct types bindgen generates for the boot/resume
// information nested inside `GspFwWprMeta`.
type GspFwWprMetaBootResumeInfo = r570_144::GspFwWprMeta__bindgen_ty_1;
type GspFwWprMetaBootInfo = r570_144::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;

impl GspFwWprMeta {
    /// Fill in and return a `GspFwWprMeta` suitable for booting `gsp_firmware` using the
    /// `fb_layout` layout.
    pub(crate) fn new(gsp_firmware: &GspFirmware, fb_layout: &FbLayout) -> Self {
        Self(bindings::GspFwWprMeta {
            // CAST: we want to store the bits of `GSP_FW_WPR_META_MAGIC` unmodified.
            magic: r570_144::GSP_FW_WPR_META_MAGIC as u64,
            revision: u64::from(r570_144::GSP_FW_WPR_META_REVISION),
            sysmemAddrOfRadix3Elf: gsp_firmware.radix3_dma_handle(),
            sizeOfRadix3Elf: u64::from_safe_cast(gsp_firmware.size),
            sysmemAddrOfBootloader: gsp_firmware.bootloader.ucode.dma_handle(),
            sizeOfBootloader: u64::from_safe_cast(gsp_firmware.bootloader.ucode.size()),
            bootloaderCodeOffset: u64::from(gsp_firmware.bootloader.code_offset),
            bootloaderDataOffset: u64::from(gsp_firmware.bootloader.data_offset),
            bootloaderManifestOffset: u64::from(gsp_firmware.bootloader.manifest_offset),
            __bindgen_anon_1: GspFwWprMetaBootResumeInfo {
                __bindgen_anon_1: GspFwWprMetaBootInfo {
                    sysmemAddrOfSignature: gsp_firmware.signatures.dma_handle(),
                    sizeOfSignature: u64::from_safe_cast(gsp_firmware.signatures.size()),
                },
            },
            gspFwRsvdStart: fb_layout.heap.start,
            nonWprHeapOffset: fb_layout.heap.start,
            nonWprHeapSize: fb_layout.heap.end - fb_layout.heap.start,
            gspFwWprStart: fb_layout.wpr2.start,
            gspFwHeapOffset: fb_layout.wpr2_heap.start,
            gspFwHeapSize: fb_layout.wpr2_heap.end - fb_layout.wpr2_heap.start,
            gspFwOffset: fb_layout.elf.start,
            bootBinOffset: fb_layout.boot.start,
            frtsOffset: fb_layout.frts.start,
            frtsSize: fb_layout.frts.end - fb_layout.frts.start,
            gspFwWprEnd: fb_layout
                .vga_workspace
                .start
                .align_down(Alignment::new::<SZ_128K>()),
            gspFwHeapVfPartitionCount: fb_layout.vf_partition_count,
            fbSize: fb_layout.fb.end - fb_layout.fb.start,
            vgaWorkspaceOffset: fb_layout.vga_workspace.start,
            vgaWorkspaceSize: fb_layout.vga_workspace.end - fb_layout.vga_workspace.start,
            ..Default::default()
        })
    }
}

/// Function and event identifiers for RPC messages exchanged with the GSP.
///
/// Discriminants mirror the `NV_VGPU_MSG_FUNCTION_*` (driver-issued commands) and
/// `NV_VGPU_MSG_EVENT_*` (GSP-issued events) binding constants.
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(u32)]
pub(crate) enum MsgFunction {
    // Common function codes
    Nop = bindings::NV_VGPU_MSG_FUNCTION_NOP,
    SetGuestSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO,
    AllocRoot = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT,
    AllocDevice = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE,
    AllocMemory = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
    AllocCtxDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA,
    AllocChannelDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA,
    MapMemory = bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY,
    BindCtxDma = bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA,
    AllocObject = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT,
    Free = bindings::NV_VGPU_MSG_FUNCTION_FREE,
    Log = bindings::NV_VGPU_MSG_FUNCTION_LOG,
    GetGspStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO,
    SetRegistry = bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY,
    GspSetSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO,
    GspInitPostObjGpu = bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU,
    GspRmControl = bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
    GetStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO,

    // Event codes
    GspInitDone = bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE,
    GspRunCpuSequencer = bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
    PostEvent = bindings::NV_VGPU_MSG_EVENT_POST_EVENT,
    RcTriggered = bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED,
    MmuFaultQueued = bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
    OsErrorLog = bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG,
    GspPostNoCat = bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD,
    GspLockdownNotice = bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE,
    UcodeLibOsPrint = bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT,
}

// Display a short human-readable name for each function/event, mostly for diagnostics.
impl fmt::Display for MsgFunction {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // Common function codes
            MsgFunction::Nop => write!(f, "NOP"),
            MsgFunction::SetGuestSystemInfo => write!(f, "SET_GUEST_SYSTEM_INFO"),
            MsgFunction::AllocRoot => write!(f, "ALLOC_ROOT"),
            MsgFunction::AllocDevice => write!(f, "ALLOC_DEVICE"),
            MsgFunction::AllocMemory => write!(f, "ALLOC_MEMORY"),
            MsgFunction::AllocCtxDma => write!(f, "ALLOC_CTX_DMA"),
            MsgFunction::AllocChannelDma => write!(f, "ALLOC_CHANNEL_DMA"),
            MsgFunction::MapMemory => write!(f, "MAP_MEMORY"),
            MsgFunction::BindCtxDma => write!(f, "BIND_CTX_DMA"),
            MsgFunction::AllocObject => write!(f, "ALLOC_OBJECT"),
            MsgFunction::Free => write!(f, "FREE"),
            MsgFunction::Log => write!(f, "LOG"),
            MsgFunction::GetGspStaticInfo => write!(f, "GET_GSP_STATIC_INFO"),
            MsgFunction::SetRegistry => write!(f, "SET_REGISTRY"),
            MsgFunction::GspSetSystemInfo => write!(f, "GSP_SET_SYSTEM_INFO"),
            MsgFunction::GspInitPostObjGpu => write!(f, "GSP_INIT_POST_OBJGPU"),
            MsgFunction::GspRmControl => write!(f, "GSP_RM_CONTROL"),
            MsgFunction::GetStaticInfo => write!(f, "GET_STATIC_INFO"),

            // Event codes
            MsgFunction::GspInitDone => write!(f, "INIT_DONE"),
            MsgFunction::GspRunCpuSequencer => write!(f, "RUN_CPU_SEQUENCER"),
            MsgFunction::PostEvent => write!(f, "POST_EVENT"),
            MsgFunction::RcTriggered => write!(f, "RC_TRIGGERED"),
            MsgFunction::MmuFaultQueued => write!(f, "MMU_FAULT_QUEUED"),
            MsgFunction::OsErrorLog => write!(f, "OS_ERROR_LOG"),
            MsgFunction::GspPostNoCat => write!(f, "NOCAT"),
            MsgFunction::GspLockdownNotice => write!(f, "LOCKDOWN_NOTICE"),
            MsgFunction::UcodeLibOsPrint => write!(f, "LIBOS_PRINT"),
        }
    }
}

impl TryFrom<u32> for MsgFunction {
    type Error = kernel::error::Error;

    /// Converts a raw function/event code received from the GSP into a [`MsgFunction`].
    ///
    /// Returns `EINVAL` if `value` does not match any known code.
    fn try_from(value: u32) -> Result<MsgFunction> {
        match value {
            bindings::NV_VGPU_MSG_FUNCTION_NOP => Ok(MsgFunction::Nop),
            bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO => {
                Ok(MsgFunction::SetGuestSystemInfo)
            }
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT => Ok(MsgFunction::AllocRoot),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE => Ok(MsgFunction::AllocDevice),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY => Ok(MsgFunction::AllocMemory),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA => Ok(MsgFunction::AllocCtxDma),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA => Ok(MsgFunction::AllocChannelDma),
            bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY => Ok(MsgFunction::MapMemory),
            bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA => Ok(MsgFunction::BindCtxDma),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT => Ok(MsgFunction::AllocObject),
            bindings::NV_VGPU_MSG_FUNCTION_FREE => Ok(MsgFunction::Free),
            bindings::NV_VGPU_MSG_FUNCTION_LOG => Ok(MsgFunction::Log),
            bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO => Ok(MsgFunction::GetGspStaticInfo),
            bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY => Ok(MsgFunction::SetRegistry),
            bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO => Ok(MsgFunction::GspSetSystemInfo),
            bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU => {
                Ok(MsgFunction::GspInitPostObjGpu)
            }
            bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL => Ok(MsgFunction::GspRmControl),
            bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO => Ok(MsgFunction::GetStaticInfo),
            bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE => Ok(MsgFunction::GspInitDone),
            bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER => {
                Ok(MsgFunction::GspRunCpuSequencer)
            }
            bindings::NV_VGPU_MSG_EVENT_POST_EVENT => Ok(MsgFunction::PostEvent),
            bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED => Ok(MsgFunction::RcTriggered),
            bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED => Ok(MsgFunction::MmuFaultQueued),
            bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG => Ok(MsgFunction::OsErrorLog),
            bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD => Ok(MsgFunction::GspPostNoCat),
            bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE => Ok(MsgFunction::GspLockdownNotice),
            bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT => Ok(MsgFunction::UcodeLibOsPrint),
            _ => Err(EINVAL),
        }
    }
}
impl From<MsgFunction> for u32 {
    fn from(value: MsgFunction) -> Self {
        // CAST: `MsgFunction` is `repr(u32)` and can thus be cast losslessly.
        value as u32
    }
}

/// Sequencer buffer opcode for GSP sequencer commands.
///
/// Discriminants mirror the `GSP_SEQ_BUF_OPCODE_*` binding constants.
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(u32)]
pub(crate) enum SeqBufOpcode {
    // Core operation opcodes
    CoreReset = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
    CoreResume = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
    CoreStart = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
    CoreWaitForHalt = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,

    // Delay opcode
    DelayUs = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,

    // Register operation opcodes
    RegModify = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
    RegPoll = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
    RegStore = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
    RegWrite = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
}

// Display a short human-readable name for each opcode, mostly for diagnostics.
impl fmt::Display for SeqBufOpcode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            SeqBufOpcode::CoreReset => write!(f, "CORE_RESET"),
            SeqBufOpcode::CoreResume => write!(f, "CORE_RESUME"),
            SeqBufOpcode::CoreStart => write!(f, "CORE_START"),
            SeqBufOpcode::CoreWaitForHalt => write!(f, "CORE_WAIT_FOR_HALT"),
            SeqBufOpcode::DelayUs => write!(f, "DELAY_US"),
            SeqBufOpcode::RegModify => write!(f, "REG_MODIFY"),
            SeqBufOpcode::RegPoll => write!(f, "REG_POLL"),
            SeqBufOpcode::RegStore => write!(f, "REG_STORE"),
            SeqBufOpcode::RegWrite => write!(f, "REG_WRITE"),
        }
    }
}

impl TryFrom<u32> for SeqBufOpcode {
    type Error = kernel::error::Error;

    /// Converts a raw sequencer opcode into a [`SeqBufOpcode`].
    ///
    /// Returns `EINVAL` if `value` does not match any known opcode.
    fn try_from(value: u32) -> Result<SeqBufOpcode> {
        match value {
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
                Ok(SeqBufOpcode::CoreReset)
            }
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
                Ok(SeqBufOpcode::CoreResume)
            }
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
                Ok(SeqBufOpcode::CoreStart)
            }
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
                Ok(SeqBufOpcode::CoreWaitForHalt)
            }
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
                Ok(SeqBufOpcode::RegModify)
            }
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
            r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
            _ => Err(EINVAL),
        }
    }
}

impl From<SeqBufOpcode> for u32 {
    fn from(value: SeqBufOpcode) -> Self {
        // CAST: `SeqBufOpcode` is `repr(u32)` and can thus be cast losslessly.
        value as u32
    }
}

/// Wrapper for GSP sequencer register write payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegWritePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);

impl RegWritePayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the value to write.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegWritePayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegWritePayload {}

/// Wrapper for GSP sequencer register modify payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegModifyPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);

impl RegModifyPayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the mask to apply.
    pub(crate) fn mask(&self) -> u32 {
        self.0.mask
    }

    /// Returns the value to write.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegModifyPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegModifyPayload {}

/// Wrapper for GSP sequencer register poll payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegPollPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_POLL);

impl RegPollPayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the mask to apply.
    pub(crate) fn mask(&self) -> u32 {
        self.0.mask
    }

    /// Returns the expected value.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }

    /// Returns the timeout in microseconds.
    pub(crate) fn timeout(&self) -> u32 {
        self.0.timeout
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegPollPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegPollPayload {}

/// Wrapper for GSP sequencer delay payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct DelayUsPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_DELAY_US);

impl DelayUsPayload {
    /// Returns the delay value in microseconds.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for DelayUsPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for DelayUsPayload {}

/// Wrapper for GSP sequencer register store payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegStorePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_STORE);

impl RegStorePayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the storage index.
    #[allow(unused)]
    pub(crate) fn index(&self) -> u32 {
        self.0.index
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegStorePayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegStorePayload {}

/// Wrapper for GSP sequencer buffer command.
///
/// The payload is a union; each accessor below validates the opcode before reading the
/// corresponding union member.
#[repr(transparent)]
pub(crate) struct SequencerBufferCmd(r570_144::GSP_SEQUENCER_BUFFER_CMD);

impl SequencerBufferCmd {
    /// Returns the opcode as a `SeqBufOpcode` enum, or error if invalid.
    pub(crate) fn opcode(&self) -> Result<SeqBufOpcode> {
        self.0.opCode.try_into()
    }

    /// Returns the register write payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegWrite`.
    pub(crate) fn reg_write_payload(&self) -> Result<RegWritePayload> {
        if self.opcode()? != SeqBufOpcode::RegWrite {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegWrite`, so union contains valid `RegWritePayload`.
        let payload_bytes = unsafe {
            core::slice::from_raw_parts(
                core::ptr::addr_of!(self.0.payload.regWrite).cast::<u8>(),
                core::mem::size_of::<RegWritePayload>(),
            )
        };
        Ok(*RegWritePayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
    }

    /// Returns the register modify payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegModify`.
    pub(crate) fn reg_modify_payload(&self) -> Result<RegModifyPayload> {
        if self.opcode()? != SeqBufOpcode::RegModify {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegModify`, so union contains valid `RegModifyPayload`.
        let payload_bytes = unsafe {
            core::slice::from_raw_parts(
                core::ptr::addr_of!(self.0.payload.regModify).cast::<u8>(),
                core::mem::size_of::<RegModifyPayload>(),
            )
        };
        Ok(*RegModifyPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
    }

    /// Returns the register poll payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegPoll`.
    pub(crate) fn reg_poll_payload(&self) -> Result<RegPollPayload> {
        if self.opcode()? != SeqBufOpcode::RegPoll {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegPoll`, so union contains valid `RegPollPayload`.
        let payload_bytes = unsafe {
            core::slice::from_raw_parts(
                core::ptr::addr_of!(self.0.payload.regPoll).cast::<u8>(),
                core::mem::size_of::<RegPollPayload>(),
            )
        };
        Ok(*RegPollPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
    }

    /// Returns the delay payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::DelayUs`.
    pub(crate) fn delay_us_payload(&self) -> Result<DelayUsPayload> {
        if self.opcode()? != SeqBufOpcode::DelayUs {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `DelayUs`, so union contains valid `DelayUsPayload`.
        let payload_bytes = unsafe {
            core::slice::from_raw_parts(
                core::ptr::addr_of!(self.0.payload.delayUs).cast::<u8>(),
                core::mem::size_of::<DelayUsPayload>(),
            )
        };
        Ok(*DelayUsPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
    }

    /// Returns the register store payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegStore`.
    pub(crate) fn reg_store_payload(&self) -> Result<RegStorePayload> {
        if self.opcode()? != SeqBufOpcode::RegStore {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegStore`, so union contains valid `RegStorePayload`.
        let payload_bytes = unsafe {
            core::slice::from_raw_parts(
                core::ptr::addr_of!(self.0.payload.regStore).cast::<u8>(),
                core::mem::size_of::<RegStorePayload>(),
            )
        };
        Ok(*RegStorePayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for SequencerBufferCmd {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for SequencerBufferCmd {}

/// Wrapper for GSP run CPU sequencer RPC.
#[repr(transparent)]
pub(crate) struct RunCpuSequencer(r570_144::rpc_run_cpu_sequencer_v17_00);

impl RunCpuSequencer {
    /// Returns the command index.
    pub(crate) fn cmd_index(&self) -> u32 {
        self.0.cmdIndex
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RunCpuSequencer {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RunCpuSequencer {}

/// Struct containing the arguments required to pass a memory buffer to the GSP
/// for use during initialisation.
///
/// The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
/// configured for a larger page size (e.g. 64K pages), we need to give
/// the GSP an array of 4K pages. Since we only create physically contiguous
/// buffers the math to calculate the addresses is simple.
///
/// The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently
/// ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
/// buffers to be physically contiguous anyway.
///
/// The memory allocated for the arguments must remain until the GSP sends the
/// init_done RPC.
#[repr(transparent)]
pub(crate) struct LibosMemoryRegionInitArgument(bindings::LibosMemoryRegionInitArgument);

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for LibosMemoryRegionInitArgument {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for LibosMemoryRegionInitArgument {}

impl LibosMemoryRegionInitArgument {
    /// Creates the init argument describing the physically-contiguous buffer `obj`, identified
    /// to the GSP by an `ID8` derived from `name`.
    pub(crate) fn new<A: AsBytes + FromBytes>(
        name: &'static str,
        obj: &CoherentAllocation<A>,
    ) -> Self {
        /// Generates the `ID8` identifier required for some GSP objects.
        fn id8(name: &str) -> u64 {
            let mut bytes = [0u8; core::mem::size_of::<u64>()];

            // Pack the (up to 8) trailing bytes of `name`, reversed, into a `u64`.
            for (c, b) in name.bytes().rev().zip(&mut bytes) {
                *b = c;
            }

            u64::from_ne_bytes(bytes)
        }

        Self(bindings::LibosMemoryRegionInitArgument {
            id8: id8(name),
            pa: obj.dma_handle(),
            size: num::usize_as_u64(obj.size()),
            kind: num::u32_into_u8::<
                { bindings::LibosMemoryRegionKind_LIBOS_MEMORY_REGION_CONTIGUOUS },
            >(),
            loc: num::u32_into_u8::<
                { bindings::LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM },
            >(),
            ..Default::default()
        })
    }
}

/// TX header for setting up a message queue with the GSP.
#[repr(transparent)]
pub(crate) struct MsgqTxHeader(bindings::msgqTxHeader);

impl MsgqTxHeader {
    /// Create a new TX queue header.
    ///
    /// # Arguments
    ///
    /// * `msgq_size` - Total size of the message queue structure, in bytes.
    /// * `rx_hdr_offset` - Offset, in bytes, of the start of the RX header in the message queue
    ///   structure.
    /// * `msg_count` - Number of messages that can be sent, i.e. the number of memory pages
    ///   allocated for the message queue in the message queue structure.
    pub(crate) fn new(msgq_size: u32, rx_hdr_offset: u32, msg_count: u32) -> Self {
        Self(bindings::msgqTxHeader {
            // NOTE(review): `version: 0` and `flags: 1` are fixed values whose meaning is not
            // visible here — confirm against the GSP msgq definitions.
            version: 0,
            size: msgq_size,
            msgSize: num::usize_into_u32::<GSP_PAGE_SIZE>(),
            msgCount: msg_count,
            writePtr: 0,
            flags: 1,
            rxHdrOff: rx_hdr_offset,
            entryOff: num::usize_into_u32::<GSP_PAGE_SIZE>(),
        })
    }

    /// Returns the value of the write pointer for this queue.
    pub(crate) fn write_ptr(&self) -> u32 {
        let ptr = core::ptr::from_ref(&self.0.writePtr);

        // Volatile access: this header presumably lives in memory shared with the GSP — see
        // `MessageQueueInitArguments::sharedMemPhysAddr`.
        // SAFETY: `ptr` is a valid pointer to a `u32`.
        unsafe { ptr.read_volatile() }
    }

    /// Sets the value of the write pointer for this queue.
    pub(crate) fn set_write_ptr(&mut self, val: u32) {
        let ptr = core::ptr::from_mut(&mut self.0.writePtr);

        // SAFETY: `ptr` is a valid pointer to a `u32`.
        unsafe { ptr.write_volatile(val) }
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for MsgqTxHeader {}

/// RX header for setting up a message queue with the GSP.
#[repr(transparent)]
pub(crate) struct MsgqRxHeader(bindings::msgqRxHeader);

/// Header for the message RX queue.
impl MsgqRxHeader {
    /// Creates a new RX queue header.
    pub(crate) fn new() -> Self {
        Self(Default::default())
    }

    /// Returns the value of the read pointer for this queue.
    pub(crate) fn read_ptr(&self) -> u32 {
        let ptr = core::ptr::from_ref(&self.0.readPtr);

        // SAFETY: `ptr` is a valid pointer to a `u32`.
        unsafe { ptr.read_volatile() }
    }

    /// Sets the value of the read pointer for this queue.
    pub(crate) fn set_read_ptr(&mut self, val: u32) {
        let ptr = core::ptr::from_mut(&mut self.0.readPtr);

        // SAFETY: `ptr` is a valid pointer to a `u32`.
        unsafe { ptr.write_volatile(val) }
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for MsgqRxHeader {}

// Version field of the RPC message header: major in bits 31:24, minor in bits 23:16.
bitfield! {
    struct MsgHeaderVersion(u32) {
        31:24 major as u8;
        23:16 minor as u8;
    }
}

impl MsgHeaderVersion {
    /// Major component of the header version advertised by the driver.
    const MAJOR_TOT: u8 = 3;
    /// Minor component of the header version advertised by the driver.
    const MINOR_TOT: u8 = 0;

    /// Returns the header version advertised by the driver (3.0).
    fn new() -> Self {
        Self::default()
            .set_major(Self::MAJOR_TOT)
            .set_minor(Self::MINOR_TOT)
    }
}

impl bindings::rpc_message_header_v {
    /// Returns an initializer for an RPC message header carrying `function` with a payload of
    /// `cmd_size` bytes.
    ///
    /// Fails with `EOVERFLOW` if `cmd_size` plus the header size overflows `usize`, or `EINVAL`
    /// if the total length does not fit the header's length field.
    fn init(cmd_size: usize, function: MsgFunction) -> impl Init<Self, Error> {
        type RpcMessageHeader = bindings::rpc_message_header_v;

        try_init!(RpcMessageHeader {
            header_version: MsgHeaderVersion::new().into(),
            signature: bindings::NV_VGPU_MSG_SIGNATURE_VALID,
            function: function.into(),
            length: size_of::<Self>()
                .checked_add(cmd_size)
                .ok_or(EOVERFLOW)
                .and_then(|v| v.try_into().map_err(|_| EINVAL))?,
            rpc_result: 0xffffffff,
            rpc_result_private: 0xffffffff,
            ..Zeroable::init_zeroed()
        })
    }
}

// SAFETY: We can't derive the Zeroable trait for this binding because the
// procedural macro doesn't support the syntax used by bindgen to create the
// __IncompleteArrayField types. So instead we implement it here, which is safe
// because these are explicitly padded structures only containing types for
// which any bit pattern, including all zeros, is valid.
unsafe impl Zeroable for bindings::rpc_message_header_v {}

/// GSP Message Element.
///
/// This is essentially a message header expected to be followed by the message data.
#[repr(transparent)]
pub(crate) struct GspMsgElement {
    inner: bindings::GSP_MSG_QUEUE_ELEMENT,
}

impl GspMsgElement {
    /// Creates a new message element.
    ///
    /// # Arguments
    ///
    /// * `sequence` - Sequence number of the message.
    /// * `cmd_size` - Size of the command (not including the message element), in bytes.
    /// * `function` - Function of the message.
    #[allow(non_snake_case)]
    pub(crate) fn init(
        sequence: u32,
        cmd_size: usize,
        function: MsgFunction,
    ) -> impl Init<Self, Error> {
        type RpcMessageHeader = bindings::rpc_message_header_v;
        type InnerGspMsgElement = bindings::GSP_MSG_QUEUE_ELEMENT;
        let init_inner = try_init!(InnerGspMsgElement {
            seqNum: sequence,
            // Number of GSP pages occupied by the element plus command, rounded up.
            elemCount: size_of::<Self>()
                .checked_add(cmd_size)
                .ok_or(EOVERFLOW)?
                .div_ceil(GSP_PAGE_SIZE)
                .try_into()
                .map_err(|_| EOVERFLOW)?,
            rpc <- RpcMessageHeader::init(cmd_size, function),
            ..Zeroable::init_zeroed()
        });

        try_init!(GspMsgElement {
            inner <- init_inner,
        })
    }

    /// Sets the checksum of this message.
    ///
    /// Since the header is also part of the checksum, this is usually called after the whole
    /// message has been written to the shared memory area.
    pub(crate) fn set_checksum(&mut self, checksum: u32) {
        self.inner.checkSum = checksum;
    }

    /// Returns the total length of the message.
    pub(crate) fn length(&self) -> usize {
        // `rpc.length` includes the length of the GspRpcHeader but not the message header.
        size_of::<Self>() - size_of::<bindings::rpc_message_header_v>()
            + num::u32_as_usize(self.inner.rpc.length)
    }

    /// Returns the sequence number of the message.
    pub(crate) fn sequence(&self) -> u32 {
        self.inner.rpc.sequence
    }

    /// Returns the function of the message, if it is valid, or the invalid function number as an
    /// error.
    pub(crate) fn function(&self) -> Result<MsgFunction, u32> {
        self.inner
            .rpc
            .function
            .try_into()
            .map_err(|_| self.inner.rpc.function)
    }

    /// Returns the number of elements (i.e. memory pages) used by this message.
    pub(crate) fn element_count(&self) -> u32 {
        self.inner.elemCount
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for GspMsgElement {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspMsgElement {}

/// Arguments for GSP startup.
#[repr(transparent)]
pub(crate) struct GspArgumentsCached(bindings::GSP_ARGUMENTS_CACHED);

impl GspArgumentsCached {
    /// Creates the arguments for starting the GSP up using `cmdq` as its command queue.
    pub(crate) fn new(cmdq: &Cmdq) -> Self {
        Self(bindings::GSP_ARGUMENTS_CACHED {
            messageQueueInitArguments: MessageQueueInitArguments::new(cmdq).0,
            // NOTE(review): `bDmemStack: 1` — the semantics of this flag are not visible here;
            // confirm against the GSP_ARGUMENTS_CACHED definition.
            bDmemStack: 1,
            ..Default::default()
        })
    }
}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for GspArgumentsCached {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspArgumentsCached {}

/// Init arguments for the message queue.
#[repr(transparent)]
struct MessageQueueInitArguments(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS);

impl MessageQueueInitArguments {
    /// Creates a new init arguments structure for `cmdq`.
    fn new(cmdq: &Cmdq) -> Self {
        Self(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS {
            sharedMemPhysAddr: cmdq.dma_handle(),
            pageTableEntryCount: num::usize_into_u32::<{ Cmdq::NUM_PTES }>(),
            cmdQueueOffset: num::usize_as_u64(Cmdq::CMDQ_OFFSET),
            statQueueOffset: num::usize_as_u64(Cmdq::STATQ_OFFSET),
            ..Default::default()
        })
    }
}