// SPDX-License-Identifier: GPL-2.0

//! GSP firmware ABI definitions and wrappers for the r570.144 firmware version.

pub(crate) mod commands;
mod r570_144;

// Alias to avoid repeating the version number with every use.
use r570_144 as bindings;

use core::ops::Range;

use kernel::{
    dma::Coherent,
    prelude::*,
    ptr::{
        Alignable,
        Alignment,
        KnownSize, //
    },
    sizes::{
        SZ_128K,
        SZ_1M, //
    },
    transmute::{
        AsBytes,
        FromBytes, //
    },
};

use crate::{
    fb::FbLayout,
    firmware::gsp::GspFirmware,
    gpu::Chipset,
    gsp::{
        cmdq::Cmdq, //
        GSP_PAGE_SIZE,
    },
    num::{
        self,
        FromSafeCast, //
    },
};

// TODO: Replace with `IoView` projections once available.
pub(super) mod gsp_mem {
    use core::sync::atomic::{
        fence,
        Ordering, //
    };

    use kernel::{
        dma::Coherent,
        dma_read,
        dma_write, //
    };

    use crate::gsp::cmdq::{
        GspMem,
        MSGQ_NUM_PAGES, //
    };

    /// Returns the GSP-owned write pointer (`gspq.tx.writePtr`), wrapped to
    /// `MSGQ_NUM_PAGES`.
    pub(in crate::gsp) fn gsp_write_ptr(qs: &Coherent<GspMem>) -> u32 {
        dma_read!(qs, .gspq.tx.0.writePtr) % MSGQ_NUM_PAGES
    }

    /// Returns the GSP-owned read pointer (`gspq.rx.readPtr`), wrapped to
    /// `MSGQ_NUM_PAGES`.
    pub(in crate::gsp) fn gsp_read_ptr(qs: &Coherent<GspMem>) -> u32 {
        dma_read!(qs, .gspq.rx.0.readPtr) % MSGQ_NUM_PAGES
    }

    /// Returns the CPU-owned read pointer (`cpuq.rx.readPtr`), wrapped to
    /// `MSGQ_NUM_PAGES`.
    pub(in crate::gsp) fn cpu_read_ptr(qs: &Coherent<GspMem>) -> u32 {
        dma_read!(qs, .cpuq.rx.0.readPtr) % MSGQ_NUM_PAGES
    }

    /// Advances the CPU-owned read pointer by `count` pages, wrapping at
    /// `MSGQ_NUM_PAGES`.
    pub(in crate::gsp) fn advance_cpu_read_ptr(qs: &Coherent<GspMem>, count: u32) {
        let rptr = cpu_read_ptr(qs).wrapping_add(count) % MSGQ_NUM_PAGES;

        // Ensure read pointer is properly ordered.
        fence(Ordering::SeqCst);

        dma_write!(qs, .cpuq.rx.0.readPtr, rptr);
    }

    /// Returns the CPU-owned write pointer (`cpuq.tx.writePtr`), wrapped to
    /// `MSGQ_NUM_PAGES`.
    pub(in crate::gsp) fn cpu_write_ptr(qs: &Coherent<GspMem>) -> u32 {
        dma_read!(qs, .cpuq.tx.0.writePtr) % MSGQ_NUM_PAGES
    }

    /// Advances the CPU-owned write pointer by `count` pages, wrapping at
    /// `MSGQ_NUM_PAGES`.
    pub(in crate::gsp) fn advance_cpu_write_ptr(qs: &Coherent<GspMem>, count: u32) {
        let wptr = cpu_write_ptr(qs).wrapping_add(count) % MSGQ_NUM_PAGES;

        dma_write!(qs, .cpuq.tx.0.writePtr, wptr);

        // Ensure all command data is visible before triggering the GSP read.
        fence(Ordering::SeqCst);
    }
}

/// Maximum size of a single GSP message queue element in bytes.
pub(crate) const GSP_MSG_QUEUE_ELEMENT_SIZE_MAX: usize =
    num::u32_as_usize(bindings::GSP_MSG_QUEUE_ELEMENT_SIZE_MAX);

/// Empty type to group methods related to heap parameters for running the GSP firmware.
enum GspFwHeapParams {}

/// Minimum required alignment for the GSP heap (1 MiB).
const GSP_HEAP_ALIGNMENT: Alignment = Alignment::new::<{ 1 << 20 }>();

impl GspFwHeapParams {
    /// Returns the amount of GSP-RM heap memory used during GSP-RM boot and initialization (up to
    /// and including the first client subdevice allocation).
    fn base_rm_size(_chipset: Chipset) -> u64 {
        // TODO: this needs to be updated to return the correct value for Hopper+ once support for
        // them is added:
        // u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100)
        u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X)
    }

    /// Returns the amount of heap memory required to support a single channel allocation,
    /// rounded up to the heap alignment.
    fn client_alloc_size() -> u64 {
        u64::from(bindings::GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE)
            .align_up(GSP_HEAP_ALIGNMENT)
            .unwrap_or(u64::MAX)
    }

    /// Returns the amount of memory to reserve for management purposes for a framebuffer of size
    /// `fb_size`.
    fn management_overhead(fb_size: u64) -> u64 {
        // Overhead scales with framebuffer size, expressed in (rounded-up) gigabytes.
        let fb_size_gb = fb_size.div_ceil(u64::from_safe_cast(kernel::sizes::SZ_1G));

        u64::from(bindings::GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB)
            .saturating_mul(fb_size_gb)
            .align_up(GSP_HEAP_ALIGNMENT)
            .unwrap_or(u64::MAX)
    }
}

/// Heap memory requirements and constraints for a given version of the GSP LIBOS.
pub(crate) struct LibosParams {
    /// The base amount of heap required by the GSP operating system, in bytes.
    carveout_size: u64,
    /// The minimum and maximum sizes allowed for the GSP FW heap, in bytes.
    allowed_heap_size: Range<u64>,
}

impl LibosParams {
    /// Version 2 of the GSP LIBOS (Turing and GA100)
    const LIBOS2: LibosParams = LibosParams {
        carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2),
        allowed_heap_size: num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB)
            * num::usize_as_u64(SZ_1M)
            ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MAX_MB)
                * num::usize_as_u64(SZ_1M),
    };

    /// Version 3 of the GSP LIBOS (GA102+)
    const LIBOS3: LibosParams = LibosParams {
        carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL),
        allowed_heap_size: num::u32_as_u64(
            bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
        ) * num::usize_as_u64(SZ_1M)
            ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MAX_MB)
                * num::usize_as_u64(SZ_1M),
    };

    /// Returns the libos parameters corresponding to `chipset`.
    pub(crate) fn from_chipset(chipset: Chipset) -> &'static LibosParams {
        if chipset < Chipset::GA102 {
            &Self::LIBOS2
        } else {
            &Self::LIBOS3
        }
    }

    /// Returns the amount of memory (in bytes) to allocate for the WPR heap for a framebuffer size
    /// of `fb_size` (in bytes) for `chipset`.
    pub(crate) fn wpr_heap_size(&self, chipset: Chipset, fb_size: u64) -> u64 {
        // The WPR heap will contain the following:
        // LIBOS carveout,
        self.carveout_size
            // RM boot working memory,
            .saturating_add(GspFwHeapParams::base_rm_size(chipset))
            // One RM client,
            .saturating_add(GspFwHeapParams::client_alloc_size())
            // Overhead for memory management.
            .saturating_add(GspFwHeapParams::management_overhead(fb_size))
            // Clamp to the supported heap sizes. `allowed_heap_size` is an exclusive-end
            // range, hence `end - 1` as the inclusive upper bound for `clamp`.
            .clamp(self.allowed_heap_size.start, self.allowed_heap_size.end - 1)
    }
}

/// Structure passed to the GSP bootloader, containing the framebuffer layout as well as the DMA
/// addresses of the GSP bootloader and firmware.
#[repr(transparent)]
pub(crate) struct GspFwWprMeta {
    inner: bindings::GspFwWprMeta,
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for GspFwWprMeta {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspFwWprMeta {}

type GspFwWprMetaBootResumeInfo = bindings::GspFwWprMeta__bindgen_ty_1;
type GspFwWprMetaBootInfo = bindings::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;

impl GspFwWprMeta {
    /// Returns an initializer for a `GspFwWprMeta` suitable for booting `gsp_firmware` using the
    /// `fb_layout` layout.
    pub(crate) fn new<'a>(
        gsp_firmware: &'a GspFirmware,
        fb_layout: &'a FbLayout,
    ) -> impl Init<Self> + 'a {
        #[allow(non_snake_case)]
        let init_inner = init!(bindings::GspFwWprMeta {
            // CAST: we want to store the bits of `GSP_FW_WPR_META_MAGIC` unmodified.
            magic: bindings::GSP_FW_WPR_META_MAGIC as u64,
            revision: u64::from(bindings::GSP_FW_WPR_META_REVISION),
            // DMA location and size of the radix3-paged firmware ELF.
            sysmemAddrOfRadix3Elf: gsp_firmware.radix3_dma_handle(),
            sizeOfRadix3Elf: u64::from_safe_cast(gsp_firmware.size),
            // DMA location, size, and section offsets of the bootloader ucode.
            sysmemAddrOfBootloader: gsp_firmware.bootloader.ucode.dma_handle(),
            sizeOfBootloader: u64::from_safe_cast(gsp_firmware.bootloader.ucode.size()),
            bootloaderCodeOffset: u64::from(gsp_firmware.bootloader.code_offset),
            bootloaderDataOffset: u64::from(gsp_firmware.bootloader.data_offset),
            bootloaderManifestOffset: u64::from(gsp_firmware.bootloader.manifest_offset),
            __bindgen_anon_1: GspFwWprMetaBootResumeInfo {
                __bindgen_anon_1: GspFwWprMetaBootInfo {
                    sysmemAddrOfSignature: gsp_firmware.signatures.dma_handle(),
                    sizeOfSignature: u64::from_safe_cast(gsp_firmware.signatures.size()),
                },
            },
            // Framebuffer carveout layout, taken verbatim from `fb_layout`.
            gspFwRsvdStart: fb_layout.heap.start,
            nonWprHeapOffset: fb_layout.heap.start,
            nonWprHeapSize: fb_layout.heap.end - fb_layout.heap.start,
            gspFwWprStart: fb_layout.wpr2.start,
            gspFwHeapOffset: fb_layout.wpr2_heap.start,
            gspFwHeapSize: fb_layout.wpr2_heap.end - fb_layout.wpr2_heap.start,
            gspFwOffset: fb_layout.elf.start,
            bootBinOffset: fb_layout.boot.start,
            frtsOffset: fb_layout.frts.start,
            frtsSize: fb_layout.frts.end - fb_layout.frts.start,
            gspFwWprEnd: fb_layout
                .vga_workspace
                .start
                .align_down(Alignment::new::<SZ_128K>()),
            gspFwHeapVfPartitionCount: fb_layout.vf_partition_count,
            fbSize: fb_layout.fb.end - fb_layout.fb.start,
            vgaWorkspaceOffset: fb_layout.vga_workspace.start,
            vgaWorkspaceSize: fb_layout.vga_workspace.end - fb_layout.vga_workspace.start,
            ..Zeroable::init_zeroed()
        });

        init!(GspFwWprMeta {
            inner <- init_inner,
        })
    }
}

/// RPC function and event codes used in GSP message headers, mirroring the
/// `NV_VGPU_MSG_FUNCTION_*` and `NV_VGPU_MSG_EVENT_*` binding constants.
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(u32)]
pub(crate) enum MsgFunction {
    // Common function codes
    AllocChannelDma =
        bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA,
    AllocCtxDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA,
    AllocDevice = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE,
    AllocMemory = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
    AllocObject = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT,
    AllocRoot = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT,
    BindCtxDma = bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA,
    ContinuationRecord = bindings::NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD,
    Free = bindings::NV_VGPU_MSG_FUNCTION_FREE,
    GetGspStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO,
    GetStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO,
    GspInitPostObjGpu = bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU,
    GspRmControl = bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
    GspSetSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO,
    Log = bindings::NV_VGPU_MSG_FUNCTION_LOG,
    MapMemory = bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY,
    Nop = bindings::NV_VGPU_MSG_FUNCTION_NOP,
    SetGuestSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO,
    SetRegistry = bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY,

    // Event codes
    GspInitDone = bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE,
    GspLockdownNotice = bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE,
    GspPostNoCat = bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD,
    GspRunCpuSequencer = bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
    MmuFaultQueued = bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
    OsErrorLog = bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG,
    PostEvent = bindings::NV_VGPU_MSG_EVENT_POST_EVENT,
    RcTriggered = bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED,
    UcodeLibOsPrint = bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT,
}

// Validating conversion from a raw message code; unknown codes map to `EINVAL`.
impl TryFrom<u32> for MsgFunction {
    type Error = kernel::error::Error;

    fn try_from(value: u32) -> Result<MsgFunction> {
        match value {
            // Common function codes
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA => Ok(MsgFunction::AllocChannelDma),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA => Ok(MsgFunction::AllocCtxDma),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE => Ok(MsgFunction::AllocDevice),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY => Ok(MsgFunction::AllocMemory),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT => Ok(MsgFunction::AllocObject),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT => Ok(MsgFunction::AllocRoot),
            bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA => Ok(MsgFunction::BindCtxDma),
            bindings::NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD => {
                Ok(MsgFunction::ContinuationRecord)
            }
            bindings::NV_VGPU_MSG_FUNCTION_FREE => Ok(MsgFunction::Free),
            bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO => Ok(MsgFunction::GetGspStaticInfo),
            bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO => Ok(MsgFunction::GetStaticInfo),
            bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU => {
                Ok(MsgFunction::GspInitPostObjGpu)
            }
            bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL => Ok(MsgFunction::GspRmControl),
            bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO => Ok(MsgFunction::GspSetSystemInfo),
            bindings::NV_VGPU_MSG_FUNCTION_LOG => Ok(MsgFunction::Log),
            bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY => Ok(MsgFunction::MapMemory),
            bindings::NV_VGPU_MSG_FUNCTION_NOP => Ok(MsgFunction::Nop),
            bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO => {
                Ok(MsgFunction::SetGuestSystemInfo)
            }
            bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY => Ok(MsgFunction::SetRegistry),

            // Event codes
            bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE => Ok(MsgFunction::GspInitDone),
            bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE => Ok(MsgFunction::GspLockdownNotice),
            bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD => Ok(MsgFunction::GspPostNoCat),
            bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER => {
                Ok(MsgFunction::GspRunCpuSequencer)
            }
            bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED => Ok(MsgFunction::MmuFaultQueued),
            bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG => Ok(MsgFunction::OsErrorLog),
            bindings::NV_VGPU_MSG_EVENT_POST_EVENT => Ok(MsgFunction::PostEvent),
            bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED => Ok(MsgFunction::RcTriggered),
            bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT => Ok(MsgFunction::UcodeLibOsPrint),
            _ => Err(EINVAL),
        }
    }
}

impl From<MsgFunction> for u32 {
    fn from(value: MsgFunction) -> Self {
        // CAST: `MsgFunction` is `repr(u32)` and can thus be cast losslessly.
        value as u32
    }
}

/// Sequencer buffer opcode for GSP sequencer commands.
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(u32)]
pub(crate) enum SeqBufOpcode {
    // Core operation opcodes
    CoreReset = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
    CoreResume = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
    CoreStart = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
    CoreWaitForHalt = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,

    // Delay opcode
    DelayUs = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,

    // Register operation opcodes
    RegModify = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
    RegPoll = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
    RegStore = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
    RegWrite = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
}

// Validating conversion from a raw opcode; unknown opcodes map to `EINVAL`.
impl TryFrom<u32> for SeqBufOpcode {
    type Error = kernel::error::Error;

    fn try_from(value: u32) -> Result<SeqBufOpcode> {
        match value {
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
                Ok(SeqBufOpcode::CoreReset)
            }
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
                Ok(SeqBufOpcode::CoreResume)
            }
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
                Ok(SeqBufOpcode::CoreStart)
            }
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
                Ok(SeqBufOpcode::CoreWaitForHalt)
            }
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
                Ok(SeqBufOpcode::RegModify)
            }
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
            _ => Err(EINVAL),
        }
    }
}

impl From<SeqBufOpcode> for u32 {
    fn from(value: SeqBufOpcode) -> Self {
        // CAST: `SeqBufOpcode` is `repr(u32)` and can thus be cast losslessly.
        value as u32
    }
}

/// Wrapper for GSP sequencer register write payload.
#[repr(transparent)]
#[derive(Copy, Clone, Debug)]
pub(crate) struct RegWritePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);

impl RegWritePayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the value to write.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegWritePayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegWritePayload {}

/// Wrapper for GSP sequencer register modify payload.
#[repr(transparent)]
#[derive(Copy, Clone, Debug)]
pub(crate) struct RegModifyPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);

impl RegModifyPayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the mask to apply.
    pub(crate) fn mask(&self) -> u32 {
        self.0.mask
    }

    /// Returns the value to write.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegModifyPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegModifyPayload {}

/// Wrapper for GSP sequencer register poll payload.
#[repr(transparent)]
#[derive(Copy, Clone, Debug)]
pub(crate) struct RegPollPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_POLL);

impl RegPollPayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the mask to apply.
    pub(crate) fn mask(&self) -> u32 {
        self.0.mask
    }

    /// Returns the expected value.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }

    /// Returns the timeout in microseconds.
    pub(crate) fn timeout(&self) -> u32 {
        self.0.timeout
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegPollPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegPollPayload {}

/// Wrapper for GSP sequencer delay payload.
#[repr(transparent)]
#[derive(Copy, Clone, Debug)]
pub(crate) struct DelayUsPayload(bindings::GSP_SEQ_BUF_PAYLOAD_DELAY_US);

impl DelayUsPayload {
    /// Returns the delay value in microseconds.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for DelayUsPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for DelayUsPayload {}

/// Wrapper for GSP sequencer register store payload.
#[repr(transparent)]
#[derive(Copy, Clone, Debug)]
pub(crate) struct RegStorePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_STORE);

impl RegStorePayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the storage index.
    #[allow(unused)]
    pub(crate) fn index(&self) -> u32 {
        self.0.index
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegStorePayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegStorePayload {}

/// Wrapper for GSP sequencer buffer command.
///
/// The command is a tagged union: `opCode` selects which member of `payload` is valid, so
/// every payload accessor validates the opcode before reading the union.
#[repr(transparent)]
pub(crate) struct SequencerBufferCmd(bindings::GSP_SEQUENCER_BUFFER_CMD);

impl SequencerBufferCmd {
    /// Returns the opcode as a `SeqBufOpcode` enum, or error if invalid.
    pub(crate) fn opcode(&self) -> Result<SeqBufOpcode> {
        self.0.opCode.try_into()
    }

    /// Returns the register write payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegWrite`.
    pub(crate) fn reg_write_payload(&self) -> Result<RegWritePayload> {
        if self.opcode()? != SeqBufOpcode::RegWrite {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegWrite`, so union contains valid `RegWritePayload`.
        Ok(RegWritePayload(unsafe { self.0.payload.regWrite }))
    }

    /// Returns the register modify payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegModify`.
    pub(crate) fn reg_modify_payload(&self) -> Result<RegModifyPayload> {
        if self.opcode()? != SeqBufOpcode::RegModify {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegModify`, so union contains valid `RegModifyPayload`.
        Ok(RegModifyPayload(unsafe { self.0.payload.regModify }))
    }

    /// Returns the register poll payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegPoll`.
    pub(crate) fn reg_poll_payload(&self) -> Result<RegPollPayload> {
        if self.opcode()? != SeqBufOpcode::RegPoll {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegPoll`, so union contains valid `RegPollPayload`.
        Ok(RegPollPayload(unsafe { self.0.payload.regPoll }))
    }

    /// Returns the delay payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::DelayUs`.
    pub(crate) fn delay_us_payload(&self) -> Result<DelayUsPayload> {
        if self.opcode()? != SeqBufOpcode::DelayUs {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `DelayUs`, so union contains valid `DelayUsPayload`.
        Ok(DelayUsPayload(unsafe { self.0.payload.delayUs }))
    }

    /// Returns the register store payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegStore`.
    pub(crate) fn reg_store_payload(&self) -> Result<RegStorePayload> {
        if self.opcode()? != SeqBufOpcode::RegStore {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegStore`, so union contains valid `RegStorePayload`.
        Ok(RegStorePayload(unsafe { self.0.payload.regStore }))
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for SequencerBufferCmd {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for SequencerBufferCmd {}

/// Wrapper for GSP run CPU sequencer RPC.
#[repr(transparent)]
pub(crate) struct RunCpuSequencer(bindings::rpc_run_cpu_sequencer_v17_00);

impl RunCpuSequencer {
    /// Returns the command index.
    pub(crate) fn cmd_index(&self) -> u32 {
        self.0.cmdIndex
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RunCpuSequencer {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RunCpuSequencer {}

/// Struct containing the arguments required to pass a memory buffer to the GSP
/// for use during initialisation.
///
/// The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
/// configured for a larger page size (e.g. 64K pages), we need to give
/// the GSP an array of 4K pages. Since we only create physically contiguous
/// buffers the math to calculate the addresses is simple.
///
/// The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently
/// ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
/// buffers to be physically contiguous anyway.
///
/// The memory allocated for the arguments must remain until the GSP sends the
/// init_done RPC.
#[repr(transparent)]
pub(crate) struct LibosMemoryRegionInitArgument {
    inner: bindings::LibosMemoryRegionInitArgument,
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for LibosMemoryRegionInitArgument {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for LibosMemoryRegionInitArgument {}

impl LibosMemoryRegionInitArgument {
    /// Returns an initializer describing the coherent buffer `obj` (a contiguous sysmem
    /// region) to the GSP, identified by the `ID8` derived from `name`.
    pub(crate) fn new<'a, A: AsBytes + FromBytes + KnownSize + ?Sized>(
        name: &'static str,
        obj: &'a Coherent<A>,
    ) -> impl Init<Self> + 'a {
        /// Generates the `ID8` identifier required for some GSP objects.
        fn id8(name: &str) -> u64 {
            let mut bytes = [0u8; core::mem::size_of::<u64>()];

            // Pack the name's bytes, last character first, into the low bytes of the
            // identifier; names longer than 8 bytes are truncated by `zip`.
            for (c, b) in name.bytes().rev().zip(&mut bytes) {
                *b = c;
            }

            u64::from_ne_bytes(bytes)
        }

        #[allow(non_snake_case)]
        let init_inner = init!(bindings::LibosMemoryRegionInitArgument {
            id8: id8(name),
            pa: obj.dma_handle(),
            size: num::usize_as_u64(obj.size()),
            kind: num::u32_into_u8::<
                { bindings::LibosMemoryRegionKind_LIBOS_MEMORY_REGION_CONTIGUOUS },
            >(),
            loc: num::u32_into_u8::<
                { bindings::LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM },
            >(),
            ..Zeroable::init_zeroed()
        });

        init!(LibosMemoryRegionInitArgument {
            inner <- init_inner,
        })
    }
}

/// TX header for setting up a message queue with the GSP.
#[repr(transparent)]
pub(crate) struct MsgqTxHeader(bindings::msgqTxHeader);

impl MsgqTxHeader {
    /// Create a new TX queue header.
    ///
    /// # Arguments
    ///
    /// * `msgq_size` - Total size of the message queue structure, in bytes.
    /// * `rx_hdr_offset` - Offset, in bytes, of the start of the RX header in the message queue
    ///   structure.
    /// * `msg_count` - Number of messages that can be sent, i.e. the number of memory pages
    ///   allocated for the message queue in the message queue structure.
    pub(crate) fn new(msgq_size: u32, rx_hdr_offset: u32, msg_count: u32) -> Self {
        Self(bindings::msgqTxHeader {
            version: 0,
            size: msgq_size,
            // Each message slot is one GSP page.
            msgSize: num::usize_into_u32::<GSP_PAGE_SIZE>(),
            msgCount: msg_count,
            writePtr: 0,
            // NOTE(review): value `1` follows the GSP message queue ABI; the meaning of the
            // bit is not visible from this file — confirm against the firmware headers.
            flags: 1,
            rxHdrOff: rx_hdr_offset,
            // The first message entry starts one page after the header page.
            entryOff: num::usize_into_u32::<GSP_PAGE_SIZE>(),
        })
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for MsgqTxHeader {}

/// RX header for setting up a message queue with the GSP.
#[repr(transparent)]
pub(crate) struct MsgqRxHeader(bindings::msgqRxHeader);

impl MsgqRxHeader {
    /// Creates a new, zero-initialized RX queue header.
    pub(crate) fn new() -> Self {
        Self(Default::default())
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for MsgqRxHeader {}

// Version field of an RPC message header: major in bits 31:24, minor in bits 23:16.
bitfield! {
    struct MsgHeaderVersion(u32) {
        31:24 major as u8;
        23:16 minor as u8;
    }
}

impl MsgHeaderVersion {
    const MAJOR_TOT: u8 = 3;
    const MINOR_TOT: u8 = 0;

    /// Returns the header version used by this driver (major 3, minor 0).
    fn new() -> Self {
        Self::default()
            .set_major(Self::MAJOR_TOT)
            .set_minor(Self::MINOR_TOT)
    }
}

impl bindings::rpc_message_header_v {
    /// Returns an initializer for an RPC message header carrying `function` with a command
    /// payload of `cmd_size` bytes.
    ///
    /// Fails with `EOVERFLOW`/`EINVAL` if the total length (header plus `cmd_size`) cannot be
    /// represented in the header's length field.
    fn init(cmd_size: usize, function: MsgFunction) -> impl Init<Self, Error> {
        type RpcMessageHeader = bindings::rpc_message_header_v;

        try_init!(RpcMessageHeader {
            header_version: MsgHeaderVersion::new().into(),
            signature: bindings::NV_VGPU_MSG_SIGNATURE_VALID,
            function: function.into(),
            // `length` covers the header itself plus the command payload.
            length: size_of::<Self>()
                .checked_add(cmd_size)
                .ok_or(EOVERFLOW)
                .and_then(|v| v.try_into().map_err(|_| EINVAL))?,
            // NOTE(review): result fields are preset to all-ones — presumably overwritten by
            // GSP-RM in replies; confirm against the RPC message flow.
            rpc_result: 0xffffffff,
            rpc_result_private: 0xffffffff,
            ..Zeroable::init_zeroed()
        })
    }
}

/// GSP Message Element.
///
/// This is essentially a message header expected to be followed by the message data.
#[repr(transparent)]
pub(crate) struct GspMsgElement {
    inner: bindings::GSP_MSG_QUEUE_ELEMENT,
}

impl GspMsgElement {
    /// Creates a new message element.
    ///
    /// # Arguments
    ///
    /// * `sequence` - Sequence number of the message.
    /// * `cmd_size` - Size of the command (not including the message element), in bytes.
    /// * `function` - Function of the message.
    #[allow(non_snake_case)]
    pub(crate) fn init(
        sequence: u32,
        cmd_size: usize,
        function: MsgFunction,
    ) -> impl Init<Self, Error> {
        type RpcMessageHeader = bindings::rpc_message_header_v;
        type InnerGspMsgElement = bindings::GSP_MSG_QUEUE_ELEMENT;
        let init_inner = try_init!(InnerGspMsgElement {
            seqNum: sequence,
            // Number of GSP pages occupied by the element header plus the command payload,
            // rounded up to whole pages.
            elemCount: size_of::<Self>()
                .checked_add(cmd_size)
                .ok_or(EOVERFLOW)?
                .div_ceil(GSP_PAGE_SIZE)
                .try_into()
                .map_err(|_| EOVERFLOW)?,
            rpc <- RpcMessageHeader::init(cmd_size, function),
            ..Zeroable::init_zeroed()
        });

        try_init!(GspMsgElement {
            inner <- init_inner,
        })
    }

    /// Sets the checksum of this message.
    ///
    /// Since the header is also part of the checksum, this is usually called after the whole
    /// message has been written to the shared memory area.
    pub(crate) fn set_checksum(&mut self, checksum: u32) {
        self.inner.checkSum = checksum;
    }

    /// Returns the length of the message's payload.
    pub(crate) fn payload_length(&self) -> usize {
        // `rpc.length` includes the length of the RPC message header.
        num::u32_as_usize(self.inner.rpc.length)
            .saturating_sub(size_of::<bindings::rpc_message_header_v>())
    }

    /// Returns the total length of the message, message and RPC headers included.
    pub(crate) fn length(&self) -> usize {
        size_of::<Self>() + self.payload_length()
    }

    /// Returns the sequence number of the message.
    pub(crate) fn sequence(&self) -> u32 {
        self.inner.rpc.sequence
    }

    /// Returns the function of the message, if it is valid, or the invalid function number as an
    /// error.
    pub(crate) fn function(&self) -> Result<MsgFunction, u32> {
        self.inner
            .rpc
            .function
            .try_into()
            .map_err(|_| self.inner.rpc.function)
    }

    /// Returns the number of elements (i.e. memory pages) used by this message.
    pub(crate) fn element_count(&self) -> u32 {
        self.inner.elemCount
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for GspMsgElement {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspMsgElement {}

/// Arguments for GSP startup.
#[repr(transparent)]
#[derive(Zeroable)]
pub(crate) struct GspArgumentsCached {
    inner: bindings::GSP_ARGUMENTS_CACHED,
}

impl GspArgumentsCached {
    /// Creates the arguments for starting the GSP up using `cmdq` as its command queue.
    pub(crate) fn new(cmdq: &Cmdq) -> impl Init<Self> + '_ {
        #[allow(non_snake_case)]
        let init_inner = init!(bindings::GSP_ARGUMENTS_CACHED {
            messageQueueInitArguments <- MessageQueueInitArguments::new(cmdq),
            bDmemStack: 1,
            ..Zeroable::init_zeroed()
        });

        init!(GspArgumentsCached {
            inner <- init_inner,
        })
    }
}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for GspArgumentsCached {}

/// On Turing and GA100, the entries in the `LibosMemoryRegionInitArgument`
/// must all be a multiple of GSP_PAGE_SIZE in size, so add padding to force it
/// to that size.
#[repr(C)]
#[derive(Zeroable)]
pub(crate) struct GspArgumentsPadded {
    pub(crate) inner: GspArgumentsCached,
    // Pads the structure out to exactly one GSP page.
    _padding: [u8; GSP_PAGE_SIZE - core::mem::size_of::<bindings::GSP_ARGUMENTS_CACHED>()],
}

impl GspArgumentsPadded {
    /// Creates page-padded GSP startup arguments using `cmdq` as the command queue.
    pub(crate) fn new(cmdq: &Cmdq) -> impl Init<Self> + '_ {
        init!(GspArgumentsPadded {
            inner <- GspArgumentsCached::new(cmdq),
            ..Zeroable::init_zeroed()
        })
    }
}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for GspArgumentsPadded {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspArgumentsPadded {}

/// Init arguments for the message queue.
type MessageQueueInitArguments = bindings::MESSAGE_QUEUE_INIT_ARGUMENTS;

impl MessageQueueInitArguments {
    /// Creates a new init arguments structure for `cmdq`.
    #[allow(non_snake_case)]
    fn new(cmdq: &Cmdq) -> impl Init<Self> + '_ {
        init!(MessageQueueInitArguments {
            // DMA address of the shared queue memory and its page-table entry count.
            sharedMemPhysAddr: cmdq.dma_handle,
            pageTableEntryCount: num::usize_into_u32::<{ Cmdq::NUM_PTES }>(),
            cmdQueueOffset: num::usize_as_u64(Cmdq::CMDQ_OFFSET),
            statQueueOffset: num::usize_as_u64(Cmdq::STATQ_OFFSET),
            ..Zeroable::init_zeroed()
        })
    }
}