// SPDX-License-Identifier: GPL-2.0

pub(crate) mod commands;
mod r570_144;

// Alias to avoid repeating the version number with every use.
use r570_144 as bindings;

use core::ops::Range;

use kernel::{
    dma::CoherentAllocation,
    prelude::*,
    ptr::{
        Alignable,
        Alignment, //
    },
    sizes::{
        SZ_128K,
        SZ_1M, //
    },
    transmute::{
        AsBytes,
        FromBytes, //
    },
};

use crate::{
    fb::FbLayout,
    firmware::gsp::GspFirmware,
    gpu::Chipset,
    gsp::{
        cmdq::Cmdq, //
        GSP_PAGE_SIZE,
    },
    num::{
        self,
        FromSafeCast, //
    },
};

// TODO: Replace with `IoView` projections once available; the `unwrap()` calls go away once we
// switch to the new `dma::Coherent` API.
pub(super) mod gsp_mem {
    //! Accessors for the read/write pointers of the CPU/GSP shared message queues.
    //!
    //! Every accessor operates on element `[0]` of the `CoherentAllocation<GspMem>` and reduces
    //! the raw pointer value modulo `MSGQ_NUM_PAGES`, so callers always observe an in-range page
    //! index.

    use core::sync::atomic::{
        fence,
        Ordering, //
    };

    use kernel::{
        dma::CoherentAllocation,
        dma_read,
        dma_write,
        prelude::*, //
    };

    use crate::gsp::cmdq::{
        GspMem,
        MSGQ_NUM_PAGES, //
    };

    /// Returns the write pointer of the GSP-side TX queue, in pages.
    pub(in crate::gsp) fn gsp_write_ptr(qs: &CoherentAllocation<GspMem>) -> u32 {
        // PANIC: A `dma::CoherentAllocation` always contains at least one element.
        || -> Result<u32> { Ok(dma_read!(qs, [0]?.gspq.tx.0.writePtr) % MSGQ_NUM_PAGES) }().unwrap()
    }

    /// Returns the read pointer of the GSP-side RX queue, in pages.
    pub(in crate::gsp) fn gsp_read_ptr(qs: &CoherentAllocation<GspMem>) -> u32 {
        // PANIC: A `dma::CoherentAllocation` always contains at least one element.
        || -> Result<u32> { Ok(dma_read!(qs, [0]?.gspq.rx.0.readPtr) % MSGQ_NUM_PAGES) }().unwrap()
    }

    /// Returns the read pointer of the CPU-side RX queue, in pages.
    pub(in crate::gsp) fn cpu_read_ptr(qs: &CoherentAllocation<GspMem>) -> u32 {
        // PANIC: A `dma::CoherentAllocation` always contains at least one element.
        || -> Result<u32> { Ok(dma_read!(qs, [0]?.cpuq.rx.0.readPtr) % MSGQ_NUM_PAGES) }().unwrap()
    }

    /// Advances the CPU-side read pointer by `count` pages, wrapping around `MSGQ_NUM_PAGES`.
    pub(in crate::gsp) fn advance_cpu_read_ptr(qs: &CoherentAllocation<GspMem>, count: u32) {
        let rptr = cpu_read_ptr(qs).wrapping_add(count) % MSGQ_NUM_PAGES;

        // Ensure read pointer is properly ordered.
        fence(Ordering::SeqCst);

        // PANIC: A `dma::CoherentAllocation` always contains at least one element.
        || -> Result {
            dma_write!(qs, [0]?.cpuq.rx.0.readPtr, rptr);
            Ok(())
        }()
        .unwrap()
    }

    /// Returns the write pointer of the CPU-side TX queue, in pages.
    pub(in crate::gsp) fn cpu_write_ptr(qs: &CoherentAllocation<GspMem>) -> u32 {
        // PANIC: A `dma::CoherentAllocation` always contains at least one element.
        || -> Result<u32> { Ok(dma_read!(qs, [0]?.cpuq.tx.0.writePtr) % MSGQ_NUM_PAGES) }().unwrap()
    }

    /// Advances the CPU-side write pointer by `count` pages, wrapping around `MSGQ_NUM_PAGES`.
    pub(in crate::gsp) fn advance_cpu_write_ptr(qs: &CoherentAllocation<GspMem>, count: u32) {
        let wptr = cpu_write_ptr(qs).wrapping_add(count) % MSGQ_NUM_PAGES;

        // PANIC: A `dma::CoherentAllocation` always contains at least one element.
        || -> Result {
            dma_write!(qs, [0]?.cpuq.tx.0.writePtr, wptr);
            Ok(())
        }()
        .unwrap();

        // Ensure all command data is visible before triggering the GSP read.
        fence(Ordering::SeqCst);
    }
}

/// Maximum size of a single GSP message queue element in bytes.
pub(crate) const GSP_MSG_QUEUE_ELEMENT_SIZE_MAX: usize =
    num::u32_as_usize(bindings::GSP_MSG_QUEUE_ELEMENT_SIZE_MAX);

/// Empty type to group methods related to heap parameters for running the GSP firmware.
enum GspFwHeapParams {}

/// Minimum required alignment for the GSP heap.
const GSP_HEAP_ALIGNMENT: Alignment = Alignment::new::<{ 1 << 20 }>();

impl GspFwHeapParams {
    /// Returns the amount of GSP-RM heap memory used during GSP-RM boot and initialization (up to
    /// and including the first client subdevice allocation).
    fn base_rm_size(_chipset: Chipset) -> u64 {
        // TODO: this needs to be updated to return the correct value for Hopper+ once support for
        // them is added:
        // u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100)
        u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X)
    }

    /// Returns the amount of heap memory required to support a single channel allocation,
    /// rounded up to the GSP heap alignment.
    fn client_alloc_size() -> u64 {
        u64::from(bindings::GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE)
            .align_up(GSP_HEAP_ALIGNMENT)
            // Saturate rather than fail if the aligned value cannot be represented.
            .unwrap_or(u64::MAX)
    }

    /// Returns the amount of memory to reserve for management purposes for a framebuffer of size
    /// `fb_size` (scaled per gigabyte of framebuffer, rounded up).
    fn management_overhead(fb_size: u64) -> u64 {
        let fb_size_gb = fb_size.div_ceil(u64::from_safe_cast(kernel::sizes::SZ_1G));

        u64::from(bindings::GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB)
            .saturating_mul(fb_size_gb)
            .align_up(GSP_HEAP_ALIGNMENT)
            // Saturate rather than fail if the aligned value cannot be represented.
            .unwrap_or(u64::MAX)
    }
}

/// Heap memory requirements and constraints for a given version of the GSP LIBOS.
pub(crate) struct LibosParams {
    /// The base amount of heap required by the GSP operating system, in bytes.
    carveout_size: u64,
    /// The minimum and maximum sizes allowed for the GSP FW heap, in bytes.
    allowed_heap_size: Range<u64>,
}

impl LibosParams {
    /// Version 2 of the GSP LIBOS (Turing and GA100)
    const LIBOS2: LibosParams = LibosParams {
        carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2),
        allowed_heap_size: num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB)
            * num::usize_as_u64(SZ_1M)
            ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MAX_MB)
                * num::usize_as_u64(SZ_1M),
    };

    /// Version 3 of the GSP LIBOS (GA102+)
    const LIBOS3: LibosParams = LibosParams {
        carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL),
        allowed_heap_size: num::u32_as_u64(
            bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
        ) * num::usize_as_u64(SZ_1M)
            ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MAX_MB)
                * num::usize_as_u64(SZ_1M),
    };

    /// Returns the libos parameters corresponding to `chipset`.
    pub(crate) fn from_chipset(chipset: Chipset) -> &'static LibosParams {
        if chipset < Chipset::GA102 {
            &Self::LIBOS2
        } else {
            &Self::LIBOS3
        }
    }

    /// Returns the amount of memory (in bytes) to allocate for the WPR heap for a framebuffer size
    /// of `fb_size` (in bytes) for `chipset`.
    pub(crate) fn wpr_heap_size(&self, chipset: Chipset, fb_size: u64) -> u64 {
        // The WPR heap will contain the following:
        // LIBOS carveout,
        self.carveout_size
            // RM boot working memory,
            .saturating_add(GspFwHeapParams::base_rm_size(chipset))
            // One RM client,
            .saturating_add(GspFwHeapParams::client_alloc_size())
            // Overhead for memory management.
            .saturating_add(GspFwHeapParams::management_overhead(fb_size))
            // Clamp to the supported heap sizes (`allowed_heap_size.end` is exclusive).
            .clamp(self.allowed_heap_size.start, self.allowed_heap_size.end - 1)
    }
}

/// Structure passed to the GSP bootloader, containing the framebuffer layout as well as the DMA
/// addresses of the GSP bootloader and firmware.
#[repr(transparent)]
pub(crate) struct GspFwWprMeta(bindings::GspFwWprMeta);

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for GspFwWprMeta {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspFwWprMeta {}

// Shorthands for the bindgen-generated anonymous union/struct nested in `GspFwWprMeta`.
type GspFwWprMetaBootResumeInfo = bindings::GspFwWprMeta__bindgen_ty_1;
type GspFwWprMetaBootInfo = bindings::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;

impl GspFwWprMeta {
    /// Fill in and return a `GspFwWprMeta` suitable for booting `gsp_firmware` using the
    /// `fb_layout` layout.
    pub(crate) fn new(gsp_firmware: &GspFirmware, fb_layout: &FbLayout) -> Self {
        Self(bindings::GspFwWprMeta {
            // CAST: we want to store the bits of `GSP_FW_WPR_META_MAGIC` unmodified.
            magic: bindings::GSP_FW_WPR_META_MAGIC as u64,
            revision: u64::from(bindings::GSP_FW_WPR_META_REVISION),
            sysmemAddrOfRadix3Elf: gsp_firmware.radix3_dma_handle(),
            sizeOfRadix3Elf: u64::from_safe_cast(gsp_firmware.size),
            sysmemAddrOfBootloader: gsp_firmware.bootloader.ucode.dma_handle(),
            sizeOfBootloader: u64::from_safe_cast(gsp_firmware.bootloader.ucode.size()),
            bootloaderCodeOffset: u64::from(gsp_firmware.bootloader.code_offset),
            bootloaderDataOffset: u64::from(gsp_firmware.bootloader.data_offset),
            bootloaderManifestOffset: u64::from(gsp_firmware.bootloader.manifest_offset),
            __bindgen_anon_1: GspFwWprMetaBootResumeInfo {
                __bindgen_anon_1: GspFwWprMetaBootInfo {
                    sysmemAddrOfSignature: gsp_firmware.signatures.dma_handle(),
                    sizeOfSignature: u64::from_safe_cast(gsp_firmware.signatures.size()),
                },
            },
            gspFwRsvdStart: fb_layout.heap.start,
            nonWprHeapOffset: fb_layout.heap.start,
            nonWprHeapSize: fb_layout.heap.end - fb_layout.heap.start,
            gspFwWprStart: fb_layout.wpr2.start,
            gspFwHeapOffset: fb_layout.wpr2_heap.start,
            gspFwHeapSize: fb_layout.wpr2_heap.end - fb_layout.wpr2_heap.start,
            gspFwOffset: fb_layout.elf.start,
            bootBinOffset: fb_layout.boot.start,
            frtsOffset: fb_layout.frts.start,
            frtsSize: fb_layout.frts.end - fb_layout.frts.start,
            gspFwWprEnd: fb_layout
                .vga_workspace
                .start
                .align_down(Alignment::new::<SZ_128K>()),
            gspFwHeapVfPartitionCount: fb_layout.vf_partition_count,
            fbSize: fb_layout.fb.end - fb_layout.fb.start,
            vgaWorkspaceOffset: fb_layout.vga_workspace.start,
            vgaWorkspaceSize: fb_layout.vga_workspace.end - fb_layout.vga_workspace.start,
            ..Default::default()
        })
    }
}

/// Function codes of RPC messages exchanged with the GSP: commands sent by the driver and events
/// received from the GSP.
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(u32)]
pub(crate) enum MsgFunction {
    // Common function codes
    AllocChannelDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA,
    AllocCtxDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA,
    AllocDevice = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE,
    AllocMemory = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
    AllocObject = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT,
    AllocRoot = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT,
    BindCtxDma = bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA,
    ContinuationRecord = bindings::NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD,
    Free = bindings::NV_VGPU_MSG_FUNCTION_FREE,
    GetGspStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO,
    GetStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO,
    GspInitPostObjGpu = bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU,
    GspRmControl = bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
    GspSetSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO,
    Log = bindings::NV_VGPU_MSG_FUNCTION_LOG,
    MapMemory = bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY,
    Nop = bindings::NV_VGPU_MSG_FUNCTION_NOP,
    SetGuestSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO,
    SetRegistry = bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY,

    // Event codes
    GspInitDone = bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE,
    GspLockdownNotice = bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE,
    GspPostNoCat = bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD,
    GspRunCpuSequencer = bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
    MmuFaultQueued = bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
    OsErrorLog = bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG,
    PostEvent = bindings::NV_VGPU_MSG_EVENT_POST_EVENT,
    RcTriggered = bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED,
    UcodeLibOsPrint = bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT,
}

impl TryFrom<u32> for MsgFunction {
    type Error = kernel::error::Error;

    /// Converts a raw function code into a [`MsgFunction`], or `EINVAL` if the code is unknown.
    fn try_from(value: u32) -> Result<MsgFunction> {
        match value {
            // Common function codes
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA => Ok(MsgFunction::AllocChannelDma),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA => Ok(MsgFunction::AllocCtxDma),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE => Ok(MsgFunction::AllocDevice),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY => Ok(MsgFunction::AllocMemory),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT => Ok(MsgFunction::AllocObject),
            bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT => Ok(MsgFunction::AllocRoot),
            bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA => Ok(MsgFunction::BindCtxDma),
            bindings::NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD => {
                Ok(MsgFunction::ContinuationRecord)
            }
            bindings::NV_VGPU_MSG_FUNCTION_FREE => Ok(MsgFunction::Free),
            bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO => Ok(MsgFunction::GetGspStaticInfo),
            bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO => Ok(MsgFunction::GetStaticInfo),
            bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU => {
                Ok(MsgFunction::GspInitPostObjGpu)
            }
            bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL => Ok(MsgFunction::GspRmControl),
            bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO => Ok(MsgFunction::GspSetSystemInfo),
            bindings::NV_VGPU_MSG_FUNCTION_LOG => Ok(MsgFunction::Log),
            bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY => Ok(MsgFunction::MapMemory),
            bindings::NV_VGPU_MSG_FUNCTION_NOP => Ok(MsgFunction::Nop),
            bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO => {
                Ok(MsgFunction::SetGuestSystemInfo)
            }
            bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY => Ok(MsgFunction::SetRegistry),

            // Event codes
            bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE => Ok(MsgFunction::GspInitDone),
            bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE => Ok(MsgFunction::GspLockdownNotice),
            bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD => Ok(MsgFunction::GspPostNoCat),
            bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER => {
                Ok(MsgFunction::GspRunCpuSequencer)
            }
            bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED => Ok(MsgFunction::MmuFaultQueued),
            bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG => Ok(MsgFunction::OsErrorLog),
            bindings::NV_VGPU_MSG_EVENT_POST_EVENT => Ok(MsgFunction::PostEvent),
            bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED => Ok(MsgFunction::RcTriggered),
            bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT => Ok(MsgFunction::UcodeLibOsPrint),
            _ => Err(EINVAL),
        }
    }
}

impl From<MsgFunction> for u32 {
    fn from(value: MsgFunction) -> Self {
        // CAST: `MsgFunction` is `repr(u32)` and can thus be cast losslessly.
        value as u32
    }
}

/// Sequencer buffer opcode for GSP sequencer commands.
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(u32)]
pub(crate) enum SeqBufOpcode {
    // Core operation opcodes
    CoreReset = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
    CoreResume = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
    CoreStart = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
    CoreWaitForHalt = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,

    // Delay opcode
    DelayUs = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,

    // Register operation opcodes
    RegModify = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
    RegPoll = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
    RegStore = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
    RegWrite = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
}

impl TryFrom<u32> for SeqBufOpcode {
    type Error = kernel::error::Error;

    /// Converts a raw opcode into a [`SeqBufOpcode`], or `EINVAL` if the opcode is unknown.
    fn try_from(value: u32) -> Result<SeqBufOpcode> {
        match value {
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
                Ok(SeqBufOpcode::CoreReset)
            }
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
                Ok(SeqBufOpcode::CoreResume)
            }
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
                Ok(SeqBufOpcode::CoreStart)
            }
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
                Ok(SeqBufOpcode::CoreWaitForHalt)
            }
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
                Ok(SeqBufOpcode::RegModify)
            }
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
            bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
            _ => Err(EINVAL),
        }
    }
}

impl From<SeqBufOpcode> for u32 {
    fn from(value: SeqBufOpcode) -> Self {
        // CAST: `SeqBufOpcode` is `repr(u32)` and can thus be cast losslessly.
        value as u32
    }
}

/// Wrapper for GSP sequencer register write payload.
#[repr(transparent)]
#[derive(Copy, Clone, Debug)]
pub(crate) struct RegWritePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);

impl RegWritePayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the value to write.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegWritePayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegWritePayload {}

/// Wrapper for GSP sequencer register modify payload.
#[repr(transparent)]
#[derive(Copy, Clone, Debug)]
pub(crate) struct RegModifyPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);

impl RegModifyPayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the mask to apply.
    pub(crate) fn mask(&self) -> u32 {
        self.0.mask
    }

    /// Returns the value to write.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegModifyPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegModifyPayload {}

/// Wrapper for GSP sequencer register poll payload.
#[repr(transparent)]
#[derive(Copy, Clone, Debug)]
pub(crate) struct RegPollPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_POLL);

impl RegPollPayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the mask to apply.
    pub(crate) fn mask(&self) -> u32 {
        self.0.mask
    }

    /// Returns the expected value.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }

    /// Returns the timeout in microseconds.
    pub(crate) fn timeout(&self) -> u32 {
        self.0.timeout
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegPollPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegPollPayload {}

/// Wrapper for GSP sequencer delay payload.
#[repr(transparent)]
#[derive(Copy, Clone, Debug)]
pub(crate) struct DelayUsPayload(bindings::GSP_SEQ_BUF_PAYLOAD_DELAY_US);

impl DelayUsPayload {
    /// Returns the delay value in microseconds.
    pub(crate) fn val(&self) -> u32 {
        self.0.val
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for DelayUsPayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for DelayUsPayload {}

/// Wrapper for GSP sequencer register store payload.
#[repr(transparent)]
#[derive(Copy, Clone, Debug)]
pub(crate) struct RegStorePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_STORE);

impl RegStorePayload {
    /// Returns the register address.
    pub(crate) fn addr(&self) -> u32 {
        self.0.addr
    }

    /// Returns the storage index.
    #[allow(unused)]
    pub(crate) fn index(&self) -> u32 {
        self.0.index
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RegStorePayload {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RegStorePayload {}

/// Wrapper for GSP sequencer buffer command.
///
/// The payload is a union; accessors check the opcode before reading the corresponding union
/// member, so reading the wrong payload type for a command returns `EINVAL` instead of garbage.
#[repr(transparent)]
pub(crate) struct SequencerBufferCmd(bindings::GSP_SEQUENCER_BUFFER_CMD);

impl SequencerBufferCmd {
    /// Returns the opcode as a `SeqBufOpcode` enum, or error if invalid.
    pub(crate) fn opcode(&self) -> Result<SeqBufOpcode> {
        self.0.opCode.try_into()
    }

    /// Returns the register write payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegWrite`.
    pub(crate) fn reg_write_payload(&self) -> Result<RegWritePayload> {
        if self.opcode()? != SeqBufOpcode::RegWrite {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegWrite`, so union contains valid `RegWritePayload`.
        Ok(RegWritePayload(unsafe { self.0.payload.regWrite }))
    }

    /// Returns the register modify payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegModify`.
    pub(crate) fn reg_modify_payload(&self) -> Result<RegModifyPayload> {
        if self.opcode()? != SeqBufOpcode::RegModify {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegModify`, so union contains valid `RegModifyPayload`.
        Ok(RegModifyPayload(unsafe { self.0.payload.regModify }))
    }

    /// Returns the register poll payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegPoll`.
    pub(crate) fn reg_poll_payload(&self) -> Result<RegPollPayload> {
        if self.opcode()? != SeqBufOpcode::RegPoll {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegPoll`, so union contains valid `RegPollPayload`.
        Ok(RegPollPayload(unsafe { self.0.payload.regPoll }))
    }

    /// Returns the delay payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::DelayUs`.
    pub(crate) fn delay_us_payload(&self) -> Result<DelayUsPayload> {
        if self.opcode()? != SeqBufOpcode::DelayUs {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `DelayUs`, so union contains valid `DelayUsPayload`.
        Ok(DelayUsPayload(unsafe { self.0.payload.delayUs }))
    }

    /// Returns the register store payload by value.
    ///
    /// Returns an error if the opcode is not `SeqBufOpcode::RegStore`.
    pub(crate) fn reg_store_payload(&self) -> Result<RegStorePayload> {
        if self.opcode()? != SeqBufOpcode::RegStore {
            return Err(EINVAL);
        }
        // SAFETY: Opcode is verified to be `RegStore`, so union contains valid `RegStorePayload`.
        Ok(RegStorePayload(unsafe { self.0.payload.regStore }))
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for SequencerBufferCmd {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for SequencerBufferCmd {}

/// Wrapper for GSP run CPU sequencer RPC.
#[repr(transparent)]
pub(crate) struct RunCpuSequencer(bindings::rpc_run_cpu_sequencer_v17_00);

impl RunCpuSequencer {
    /// Returns the command index.
    pub(crate) fn cmd_index(&self) -> u32 {
        self.0.cmdIndex
    }
}

// SAFETY: This struct only contains integer types for which all bit patterns are valid.
unsafe impl FromBytes for RunCpuSequencer {}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for RunCpuSequencer {}

/// Struct containing the arguments required to pass a memory buffer to the GSP
/// for use during initialisation.
///
/// The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
/// configured for a larger page size (e.g. 64K pages), we need to give
/// the GSP an array of 4K pages. Since we only create physically contiguous
/// buffers the math to calculate the addresses is simple.
///
/// The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently
/// ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
/// buffers to be physically contiguous anyway.
///
/// The memory allocated for the arguments must remain until the GSP sends the
/// init_done RPC.
#[repr(transparent)]
pub(crate) struct LibosMemoryRegionInitArgument(bindings::LibosMemoryRegionInitArgument);

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for LibosMemoryRegionInitArgument {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for LibosMemoryRegionInitArgument {}

impl LibosMemoryRegionInitArgument {
    /// Creates an init argument describing the coherent allocation `obj`, identified by `name`.
    pub(crate) fn new<A: AsBytes + FromBytes>(
        name: &'static str,
        obj: &CoherentAllocation<A>,
    ) -> Self {
        /// Generates the `ID8` identifier required for some GSP objects.
        fn id8(name: &str) -> u64 {
            // Pack the (up to 8) bytes of `name` into a `u64`, last byte first.
            let mut bytes = [0u8; core::mem::size_of::<u64>()];

            for (c, b) in name.bytes().rev().zip(&mut bytes) {
                *b = c;
            }

            u64::from_ne_bytes(bytes)
        }

        Self(bindings::LibosMemoryRegionInitArgument {
            id8: id8(name),
            pa: obj.dma_handle(),
            size: num::usize_as_u64(obj.size()),
            kind: num::u32_into_u8::<
                { bindings::LibosMemoryRegionKind_LIBOS_MEMORY_REGION_CONTIGUOUS },
            >(),
            loc: num::u32_into_u8::<
                { bindings::LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM },
            >(),
            ..Default::default()
        })
    }
}

/// TX header for setting up a message queue with the GSP.
#[repr(transparent)]
pub(crate) struct MsgqTxHeader(bindings::msgqTxHeader);

impl MsgqTxHeader {
    /// Create a new TX queue header.
    ///
    /// # Arguments
    ///
    /// * `msgq_size` - Total size of the message queue structure, in bytes.
    /// * `rx_hdr_offset` - Offset, in bytes, of the start of the RX header in the message queue
    /// structure.
    /// * `msg_count` - Number of messages that can be sent, i.e. the number of memory pages
    /// allocated for the message queue in the message queue structure.
    pub(crate) fn new(msgq_size: u32, rx_hdr_offset: u32, msg_count: u32) -> Self {
        Self(bindings::msgqTxHeader {
            version: 0,
            size: msgq_size,
            // Each message element is one GSP page.
            msgSize: num::usize_into_u32::<GSP_PAGE_SIZE>(),
            msgCount: msg_count,
            writePtr: 0,
            // NOTE(review): flags value `1` mirrors what GSP-RM expects at queue init — confirm
            // its exact meaning against the firmware headers.
            flags: 1,
            rxHdrOff: rx_hdr_offset,
            // Message entries start one GSP page after the header.
            entryOff: num::usize_into_u32::<GSP_PAGE_SIZE>(),
        })
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for MsgqTxHeader {}

/// RX header for setting up a message queue with the GSP.
#[repr(transparent)]
pub(crate) struct MsgqRxHeader(bindings::msgqRxHeader);

/// Header for the message RX queue.
impl MsgqRxHeader {
    /// Creates a new RX queue header, with all fields zero-initialized.
    pub(crate) fn new() -> Self {
        Self(Default::default())
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for MsgqRxHeader {}

// Version field of an RPC message header: major version in bits 31:24, minor in bits 23:16.
bitfield! {
    struct MsgHeaderVersion(u32) {
        31:24 major as u8;
        23:16 minor as u8;
    }
}

impl MsgHeaderVersion {
    /// Major version of the RPC message header format used by this driver.
    const MAJOR_TOT: u8 = 3;
    /// Minor version of the RPC message header format used by this driver.
    const MINOR_TOT: u8 = 0;

    /// Returns the message header version used by this driver (3.0).
    fn new() -> Self {
        Self::default()
            .set_major(Self::MAJOR_TOT)
            .set_minor(Self::MINOR_TOT)
    }
}

impl bindings::rpc_message_header_v {
    /// Returns an initializer for an RPC message header carrying `function`, with a total length
    /// of this header plus `cmd_size` bytes.
    ///
    /// Fails with `EOVERFLOW` if the total length overflows `usize`, or `EINVAL` if it does not
    /// fit into the header's length field.
    fn init(cmd_size: usize, function: MsgFunction) -> impl Init<Self, Error> {
        type RpcMessageHeader = bindings::rpc_message_header_v;

        try_init!(RpcMessageHeader {
            header_version: MsgHeaderVersion::new().into(),
            signature: bindings::NV_VGPU_MSG_SIGNATURE_VALID,
            function: function.into(),
            length: size_of::<Self>()
                .checked_add(cmd_size)
                .ok_or(EOVERFLOW)
                .and_then(|v| v.try_into().map_err(|_| EINVAL))?,
            rpc_result: 0xffffffff,
            rpc_result_private: 0xffffffff,
            ..Zeroable::init_zeroed()
        })
    }
}

/// GSP Message Element.
///
/// This is essentially a message header expected to be followed by the message data.
#[repr(transparent)]
pub(crate) struct GspMsgElement {
    inner: bindings::GSP_MSG_QUEUE_ELEMENT,
}

impl GspMsgElement {
    /// Creates a new message element.
    ///
    /// # Arguments
    ///
    /// * `sequence` - Sequence number of the message.
    /// * `cmd_size` - Size of the command (not including the message element), in bytes.
    /// * `function` - Function of the message.
    #[allow(non_snake_case)]
    pub(crate) fn init(
        sequence: u32,
        cmd_size: usize,
        function: MsgFunction,
    ) -> impl Init<Self, Error> {
        type RpcMessageHeader = bindings::rpc_message_header_v;
        type InnerGspMsgElement = bindings::GSP_MSG_QUEUE_ELEMENT;
        let init_inner = try_init!(InnerGspMsgElement {
            seqNum: sequence,
            // Number of GSP pages needed for the element header plus the command.
            elemCount: size_of::<Self>()
                .checked_add(cmd_size)
                .ok_or(EOVERFLOW)?
                .div_ceil(GSP_PAGE_SIZE)
                .try_into()
                .map_err(|_| EOVERFLOW)?,
            rpc <- RpcMessageHeader::init(cmd_size, function),
            ..Zeroable::init_zeroed()
        });

        try_init!(GspMsgElement {
            inner <- init_inner,
        })
    }

    /// Sets the checksum of this message.
    ///
    /// Since the header is also part of the checksum, this is usually called after the whole
    /// message has been written to the shared memory area.
    pub(crate) fn set_checksum(&mut self, checksum: u32) {
        self.inner.checkSum = checksum;
    }

    /// Returns the length of the message's payload.
    pub(crate) fn payload_length(&self) -> usize {
        // `rpc.length` includes the length of the RPC message header.
        num::u32_as_usize(self.inner.rpc.length)
            .saturating_sub(size_of::<bindings::rpc_message_header_v>())
    }

    /// Returns the total length of the message, message and RPC headers included.
    pub(crate) fn length(&self) -> usize {
        size_of::<Self>() + self.payload_length()
    }

    /// Returns the sequence number of the message.
    ///
    /// NOTE(review): this reads `rpc.sequence` from the RPC header, not the element's `seqNum`
    /// field set by [`GspMsgElement::init`] — confirm this is the intended field for received
    /// messages.
    pub(crate) fn sequence(&self) -> u32 {
        self.inner.rpc.sequence
    }

    /// Returns the function of the message, if it is valid, or the invalid function number as an
    /// error.
    pub(crate) fn function(&self) -> Result<MsgFunction, u32> {
        self.inner
            .rpc
            .function
            .try_into()
            .map_err(|_| self.inner.rpc.function)
    }

    /// Returns the number of elements (i.e. memory pages) used by this message.
    pub(crate) fn element_count(&self) -> u32 {
        self.inner.elemCount
    }
}

// SAFETY: Padding is explicit and does not contain uninitialized data.
unsafe impl AsBytes for GspMsgElement {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspMsgElement {}

/// Arguments for GSP startup.
#[repr(transparent)]
pub(crate) struct GspArgumentsCached(bindings::GSP_ARGUMENTS_CACHED);

impl GspArgumentsCached {
    /// Creates the arguments for starting the GSP up using `cmdq` as its command queue.
    pub(crate) fn new(cmdq: &Cmdq) -> Self {
        Self(bindings::GSP_ARGUMENTS_CACHED {
            messageQueueInitArguments: MessageQueueInitArguments::new(cmdq).0,
            bDmemStack: 1,
            ..Default::default()
        })
    }
}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for GspArgumentsCached {}

/// On Turing and GA100, the entries in the `LibosMemoryRegionInitArgument`
/// must all be a multiple of GSP_PAGE_SIZE in size, so add padding to force it
/// to that size.
#[repr(C)]
pub(crate) struct GspArgumentsPadded {
    /// The actual GSP arguments.
    pub(crate) inner: GspArgumentsCached,
    // Pad the structure up to exactly one GSP page.
    _padding: [u8; GSP_PAGE_SIZE - core::mem::size_of::<bindings::GSP_ARGUMENTS_CACHED>()],
}

// SAFETY: Padding is explicit and will not contain uninitialized data.
unsafe impl AsBytes for GspArgumentsPadded {}

// SAFETY: This struct only contains integer types for which all bit patterns
// are valid.
unsafe impl FromBytes for GspArgumentsPadded {}

/// Init arguments for the message queue.
#[repr(transparent)]
struct MessageQueueInitArguments(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS);

impl MessageQueueInitArguments {
    /// Creates a new init arguments structure for `cmdq`.
    fn new(cmdq: &Cmdq) -> Self {
        Self(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS {
            sharedMemPhysAddr: cmdq.dma_handle(),
            pageTableEntryCount: num::usize_into_u32::<{ Cmdq::NUM_PTES }>(),
            cmdQueueOffset: num::usize_as_u64(Cmdq::CMDQ_OFFSET),
            statQueueOffset: num::usize_as_u64(Cmdq::STATQ_OFFSET),
            ..Default::default()
        })
    }
}