Lines Matching full:sdma
30 /* SDMA CSA resides in the 3rd page of CSA */
34 * GPU SDMA IP block helper functions.
42 for (i = 0; i < adev->sdma.num_instances; i++) in amdgpu_sdma_get_instance_from_ring()
43 if (ring == &adev->sdma.instance[i].ring || in amdgpu_sdma_get_instance_from_ring()
44 ring == &adev->sdma.instance[i].page) in amdgpu_sdma_get_instance_from_ring()
45 return &adev->sdma.instance[i]; in amdgpu_sdma_get_instance_from_ring()
55 for (i = 0; i < adev->sdma.num_instances; i++) { in amdgpu_sdma_get_index_from_ring()
56 if (ring == &adev->sdma.instance[i].ring || in amdgpu_sdma_get_index_from_ring()
57 ring == &adev->sdma.instance[i].page) { in amdgpu_sdma_get_index_from_ring()
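
The two lookups above share one pattern: walk adev->sdma.instance[] and match the ring by address against either the regular queue or the page queue, returning the instance (or its index). Below is a minimal standalone sketch of that pattern; the types sdma_ring, sdma_instance, and sdma_dev are simplified stand-ins, not the real driver structures.

#include <stddef.h>
#include <stdio.h>

struct sdma_ring { int idx; };

struct sdma_instance {
        struct sdma_ring ring;   /* regular queue */
        struct sdma_ring page;   /* page queue */
};

struct sdma_dev {
        int num_instances;
        struct sdma_instance instance[8];
};

/* Walk the instance array and match the ring by address, as the
 * amdgpu_sdma_get_instance_from_ring() hits above do. */
static struct sdma_instance *
get_instance_from_ring(struct sdma_dev *dev, struct sdma_ring *ring)
{
        int i;

        for (i = 0; i < dev->num_instances; i++)
                if (ring == &dev->instance[i].ring ||
                    ring == &dev->instance[i].page)
                        return &dev->instance[i];
        return NULL;
}

int main(void)
{
        struct sdma_dev dev = { .num_instances = 2 };
        struct sdma_ring *r = &dev.instance[1].page;

        printf("instance: %td\n",
               get_instance_from_ring(&dev, r) - dev.instance);
        return 0;
}

The index variant works the same way but writes i to an out-parameter instead of returning the instance pointer.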
74 /* don't enable OS preemption on SDMA under SRIOV */ in amdgpu_sdma_get_csa_mc_addr()
82 sdma[ring->idx].sdma_meta_data); in amdgpu_sdma_get_csa_mc_addr()
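
The hit at 82 is the member designator of an offsetof() call: amdgpu_sdma_get_csa_mc_addr() resolves a per-ring save-area address by adding the offset of that ring's metadata slot to the CSA base (the driver's offsetof() uses a runtime ring index, which relies on compiler support for non-constant indices). A portable sketch of the same arithmetic follows; struct ctx_meta_data and its layout are hypothetical, not the real struct amdgpu_mes_ctx_meta_data.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's context metadata layout. */
struct sdma_meta { uint64_t sdma_meta_data; uint64_t pad[7]; };

struct ctx_meta_data { struct sdma_meta sdma[16]; };

/* base + offset of this ring's slot; equivalent to the driver's
 * offsetof(..., sdma[ring->idx].sdma_meta_data), decomposed so the
 * array index need not be a compile-time constant. */
static uint64_t csa_mc_addr(uint64_t csa_base, unsigned int ring_idx)
{
        return csa_base + offsetof(struct ctx_meta_data, sdma) +
               ring_idx * sizeof(struct sdma_meta) +
               offsetof(struct sdma_meta, sdma_meta_data);
}

int main(void)
{
        printf("ring 3 slot at 0x%llx\n",
               (unsigned long long)csa_mc_addr(0x100000, 3));
        return 0;
}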
108 for (i = 0; i < adev->sdma.num_instances; i++) { in amdgpu_sdma_ras_late_init()
109 r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, in amdgpu_sdma_ras_late_init()
141 struct ras_common_if *ras_if = adev->sdma.ras_if; in amdgpu_sdma_process_ecc_irq()
198 for (i = 0; i < adev->sdma.num_instances; i++) { in amdgpu_sdma_destroy_inst_ctx()
199 amdgpu_ucode_release(&adev->sdma.instance[i].fw); in amdgpu_sdma_destroy_inst_ctx()
204 memset((void *)adev->sdma.instance, 0, in amdgpu_sdma_destroy_inst_ctx()
221 err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw, in amdgpu_sdma_init_microcode()
225 err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw, in amdgpu_sdma_init_microcode()
232 adev->sdma.instance[instance].fw->data; in amdgpu_sdma_init_microcode()
240 err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]); in amdgpu_sdma_init_microcode()
245 for (i = 1; i < adev->sdma.num_instances; i++) in amdgpu_sdma_init_microcode()
246 memcpy((void *)&adev->sdma.instance[i], in amdgpu_sdma_init_microcode()
247 (void *)&adev->sdma.instance[0], in amdgpu_sdma_init_microcode()
257 for (i = 0; i < adev->sdma.num_instances; i++) { in amdgpu_sdma_init_microcode()
261 /* Use a single copy per SDMA firmware type. PSP uses the same instance for all in amdgpu_sdma_init_microcode()
271 adev->sdma.num_inst_per_aid == i) { in amdgpu_sdma_init_microcode()
276 info->fw = adev->sdma.instance[i].fw; in amdgpu_sdma_init_microcode()
284 adev->sdma.instance[0].fw->data; in amdgpu_sdma_init_microcode()
287 info->fw = adev->sdma.instance[0].fw; in amdgpu_sdma_init_microcode()
292 info->fw = adev->sdma.instance[0].fw; in amdgpu_sdma_init_microcode()
298 adev->sdma.instance[0].fw->data; in amdgpu_sdma_init_microcode()
301 info->fw = adev->sdma.instance[0].fw; in amdgpu_sdma_init_microcode()
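
When every SDMA engine runs the same firmware image, the init path parses it once for instance 0 and then copies that context over the remaining instances (the i = 1 memcpy loop at 245-247 above); only ASICs with per-instance images, or the per-AID split at 271, keep more than one copy, which is also why the ucode-info hits at 284-301 all point at instance[0].fw. A toy sketch of the duplication step, with a hypothetical sdma_fw_ctx standing in for struct amdgpu_sdma_instance:

#include <string.h>
#include <stdio.h>

struct sdma_fw_ctx {
        const void *fw;          /* shared firmware image */
        unsigned int fw_version; /* parsed from the common header */
};

/* Parse once for instance 0, then replicate the context, as the
 * memcpy loop in amdgpu_sdma_init_microcode() does. */
static void duplicate_fw_ctx(struct sdma_fw_ctx *inst, int num_instances)
{
        int i;

        for (i = 1; i < num_instances; i++)
                memcpy(&inst[i], &inst[0], sizeof(inst[0]));
}

int main(void)
{
        struct sdma_fw_ctx inst[4] = { { .fw = "image", .fw_version = 42 } };

        duplicate_fw_ctx(inst, 4);
        printf("instance 3 fw_version = %u\n", inst[3].fw_version);
        return 0;
}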
321 /* adev->sdma.ras is NULL, which means sdma does not in amdgpu_sdma_ras_sw_init()
324 if (!adev->sdma.ras) in amdgpu_sdma_ras_sw_init()
327 ras = adev->sdma.ras; in amdgpu_sdma_ras_sw_init()
331 dev_err(adev->dev, "Failed to register sdma ras block!\n"); in amdgpu_sdma_ras_sw_init()
335 strcpy(ras->ras_block.ras_comm.name, "sdma"); in amdgpu_sdma_ras_sw_init()
338 adev->sdma.ras_if = &ras->ras_block.ras_comm; in amdgpu_sdma_ras_sw_init()
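
amdgpu_sdma_ras_sw_init() follows the driver's usual optional-feature pattern: bail out early when the IP version provides no RAS implementation (adev->sdma.ras == NULL), otherwise register the block, name it "sdma", and cache the common interface pointer for the ECC IRQ path at 141. A stripped-down sketch of that early-return flow; the types here are invented stand-ins, and the real path also calls amdgpu_ras_register_ras_block() and logs on failure, which is omitted.

#include <stdio.h>
#include <string.h>

/* Invented stand-ins for ras_common_if / struct amdgpu_sdma_ras. */
struct ras_common_if { char name[32]; };
struct sdma_ras { struct ras_common_if ras_comm; };

struct sdma_dev {
        struct sdma_ras *ras;          /* NULL if the IP has no RAS */
        struct ras_common_if *ras_if;  /* cached for the ECC IRQ handler */
};

static int sdma_ras_sw_init(struct sdma_dev *dev)
{
        /* dev->ras == NULL means this SDMA version does not support RAS */
        if (!dev->ras)
                return 0;

        strcpy(dev->ras->ras_comm.name, "sdma");
        dev->ras_if = &dev->ras->ras_comm;
        return 0;
}

int main(void)
{
        struct sdma_ras ras;
        struct sdma_dev dev = { .ras = &ras };

        sdma_ras_sw_init(&dev);
        printf("ras block: %s\n", dev.ras_if->name);
        return 0;
}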
352 * debugfs to enable/disable sdma job submission to a specific core.
365 mask = BIT_ULL(adev->sdma.num_instances) - 1; in amdgpu_debugfs_sdma_sched_mask_set()
369 for (i = 0; i < adev->sdma.num_instances; ++i) { in amdgpu_debugfs_sdma_sched_mask_set()
370 ring = &adev->sdma.instance[i].ring; in amdgpu_debugfs_sdma_sched_mask_set()
390 for (i = 0; i < adev->sdma.num_instances; ++i) { in amdgpu_debugfs_sdma_sched_mask_get()
391 ring = &adev->sdma.instance[i].ring; in amdgpu_debugfs_sdma_sched_mask_get()
413 if (adev->sdma.num_instances <= 1) in amdgpu_debugfs_sdma_sched_mask_init()
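
The sched-mask plumbing validates user input against a bitmask with one bit per instance: mask = BIT_ULL(num_instances) - 1 (hit at 365), reject anything outside it, then walk the rings to apply or read back each bit; the debugfs file is only created when there is more than one instance (413). A self-contained sketch of the validation step, with the error code simplified (the driver returns -EINVAL):

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

/* Reject any mask bit that does not correspond to an SDMA instance,
 * mirroring the check in amdgpu_debugfs_sdma_sched_mask_set(). */
static int set_sched_mask(uint64_t val, int num_instances, uint64_t *out)
{
        uint64_t mask = BIT_ULL(num_instances) - 1;

        if (val & ~mask)
                return -1; /* -EINVAL in the driver */
        *out = val;
        return 0;
}

int main(void)
{
        uint64_t cur = 0;

        printf("0x5 on 4 instances: %d\n", set_sched_mask(0x5, 4, &cur));
        printf("0x10 on 4 instances: %d\n", set_sched_mask(0x10, 4, &cur));
        return 0;
}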
431 return amdgpu_show_reset_mask(buf, adev->sdma.supported_reset); in amdgpu_get_sdma_reset_mask()
444 if (adev->sdma.num_instances) { in amdgpu_sdma_sysfs_reset_mask_init()
459 if (adev->sdma.num_instances) in amdgpu_sdma_sysfs_reset_mask_fini()
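
The reset-mask sysfs pair at 444 and 459 is symmetric: the attribute is created only when at least one SDMA instance exists, and the fini path repeats the same guard so remove is never called for a file that was never created. A tiny sketch of that guarded create/remove pairing; attr_create()/attr_remove() are hypothetical stand-ins for device_create_file()/device_remove_file().

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the sysfs attribute calls. */
static bool attr_created;

static int  attr_create(void) { attr_created = true;  return 0; }
static void attr_remove(void) { attr_created = false; }

/* Create the attribute only when there is something to reset ... */
static int reset_mask_init(int num_instances)
{
        if (num_instances)
                return attr_create();
        return 0;
}

/* ... and mirror the exact same guard on teardown. */
static void reset_mask_fini(int num_instances)
{
        if (num_instances)
                attr_remove();
}

int main(void)
{
        reset_mask_init(2);
        printf("created: %d\n", attr_created);
        reset_mask_fini(2);
        printf("created: %d\n", attr_created);
        return 0;
}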