// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include "amdgpu.h"
#include "soc15_common.h"
#include "soc21.h"
#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"

#include "amdgpu_umsch_mm.h"
#include "umsch_mm_4_0_api_def.h"
#include "umsch_mm_v4_0.h"

#define regUVD_IPX_DLDO_CONFIG                          0x0064
#define regUVD_IPX_DLDO_CONFIG_BASE_IDX                 1
#define regUVD_IPX_DLDO_STATUS                          0x0065
#define regUVD_IPX_DLDO_STATUS_BASE_IDX                 1

#define UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT     0x00000002
#define UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG_MASK       0x0000000cUL
#define UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS__SHIFT     0x00000001
#define UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS_MASK       0x00000002UL

static int umsch_mm_v4_0_load_microcode(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_device *adev = umsch->ring.adev;
	uint64_t data;
	int r;

	r = amdgpu_umsch_mm_allocate_ucode_buffer(umsch);
	if (r)
		return r;

	r = amdgpu_umsch_mm_allocate_ucode_data_buffer(umsch);
	if (r)
		goto err_free_ucode_bo;

	umsch->cmd_buf_curr_ptr = umsch->cmd_buf_ptr;

	if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
		WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
			1 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
			0 << UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS_MASK);
	}

	data = RREG32_SOC15(VCN, 0, regUMSCH_MES_RESET_CTRL);
	data = REG_SET_FIELD(data, UMSCH_MES_RESET_CTRL, MES_CORE_SOFT_RESET, 0);
	WREG32_SOC15_UMSCH(regUMSCH_MES_RESET_CTRL, data);

	data = RREG32_SOC15(VCN, 0, regVCN_MES_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_INVALIDATE_ICACHE, 1);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_RESET, 1);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_ACTIVE, 0);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_HALT, 1);
	WREG32_SOC15_UMSCH(regVCN_MES_CNTL, data);

	data = RREG32_SOC15(VCN, 0, regVCN_MES_IC_BASE_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_IC_BASE_CNTL, VMID, 0);
	data = REG_SET_FIELD(data, VCN_MES_IC_BASE_CNTL, EXE_DISABLE, 0);
	data = REG_SET_FIELD(data, VCN_MES_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15_UMSCH(regVCN_MES_IC_BASE_CNTL, data);
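
	/*
	 * Program the MES core entry points: the interrupt-routine and
	 * program-counter start addresses below are shifted right by 2,
	 * presumably because the core fetches dword-addressed code (an
	 * inference from the shift, not stated in this file).
	 */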
	WREG32_SOC15_UMSCH(regVCN_MES_INTR_ROUTINE_START,
		lower_32_bits(adev->umsch_mm.irq_start_addr >> 2));
	WREG32_SOC15_UMSCH(regVCN_MES_INTR_ROUTINE_START_HI,
		upper_32_bits(adev->umsch_mm.irq_start_addr >> 2));

	WREG32_SOC15_UMSCH(regVCN_MES_PRGRM_CNTR_START,
		lower_32_bits(adev->umsch_mm.uc_start_addr >> 2));
	WREG32_SOC15_UMSCH(regVCN_MES_PRGRM_CNTR_START_HI,
		upper_32_bits(adev->umsch_mm.uc_start_addr >> 2));

	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_INSTR_BASE_LO, 0);
	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_INSTR_BASE_HI, 0);

	data = adev->umsch_mm.uc_start_addr + adev->umsch_mm.ucode_size - 1;
	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_INSTR_MASK_LO, lower_32_bits(data));
	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_INSTR_MASK_HI, upper_32_bits(data));

	data = adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ?
	       0 : adev->umsch_mm.ucode_fw_gpu_addr;
	WREG32_SOC15_UMSCH(regVCN_MES_IC_BASE_LO, lower_32_bits(data));
	WREG32_SOC15_UMSCH(regVCN_MES_IC_BASE_HI, upper_32_bits(data));

	WREG32_SOC15_UMSCH(regVCN_MES_MIBOUND_LO, 0x1FFFFF);

	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_BASE0_LO,
		lower_32_bits(adev->umsch_mm.data_start_addr));
	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_BASE0_HI,
		upper_32_bits(adev->umsch_mm.data_start_addr));

	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_MASK0_LO,
		lower_32_bits(adev->umsch_mm.data_size - 1));
	WREG32_SOC15_UMSCH(regVCN_MES_LOCAL_MASK0_HI,
		upper_32_bits(adev->umsch_mm.data_size - 1));

	data = adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ?
	       0 : adev->umsch_mm.data_fw_gpu_addr;
	WREG32_SOC15_UMSCH(regVCN_MES_DC_BASE_LO, lower_32_bits(data));
	WREG32_SOC15_UMSCH(regVCN_MES_DC_BASE_HI, upper_32_bits(data));

	WREG32_SOC15_UMSCH(regVCN_MES_MDBOUND_LO, 0x3FFFF);

	data = RREG32_SOC15(VCN, 0, regUVD_UMSCH_FORCE);
	data = REG_SET_FIELD(data, UVD_UMSCH_FORCE, IC_FORCE_GPUVM, 1);
	data = REG_SET_FIELD(data, UVD_UMSCH_FORCE, DC_FORCE_GPUVM, 1);
	WREG32_SOC15_UMSCH(regUVD_UMSCH_FORCE, data);

	data = RREG32_SOC15(VCN, 0, regVCN_MES_IC_OP_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
	data = REG_SET_FIELD(data, VCN_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15_UMSCH(regVCN_MES_IC_OP_CNTL, data);

	data = RREG32_SOC15(VCN, 0, regVCN_MES_IC_OP_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15_UMSCH(regVCN_MES_IC_OP_CNTL, data);

	WREG32_SOC15_UMSCH(regVCN_MES_GP0_LO, 0);
	WREG32_SOC15_UMSCH(regVCN_MES_GP0_HI, 0);

#if defined(CONFIG_DEBUG_FS)
	WREG32_SOC15_UMSCH(regVCN_MES_GP0_LO, lower_32_bits(umsch->log_gpu_addr));
	WREG32_SOC15_UMSCH(regVCN_MES_GP0_HI, upper_32_bits(umsch->log_gpu_addr));
#endif

	WREG32_SOC15_UMSCH(regVCN_MES_GP1_LO, 0);
	WREG32_SOC15_UMSCH(regVCN_MES_GP1_HI, 0);

	data = RREG32_SOC15(VCN, 0, regVCN_MES_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_INVALIDATE_ICACHE, 0);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_RESET, 0);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_HALT, 0);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_ACTIVE, 1);
	WREG32_SOC15_UMSCH(regVCN_MES_CNTL, data);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
		amdgpu_umsch_mm_psp_execute_cmd_buf(umsch);
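
	/*
	 * Wait for the firmware boot handshake: 0xAAAAAAAA in
	 * VCN_MES_MSTATUS_LO appears to serve as the firmware's ready
	 * signature (an assumption based on the poll below; the value is
	 * not documented in this file).
	 */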
	r = SOC15_WAIT_ON_RREG(VCN, 0, regVCN_MES_MSTATUS_LO, 0xAAAAAAAA, 0xFFFFFFFF);
	if (r) {
		dev_err(adev->dev, "UMSCH FW Load: Failed, regVCN_MES_MSTATUS_LO: 0x%08x\n",
			RREG32_SOC15(VCN, 0, regVCN_MES_MSTATUS_LO));
		goto err_free_data_bo;
	}

	return 0;

err_free_data_bo:
	amdgpu_bo_free_kernel(&adev->umsch_mm.data_fw_obj,
			      &adev->umsch_mm.data_fw_gpu_addr,
			      (void **)&adev->umsch_mm.data_fw_ptr);
err_free_ucode_bo:
	amdgpu_bo_free_kernel(&adev->umsch_mm.ucode_fw_obj,
			      &adev->umsch_mm.ucode_fw_gpu_addr,
			      (void **)&adev->umsch_mm.ucode_fw_ptr);
	return r;
}

static void umsch_mm_v4_0_aggregated_doorbell_init(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_device *adev = umsch->ring.adev;
	uint32_t data;

	data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL0);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL0, OFFSET,
		umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_REALTIME]);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL0, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL0, data);

	data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL1);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL1, OFFSET,
		umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_FOCUS]);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL1, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL1, data);

	data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL2);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL2, OFFSET,
		umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL]);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL2, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL2, data);

	data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL3);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL3, OFFSET,
		umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_IDLE]);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL3, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL3, data);
}

static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_ring *ring = &umsch->ring;
	struct amdgpu_device *adev = ring->adev;
	uint32_t data;

	data = RREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL);
	data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, OFFSET, ring->doorbell_index);
	data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data);

	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
		(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);

	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_BASE_HI, upper_32_bits(ring->gpu_addr));

	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);

	ring->wptr = 0;

	data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
	data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
	WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);

	umsch_mm_v4_0_aggregated_doorbell_init(umsch);

	return 0;
}

static int umsch_mm_v4_0_ring_stop(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_ring *ring = &umsch->ring;
	struct amdgpu_device *adev = ring->adev;
	uint32_t data;

	data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
	data = REG_SET_FIELD(data, VCN_RB_ENABLE, UMSCH_RB_EN, 0);
	WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);

	data = RREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL);
	data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 0);
	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data);
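
	/*
	 * Inverse of the power-up sequence in umsch_mm_v4_0_load_microcode:
	 * on VCN 4.0.5 and newer, request power-down of the ONO0 island
	 * (CONFIG field = 2) and wait for the status bit to confirm it.
	 */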
	if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
		WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
			2 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS_MASK);
	}

	return 0;
}

static int umsch_mm_v4_0_set_hw_resources(struct amdgpu_umsch_mm *umsch)
{
	union UMSCHAPI__SET_HW_RESOURCES set_hw_resources = {};
	struct amdgpu_device *adev = umsch->ring.adev;
	int r;

	set_hw_resources.header.type = UMSCH_API_TYPE_SCHEDULER;
	set_hw_resources.header.opcode = UMSCH_API_SET_HW_RSRC;
	set_hw_resources.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	set_hw_resources.vmid_mask_mm_vcn = umsch->vmid_mask_mm_vcn;
	set_hw_resources.vmid_mask_mm_vpe = umsch->vmid_mask_mm_vpe;
	set_hw_resources.collaboration_mask_vpe =
		adev->vpe.collaborate_mode ? 0x3 : 0x0;
	set_hw_resources.engine_mask = umsch->engine_mask;

	set_hw_resources.vcn0_hqd_mask[0] = umsch->vcn0_hqd_mask;
	set_hw_resources.vcn1_hqd_mask[0] = umsch->vcn1_hqd_mask;
	set_hw_resources.vcn_hqd_mask[0] = umsch->vcn_hqd_mask[0];
	set_hw_resources.vcn_hqd_mask[1] = umsch->vcn_hqd_mask[1];
	set_hw_resources.vpe_hqd_mask[0] = umsch->vpe_hqd_mask;

	set_hw_resources.g_sch_ctx_gpu_mc_ptr = umsch->sch_ctx_gpu_addr;

	set_hw_resources.enable_level_process_quantum_check = 1;

	memcpy(set_hw_resources.mmhub_base, adev->reg_offset[MMHUB_HWIP][0],
	       sizeof(uint32_t) * 5);
	set_hw_resources.mmhub_version =
		IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, MMHUB_HWIP, 0));

	memcpy(set_hw_resources.osssys_base, adev->reg_offset[OSSSYS_HWIP][0],
	       sizeof(uint32_t) * 5);
	set_hw_resources.osssys_version =
		IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, OSSSYS_HWIP, 0));

	set_hw_resources.vcn_version =
		IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VCN_HWIP, 0));
	set_hw_resources.vpe_version =
		IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VPE_HWIP, 0));

	set_hw_resources.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
	set_hw_resources.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;

	r = amdgpu_umsch_mm_submit_pkt(umsch, &set_hw_resources.max_dwords_in_api,
				       API_FRAME_SIZE_IN_DWORDS);
	if (r)
		return r;

	r = amdgpu_umsch_mm_query_fence(umsch);
	if (r) {
		dev_err(adev->dev, "UMSCH SET_HW_RESOURCES: Failed\n");
		return r;
	}

	return 0;
}
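
/*
 * Build an ADD_QUEUE packet from the caller-supplied input, submit it on the
 * UMSCH ring, and block on the API completion fence.
 */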
static int umsch_mm_v4_0_add_queue(struct amdgpu_umsch_mm *umsch,
				   struct umsch_mm_add_queue_input *input_ptr)
{
	struct amdgpu_device *adev = umsch->ring.adev;
	union UMSCHAPI__ADD_QUEUE add_queue = {};
	int r;

	add_queue.header.type = UMSCH_API_TYPE_SCHEDULER;
	add_queue.header.opcode = UMSCH_API_ADD_QUEUE;
	add_queue.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	add_queue.process_id = input_ptr->process_id;
	add_queue.page_table_base_addr = input_ptr->page_table_base_addr;
	add_queue.process_va_start = input_ptr->process_va_start;
	add_queue.process_va_end = input_ptr->process_va_end;
	add_queue.process_quantum = input_ptr->process_quantum;
	add_queue.process_csa_addr = input_ptr->process_csa_addr;
	add_queue.context_quantum = input_ptr->context_quantum;
	add_queue.context_csa_addr = input_ptr->context_csa_addr;
	add_queue.inprocess_context_priority = input_ptr->inprocess_context_priority;
	add_queue.context_global_priority_level =
		(enum UMSCH_AMD_PRIORITY_LEVEL)input_ptr->context_global_priority_level;
	add_queue.doorbell_offset_0 = input_ptr->doorbell_offset_0;
	add_queue.doorbell_offset_1 = input_ptr->doorbell_offset_1;
	add_queue.affinity.u32All = input_ptr->affinity;
	add_queue.mqd_addr = input_ptr->mqd_addr;
	add_queue.engine_type = (enum UMSCH_ENGINE_TYPE)input_ptr->engine_type;
	add_queue.h_context = input_ptr->h_context;
	add_queue.h_queue = input_ptr->h_queue;
	add_queue.vm_context_cntl = input_ptr->vm_context_cntl;
	add_queue.is_context_suspended = input_ptr->is_context_suspended;
	add_queue.collaboration_mode = adev->vpe.collaborate_mode ? 1 : 0;

	add_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
	add_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;

	r = amdgpu_umsch_mm_submit_pkt(umsch, &add_queue.max_dwords_in_api,
				       API_FRAME_SIZE_IN_DWORDS);
	if (r)
		return r;

	r = amdgpu_umsch_mm_query_fence(umsch);
	if (r) {
		dev_err(adev->dev, "UMSCH ADD_QUEUE: Failed\n");
		return r;
	}

	return 0;
}

static int umsch_mm_v4_0_remove_queue(struct amdgpu_umsch_mm *umsch,
				      struct umsch_mm_remove_queue_input *input_ptr)
{
	union UMSCHAPI__REMOVE_QUEUE remove_queue = {};
	struct amdgpu_device *adev = umsch->ring.adev;
	int r;

	remove_queue.header.type = UMSCH_API_TYPE_SCHEDULER;
	remove_queue.header.opcode = UMSCH_API_REMOVE_QUEUE;
	remove_queue.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	remove_queue.doorbell_offset_0 = input_ptr->doorbell_offset_0;
	remove_queue.doorbell_offset_1 = input_ptr->doorbell_offset_1;
	remove_queue.context_csa_addr = input_ptr->context_csa_addr;

	remove_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
	remove_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;

	r = amdgpu_umsch_mm_submit_pkt(umsch, &remove_queue.max_dwords_in_api,
				       API_FRAME_SIZE_IN_DWORDS);
	if (r)
		return r;

	r = amdgpu_umsch_mm_query_fence(umsch);
	if (r) {
		dev_err(adev->dev, "UMSCH REMOVE_QUEUE: Failed\n");
		return r;
	}

	return 0;
}

static int umsch_mm_v4_0_set_regs(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_device *adev = container_of(umsch, struct amdgpu_device, umsch_mm);

	umsch->rb_wptr = SOC15_REG_OFFSET(VCN, 0, regVCN_UMSCH_RB_WPTR);
	umsch->rb_rptr = SOC15_REG_OFFSET(VCN, 0, regVCN_UMSCH_RB_RPTR);

	return 0;
}

static const struct umsch_mm_funcs umsch_mm_v4_0_funcs = {
	.set_hw_resources = umsch_mm_v4_0_set_hw_resources,
	.add_queue = umsch_mm_v4_0_add_queue,
	.remove_queue = umsch_mm_v4_0_remove_queue,
	.set_regs = umsch_mm_v4_0_set_regs,
	.init_microcode = amdgpu_umsch_mm_init_microcode,
	.load_microcode = umsch_mm_v4_0_load_microcode,
	.ring_init = amdgpu_umsch_mm_ring_init,
	.ring_start = umsch_mm_v4_0_ring_start,
	.ring_stop = umsch_mm_v4_0_ring_stop,
};

void umsch_mm_v4_0_set_funcs(struct amdgpu_umsch_mm *umsch)
{
	umsch->funcs = &umsch_mm_v4_0_funcs;
}