1 /* 2 * Copyright 2022 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23 #include <linux/firmware.h> 24 #include <drm/drm_drv.h> 25 26 #include "amdgpu.h" 27 #include "amdgpu_ucode.h" 28 #include "amdgpu_vpe.h" 29 #include "amdgpu_smu.h" 30 #include "soc15_common.h" 31 #include "vpe_v6_1.h" 32 33 #define AMDGPU_CSA_VPE_SIZE 64 34 /* VPE CSA resides in the 4th page of CSA */ 35 #define AMDGPU_CSA_VPE_OFFSET (4096 * 3) 36 37 /* 1 second timeout */ 38 #define VPE_IDLE_TIMEOUT msecs_to_jiffies(1000) 39 40 #define VPE_MAX_DPM_LEVEL 4 41 #define FIXED1_8_BITS_PER_FRACTIONAL_PART 8 42 #define GET_PRATIO_INTEGER_PART(x) ((x) >> FIXED1_8_BITS_PER_FRACTIONAL_PART) 43 44 static void vpe_set_ring_funcs(struct amdgpu_device *adev); 45 46 static inline uint16_t div16_u16_rem(uint16_t dividend, uint16_t divisor, uint16_t *remainder) 47 { 48 *remainder = dividend % divisor; 49 return dividend / divisor; 50 } 51 52 static inline uint16_t complete_integer_division_u16( 53 uint16_t dividend, 54 uint16_t divisor, 55 uint16_t *remainder) 56 { 57 return div16_u16_rem(dividend, divisor, (uint16_t *)remainder); 58 } 59 60 static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator) 61 { 62 u16 arg1_value = numerator; 63 u16 arg2_value = denominator; 64 65 uint16_t remainder; 66 67 /* determine integer part */ 68 uint16_t res_value = complete_integer_division_u16( 69 arg1_value, arg2_value, &remainder); 70 71 if (res_value > 127 /* CHAR_MAX */) 72 return 0; 73 74 /* determine fractional part */ 75 { 76 unsigned int i = FIXED1_8_BITS_PER_FRACTIONAL_PART; 77 78 do { 79 remainder <<= 1; 80 81 res_value <<= 1; 82 83 if (remainder >= arg2_value) { 84 res_value |= 1; 85 remainder -= arg2_value; 86 } 87 } while (--i != 0); 88 } 89 90 /* round up LSB */ 91 { 92 uint16_t summand = (remainder << 1) >= arg2_value; 93 94 if ((res_value + summand) > 32767 /* SHRT_MAX */) 95 return 0; 96 97 res_value += summand; 98 } 99 100 return res_value; 101 } 102 103 static uint16_t vpe_internal_get_pratio(uint16_t from_frequency, uint16_t to_frequency) 104 { 105 uint16_t pratio = vpe_u1_8_from_fraction(from_frequency, to_frequency); 106 107 if (GET_PRATIO_INTEGER_PART(pratio) > 1) 108 pratio = 0; 109 110 return pratio; 111 } 112 113 /* 114 * VPE has 4 DPM levels from level 0 (lowerest) to 3 (highest), 115 * VPE FW will dynamically decide which level should be used according to current loading. 116 * 117 * Get VPE and SOC clocks from PM, and select the appropriate four clock values, 118 * calculate the ratios of adjusting from one clock to another. 119 * The VPE FW can then request the appropriate frequency from the PMFW. 120 */ 121 int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe) 122 { 123 struct amdgpu_device *adev = vpe->ring.adev; 124 uint32_t dpm_ctl; 125 126 if (adev->pm.dpm_enabled) { 127 struct dpm_clocks clock_table = { 0 }; 128 struct dpm_clock *VPEClks; 129 struct dpm_clock *SOCClks; 130 uint32_t idx; 131 uint32_t pratio_vmax_vnorm = 0, pratio_vnorm_vmid = 0, pratio_vmid_vmin = 0; 132 uint16_t pratio_vmin_freq = 0, pratio_vmid_freq = 0, pratio_vnorm_freq = 0, pratio_vmax_freq = 0; 133 134 dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); 135 dpm_ctl |= 1; /* DPM enablement */ 136 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); 137 138 /* Get VPECLK and SOCCLK */ 139 if (amdgpu_dpm_get_dpm_clock_table(adev, &clock_table)) { 140 dev_dbg(adev->dev, "%s: get clock failed!\n", __func__); 141 goto disable_dpm; 142 } 143 144 SOCClks = clock_table.SocClocks; 145 VPEClks = clock_table.VPEClocks; 146 147 /* vpe dpm only cares 4 levels. */ 148 for (idx = 0; idx < VPE_MAX_DPM_LEVEL; idx++) { 149 uint32_t soc_dpm_level; 150 uint32_t min_freq; 151 152 if (idx == 0) 153 soc_dpm_level = 0; 154 else 155 soc_dpm_level = (idx * 2) + 1; 156 157 /* clamp the max level */ 158 if (soc_dpm_level > PP_SMU_NUM_VPECLK_DPM_LEVELS - 1) 159 soc_dpm_level = PP_SMU_NUM_VPECLK_DPM_LEVELS - 1; 160 161 min_freq = (SOCClks[soc_dpm_level].Freq < VPEClks[soc_dpm_level].Freq) ? 162 SOCClks[soc_dpm_level].Freq : VPEClks[soc_dpm_level].Freq; 163 164 switch (idx) { 165 case 0: 166 pratio_vmin_freq = min_freq; 167 break; 168 case 1: 169 pratio_vmid_freq = min_freq; 170 break; 171 case 2: 172 pratio_vnorm_freq = min_freq; 173 break; 174 case 3: 175 pratio_vmax_freq = min_freq; 176 break; 177 default: 178 break; 179 } 180 } 181 182 if (pratio_vmin_freq && pratio_vmid_freq && pratio_vnorm_freq && pratio_vmax_freq) { 183 uint32_t pratio_ctl; 184 185 pratio_vmax_vnorm = (uint32_t)vpe_internal_get_pratio(pratio_vmax_freq, pratio_vnorm_freq); 186 pratio_vnorm_vmid = (uint32_t)vpe_internal_get_pratio(pratio_vnorm_freq, pratio_vmid_freq); 187 pratio_vmid_vmin = (uint32_t)vpe_internal_get_pratio(pratio_vmid_freq, pratio_vmin_freq); 188 189 pratio_ctl = pratio_vmax_vnorm | (pratio_vnorm_vmid << 9) | (pratio_vmid_vmin << 18); 190 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl); /* PRatio */ 191 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000); /* 1ms, unit=1/24MHz */ 192 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000); /* 50ms */ 193 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000);/* 50ms */ 194 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000);/* 50ms */ 195 dev_dbg(adev->dev, "%s: configure vpe dpm pratio done!\n", __func__); 196 } else { 197 dev_dbg(adev->dev, "%s: invalid pratio parameters!\n", __func__); 198 goto disable_dpm; 199 } 200 } 201 return 0; 202 203 disable_dpm: 204 dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); 205 dpm_ctl &= 0xfffffffe; /* Disable DPM */ 206 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); 207 dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__); 208 return 0; 209 } 210 211 int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev) 212 { 213 struct amdgpu_firmware_info ucode = { 214 .ucode_id = AMDGPU_UCODE_ID_VPE, 215 .mc_addr = adev->vpe.cmdbuf_gpu_addr, 216 .ucode_size = 8, 217 }; 218 219 return psp_execute_ip_fw_load(&adev->psp, &ucode); 220 } 221 222 int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe) 223 { 224 struct amdgpu_device *adev = vpe->ring.adev; 225 const struct vpe_firmware_header_v1_0 *vpe_hdr; 226 char fw_prefix[32], fw_name[64]; 227 int ret; 228 229 amdgpu_ucode_ip_version_decode(adev, VPE_HWIP, fw_prefix, sizeof(fw_prefix)); 230 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", fw_prefix); 231 232 ret = amdgpu_ucode_request(adev, &adev->vpe.fw, fw_name); 233 if (ret) 234 goto out; 235 236 vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data; 237 adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version); 238 adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version); 239 240 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 241 struct amdgpu_firmware_info *info; 242 243 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTX]; 244 info->ucode_id = AMDGPU_UCODE_ID_VPE_CTX; 245 info->fw = adev->vpe.fw; 246 adev->firmware.fw_size += 247 ALIGN(le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes), PAGE_SIZE); 248 249 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTL]; 250 info->ucode_id = AMDGPU_UCODE_ID_VPE_CTL; 251 info->fw = adev->vpe.fw; 252 adev->firmware.fw_size += 253 ALIGN(le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes), PAGE_SIZE); 254 } 255 256 return 0; 257 out: 258 dev_err(adev->dev, "fail to initialize vpe microcode\n"); 259 release_firmware(adev->vpe.fw); 260 adev->vpe.fw = NULL; 261 return ret; 262 } 263 264 int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe) 265 { 266 struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe); 267 struct amdgpu_ring *ring = &vpe->ring; 268 int ret; 269 270 ring->ring_obj = NULL; 271 ring->use_doorbell = true; 272 ring->vm_hub = AMDGPU_MMHUB0(0); 273 ring->doorbell_index = (adev->doorbell_index.vpe_ring << 1); 274 snprintf(ring->name, 4, "vpe"); 275 276 ret = amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0, 277 AMDGPU_RING_PRIO_DEFAULT, NULL); 278 if (ret) 279 return ret; 280 281 return 0; 282 } 283 284 int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe) 285 { 286 amdgpu_ring_fini(&vpe->ring); 287 288 return 0; 289 } 290 291 static int vpe_early_init(void *handle) 292 { 293 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 294 struct amdgpu_vpe *vpe = &adev->vpe; 295 296 switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { 297 case IP_VERSION(6, 1, 0): 298 vpe_v6_1_set_funcs(vpe); 299 break; 300 case IP_VERSION(6, 1, 1): 301 vpe_v6_1_set_funcs(vpe); 302 vpe->collaborate_mode = true; 303 break; 304 default: 305 return -EINVAL; 306 } 307 308 vpe_set_ring_funcs(adev); 309 vpe_set_regs(vpe); 310 311 dev_info(adev->dev, "VPE: collaborate mode %s", vpe->collaborate_mode ? "true" : "false"); 312 313 return 0; 314 } 315 316 static void vpe_idle_work_handler(struct work_struct *work) 317 { 318 struct amdgpu_device *adev = 319 container_of(work, struct amdgpu_device, vpe.idle_work.work); 320 unsigned int fences = 0; 321 322 fences += amdgpu_fence_count_emitted(&adev->vpe.ring); 323 324 if (fences == 0) 325 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); 326 else 327 schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); 328 } 329 330 static int vpe_common_init(struct amdgpu_vpe *vpe) 331 { 332 struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe); 333 int r; 334 335 r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, 336 AMDGPU_GEM_DOMAIN_GTT, 337 &adev->vpe.cmdbuf_obj, 338 &adev->vpe.cmdbuf_gpu_addr, 339 (void **)&adev->vpe.cmdbuf_cpu_addr); 340 if (r) { 341 dev_err(adev->dev, "VPE: failed to allocate cmdbuf bo %d\n", r); 342 return r; 343 } 344 345 vpe->context_started = false; 346 INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler); 347 348 return 0; 349 } 350 351 static int vpe_sw_init(void *handle) 352 { 353 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 354 struct amdgpu_vpe *vpe = &adev->vpe; 355 int ret; 356 357 ret = vpe_common_init(vpe); 358 if (ret) 359 goto out; 360 361 ret = vpe_irq_init(vpe); 362 if (ret) 363 goto out; 364 365 ret = vpe_ring_init(vpe); 366 if (ret) 367 goto out; 368 369 ret = vpe_init_microcode(vpe); 370 if (ret) 371 goto out; 372 out: 373 return ret; 374 } 375 376 static int vpe_sw_fini(void *handle) 377 { 378 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 379 struct amdgpu_vpe *vpe = &adev->vpe; 380 381 release_firmware(vpe->fw); 382 vpe->fw = NULL; 383 384 vpe_ring_fini(vpe); 385 386 amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj, 387 &adev->vpe.cmdbuf_gpu_addr, 388 (void **)&adev->vpe.cmdbuf_cpu_addr); 389 390 return 0; 391 } 392 393 static int vpe_hw_init(void *handle) 394 { 395 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 396 struct amdgpu_vpe *vpe = &adev->vpe; 397 int ret; 398 399 ret = vpe_load_microcode(vpe); 400 if (ret) 401 return ret; 402 403 ret = vpe_ring_start(vpe); 404 if (ret) 405 return ret; 406 407 return 0; 408 } 409 410 static int vpe_hw_fini(void *handle) 411 { 412 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 413 struct amdgpu_vpe *vpe = &adev->vpe; 414 415 vpe_ring_stop(vpe); 416 417 /* Power off VPE */ 418 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); 419 420 return 0; 421 } 422 423 static int vpe_suspend(void *handle) 424 { 425 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 426 427 cancel_delayed_work_sync(&adev->vpe.idle_work); 428 429 return vpe_hw_fini(adev); 430 } 431 432 static int vpe_resume(void *handle) 433 { 434 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 435 436 return vpe_hw_init(adev); 437 } 438 439 static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) 440 { 441 int i; 442 443 for (i = 0; i < count; i++) 444 if (i == 0) 445 amdgpu_ring_write(ring, ring->funcs->nop | 446 VPE_CMD_NOP_HEADER_COUNT(count - 1)); 447 else 448 amdgpu_ring_write(ring, ring->funcs->nop); 449 } 450 451 static uint64_t vpe_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t vmid) 452 { 453 struct amdgpu_device *adev = ring->adev; 454 uint32_t index = 0; 455 uint64_t csa_mc_addr; 456 457 if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp) 458 return 0; 459 460 csa_mc_addr = amdgpu_csa_vaddr(adev) + AMDGPU_CSA_VPE_OFFSET + 461 index * AMDGPU_CSA_VPE_SIZE; 462 463 return csa_mc_addr; 464 } 465 466 static void vpe_ring_emit_pred_exec(struct amdgpu_ring *ring, 467 uint32_t device_select, 468 uint32_t exec_count) 469 { 470 if (!ring->adev->vpe.collaborate_mode) 471 return; 472 473 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_PRED_EXE, 0) | 474 (device_select << 16)); 475 amdgpu_ring_write(ring, exec_count & 0x1fff); 476 } 477 478 static void vpe_ring_emit_ib(struct amdgpu_ring *ring, 479 struct amdgpu_job *job, 480 struct amdgpu_ib *ib, 481 uint32_t flags) 482 { 483 uint32_t vmid = AMDGPU_JOB_GET_VMID(job); 484 uint64_t csa_mc_addr = vpe_get_csa_mc_addr(ring, vmid); 485 486 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0) | 487 VPE_CMD_INDIRECT_HEADER_VMID(vmid & 0xf)); 488 489 /* base must be 32 byte aligned */ 490 amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); 491 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 492 amdgpu_ring_write(ring, ib->length_dw); 493 amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr)); 494 amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr)); 495 } 496 497 static void vpe_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr, 498 uint64_t seq, unsigned int flags) 499 { 500 int i = 0; 501 502 do { 503 /* write the fence */ 504 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0)); 505 /* zero in first two bits */ 506 WARN_ON_ONCE(addr & 0x3); 507 amdgpu_ring_write(ring, lower_32_bits(addr)); 508 amdgpu_ring_write(ring, upper_32_bits(addr)); 509 amdgpu_ring_write(ring, i == 0 ? lower_32_bits(seq) : upper_32_bits(seq)); 510 addr += 4; 511 } while ((flags & AMDGPU_FENCE_FLAG_64BIT) && (i++ < 1)); 512 513 if (flags & AMDGPU_FENCE_FLAG_INT) { 514 /* generate an interrupt */ 515 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_TRAP, 0)); 516 amdgpu_ring_write(ring, 0); 517 } 518 519 } 520 521 static void vpe_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 522 { 523 uint32_t seq = ring->fence_drv.sync_seq; 524 uint64_t addr = ring->fence_drv.gpu_addr; 525 526 vpe_ring_emit_pred_exec(ring, 0, 6); 527 528 /* wait for idle */ 529 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM, 530 VPE_POLL_REGMEM_SUBOP_REGMEM) | 531 VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */ 532 VPE_CMD_POLL_REGMEM_HEADER_MEM(1)); 533 amdgpu_ring_write(ring, addr & 0xfffffffc); 534 amdgpu_ring_write(ring, upper_32_bits(addr)); 535 amdgpu_ring_write(ring, seq); /* reference */ 536 amdgpu_ring_write(ring, 0xffffffff); /* mask */ 537 amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 538 VPE_CMD_POLL_REGMEM_DW5_INTERVAL(4)); 539 } 540 541 static void vpe_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) 542 { 543 vpe_ring_emit_pred_exec(ring, 0, 3); 544 545 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_REG_WRITE, 0)); 546 amdgpu_ring_write(ring, reg << 2); 547 amdgpu_ring_write(ring, val); 548 } 549 550 static void vpe_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 551 uint32_t val, uint32_t mask) 552 { 553 vpe_ring_emit_pred_exec(ring, 0, 6); 554 555 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM, 556 VPE_POLL_REGMEM_SUBOP_REGMEM) | 557 VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */ 558 VPE_CMD_POLL_REGMEM_HEADER_MEM(0)); 559 amdgpu_ring_write(ring, reg << 2); 560 amdgpu_ring_write(ring, 0); 561 amdgpu_ring_write(ring, val); /* reference */ 562 amdgpu_ring_write(ring, mask); /* mask */ 563 amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 564 VPE_CMD_POLL_REGMEM_DW5_INTERVAL(10)); 565 } 566 567 static void vpe_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, 568 uint64_t pd_addr) 569 { 570 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 571 } 572 573 static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring, 574 uint64_t addr) 575 { 576 unsigned int ret; 577 578 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0)); 579 amdgpu_ring_write(ring, lower_32_bits(addr)); 580 amdgpu_ring_write(ring, upper_32_bits(addr)); 581 amdgpu_ring_write(ring, 1); 582 ret = ring->wptr & ring->buf_mask; 583 amdgpu_ring_write(ring, 0); 584 585 return ret; 586 } 587 588 static int vpe_ring_preempt_ib(struct amdgpu_ring *ring) 589 { 590 struct amdgpu_device *adev = ring->adev; 591 struct amdgpu_vpe *vpe = &adev->vpe; 592 uint32_t preempt_reg = vpe->regs.queue0_preempt; 593 int i, r = 0; 594 595 /* assert preemption condition */ 596 amdgpu_ring_set_preempt_cond_exec(ring, false); 597 598 /* emit the trailing fence */ 599 ring->trail_seq += 1; 600 amdgpu_ring_alloc(ring, 10); 601 vpe_ring_emit_fence(ring, ring->trail_fence_gpu_addr, ring->trail_seq, 0); 602 amdgpu_ring_commit(ring); 603 604 /* assert IB preemption */ 605 WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1); 606 607 /* poll the trailing fence */ 608 for (i = 0; i < adev->usec_timeout; i++) { 609 if (ring->trail_seq == 610 le32_to_cpu(*(ring->trail_fence_cpu_addr))) 611 break; 612 udelay(1); 613 } 614 615 if (i >= adev->usec_timeout) { 616 r = -EINVAL; 617 dev_err(adev->dev, "ring %d failed to be preempted\n", ring->idx); 618 } 619 620 /* deassert IB preemption */ 621 WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0); 622 623 /* deassert the preemption condition */ 624 amdgpu_ring_set_preempt_cond_exec(ring, true); 625 626 return r; 627 } 628 629 static int vpe_set_clockgating_state(void *handle, 630 enum amd_clockgating_state state) 631 { 632 return 0; 633 } 634 635 static int vpe_set_powergating_state(void *handle, 636 enum amd_powergating_state state) 637 { 638 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 639 struct amdgpu_vpe *vpe = &adev->vpe; 640 641 if (!adev->pm.dpm_enabled) 642 dev_err(adev->dev, "Without PM, cannot support powergating\n"); 643 644 dev_dbg(adev->dev, "%s: %s!\n", __func__, (state == AMD_PG_STATE_GATE) ? "GATE":"UNGATE"); 645 646 if (state == AMD_PG_STATE_GATE) { 647 amdgpu_dpm_enable_vpe(adev, false); 648 vpe->context_started = false; 649 } else { 650 amdgpu_dpm_enable_vpe(adev, true); 651 } 652 653 return 0; 654 } 655 656 static uint64_t vpe_ring_get_rptr(struct amdgpu_ring *ring) 657 { 658 struct amdgpu_device *adev = ring->adev; 659 struct amdgpu_vpe *vpe = &adev->vpe; 660 uint64_t rptr; 661 662 if (ring->use_doorbell) { 663 rptr = atomic64_read((atomic64_t *)ring->rptr_cpu_addr); 664 dev_dbg(adev->dev, "rptr/doorbell before shift == 0x%016llx\n", rptr); 665 } else { 666 rptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_hi)); 667 rptr = rptr << 32; 668 rptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_lo)); 669 dev_dbg(adev->dev, "rptr before shift [%i] == 0x%016llx\n", ring->me, rptr); 670 } 671 672 return (rptr >> 2); 673 } 674 675 static uint64_t vpe_ring_get_wptr(struct amdgpu_ring *ring) 676 { 677 struct amdgpu_device *adev = ring->adev; 678 struct amdgpu_vpe *vpe = &adev->vpe; 679 uint64_t wptr; 680 681 if (ring->use_doorbell) { 682 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 683 dev_dbg(adev->dev, "wptr/doorbell before shift == 0x%016llx\n", wptr); 684 } else { 685 wptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi)); 686 wptr = wptr << 32; 687 wptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo)); 688 dev_dbg(adev->dev, "wptr before shift [%i] == 0x%016llx\n", ring->me, wptr); 689 } 690 691 return (wptr >> 2); 692 } 693 694 static void vpe_ring_set_wptr(struct amdgpu_ring *ring) 695 { 696 struct amdgpu_device *adev = ring->adev; 697 struct amdgpu_vpe *vpe = &adev->vpe; 698 699 if (ring->use_doorbell) { 700 dev_dbg(adev->dev, "Using doorbell, \ 701 wptr_offs == 0x%08x, \ 702 lower_32_bits(ring->wptr) << 2 == 0x%08x, \ 703 upper_32_bits(ring->wptr) << 2 == 0x%08x\n", 704 ring->wptr_offs, 705 lower_32_bits(ring->wptr << 2), 706 upper_32_bits(ring->wptr << 2)); 707 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2); 708 WDOORBELL64(ring->doorbell_index, ring->wptr << 2); 709 if (vpe->collaborate_mode) 710 WDOORBELL64(ring->doorbell_index + 4, ring->wptr << 2); 711 } else { 712 int i; 713 714 for (i = 0; i < vpe->num_instances; i++) { 715 dev_dbg(adev->dev, "Not using doorbell, \ 716 regVPEC_QUEUE0_RB_WPTR == 0x%08x, \ 717 regVPEC_QUEUE0_RB_WPTR_HI == 0x%08x\n", 718 lower_32_bits(ring->wptr << 2), 719 upper_32_bits(ring->wptr << 2)); 720 WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo), 721 lower_32_bits(ring->wptr << 2)); 722 WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi), 723 upper_32_bits(ring->wptr << 2)); 724 } 725 } 726 } 727 728 static int vpe_ring_test_ring(struct amdgpu_ring *ring) 729 { 730 struct amdgpu_device *adev = ring->adev; 731 const uint32_t test_pattern = 0xdeadbeef; 732 uint32_t index, i; 733 uint64_t wb_addr; 734 int ret; 735 736 ret = amdgpu_device_wb_get(adev, &index); 737 if (ret) { 738 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret); 739 return ret; 740 } 741 742 adev->wb.wb[index] = 0; 743 wb_addr = adev->wb.gpu_addr + (index * 4); 744 745 ret = amdgpu_ring_alloc(ring, 4); 746 if (ret) { 747 dev_err(adev->dev, "amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, ret); 748 goto out; 749 } 750 751 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0)); 752 amdgpu_ring_write(ring, lower_32_bits(wb_addr)); 753 amdgpu_ring_write(ring, upper_32_bits(wb_addr)); 754 amdgpu_ring_write(ring, test_pattern); 755 amdgpu_ring_commit(ring); 756 757 for (i = 0; i < adev->usec_timeout; i++) { 758 if (le32_to_cpu(adev->wb.wb[index]) == test_pattern) 759 goto out; 760 udelay(1); 761 } 762 763 ret = -ETIMEDOUT; 764 out: 765 amdgpu_device_wb_free(adev, index); 766 767 return ret; 768 } 769 770 static int vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout) 771 { 772 struct amdgpu_device *adev = ring->adev; 773 const uint32_t test_pattern = 0xdeadbeef; 774 struct amdgpu_ib ib = {}; 775 struct dma_fence *f = NULL; 776 uint32_t index; 777 uint64_t wb_addr; 778 int ret; 779 780 ret = amdgpu_device_wb_get(adev, &index); 781 if (ret) { 782 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret); 783 return ret; 784 } 785 786 adev->wb.wb[index] = 0; 787 wb_addr = adev->wb.gpu_addr + (index * 4); 788 789 ret = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); 790 if (ret) 791 goto err0; 792 793 ib.ptr[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0); 794 ib.ptr[1] = lower_32_bits(wb_addr); 795 ib.ptr[2] = upper_32_bits(wb_addr); 796 ib.ptr[3] = test_pattern; 797 ib.ptr[4] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0); 798 ib.ptr[5] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0); 799 ib.ptr[6] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0); 800 ib.ptr[7] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0); 801 ib.length_dw = 8; 802 803 ret = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 804 if (ret) 805 goto err1; 806 807 ret = dma_fence_wait_timeout(f, false, timeout); 808 if (ret <= 0) { 809 ret = ret ? : -ETIMEDOUT; 810 goto err1; 811 } 812 813 ret = (le32_to_cpu(adev->wb.wb[index]) == test_pattern) ? 0 : -EINVAL; 814 815 err1: 816 amdgpu_ib_free(adev, &ib, NULL); 817 dma_fence_put(f); 818 err0: 819 amdgpu_device_wb_free(adev, index); 820 821 return ret; 822 } 823 824 static void vpe_ring_begin_use(struct amdgpu_ring *ring) 825 { 826 struct amdgpu_device *adev = ring->adev; 827 struct amdgpu_vpe *vpe = &adev->vpe; 828 829 cancel_delayed_work_sync(&adev->vpe.idle_work); 830 831 /* Power on VPE and notify VPE of new context */ 832 if (!vpe->context_started) { 833 uint32_t context_notify; 834 835 /* Power on VPE */ 836 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_UNGATE); 837 838 /* Indicates that a job from a new context has been submitted. */ 839 context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator)); 840 if ((context_notify & 0x1) == 0) 841 context_notify |= 0x1; 842 else 843 context_notify &= ~(0x1); 844 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify); 845 vpe->context_started = true; 846 } 847 } 848 849 static void vpe_ring_end_use(struct amdgpu_ring *ring) 850 { 851 struct amdgpu_device *adev = ring->adev; 852 853 schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); 854 } 855 856 static const struct amdgpu_ring_funcs vpe_ring_funcs = { 857 .type = AMDGPU_RING_TYPE_VPE, 858 .align_mask = 0xf, 859 .nop = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0), 860 .support_64bit_ptrs = true, 861 .get_rptr = vpe_ring_get_rptr, 862 .get_wptr = vpe_ring_get_wptr, 863 .set_wptr = vpe_ring_set_wptr, 864 .emit_frame_size = 865 5 + /* vpe_ring_init_cond_exec */ 866 6 + /* vpe_ring_emit_pipeline_sync */ 867 10 + 10 + 10 + /* vpe_ring_emit_fence */ 868 /* vpe_ring_emit_vm_flush */ 869 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 870 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6, 871 .emit_ib_size = 7 + 6, 872 .emit_ib = vpe_ring_emit_ib, 873 .emit_pipeline_sync = vpe_ring_emit_pipeline_sync, 874 .emit_fence = vpe_ring_emit_fence, 875 .emit_vm_flush = vpe_ring_emit_vm_flush, 876 .emit_wreg = vpe_ring_emit_wreg, 877 .emit_reg_wait = vpe_ring_emit_reg_wait, 878 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, 879 .insert_nop = vpe_ring_insert_nop, 880 .pad_ib = amdgpu_ring_generic_pad_ib, 881 .test_ring = vpe_ring_test_ring, 882 .test_ib = vpe_ring_test_ib, 883 .init_cond_exec = vpe_ring_init_cond_exec, 884 .preempt_ib = vpe_ring_preempt_ib, 885 .begin_use = vpe_ring_begin_use, 886 .end_use = vpe_ring_end_use, 887 }; 888 889 static void vpe_set_ring_funcs(struct amdgpu_device *adev) 890 { 891 adev->vpe.ring.funcs = &vpe_ring_funcs; 892 } 893 894 const struct amd_ip_funcs vpe_ip_funcs = { 895 .name = "vpe_v6_1", 896 .early_init = vpe_early_init, 897 .late_init = NULL, 898 .sw_init = vpe_sw_init, 899 .sw_fini = vpe_sw_fini, 900 .hw_init = vpe_hw_init, 901 .hw_fini = vpe_hw_fini, 902 .suspend = vpe_suspend, 903 .resume = vpe_resume, 904 .soft_reset = NULL, 905 .set_clockgating_state = vpe_set_clockgating_state, 906 .set_powergating_state = vpe_set_powergating_state, 907 }; 908 909 const struct amdgpu_ip_block_version vpe_v6_1_ip_block = { 910 .type = AMD_IP_BLOCK_TYPE_VPE, 911 .major = 6, 912 .minor = 1, 913 .rev = 0, 914 .funcs = &vpe_ip_funcs, 915 }; 916