/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __AMDGPU_VPE_H__
#define __AMDGPU_VPE_H__

#include "amdgpu_ring.h"
#include "amdgpu_irq.h"
#include "vpe_6_1_fw_if.h"

struct amdgpu_vpe;

/* Per-IP-version VPE hardware callbacks; hooks a version does not need may be left NULL. */
struct vpe_funcs {
	uint32_t (*get_reg_offset)(struct amdgpu_vpe *vpe, uint32_t inst, uint32_t offset);
	int (*set_regs)(struct amdgpu_vpe *vpe);
	int (*irq_init)(struct amdgpu_vpe *vpe);
	int (*init_microcode)(struct amdgpu_vpe *vpe);
	int (*load_microcode)(struct amdgpu_vpe *vpe);
	int (*ring_init)(struct amdgpu_vpe *vpe);
	int (*ring_start)(struct amdgpu_vpe *vpe);
	int (*ring_stop)(struct amdgpu_vpe *vpe);
	int (*ring_fini)(struct amdgpu_vpe *vpe);
};

/* Register offsets filled in by set_regs(): queue 0 ring buffer and DPM controls. */
struct vpe_regs {
	uint32_t queue0_rb_rptr_lo;
	uint32_t queue0_rb_rptr_hi;
	uint32_t queue0_rb_wptr_lo;
	uint32_t queue0_rb_wptr_hi;
	uint32_t queue0_preempt;

	uint32_t dpm_enable;
	uint32_t dpm_pratio;
	uint32_t dpm_request_interval;
	uint32_t dpm_decision_threshold;
	uint32_t dpm_busy_clamp_threshold;
	uint32_t dpm_idle_clamp_threshold;
	uint32_t dpm_request_lv;
	uint32_t context_indicator;
};

struct amdgpu_vpe {
	struct amdgpu_ring ring;
	struct amdgpu_irq_src trap_irq;

	const struct vpe_funcs *funcs;
	struct vpe_regs regs;

	const struct firmware *fw;
	uint32_t fw_version;
	uint32_t feature_version;

	struct amdgpu_bo *cmdbuf_obj;
	uint64_t cmdbuf_gpu_addr;
	uint32_t *cmdbuf_cpu_addr;
	struct delayed_work idle_work;
	bool context_started;
};

int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev);
int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe);
int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe);
int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe);
int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe);

/* Dispatch wrappers: an unimplemented (NULL) callback reports success (0). */
#define vpe_ring_init(vpe) ((vpe)->funcs->ring_init ? (vpe)->funcs->ring_init((vpe)) : 0)
#define vpe_ring_start(vpe) ((vpe)->funcs->ring_start ? (vpe)->funcs->ring_start((vpe)) : 0)
#define vpe_ring_stop(vpe) ((vpe)->funcs->ring_stop ? (vpe)->funcs->ring_stop((vpe)) : 0)
#define vpe_ring_fini(vpe) ((vpe)->funcs->ring_fini ? (vpe)->funcs->ring_fini((vpe)) : 0)

#define vpe_get_reg_offset(vpe, inst, offset) \
	((vpe)->funcs->get_reg_offset ? (vpe)->funcs->get_reg_offset((vpe), (inst), (offset)) : 0)
#define vpe_set_regs(vpe) \
	((vpe)->funcs->set_regs ? (vpe)->funcs->set_regs((vpe)) : 0)
#define vpe_irq_init(vpe) \
	((vpe)->funcs->irq_init ? (vpe)->funcs->irq_init((vpe)) : 0)
#define vpe_init_microcode(vpe) \
	((vpe)->funcs->init_microcode ? (vpe)->funcs->init_microcode((vpe)) : 0)
#define vpe_load_microcode(vpe) \
	((vpe)->funcs->load_microcode ? (vpe)->funcs->load_microcode((vpe)) : 0)

extern const struct amdgpu_ip_block_version vpe_v6_1_ip_block;

#endif
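
/*
 * Illustrative sketch only, guarded out so it is never built: a minimal,
 * self-contained example of the dispatch pattern used above -- a per-version
 * function table reached through NULL-tolerant macros, so hooks a given IP
 * version does not implement simply report success. Every name below
 * (demo_dev, demo_funcs, demo_v1_start, ...) is hypothetical and not part of
 * the driver.
 */
#if 0
#include <stdio.h>

struct demo_dev;

struct demo_funcs {
	int (*start)(struct demo_dev *dev);
	int (*stop)(struct demo_dev *dev);	/* optional hook, may stay NULL */
};

struct demo_dev {
	const struct demo_funcs *funcs;
};

/* Mirror of the vpe_ring_*() macros: a missing hook falls back to 0. */
#define demo_start(dev) ((dev)->funcs->start ? (dev)->funcs->start((dev)) : 0)
#define demo_stop(dev) ((dev)->funcs->stop ? (dev)->funcs->stop((dev)) : 0)

static int demo_v1_start(struct demo_dev *dev)
{
	printf("demo v1: start\n");
	return 0;
}

static const struct demo_funcs demo_v1_funcs = {
	.start = demo_v1_start,
	/* .stop intentionally left NULL */
};

int main(void)
{
	struct demo_dev dev = { .funcs = &demo_v1_funcs };

	/* demo_stop() returns 0 without dereferencing the missing hook. */
	return demo_start(&dev) || demo_stop(&dev);
}
#endif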