/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "hdp_v5_2.h"

#include "hdp/hdp_5_2_1_offset.h"
#include "hdp/hdp_5_2_1_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>

static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
			      0);
		if (amdgpu_sriov_vf(adev)) {
			/* this is fine because SR-IOV doesn't remap the register */
			RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
		} else {
			/* We just need to read back a register to post the write.
			 * Reading back the remapped register causes problems on
			 * some platforms, so just read back the memory size register.
			 */
			if (adev->nbio.funcs->get_memsize)
				adev->nbio.funcs->get_memsize(adev);
		}
	} else {
		amdgpu_ring_emit_wreg(ring,
				      (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
				      0);
	}
}

static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t hdp_clk_cntl;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);

	/* Before doing the clock/power mode switch, force the MEM clock on */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     ATOMIC_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);

	/* disable clock and power gating before making any changes */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 ATOMIC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 ATOMIC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 ATOMIC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 ATOMIC_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* Already disabled above. The actions below apply only when enabling. */
	if (enable) {
		/* only one of the LS/DS/SD power modes can be enabled */
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 ATOMIC_MEM_POWER_SD_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_SD_EN, 1);
		} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 ATOMIC_MEM_POWER_LS_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_LS_EN, 1);
		} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 ATOMIC_MEM_POWER_DS_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_DS_EN, 1);
		}

		/* confirmed that ATOMIC/RC_MEM_POWER_CTRL_EN have to be set
		 * for SRAM LS/DS/SD
		 */
		if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
				      AMD_CG_SUPPORT_HDP_SD)) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
							 ATOMIC_MEM_POWER_CTRL_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_CTRL_EN, 1);
			WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
		}
	}

	/* disable the MEM clock override after the clock/power mode change */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     ATOMIC_MEM_CLK_SOFT_OVERRIDE, 0);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 0);
	WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
}

static void hdp_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
}

static void hdp_v5_2_get_clockgating_state(struct amdgpu_device *adev,
					   u64 *flags)
{
	uint32_t tmp;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}

static void hdp_v5_2_update_clock_gating(struct amdgpu_device *adev,
					 bool enable)
{
	hdp_v5_2_update_mem_power_gating(adev, enable);
	hdp_v5_2_update_medium_grain_clock_gating(adev, enable);
}

const struct amdgpu_hdp_funcs hdp_v5_2_funcs = {
	.flush_hdp = hdp_v5_2_flush_hdp,
	.update_clock_gating = hdp_v5_2_update_clock_gating,
	.get_clock_gating_state = hdp_v5_2_get_clockgating_state,
};
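
/*
 * Usage sketch, for illustration only: these callbacks are reached through
 * adev->hdp.funcs. The hookup below is an assumption about how the IP
 * discovery code selects this table for an HDP 5.2.x block (the actual
 * switch lives in the discovery code, not in this file):
 *
 *	case IP_VERSION(5, 2, 1):
 *		adev->hdp.funcs = &hdp_v5_2_funcs;
 *		break;
 *
 * Generic driver code can then post CPU writes with
 * amdgpu_device_flush_hdp(adev, ring), which dispatches to
 * hdp_v5_2_flush_hdp() above: via the ring's emit_wreg when one is
 * available, or via an MMIO write plus register read-back otherwise.
 */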