/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "hdp_v5_0.h"

#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>

/*
 * Flush the HDP (Host Data Path) write cache so host writes become visible.
 *
 * If no ring is given (or the ring cannot emit register writes), the flush
 * register is written directly over MMIO via the remapped HDP flush offset;
 * otherwise the write is emitted as a packet on @ring so it executes in
 * command-stream order.
 */
static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		/* >> 2: WREG32 takes a dword-aligned register index, while
		 * rmmio_remap.reg_offset is a byte offset.
		 */
		WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
		/* We just need to read back a register to post the write.
		 * Reading back the remapped register causes problems on
		 * some platforms so just read back the memory size register.
		 */
		if (adev->nbio.funcs->get_memsize)
			adev->nbio.funcs->get_memsize(adev);
	} else {
		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
	}
}

/*
 * Invalidate the HDP read cache.
 *
 * Direct MMIO path writes HDP_READ_CACHE_INVALIDATE and reads it back to
 * post the write; the ring path emits the same register write as a packet.
 */
static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
		RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

/*
 * Program HDP SRAM memory power gating (light sleep / deep sleep / shutdown)
 * for the IPH and RC memories.
 *
 * Sequence is order-sensitive: clocks are forced on first, then all power
 * modes are disabled, and only afterwards (when @enable) is exactly one mode
 * re-enabled per memory. No-op unless at least one of LS/DS/SD is supported
 * in adev->cg_flags.
 */
static void hdp_v5_0_update_mem_power_gating(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before doing clock/power mode switch,
	 * forced on IPH & RC clock */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before any changing */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* Already disabled above. The actions below are for "enabled" only */
	if (enable) {
		/* only one clock gating mode (LS/DS/SD) can be enabled */
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 IPH_MEM_POWER_LS_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_LS_EN, 1);
		} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 IPH_MEM_POWER_DS_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_DS_EN, 1);
		} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 IPH_MEM_POWER_SD_EN, 1);
			/* RC should not use shut down mode, fallback to ds or ls if allowed */
			if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS)
				hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
								 HDP_MEM_POWER_CTRL,
								 RC_MEM_POWER_DS_EN, 1);
			else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)
				hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
								 HDP_MEM_POWER_CTRL,
								 RC_MEM_POWER_LS_EN, 1);
		}

		/* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
		 * be set for SRAM LS/DS/SD */
		if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
				      AMD_CG_SUPPORT_HDP_SD)) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
							 IPH_MEM_POWER_CTRL_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_CTRL_EN, 1);
			WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
		}
	}

	/* disable IPH & RC clock override after clock/power mode changing */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 0);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 0);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

/*
 * Enable/disable HDP medium grain clock gating (MGCG).
 *
 * MGCG is enabled by clearing the soft clock overrides in HDP_CLK_CNTL
 * (letting hardware gate the clocks dynamically) and disabled by forcing
 * all the override bits on. No-op unless MGCG is supported in cg_flags.
 */
static void hdp_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

/* Top-level clockgating entry point: applies both memory power gating and
 * medium grain clock gating according to @enable.
 */
static void hdp_v5_0_update_clock_gating(struct amdgpu_device *adev,
					 bool enable)
{
	hdp_v5_0_update_mem_power_gating(adev, enable);
	hdp_v5_0_update_medium_grain_clock_gating(adev, enable);
}

/*
 * Report the currently-active HDP clock/power gating state by reading the
 * hardware registers back, OR-ing AMD_CG_SUPPORT_HDP_* bits into @flags.
 * Only the IPH power-mode bits are sampled for LS/DS/SD classification.
 */
static void hdp_v5_0_get_clockgating_state(struct amdgpu_device *adev,
					   u64 *flags)
{
	uint32_t tmp;

	/* AMD_CG_SUPPORT_HDP_MGCG: active when no soft override forces clocks on */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}

/* One-time HDP register init: make cache flushes also invalidate the cache. */
static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
}

/* HDP v5.0 callback table registered with the amdgpu core. */
const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
	.flush_hdp = hdp_v5_0_flush_hdp,
	.invalidate_hdp = hdp_v5_0_invalidate_hdp,
	.update_clock_gating = hdp_v5_0_update_clock_gating,
	.get_clock_gating_state = hdp_v5_0_get_clockgating_state,
	.init_registers = hdp_v5_0_init_registers,
};