/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_ucode.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "ci_dpm.h"
#include "atom.h"
#include <linux/seq_file.h>

#define MC_CG_ARB_FREQ_F0		0x0a
#define MC_CG_ARB_FREQ_F1		0x0b
#define MC_CG_ARB_FREQ_F2		0x0c
#define MC_CG_ARB_FREQ_F3		0x0d

#define SMC_RAM_END			0x40000

#define VOLTAGE_SCALE			4
#define VOLTAGE_VID_OFFSET_SCALE1	625
#define VOLTAGE_VID_OFFSET_SCALE2	100

static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
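/*
 * DIdT (di/dt droop control) configuration list: each entry is an
 * indirect DIDT register offset, a field mask, the field's shift, and
 * the value to program, terminated by an offset of 0xFFFFFFFF.  The
 * four banks (0x0x, 0x2x, 0x4x, 0x6x) match the SQ, DB, TD and TCP
 * blocks toggled in ci_do_enable_didt() below.
 */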
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				       u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern int ci_mc_load_microcode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter);

static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev);
static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);

static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
	struct ci_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}
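/*
 * Select the powertune defaults for this board by PCI device ID;
 * anything unrecognized falls back to the Bonaire XT table.
 */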
static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	switch (rdev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (rdev->family == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}
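/*
 * The TDC (thermal design current) limit comes from the VBIOS
 * cac_tdp_table; the SMC expects it in 8.8 fixed point (hence the
 * multiply by 256) and in big-endian byte order.
 */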
static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
		rdev->pm.dpm.fan.fan_output_sensitivity =
			rdev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}
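/*
 * Fill the BAPM section of the SMC dpm table: default/target TDP in
 * 8.8 fixed point, the GPU thermal trip parameters, and the DTE R/RC
 * coefficient matrices taken from the powertune defaults.
 */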
static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}
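/*
 * Program a ci_pt_config_reg list.  CACHE-type entries accumulate
 * into a staging value that is OR'ed into the next register write;
 * each entry selects its own access method (SMC indirect, DIDT
 * indirect, or plain MMIO).  The list must end with offset 0xFFFFFFFF.
 */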
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		ci_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}
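/*
 * Toggle the SMC's CAC (power estimation) feature.  pi->cac_enabled
 * mirrors the firmware state so the disable message is only sent when
 * CAC was actually enabled.
 */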
static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}

void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(rdev, gate);
}
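/*
 * Memory clock switching is only safe inside the vertical blanking
 * period: report the vblank as "too short" below 450 us (GDDR5) /
 * 300 us (DDR3), or whenever the refresh rate exceeds 120 Hz.
 */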
bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	/* disable mclk switching if the refresh is >120Hz, even if the
	 * blanking period would allow it
	 */
	if (r600_dpm_get_vrefresh(rdev) > 120)
		return true;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}
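/*
 * Clamp the requested interrupt thresholds to the 0-255 degrees C the
 * hardware supports and program the high/low trip points; the dpm
 * layer works in millidegrees while the register fields take whole
 * degrees.
 */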
static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

static int ci_thermal_enable_alert(struct radeon_device *rdev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
		rdev->irq.dpm_thermal = false;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
		rdev->irq.dpm_thermal = true;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
	tmp |= TMIN(0);
	WREG32_SMC(CG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
	tmp |= FDO_PWM_MODE(mode);
	WREG32_SMC(CG_FDO_CTRL2, tmp);
}
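/*
 * Build the SMC fan table from the VBIOS fan profile: the minimum
 * duty cycle, the two PWM-vs-temperature slopes spanning the
 * min/med/high points, hysteresis values, and the temperature sensor
 * to use.  Any failure drops the driver back to manual fan control.
 */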
static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
	t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;

	pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
	pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = radeon_get_xclk(rdev);

	fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = ci_copy_bytes_to_smc(rdev,
				   pi->fan_table_start,
				   (u8 *)(&fan_table),
				   sizeof(fan_table),
				   pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		rdev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_StartFanControl,
							FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_MSG_SetFanPwmMax,
							rdev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_StartFanControl,
							FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->fan_is_controlled_by_smc = true;
	return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
{
	PPSMC_Result ret;
	struct ci_power_info *pi = ci_get_pi(rdev);

	ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK) {
		pi->fan_is_controlled_by_smc = false;
		return 0;
	} else
		return -EINVAL;
}

int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
				      u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;

	if (rdev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
	duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
				      u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
	tmp |= FDO_STATIC_DUTY(duty);
	WREG32_SMC(CG_FDO_CTRL0, tmp);

	return 0;
}
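/*
 * Fan control mode switch: a non-zero mode stops SMC fan control (if
 * active) and forces that static PWM mode, while mode 0 hands the fan
 * back to the SMC, or to the hardware default when no SMC fan table
 * is in use.
 */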
void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
{
	if (mode) {
		/* stop auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(rdev);
		ci_fan_ctrl_set_static_mode(rdev, mode);
	} else {
		/* restart auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(rdev);
		else
			ci_fan_ctrl_set_default_mode(rdev);
	}
}

u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (pi->fan_is_controlled_by_smc)
		return 0;

	tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
	return (tmp >> FDO_PWM_MODE_SHIFT);
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < rdev->pm.fan_min_rpm) ||
	    (speed > rdev->pm.fan_max_rpm))
		return -EINVAL;

	if (rdev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(rdev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
	tmp |= TARGET_PERIOD(tach_period);
	WREG32_SMC(CG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif

static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
		tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
		WREG32_SMC(CG_FDO_CTRL2, tmp);

		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
		tmp |= TMIN(pi->t_min);
		WREG32_SMC(CG_FDO_CTRL2, tmp);
		pi->fan_ctrl_is_in_default_mode = true;
	}
}

static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.fan.ucode_fan_control) {
		ci_fan_ctrl_start_smc_fan_control(rdev);
		ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
	}
}

static void ci_thermal_initialize(struct radeon_device *rdev)
{
	u32 tmp;

	if (rdev->pm.fan_pulses_per_revolution) {
		tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
		tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
		WREG32_SMC(CG_TACH_CTRL, tmp);
	}

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
	tmp |= TACH_PWM_RESP_RATE(0x28);
	WREG32_SMC(CG_FDO_CTRL2, tmp);
}
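/*
 * Thermal controller bring-up: program the tach edge count and PWM
 * response rate, set the supported temperature range, unmask the
 * thermal interrupt, then upload the fan table and start SMC fan
 * control when it is available.
 */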
static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
{
	int ret;

	ci_thermal_initialize(rdev);
	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(rdev, true);
	if (ret)
		return ret;
	if (rdev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(rdev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(rdev);
	}

	return 0;
}

static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
{
	if (!rdev->pm.no_fan)
		ci_fan_ctrl_set_default_mode(rdev);
}

#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}

static void ci_init_fps_limits(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = ci_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}

	return ret;
}

static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}
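/*
 * Map the active auto-throttle source mask onto the thermal
 * protection setup.  The event source selection (digital, external,
 * or both) is computed but its register write is still stubbed out;
 * only THERMAL_PROTECTION_DIS in GENERAL_PWRMGT is toggled here.
 */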
static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
					   enum radeon_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}
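/*
 * Enable/disable the SCLK and MCLK DPM engines in the SMC; the MCLK
 * enable path also arms the memory CAC counters (LCAC_MC0/MC1/CPL).
 * A "key disabled" flag skips the corresponding message entirely.
 */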
static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}
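/* Allow or block engine clock scaling by toggling SCLK_PWRMGT_OFF. */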
static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_OFF;
	else
		tmp |= SCLK_PWRMGT_OFF;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif

static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
{
	u32 tmp;
	int i;

	if (!ci_is_smc_running(rdev))
		return PPSMC_Result_Failed;

	WREG32(SMC_MESSAGE_0, msg);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(SMC_RESP_0);
		if (tmp != 0)
			break;
		udelay(1);
	}
	tmp = RREG32(SMC_RESP_0);

	return (PPSMC_Result)tmp;
}

static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}

static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
							PPSMC_Msg msg, u32 *parameter)
{
	PPSMC_Result smc_result;

	smc_result = ci_send_msg_to_smc(rdev, msg);

	if ((smc_result == PPSMC_Result_OK) && parameter)
		*parameter = RREG32(SMC_MSG_ARG_0);

	return smc_result;
}

static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->sclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->mclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->pcie_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}
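/*
 * Program a new package power limit (8.8 fixed-point watts, as built
 * by the callers); a no-op unless the PkgPwrLimit containment feature
 * was successfully enabled.
 */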
static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp)
{
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;
	return 0;
}

#if 0
static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}
#endif

static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
{
	u32 sclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetSclkFrequency,
						    &sclk_freq);
	if (smc_result != PPSMC_Result_OK)
		sclk_freq = 0;

	return sclk_freq;
}

static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
{
	u32 mclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetMclkFrequency,
						    &mclk_freq);
	if (smc_result != PPSMC_Result_OK)
		mclk_freq = 0;

	return mclk_freq;
}

static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}

static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}

static int ci_process_firmware_header(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->dpm_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->mc_reg_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, FanTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->fan_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->arb_table_start = tmp;

	return 0;
}
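/*
 * Snapshot the current SPLL (engine clock) and MPLL (memory clock)
 * register state into pi->clock_registers for later use when clock
 * levels are constructed.
 */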
static int ci_process_firmware_header(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->dpm_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->mc_reg_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, FanTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->fan_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->arb_table_start = tmp;

	return 0;
}

static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}

static void ci_init_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static void ci_enable_thermal_protection(struct radeon_device *rdev,
					 bool enable)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	if (enable)
		tmp &= ~THERMAL_PROTECTION_DIS;
	else
		tmp |= THERMAL_PROTECTION_DIS;
	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

static void ci_enable_acpi_power_management(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= STATIC_PM_EN;

	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

#if 0
static int ci_enter_ulp_state(struct radeon_device *rdev)
{
	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

static int ci_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif

static int ci_notify_smc_display_change(struct radeon_device *rdev,
					bool has_display)
{
	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
}

static int ci_enable_ds_master_switch(struct radeon_device *rdev,
				      bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
				return -EINVAL;
		} else {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	} else {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}
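/*
 * Program the display gap so memory reclocking can happen inside the
 * vertical blank. Worked example (values illustrative): at a 60 Hz
 * refresh the frame time is 1000000 / 60 = 16666 us; with a 500 us
 * vblank and the 200 us margin used below, pre_vbi_time_in_us =
 * 16666 - 200 - 500 = 15966. That time is scaled by the SPLL reference
 * clock for CG_DISPLAY_GAP_CNTL2, and the remaining (frame - pre-vbi)
 * time is handed to the SMC as its VBlankTimeout soft register.
 */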
static void ci_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = rdev->clock.spll.reference_freq;
	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);

	tmp &= ~DISP_GAP_MASK;
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
}

static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(GENERAL_PWRMGT);
			tmp |= DYN_SPREAD_SPECTRUM_EN;
			WREG32_SMC(GENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~SSEN;
		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_program_sstp(struct radeon_device *rdev)
{
	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}

static void ci_enable_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);

	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));

	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
}
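/*
 * CG_FTV_0..7 appear to hold the frequency/voltage thresholds consumed
 * by the SMC's dynamic voltage controller. ci_program_vc() loads the
 * CISLANDS_VRC_DFLT* defaults and releases the sclk busy counters from
 * reset; ci_clear_vc() reverses both steps.
 */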
static void ci_program_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}

static void ci_clear_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, 0);
	WREG32_SMC(CG_FTV_1, 0);
	WREG32_SMC(CG_FTV_2, 0);
	WREG32_SMC(CG_FTV_3, 0);
	WREG32_SMC(CG_FTV_4, 0);
	WREG32_SMC(CG_FTV_5, 0);
	WREG32_SMC(CG_FTV_6, 0);
	WREG32_SMC(CG_FTV_7, 0);
}

static int ci_upload_firmware(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int i, ret;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
			break;
	}
	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);

	ci_stop_smc_clock(rdev);
	ci_reset_smc(rdev);

	ret = ci_load_smc_ucode(rdev, pi->sram_end);

	return ret;
}

static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
				     struct atom_voltage_table *voltage_table)
{
	u32 i;

	if (voltage_dependency_table == NULL)
		return -EINVAL;

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;

	voltage_table->count = voltage_dependency_table->count;
	for (i = 0; i < voltage_table->count; i++) {
		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}
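/*
 * Build the VDDC/VDDCI/MVDD voltage tables from either the ATOM GPIO
 * lookup table (a discrete regulator driven over SMIO pins) or, for
 * SVI2-controlled rails, from the clock/voltage dependency tables.
 * Each table is then trimmed so it fits the fixed number of levels the
 * SMU7 state table can hold (SMU7_MAX_LEVELS_*).
 */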
static int ci_construct_voltage_tables(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->vddc_voltage_table);
		if (ret)
			return ret;
	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						&pi->vddc_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
							 &pi->vddc_voltage_table);

	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->vddci_voltage_table);
		if (ret)
			return ret;
	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						&pi->vddci_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
							 &pi->vddci_voltage_table);

	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->mvdd_voltage_table);
		if (ret)
			return ret;
	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						&pi->mvdd_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
							 &pi->mvdd_voltage_table);

	return 0;
}

static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
					  struct atom_voltage_table_entry *voltage_table,
					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
{
	int ret;

	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
					    &smc_voltage_table->StdVoltageHiSidd,
					    &smc_voltage_table->StdVoltageLoSidd);

	if (ret) {
		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
	}

	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
	smc_voltage_table->StdVoltageHiSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
	smc_voltage_table->StdVoltageLoSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
}

static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	unsigned int count;

	table->VddcLevelCount = pi->vddc_voltage_table.count;
	for (count = 0; count < table->VddcLevelCount; count++) {
		ci_populate_smc_voltage_table(rdev,
					      &pi->vddc_voltage_table.entries[count],
					      &table->VddcLevel[count]);

		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->VddcLevel[count].Smio |=
				pi->vddc_voltage_table.entries[count].smio_low;
		else
			table->VddcLevel[count].Smio = 0;
	}
	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);

	return 0;
}

static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
				       SMU7_Discrete_DpmTable *table)
{
	unsigned int count;
	struct ci_power_info *pi = ci_get_pi(rdev);

	table->VddciLevelCount = pi->vddci_voltage_table.count;
	for (count = 0; count < table->VddciLevelCount; count++) {
		ci_populate_smc_voltage_table(rdev,
					      &pi->vddci_voltage_table.entries[count],
					      &table->VddciLevel[count]);

		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->VddciLevel[count].Smio |=
				pi->vddci_voltage_table.entries[count].smio_low;
		else
			table->VddciLevel[count].Smio = 0;
	}
	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);

	return 0;
}

static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	unsigned int count;

	table->MvddLevelCount = pi->mvdd_voltage_table.count;
	for (count = 0; count < table->MvddLevelCount; count++) {
		ci_populate_smc_voltage_table(rdev,
					      &pi->mvdd_voltage_table.entries[count],
					      &table->MvddLevel[count]);

		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->MvddLevel[count].Smio |=
				pi->mvdd_voltage_table.entries[count].smio_low;
		else
			table->MvddLevel[count].Smio = 0;
	}
	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);

	return 0;
}

static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
					  SMU7_Discrete_DpmTable *table)
{
	int ret;

	ret = ci_populate_smc_vddc_table(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vddci_table(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_mvdd_table(rdev, table);
	if (ret)
		return ret;

	return 0;
}
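/*
 * Note: as written, ci_populate_mvdd_value() returns -EINVAL even when
 * it finds a matching MVDD level; only the early "no entry found" path
 * differs. Its caller in ci_populate_smc_acpi_level() treats any
 * non-zero return as "no MVDD value available" and falls back to a
 * MinMvdd of 0, so voltage->Voltage is effectively never consumed.
 */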
static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
				  SMU7_Discrete_VoltageLevel *voltage)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i = 0;

	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
				break;
			}
		}

		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
			return -EINVAL;
	}

	return -EINVAL;
}

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
{
	u16 v_index, idx;
	bool voltage_found = false;

	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;

	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
		return -EINVAL;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
			if (voltage_table->value ==
			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
				voltage_found = true;
				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
					idx = v_index;
				else
					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
				*std_voltage_lo_sidd =
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
				*std_voltage_hi_sidd =
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
				break;
			}
		}

		if (!voltage_found) {
			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
				if (voltage_table->value <=
				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
					voltage_found = true;
					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
						idx = v_index;
					else
						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
					*std_voltage_lo_sidd =
						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
					*std_voltage_hi_sidd =
						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
					break;
				}
			}
		}
	}

	return 0;
}

static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
						  const struct radeon_phase_shedding_limits_table *limits,
						  u32 sclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (sclk < limits->entries[i].sclk) {
			*phase_shedding = i;
			break;
		}
	}
}

static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
						  const struct radeon_phase_shedding_limits_table *limits,
						  u32 mclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (mclk < limits->entries[i].mclk) {
			*phase_shedding = i;
			break;
		}
	}
}
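/*
 * The memory controller keeps several arbitration register sets
 * (MC_CG_ARB_FREQ_F0..F3) and DPM switches between them when the
 * memory clock changes. The helpers below stamp the initial set into
 * the SMC-side ARB table and copy timings between sets via
 * ni_copy_and_switch_arb_sets().
 */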
static int ci_init_arb_table_index(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	tmp &= 0x00FFFFFF;
	tmp |= MC_CG_ARB_FREQ_F1 << 24;

	return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
				       tmp, pi->sram_end);
}

static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
					 u32 clock, u32 *voltage)
{
	u32 i = 0;

	if (allowed_clock_voltage_table->count == 0)
		return -EINVAL;

	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
			*voltage = allowed_clock_voltage_table->entries[i].v;
			return 0;
		}
	}

	*voltage = allowed_clock_voltage_table->entries[i-1].v;

	return 0;
}

static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	u32 i;
	u32 tmp;
	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;

	if (sclk < min)
		return 0;

	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		tmp = sclk / (1 << i);
		if (tmp >= min || i == 0)
			break;
	}

	return (u8)i;
}

static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
{
	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int ci_reset_to_default(struct radeon_device *rdev)
{
	return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
}
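/*
 * Hawaii workaround: on device IDs 0x67B0/0x67B1 with a particular
 * memory revision (MC_SEQ_MISC0 field == 0x3), the DRAM timing2
 * activity byte is recomputed from the engine clock for memory clocks
 * in the 1000-1250 MHz and 1250-1375 MHz bands (clocks below are in
 * 10 kHz units).
 */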
static void ci_register_patching_mc_arb(struct radeon_device *rdev,
					const u32 engine_clock,
					const u32 memory_clock,
					u32 *dram_timing2)
{
	bool patch;
	u32 tmp, tmp2;

	tmp = RREG32(MC_SEQ_MISC0);
	patch = (tmp & 0x0000f00) == 0x300;

	if (patch &&
	    ((rdev->pdev->device == 0x67B0) ||
	     (rdev->pdev->device == 0x67B1))) {
		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
			*dram_timing2 &= ~0x00ff0000;
			*dram_timing2 |= tmp2 << 16;
		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
			*dram_timing2 &= ~0x00ff0000;
			*dram_timing2 |= tmp2 << 16;
		}
	}
}

static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
						u32 sclk,
						u32 mclk,
						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
{
	u32 dram_timing;
	u32 dram_timing2;
	u32 burst_time;

	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);

	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;

	ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);

	arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
	arb_regs->McArbBurstTime = (u8)burst_time;

	return 0;
}

static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_MCArbDramTimingTable arb_regs;
	u32 i, j;
	int ret = 0;

	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));

	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
			ret = ci_populate_memory_timing_parameters(rdev,
								   pi->dpm_table.sclk_table.dpm_levels[i].value,
								   pi->dpm_table.mclk_table.dpm_levels[j].value,
								   &arb_regs.entries[i][j]);
			if (ret)
				break;
		}
	}

	if (ret == 0)
		ret = ci_copy_bytes_to_smc(rdev,
					   pi->arb_table_start,
					   (u8 *)&arb_regs,
					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
					   pi->sram_end);

	return ret;
}

static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->need_update_smu7_dpm_table == 0)
		return 0;

	return ci_do_program_memory_timing_parameters(rdev);
}

static void ci_populate_smc_initial_state(struct radeon_device *rdev,
					  struct radeon_ps *radeon_boot_state)
{
	struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 level = 0;

	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
		    boot_state->performance_levels[0].sclk) {
			pi->smc_state_table.GraphicsBootLevel = level;
			break;
		}
	}

	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
		    boot_state->performance_levels[0].mclk) {
			pi->smc_state_table.MemoryBootLevel = level;
			break;
		}
	}
}
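/*
 * Build a bitmask of enabled DPM levels, LSB = level 0. For example, a
 * three-level table with levels 0 and 2 enabled yields 0b101 = 0x5;
 * these masks are what the PPSMC_MSG_*DPM_SetEnabledMask messages
 * expect as their parameter.
 */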
static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
{
	u32 i;
	u32 mask_value = 0;

	for (i = dpm_table->count; i > 0; i--) {
		mask_value = mask_value << 1;
		if (dpm_table->dpm_levels[i-1].enabled)
			mask_value |= 0x1;
	}

	return mask_value;
}

static void ci_populate_smc_link_level(struct radeon_device *rdev,
				       SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 i;

	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].DownT = cpu_to_be32(5);
		table->LinkLevel[i].UpT = cpu_to_be32(30);
	}

	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
}

static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->UvdLevelCount =
		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].VclkFrequency =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
		table->UvdLevel[count].DclkFrequency =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
		table->UvdLevel[count].MinVddc =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->UvdLevel[count].MinVddcPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->UvdLevel[count].VclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->UvdLevel[count].DclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;

		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
	}

	return ret;
}
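/*
 * The VCE, ACP and SAMU levels follow the same pattern as UVD above:
 * copy the clock/voltage dependency entries, look up a post divider
 * through the ATOM clock-divider tables, then byte-swap the fields for
 * the big-endian SMC.
 */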
static int ci_populate_smc_vce_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->VceLevelCount =
		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;

	for (count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
		table->VceLevel[count].MinVoltage =
			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->VceLevel[count].MinPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->VceLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->VceLevel[count].Divider = (u8)dividers.post_divider;

		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
	}

	return ret;
}

static int ci_populate_smc_acp_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->AcpLevelCount = (u8)
		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);

	for (count = 0; count < table->AcpLevelCount; count++) {
		table->AcpLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
		table->AcpLevel[count].MinVoltage =
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
		table->AcpLevel[count].MinPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->AcpLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->AcpLevel[count].Divider = (u8)dividers.post_divider;

		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
	}

	return ret;
}

static int ci_populate_smc_samu_level(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->SamuLevelCount =
		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;

	for (count = 0; count < table->SamuLevelCount; count++) {
		table->SamuLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
		table->SamuLevel[count].MinVoltage =
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->SamuLevel[count].MinPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->SamuLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->SamuLevel[count].Divider = (u8)dividers.post_divider;

		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
	}

	return ret;
}
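/*
 * Translate a target memory clock into MPLL register contents. When
 * memory spread spectrum is enabled the modulation is re-derived for
 * the nominal VCO frequency, freq_nom = memory_clock * (qdr ? 4 : 2) *
 * (1 << post_div), with CLKS/CLKV computed from the ATOM SS info for
 * that frequency.
 */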
static int ci_calculate_mclk_params(struct radeon_device *rdev,
				    u32 memory_clock,
				    SMU7_Discrete_MemoryLevel *mclk,
				    bool strobe_mode,
				    bool dll_state_on)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
	u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~BWCTRL_MASK;
	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

	if (pi->mem_gddr5) {
		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
			YCLK_POST_DIV(mpll_param.post_div);
	}

	if (pi->caps_mclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = rdev->clock.mpll.reference_freq;

		if (mpll_param.qdr == 1)
			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);

		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;
		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clkv);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clks);
		}
	}

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

	if (dll_state_on)
		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
	else
		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}
static int ci_populate_single_memory_level(struct radeon_device *rdev,
					   u32 memory_clock,
					   SMU7_Discrete_MemoryLevel *memory_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;
	bool dll_state_on;

	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddc);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddci);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						    memory_clock, &memory_level->MinMvdd);
		if (ret)
			return ret;
	}

	memory_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_mclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      memory_clock,
						      &memory_level->MinVddcPhases);

	memory_level->EnabledForThrottle = 1;
	memory_level->UpH = 0;
	memory_level->DownH = 100;
	memory_level->VoltageDownH = 0;
	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

	memory_level->StutterEnable = false;
	memory_level->StrobeEnable = false;
	memory_level->EdcReadEnable = false;
	memory_level->EdcWriteEnable = false;
	memory_level->RttEnable = false;

	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    !pi->uvd_enabled &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (rdev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = true;

	if (pi->mem_gddr5) {
		memory_level->StrobeRatio =
			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
		if (pi->mclk_edc_enable_threshold &&
		    (memory_clock > pi->mclk_edc_enable_threshold))
			memory_level->EdcReadEnable = true;

		if (pi->mclk_edc_wr_enable_threshold &&
		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
			memory_level->EdcWriteEnable = true;

		if (memory_level->StrobeEnable) {
			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) != 0;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) != 0;
		} else {
			dll_state_on = pi->dll_default_on;
		}
	} else {
		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) != 0;
	}

	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
	if (ret)
		return ret;

	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

	return 0;
}
static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_clock_dividers dividers;
	SMU7_Discrete_VoltageLevel voltage_level;
	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	int ret;

	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc)
		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;

	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;

	ret = radeon_atom_get_clock_dividers(rdev,
					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
					     table->ACPILevel.SclkFrequency, false, &dividers);
	if (ret)
		return ret;

	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	spll_func_cntl &= ~SPLL_PWRON;
	spll_func_cntl |= SPLL_RESET;

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);

	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		if (pi->acpi_vddci)
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
	}

	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd = 0;
	else
		table->MemoryACPILevel.MinMvdd =
			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);

	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);

	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
	table->MemoryACPILevel.MpllDqFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl_1 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
	table->MemoryACPILevel.MpllFuncCntl_2 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpH = 0;
	table->MemoryACPILevel.DownH = 100;
	table->MemoryACPILevel.VoltageDownH = 0;
	table->MemoryACPILevel.ActivityLevel =
		cpu_to_be16((u16)pi->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	table->MemoryACPILevel.StrobeEnable = false;
	table->MemoryACPILevel.EdcReadEnable = false;
	table->MemoryACPILevel.EdcWriteEnable = false;
	table->MemoryACPILevel.RttEnable = false;

	return 0;
}

static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ulv_parm *ulv = &pi->ulv;

	if (ulv->supported) {
		if (enable)
			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
				0 : -EINVAL;
		else
			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
				0 : -EINVAL;
	}

	return 0;
}
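/*
 * ULV (ultra-low voltage) state programming. The ULV voltage is
 * expressed as an offset below the lowest VDDC level; for SVI2 it is
 * converted into VID steps as offset * VOLTAGE_VID_OFFSET_SCALE2 /
 * VOLTAGE_VID_OFFSET_SCALE1 (i.e. * 100 / 625, one step per 6.25 mV,
 * so a 50 mV offset becomes 8 steps). Note that the value appears to
 * be carried in backbias_response_time by the power-table parser
 * rather than being an actual timing parameter.
 */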
static int ci_populate_ulv_level(struct radeon_device *rdev,
				 SMU7_Discrete_Ulv *state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	if (ulv_voltage == 0) {
		pi->ulv.supported = false;
		return 0;
	}

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffset = 0;
		else
			state->VddcOffset =
				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
	} else {
		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffsetVid = 0;
		else
			state->VddcOffsetVid = (u8)
				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;

	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
	state->VddcOffset = cpu_to_be16(state->VddcOffset);

	return 0;
}
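/*
 * Translate an engine clock into SPLL settings. The feedback divider
 * comes from the ATOM divider tables; with engine spread spectrum
 * enabled, CLK_S = ref_clock * 5 / (ref_div * ss.rate) and
 * CLK_V = 4 * ss.percentage * fbdiv / (CLK_S * 10000), applied on top
 * of the spread-spectrum register values captured at init time.
 */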
static int ci_calculate_sclk_params(struct radeon_device *rdev,
				    u32 engine_clock,
				    SMU7_Discrete_GraphicsLevel *sclk)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev,
					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;
	fbdiv = dividers.fb_div & 0x3FFFFFF;

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	if (pi->caps_sclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (u8)dividers.post_divider;

	return 0;
}

static int ci_populate_single_graphic_level(struct radeon_device *rdev,
					    u32 engine_clock,
					    u16 sclk_activity_level_t,
					    SMU7_Discrete_GraphicsLevel *graphic_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
	if (ret)
		return ret;

	ret = ci_get_dependency_volt_by_clk(rdev,
					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					    engine_clock, &graphic_level->MinVddc);
	if (ret)
		return ret;

	graphic_level->SclkFrequency = engine_clock;

	graphic_level->Flags = 0;
	graphic_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      engine_clock,
						      &graphic_level->MinVddcPhases);

	graphic_level->ActivityLevel = sclk_activity_level_t;

	graphic_level->CcPwrDynRm = 0;
	graphic_level->CcPwrDynRm1 = 0;
	graphic_level->EnabledForThrottle = 1;
	graphic_level->UpH = 0;
	graphic_level->DownH = 0;
	graphic_level->VoltageDownH = 0;
	graphic_level->PowerThrottle = 0;

	if (pi->caps_sclk_ds)
		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
										   engine_clock,
										   CISLAND_MINIMUM_ENGINE_CLOCK);

	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);

	return 0;
}
static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 level_array_address = pi->dpm_table_start +
		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
		SMU7_MAX_LEVELS_GRAPHICS;
	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
	u32 i;
	int ret;

	memset(levels, 0, level_array_size);

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		ret = ci_populate_single_graphic_level(rdev,
						       dpm_table->sclk_table.dpm_levels[i].value,
						       (u16)pi->activity_target[i],
						       &pi->smc_state_table.GraphicsLevel[i]);
		if (ret)
			return ret;
		if (i > 1)
			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
		if (i == (dpm_table->sclk_table.count - 1))
			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
				PPSMC_DISPLAY_WATERMARK_HIGH;
	}
	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
				   (u8 *)levels, level_array_size,
				   pi->sram_end);
	if (ret)
		return ret;

	return 0;
}

static int ci_populate_ulv_state(struct radeon_device *rdev,
				 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(rdev, ulv_level);
}
static int ci_populate_all_memory_levels(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 level_array_address = pi->dpm_table_start +
		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
		SMU7_MAX_LEVELS_MEMORY;
	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
	u32 i;
	int ret;

	memset(levels, 0, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
			return -EINVAL;
		ret = ci_populate_single_memory_level(rdev,
						      dpm_table->mclk_table.dpm_levels[i].value,
						      &pi->smc_state_table.MemoryLevel[i]);
		if (ret)
			return ret;
	}

	pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

	if ((dpm_table->mclk_table.count >= 2) &&
	    ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
		pi->smc_state_table.MemoryLevel[1].MinVddc =
			pi->smc_state_table.MemoryLevel[0].MinVddc;
		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
	}

	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);

	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);

	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
		PPSMC_DISPLAY_WATERMARK_HIGH;

	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
				   (u8 *)levels, level_array_size,
				   pi->sram_end);
	if (ret)
		return ret;

	return 0;
}

static void ci_reset_single_dpm_table(struct radeon_device *rdev,
				      struct ci_single_dpm_table *dpm_table,
				      u32 count)
{
	u32 i;

	dpm_table->count = count;
	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
		dpm_table->dpm_levels[i].enabled = false;
}

static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
				      u32 index, u32 pcie_gen, u32 pcie_lanes)
{
	dpm_table->dpm_levels[index].value = pcie_gen;
	dpm_table->dpm_levels[index].param1 = pcie_lanes;
	dpm_table->dpm_levels[index].enabled = true;
}
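/*
 * The PCIe DPM table always exposes six fixed link levels mixing the
 * powersaving and performance gen/lane limits; if only one of the two
 * profiles is valid, it is mirrored into the other. Bonaire uses the
 * maximum powersaving lane count even for the lowest level.
 */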
static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
		return -EINVAL;

	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
	}

	ci_reset_single_dpm_table(rdev,
				  &pi->dpm_table.pcie_speed_table,
				  SMU7_MAX_LEVELS_LINK);

	if (rdev->family == CHIP_BONAIRE)
		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
					  pi->pcie_gen_powersaving.min,
					  pi->pcie_lane_powersaving.max);
	else
		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
					  pi->pcie_gen_powersaving.min,
					  pi->pcie_lane_powersaving.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
				  pi->pcie_gen_powersaving.min,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
				  pi->pcie_gen_powersaving.max,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
				  pi->pcie_gen_performance.max,
				  pi->pcie_lane_performance.max);

	pi->dpm_table.pcie_speed_table.count = 6;

	return 0;
}
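/*
 * Build the default sclk/mclk DPM tables from the ATOM dependency
 * tables, skipping consecutive duplicate clocks so every level is
 * unique; only level 0 starts out enabled, the remaining levels are
 * enabled later when the tables are trimmed against a power state.
 */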
static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct radeon_cac_leakage_table *std_voltage_table =
		&rdev->pm.dpm.dyn_state.cac_leakage_table;
	u32 i;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_table == NULL)
		return -EINVAL;
	if (allowed_mclk_table->count < 1)
		return -EINVAL;

	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));

	ci_reset_single_dpm_table(rdev,
				  &pi->dpm_table.sclk_table,
				  SMU7_MAX_LEVELS_GRAPHICS);
	ci_reset_single_dpm_table(rdev,
				  &pi->dpm_table.mclk_table,
				  SMU7_MAX_LEVELS_MEMORY);
	ci_reset_single_dpm_table(rdev,
				  &pi->dpm_table.vddc_table,
				  SMU7_MAX_LEVELS_VDDC);
	ci_reset_single_dpm_table(rdev,
				  &pi->dpm_table.vddci_table,
				  SMU7_MAX_LEVELS_VDDCI);
	ci_reset_single_dpm_table(rdev,
				  &pi->dpm_table.mvdd_table,
				  SMU7_MAX_LEVELS_MVDD);

	pi->dpm_table.sclk_table.count = 0;
	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
		if ((i == 0) ||
		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
		     allowed_sclk_vddc_table->entries[i].clk)) {
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
				allowed_sclk_vddc_table->entries[i].clk;
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
				(i == 0);
			pi->dpm_table.sclk_table.count++;
		}
	}

	pi->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_mclk_table->count; i++) {
		if ((i == 0) ||
		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
		     allowed_mclk_table->entries[i].clk)) {
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
				allowed_mclk_table->entries[i].clk;
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
				(i == 0);
			pi->dpm_table.mclk_table.count++;
		}
	}

	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
		pi->dpm_table.vddc_table.dpm_levels[i].value =
			allowed_sclk_vddc_table->entries[i].v;
		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
			std_voltage_table->entries[i].leakage;
		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
	}
	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;

	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
	if (allowed_mclk_table) {
		for (i = 0; i < allowed_mclk_table->count; i++) {
			pi->dpm_table.vddci_table.dpm_levels[i].value =
				allowed_mclk_table->entries[i].v;
			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
		}
		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
	}

	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
	if (allowed_mclk_table) {
		for (i = 0; i < allowed_mclk_table->count; i++) {
			pi->dpm_table.mvdd_table.dpm_levels[i].value =
				allowed_mclk_table->entries[i].v;
			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
		}
		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
	}

	ci_setup_default_pcie_tables(rdev);

	return 0;
}

static int ci_find_boot_level(struct ci_single_dpm_table *table,
			      u32 value, u32 *boot_level)
{
	u32 i;
	int ret = -EINVAL;

	for (i = 0; i < table->count; i++) {
		if (value == table->dpm_levels[i].value) {
			*boot_level = i;
			ret = 0;
		}
	}

	return ret;
}
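/*
 * Assemble the complete SMU7_Discrete_DpmTable and copy it into SMC
 * SRAM. The boot-level lookups below deliberately ignore their return
 * value: if the VBIOS boot clock does not match any DPM level, the
 * boot level simply stays 0. The final upload stops short of the three
 * trailing SMU7_PIDController blocks, which are left untouched for the
 * firmware.
 */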
static int ci_init_smc_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ulv_parm *ulv = &pi->ulv;
	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
	int ret;

	ret = ci_setup_default_dpm_tables(rdev);
	if (ret)
		return ret;

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
		ci_populate_smc_voltage_tables(rdev, table);

	ci_init_fps_limits(rdev);

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (pi->mem_gddr5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (ulv->supported) {
		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
		if (ret)
			return ret;
		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
	}

	ret = ci_populate_all_graphic_levels(rdev);
	if (ret)
		return ret;

	ret = ci_populate_all_memory_levels(rdev);
	if (ret)
		return ret;

	ci_populate_smc_link_level(rdev, table);

	ret = ci_populate_smc_acpi_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vce_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_acp_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_samu_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_do_program_memory_timing_parameters(rdev);
	if (ret)
		return ret;

	ret = ci_populate_smc_uvd_level(rdev, table);
	if (ret)
		return ret;

	table->UvdBootLevel = 0;
	table->VceBootLevel = 0;
	table->AcpBootLevel = 0;
	table->SamuBootLevel = 0;
	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
				 pi->vbios_boot_state.sclk_bootup_value,
				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
				 pi->vbios_boot_state.mclk_bootup_value,
				 (u32 *)&pi->smc_state_table.MemoryBootLevel);

	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

	ci_populate_smc_initial_state(rdev, radeon_boot_state);

	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
	if (ret)
		return ret;

	table->UVDInterval = 1;
	table->VCEInterval = 1;
	table->ACPInterval = 1;
	table->SAMUInterval = 1;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->VddcVddciDelta = 4000;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
	table->PCIeGenInterval = 1;
	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
		table->SVI2Enable = 1;
	else
		table->SVI2Enable = 0;

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	table->SystemFlags = cpu_to_be32(table->SystemFlags);
	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

	ret = ci_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
				   (u8 *)&table->SystemFlags,
				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
				   pi->sram_end);
	if (ret)
		return ret;

	return 0;
}
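
/*
 * The trim helpers below mark DPM levels outside the [low, high] range of
 * the requested power state as disabled; the PCIe variant additionally
 * disables duplicate speed/lane entries among the remaining levels.
 */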
static void ci_trim_single_dpm_states(struct radeon_device *rdev,
				      struct ci_single_dpm_table *dpm_table,
				      u32 low_limit, u32 high_limit)
{
	u32 i;

	for (i = 0; i < dpm_table->count; i++) {
		if ((dpm_table->dpm_levels[i].value < low_limit) ||
		    (dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}
}

static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
				    u32 speed_low, u32 lanes_low,
				    u32 speed_high, u32 lanes_high)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
	u32 i, j;

	for (i = 0; i < pcie_table->count; i++) {
		if ((pcie_table->dpm_levels[i].value < speed_low) ||
		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
		    (pcie_table->dpm_levels[i].value > speed_high) ||
		    (pcie_table->dpm_levels[i].param1 > lanes_high))
			pcie_table->dpm_levels[i].enabled = false;
		else
			pcie_table->dpm_levels[i].enabled = true;
	}

	for (i = 0; i < pcie_table->count; i++) {
		if (pcie_table->dpm_levels[i].enabled) {
			for (j = i + 1; j < pcie_table->count; j++) {
				if (pcie_table->dpm_levels[j].enabled) {
					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
						pcie_table->dpm_levels[j].enabled = false;
				}
			}
		}
	}
}

static int ci_trim_dpm_states(struct radeon_device *rdev,
			      struct radeon_ps *radeon_state)
{
	struct ci_ps *state = ci_get_ps(radeon_state);
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 high_limit_count;

	if (state->performance_level_count < 1)
		return -EINVAL;

	if (state->performance_level_count == 1)
		high_limit_count = 0;
	else
		high_limit_count = 1;

	ci_trim_single_dpm_states(rdev,
				  &pi->dpm_table.sclk_table,
				  state->performance_levels[0].sclk,
				  state->performance_levels[high_limit_count].sclk);

	ci_trim_single_dpm_states(rdev,
				  &pi->dpm_table.mclk_table,
				  state->performance_levels[0].mclk,
				  state->performance_levels[high_limit_count].mclk);

	ci_trim_pcie_dpm_states(rdev,
				state->performance_levels[0].pcie_gen,
				state->performance_levels[0].pcie_lane,
				state->performance_levels[high_limit_count].pcie_gen,
				state->performance_levels[high_limit_count].pcie_lane);

	return 0;
}

static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
{
	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
	struct radeon_clock_voltage_dependency_table *vddc_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 requested_voltage = 0;
	u32 i;

	if (disp_voltage_table == NULL)
		return -EINVAL;
	if (!disp_voltage_table->count)
		return -EINVAL;

	for (i = 0; i < disp_voltage_table->count; i++) {
		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
			requested_voltage = disp_voltage_table->entries[i].v;
	}

	for (i = 0; i < vddc_table->count; i++) {
		if (requested_voltage <= vddc_table->entries[i].v) {
			requested_voltage = vddc_table->entries[i].v;
			return (ci_send_msg_to_smc_with_parameter(rdev,
								  PPSMC_MSG_VddC_Request,
								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
				0 : -EINVAL;
		}
	}

	return -EINVAL;
}
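
/*
 * Push the current sclk/mclk level enable masks to the SMC, after first
 * applying the display's minimum voltage request. The PCIe mask upload
 * is intentionally compiled out.
 */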
static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result result;

	ci_apply_disp_minimum_voltage_request(rdev);

	if (!pi->sclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			result = ci_send_msg_to_smc_with_parameter(rdev,
								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	if (!pi->mclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			result = ci_send_msg_to_smc_with_parameter(rdev,
								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}
#if 0
	if (!pi->pcie_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			result = ci_send_msg_to_smc_with_parameter(rdev,
								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}
#endif
	return 0;
}

static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
						   struct radeon_ps *radeon_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *state = ci_get_ps(radeon_state);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	u32 i;

	pi->need_update_smu7_dpm_table = 0;

	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count) {
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	} else {
		/* XXX The current code always reprogrammed the sclk levels,
		 * but we don't currently handle disp sclk requirements
		 * so just skip it.
		 */
		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	if (rdev->pm.dpm.current_active_crtc_count !=
	    rdev->pm.dpm.new_active_crtc_count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
}
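
/*
 * Re-populate and re-upload only the graphics and/or memory levels that
 * ci_find_dpm_states_clocks_in_dpm_table() flagged as needing an update.
 */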
3877 */ 3878 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK) 3879 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; 3880 } 3881 3882 for (i = 0; i < mclk_table->count; i++) { 3883 if (mclk == mclk_table->dpm_levels[i].value) 3884 break; 3885 } 3886 3887 if (i >= mclk_table->count) 3888 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; 3889 3890 if (rdev->pm.dpm.current_active_crtc_count != 3891 rdev->pm.dpm.new_active_crtc_count) 3892 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; 3893 } 3894 3895 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev, 3896 struct radeon_ps *radeon_state) 3897 { 3898 struct ci_power_info *pi = ci_get_pi(rdev); 3899 struct ci_ps *state = ci_get_ps(radeon_state); 3900 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk; 3901 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk; 3902 struct ci_dpm_table *dpm_table = &pi->dpm_table; 3903 int ret; 3904 3905 if (!pi->need_update_smu7_dpm_table) 3906 return 0; 3907 3908 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) 3909 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk; 3910 3911 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) 3912 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk; 3913 3914 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) { 3915 ret = ci_populate_all_graphic_levels(rdev); 3916 if (ret) 3917 return ret; 3918 } 3919 3920 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) { 3921 ret = ci_populate_all_memory_levels(rdev); 3922 if (ret) 3923 return ret; 3924 } 3925 3926 return 0; 3927 } 3928 3929 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable) 3930 { 3931 struct ci_power_info *pi = ci_get_pi(rdev); 3932 const struct radeon_clock_and_voltage_limits *max_limits; 3933 int i; 3934 3935 if (rdev->pm.dpm.ac_power) 3936 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 3937 else 3938 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 3939 3940 if (enable) { 3941 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0; 3942 3943 for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 3944 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 3945 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i; 3946 3947 if (!pi->caps_uvd_dpm) 3948 break; 3949 } 3950 } 3951 3952 ci_send_msg_to_smc_with_parameter(rdev, 3953 PPSMC_MSG_UVDDPM_SetEnabledMask, 3954 pi->dpm_level_enable_mask.uvd_dpm_enable_mask); 3955 3956 if (pi->last_mclk_dpm_enable_mask & 0x1) { 3957 pi->uvd_enabled = true; 3958 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; 3959 ci_send_msg_to_smc_with_parameter(rdev, 3960 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3961 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 3962 } 3963 } else { 3964 if (pi->last_mclk_dpm_enable_mask & 0x1) { 3965 pi->uvd_enabled = false; 3966 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1; 3967 ci_send_msg_to_smc_with_parameter(rdev, 3968 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3969 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 3970 } 3971 } 3972 3973 return (ci_send_msg_to_smc(rdev, enable ? 3974 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ? 
3975 0 : -EINVAL; 3976 } 3977 3978 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable) 3979 { 3980 struct ci_power_info *pi = ci_get_pi(rdev); 3981 const struct radeon_clock_and_voltage_limits *max_limits; 3982 int i; 3983 3984 if (rdev->pm.dpm.ac_power) 3985 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 3986 else 3987 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 3988 3989 if (enable) { 3990 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0; 3991 for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 3992 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 3993 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i; 3994 3995 if (!pi->caps_vce_dpm) 3996 break; 3997 } 3998 } 3999 4000 ci_send_msg_to_smc_with_parameter(rdev, 4001 PPSMC_MSG_VCEDPM_SetEnabledMask, 4002 pi->dpm_level_enable_mask.vce_dpm_enable_mask); 4003 } 4004 4005 return (ci_send_msg_to_smc(rdev, enable ? 4006 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ? 4007 0 : -EINVAL; 4008 } 4009 4010 #if 0 4011 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable) 4012 { 4013 struct ci_power_info *pi = ci_get_pi(rdev); 4014 const struct radeon_clock_and_voltage_limits *max_limits; 4015 int i; 4016 4017 if (rdev->pm.dpm.ac_power) 4018 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 4019 else 4020 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 4021 4022 if (enable) { 4023 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0; 4024 for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 4025 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 4026 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i; 4027 4028 if (!pi->caps_samu_dpm) 4029 break; 4030 } 4031 } 4032 4033 ci_send_msg_to_smc_with_parameter(rdev, 4034 PPSMC_MSG_SAMUDPM_SetEnabledMask, 4035 pi->dpm_level_enable_mask.samu_dpm_enable_mask); 4036 } 4037 return (ci_send_msg_to_smc(rdev, enable ? 4038 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ? 4039 0 : -EINVAL; 4040 } 4041 4042 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable) 4043 { 4044 struct ci_power_info *pi = ci_get_pi(rdev); 4045 const struct radeon_clock_and_voltage_limits *max_limits; 4046 int i; 4047 4048 if (rdev->pm.dpm.ac_power) 4049 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 4050 else 4051 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 4052 4053 if (enable) { 4054 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0; 4055 for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 4056 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 4057 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i; 4058 4059 if (!pi->caps_acp_dpm) 4060 break; 4061 } 4062 } 4063 4064 ci_send_msg_to_smc_with_parameter(rdev, 4065 PPSMC_MSG_ACPDPM_SetEnabledMask, 4066 pi->dpm_level_enable_mask.acp_dpm_enable_mask); 4067 } 4068 4069 return (ci_send_msg_to_smc(rdev, enable ? 4070 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ? 

static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!gate) {
		if (pi->caps_uvd_dpm ||
		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
			pi->smc_state_table.UvdBootLevel = 0;
		else
			pi->smc_state_table.UvdBootLevel =
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;

		tmp = RREG32_SMC(DPM_TABLE_475);
		tmp &= ~UvdBootLevel_MASK;
		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
		WREG32_SMC(DPM_TABLE_475, tmp);
	}

	return ci_enable_uvd_dpm(rdev, !gate);
}

static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
{
	u8 i;
	u32 min_evclk = 30000; /* ??? */
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= min_evclk)
			return i;
	}

	return table->count - 1;
}

static int ci_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 tmp;

	if (radeon_current_state->evclk != radeon_new_state->evclk) {
		if (radeon_new_state->evclk) {
			/* turn the clocks on when encoding */
			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);

			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
			tmp = RREG32_SMC(DPM_TABLE_475);
			tmp &= ~VceBootLevel_MASK;
			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
			WREG32_SMC(DPM_TABLE_475, tmp);

			ret = ci_enable_vce_dpm(rdev, true);
		} else {
			/* turn the clocks off when not encoding */
			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);

			ret = ci_enable_vce_dpm(rdev, false);
		}
	}
	return ret;
}

#if 0
static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	return ci_enable_samu_dpm(rdev, gate);
}

static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!gate) {
		pi->smc_state_table.AcpBootLevel = 0;

		tmp = RREG32_SMC(DPM_TABLE_475);
		tmp &= ~AcpBootLevel_MASK;
		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
		WREG32_SMC(DPM_TABLE_475, tmp);
	}

	return ci_enable_acp_dpm(rdev, !gate);
}
#endif

static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
					     struct radeon_ps *radeon_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	ret = ci_trim_dpm_states(rdev, radeon_state);
	if (ret)
		return ret;

	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
	pi->last_mclk_dpm_enable_mask =
		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
	if (pi->uvd_enabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
	}
	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);

	return 0;
}

static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
				       u32 level_mask)
{
	u32 level = 0;

	while ((level_mask & (1 << level)) == 0)
		level++;

	return level;
}
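
/*
 * Force the highest or lowest enabled sclk/mclk/PCIe level, or hand
 * control back to the SMC (auto), then poll the current profile index
 * registers until the hardware reflects the requested level.
 */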
int ci_dpm_force_performance_level(struct radeon_device *rdev,
				   enum radeon_dpm_forced_level level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp, levels, i;
	int ret;

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_pcie(rdev, level);
				if (ret)
					return ret;
				for (i = 0; i < rdev->usec_timeout; i++) {
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_sclk(rdev, levels);
				if (ret)
					return ret;
				for (i = 0; i < rdev->usec_timeout; i++) {
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_mclk(rdev, levels);
				if (ret)
					return ret;
				for (i = 0; i < rdev->usec_timeout; i++) {
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(rdev,
							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			ret = ci_dpm_force_state_sclk(rdev, levels);
			if (ret)
				return ret;
			for (i = 0; i < rdev->usec_timeout; i++) {
				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(rdev,
							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			ret = ci_dpm_force_state_mclk(rdev, levels);
			if (ret)
				return ret;
			for (i = 0; i < rdev->usec_timeout; i++) {
				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(rdev,
							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			ret = ci_dpm_force_state_pcie(rdev, levels);
			if (ret)
				return ret;
			for (i = 0; i < rdev->usec_timeout; i++) {
				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
		if (!pi->pcie_dpm_key_disabled) {
			PPSMC_Result smc_result;

			smc_result = ci_send_msg_to_smc(rdev,
							PPSMC_MSG_PCIeDPM_UnForceLevel);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
		ret = ci_upload_dpm_level_enable_mask(rdev);
		if (ret)
			return ret;
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}
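
/*
 * Append the EMRS/MRS(1) shadow entries derived from MC_SEQ_MISC1 and
 * MC_SEQ_RESERVE_M to the MC register table. New entries are appended
 * starting at index table->last, which must stay within
 * SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE.
 */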
static int ci_set_mc_special_registers(struct radeon_device *rdev,
				       struct ci_mc_reg_table *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 i, j, k;
	u32 temp_reg;

	for (i = 0, j = table->last; i < table->last; i++) {
		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
			return -EINVAL;
		switch (table->mc_reg_address[i].s1 << 2) {
		case MC_SEQ_MISC1:
			temp_reg = RREG32(MC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			}
			j++;
			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			temp_reg = RREG32(MC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
				if (!pi->mem_gddr5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;
			/* j indexes mc_reg_address[]/mc_data[], so the bound
			 * checks must be >=, not >, to keep the writes below
			 * in range (was an off-by-one)
			 */
			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			if (!pi->mem_gddr5) {
				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
				for (k = 0; k < table->num_entries; k++) {
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				}
				j++;
				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
					return -EINVAL;
			}
			break;
		case MC_SEQ_RESERVE_M:
			temp_reg = RREG32(MC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			}
			j++;
			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			break;
		default:
			break;
		}
	}

	table->last = j;

	return 0;
}
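
/* Map an MC sequencer register to the _LP shadow copy used for dynamic
 * MC timing switches; returns false if the register has no shadow.
 */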
static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
	bool result = true;

	switch (in_reg) {
	case MC_SEQ_RAS_TIMING >> 2:
		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
		break;
	case MC_SEQ_DLL_STBY >> 2:
		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
		break;
	case MC_SEQ_G5PDX_CMD0 >> 2:
		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
		break;
	case MC_SEQ_G5PDX_CMD1 >> 2:
		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
		break;
	case MC_SEQ_G5PDX_CTRL >> 2:
		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
		break;
	case MC_SEQ_CAS_TIMING >> 2:
		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
		break;
	case MC_SEQ_MISC_TIMING >> 2:
		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
		break;
	case MC_SEQ_MISC_TIMING2 >> 2:
		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
		break;
	case MC_SEQ_PMG_DVS_CMD >> 2:
		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
		break;
	case MC_SEQ_PMG_DVS_CTL >> 2:
		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
		break;
	case MC_SEQ_RD_CTL_D0 >> 2:
		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
		break;
	case MC_SEQ_RD_CTL_D1 >> 2:
		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
		break;
	case MC_SEQ_WR_CTL_D0 >> 2:
		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
		break;
	case MC_SEQ_WR_CTL_D1 >> 2:
		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
		break;
	case MC_PMG_CMD_EMRS >> 2:
		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
		break;
	case MC_PMG_CMD_MRS >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
		break;
	case MC_PMG_CMD_MRS1 >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
		break;
	case MC_SEQ_PMG_TIMING >> 2:
		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
		break;
	case MC_PMG_CMD_MRS2 >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
		break;
	case MC_SEQ_WR_CTL_2 >> 2:
		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
		break;
	default:
		result = false;
		break;
	}

	return result;
}

static void ci_set_valid_flag(struct ci_mc_reg_table *table)
{
	u8 i, j;

	for (i = 0; i < table->last; i++) {
		for (j = 1; j < table->num_entries; j++) {
			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
			    table->mc_reg_table_entry[j].mc_data[i]) {
				table->valid_flag |= 1 << i;
				break;
			}
		}
	}
}

static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
{
	u32 i;
	u16 address;

	for (i = 0; i < table->last; i++) {
		table->mc_reg_address[i].s0 =
			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
			address : table->mc_reg_address[i].s1;
	}
}

static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
				      struct ci_mc_reg_table *ci_table)
{
	u8 i, j;

	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
		return -EINVAL;
	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
		return -EINVAL;

	for (i = 0; i < table->last; i++)
		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;

	ci_table->last = table->last;

	for (i = 0; i < table->num_entries; i++) {
		ci_table->mc_reg_table_entry[i].mclk_max =
			table->mc_reg_table_entry[i].mclk_max;
		for (j = 0; j < table->last; j++)
			ci_table->mc_reg_table_entry[i].mc_data[j] =
				table->mc_reg_table_entry[i].mc_data[j];
	}
	ci_table->num_entries = table->num_entries;

	return 0;
}
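
/*
 * Apparently a memory-training quirk for Hawaii boards (PCI IDs
 * 0x67B0/0x67B1): patch several MC sequencer values for the entries
 * with mclk_max == 125000 or 137500 (1250/1375 MHz in 10 kHz units).
 */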
static int ci_register_patching_mc_seq(struct radeon_device *rdev,
				       struct ci_mc_reg_table *table)
{
	u8 i, k;
	u32 tmp;
	bool patch;

	tmp = RREG32(MC_SEQ_MISC0);
	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;

	if (patch &&
	    ((rdev->pdev->device == 0x67B0) ||
	     (rdev->pdev->device == 0x67B1))) {
		for (i = 0; i < table->last; i++) {
			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			switch (table->mc_reg_address[i].s1 >> 2) {
			case MC_SEQ_MISC1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
							0x00000007;
				}
				break;
			case MC_SEQ_WR_CTL_D0:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case MC_SEQ_WR_CTL_D1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case MC_SEQ_WR_CTL_2:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] = 0;
				}
				break;
			case MC_SEQ_CAS_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0140;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0150;
				}
				break;
			case MC_SEQ_MISC_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000030;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000035;
				}
				break;
			default:
				break;
			}
		}

		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
		tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
		WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
	}

	return 0;
}
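
/*
 * Snapshot the live MC sequencer registers into their _LP shadows, pull
 * the AC timing table for this memory module out of the VBIOS, and
 * post-process it: s0 index fixup, Hawaii patching, special EMRS/MRS
 * registers, and the valid-flag scan.
 */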
static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_mc_reg_table *table;
	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(rdev);
	int ret;

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));

	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
	if (ret)
		goto init_mc_done;

	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_s0_mc_reg_index(ci_table);

	ret = ci_register_patching_mc_seq(rdev, ci_table);
	if (ret)
		goto init_mc_done;

	ret = ci_set_mc_special_registers(rdev, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_valid_flag(ci_table);

init_mc_done:
	kfree(table);

	return ret;
}
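
/*
 * The helpers below convert the driver's MC register table into the
 * big-endian SMU7_Discrete_MCRegisters image the SMC consumes, keeping
 * only the registers flagged in valid_flag.
 */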
static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
					SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i, j;

	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
		if (pi->mc_reg_table.valid_flag & (1 << j)) {
			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
			i++;
		}
	}

	mc_reg_table->last = (u8)i;

	return 0;
}

static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
				    SMU7_Discrete_MCRegisterSet *data,
				    u32 num_entries, u32 valid_flag)
{
	u32 i, j;

	for (i = 0, j = 0; j < num_entries; j++) {
		if (valid_flag & (1 << j)) {
			data->value[i] = cpu_to_be32(entry->mc_data[j]);
			i++;
		}
	}
}

static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
						 const u32 memory_clock,
						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i = 0;

	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
			break;
	}

	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
		--i;

	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
				mc_reg_table_data, pi->mc_reg_table.last,
				pi->mc_reg_table.valid_flag);
}

static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
					   SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i;

	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
		ci_convert_mc_reg_table_entry_to_smc(rdev,
						     pi->dpm_table.mclk_table.dpm_levels[i].value,
						     &mc_reg_table->data[i]);
}

static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
	if (ret)
		return ret;
	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

	return ci_copy_bytes_to_smc(rdev,
				    pi->mc_reg_table_start,
				    (u8 *)&pi->smc_mc_reg_table,
				    sizeof(SMU7_Discrete_MCRegisters),
				    pi->sram_end);
}

static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
		return 0;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

	return ci_copy_bytes_to_smc(rdev,
				    pi->mc_reg_table_start +
				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
				    (u8 *)&pi->smc_mc_reg_table.data[0],
				    sizeof(SMU7_Discrete_MCRegisterSet) *
				    pi->dpm_table.mclk_table.count,
				    pi->sram_end);
}

static void ci_enable_voltage_control(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= VOLT_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
						      struct radeon_ps *radeon_state)
{
	struct ci_ps *state = ci_get_ps(radeon_state);
	int i;
	u16 pcie_speed, max_speed = 0;

	for (i = 0; i < state->performance_level_count; i++) {
		pcie_speed = state->performance_levels[i].pcie_gen;
		if (max_speed < pcie_speed)
			max_speed = pcie_speed;
	}

	return max_speed;
}

static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
{
	u32 speed_cntl = 0;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;

	return (u16)speed_cntl;
}

static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
{
	u32 link_width = 0;

	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
	link_width >>= LC_LINK_WIDTH_RD_SHIFT;

	switch (link_width) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}
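
/*
 * If the new state needs a faster PCIe link, request it via ACPI before
 * the state switch; link downgrades are deferred by setting
 * pspp_notify_required so they are requested after the switch.
 */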
static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
							     struct radeon_ps *radeon_new_state,
							     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	enum radeon_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(rdev, radeon_new_state);
	enum radeon_pcie_gen current_link_speed;

	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case RADEON_PCIE_GEN3:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			pi->force_pcie_gen = RADEON_PCIE_GEN2;
			if (current_link_speed == RADEON_PCIE_GEN2)
				break;
			/* fall through */
		case RADEON_PCIE_GEN2:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
#endif
			/* fall through */
		default:
			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}

static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
							   struct radeon_ps *radeon_new_state,
							   struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	enum radeon_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(rdev, radeon_new_state);
	u8 request;

	if (pi->pspp_notify_required) {
		if (target_link_speed == RADEON_PCIE_GEN3)
			request = PCIE_PERF_REQ_PECI_GEN3;
		else if (target_link_speed == RADEON_PCIE_GEN2)
			request = PCIE_PERF_REQ_PECI_GEN2;
		else
			request = PCIE_PERF_REQ_PECI_GEN1;

		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
		    (ci_get_current_pcie_speed(rdev) > 0))
			return;

#ifdef CONFIG_ACPI
		radeon_acpi_pcie_performance_request(rdev, request, false);
#endif
	}
}

static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddci_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddci_table->count < 1)
		return -EINVAL;

	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
	pi->max_vddc_in_pp_table =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;

	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
	pi->max_vddci_in_pp_table =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	return 0;
}
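
/*
 * The leakage patching helpers below replace virtual leakage voltage IDs
 * recorded in the VBIOS tables with the actual voltages measured for
 * this part.
 */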
static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddc) {
			*vddc = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddci) {
			*vddci = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
								      struct radeon_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
								       struct radeon_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
									  struct radeon_vce_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
									  struct radeon_uvd_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
								   struct radeon_phase_shedding_limits_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
	}
}

static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
							    struct radeon_clock_and_voltage_limits *table)
{
	if (table) {
		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
	}
}

static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
							 struct radeon_cac_leakage_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
	}
}
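
/* Apply the leakage fixups to every clock/voltage dependency table we use. */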
static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
{
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
						     &rdev->pm.dpm.dyn_state.cac_leakage_table);
}

static void ci_get_memory_type(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	tmp = RREG32(MC_SEQ_MISC0);

	if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
	    MC_SEQ_MISC0_GDDR5_VALUE)
		pi->mem_gddr5 = true;
	else
		pi->mem_gddr5 = false;
}

static void ci_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void ci_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}

int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	ci_update_requested_ps(rdev, new_ps);

	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);

	return 0;
}

void ci_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	ci_update_current_ps(rdev, new_ps);
}

void ci_dpm_setup_asic(struct radeon_device *rdev)
{
	int r;

	r = ci_mc_load_microcode(rdev);
	if (r)
		DRM_ERROR("Failed to load MC firmware!\n");
	ci_read_clock_registers(rdev);
	ci_get_memory_type(rdev);
	ci_enable_acpi_power_management(rdev);
	ci_init_sclk_t(rdev);
}
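
/*
 * Bring up DPM: program the voltage, AC-timing, spread-spectrum and
 * thermal state, upload the SMC firmware and tables, start the SMC, and
 * enable ULV, deep sleep, DIDT, CAC and power containment in order. Any
 * failure aborts the sequence with the corresponding error.
 */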
int ci_dpm_enable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	if (ci_is_smc_running(rdev))
		return -EINVAL;
	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(rdev);
		ret = ci_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_initialize_mc_reg_table(rdev);
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		ci_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, true);
	ci_program_sstp(rdev);
	ci_enable_display_gap(rdev);
	ci_program_vc(rdev);
	ret = ci_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(rdev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	ci_dpm_start_smc(rdev);
	ci_enable_vr_hot_gpio_interrupt(rdev);
	ret = ci_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	ci_enable_sclk_control(rdev, true);
	ret = ci_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}

	ret = ci_power_control_set_level(rdev);
	if (ret) {
		DRM_ERROR("ci_power_control_set_level failed\n");
		return ret;
	}

	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
		return ret;
	}

	ci_thermal_start_thermal_controller(rdev);

	ci_update_current_ps(rdev, boot_ps);

	return 0;
}

static int ci_set_temperature_range(struct radeon_device *rdev)
{
	int ret;

	ret = ci_thermal_enable_alert(rdev, false);
	if (ret)
		return ret;
	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(rdev, true);
	if (ret)
		return ret;

	return ret;
}

int ci_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	ret = ci_set_temperature_range(rdev);
	if (ret)
		return ret;

	ci_dpm_powergate_uvd(rdev, true);

	return 0;
}
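
/* Tear DPM down in roughly the reverse order of ci_dpm_enable(). */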
void ci_dpm_disable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	ci_dpm_powergate_uvd(rdev, false);

	if (!ci_is_smc_running(rdev))
		return;

	ci_thermal_stop_thermal_controller(rdev);

	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, false);
	ci_enable_power_containment(rdev, false);
	ci_enable_smc_cac(rdev, false);
	ci_enable_didt(rdev, false);
	ci_enable_spread_spectrum(rdev, false);
	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	ci_stop_dpm(rdev);
	ci_enable_ds_master_switch(rdev, false);
	ci_enable_ulv(rdev, false);
	ci_clear_vc(rdev);
	ci_reset_to_default(rdev);
	ci_dpm_stop_smc(rdev);
	ci_force_switch_to_arb_f0(rdev);
	ci_enable_thermal_based_sclk_dpm(rdev, false);

	ci_update_current_ps(rdev, boot_ps);
}

int ci_dpm_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
	if (pi->pcie_performance_request)
		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
	ret = ci_freeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}

	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}

	ret = ci_update_sclk_t(rdev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(rdev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	if (pi->pcie_performance_request)
		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);

	return 0;
}

#if 0
void ci_dpm_reset_asic(struct radeon_device *rdev)
{
	ci_set_boot_state(rdev);
}
#endif

void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	ci_program_display_gap(rdev);
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
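
/*
 * ATOM PowerPlay parsing: the unions above overlay the different table
 * revisions; the helpers below unpack the non-clock and clock info
 * entries into radeon_ps/ci_ps structures.
 */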
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		rdev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}

static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
						 pi->sys_pcie_mask,
						 pi->vbios_boot_state.pcie_gen_bootup_value,
						 clock_info->ci.ucPCIEGen);
	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
						   pi->vbios_boot_state.pcie_lane_bootup_value,
						   le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_pcie_gen = pl->pcie_gen;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}
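	/*
	 * Track the widest PCIe gen/lane range seen for each UI class so
	 * the powersaving and performance limits reflect what the power
	 * states in the vbios table actually use.
	 */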
	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}

static int ci_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct ci_ps *ps;
	int ret;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct radeon_ps),
				  GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info) {
			ret = -EINVAL;
			goto err_free_ps;
		}
		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		if (ps == NULL) {
			ret = -ENOMEM;
			goto err_free_ps;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			ci_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
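	/*
	 * ATOM clock values are split into a 16-bit low word and an 8-bit
	 * high byte; the VCE loop below reassembles them into single 24-bit
	 * sclk/mclk values (in the driver's 10 kHz units), just as
	 * ci_parse_pplib_clock_info() does for the regular states.
	 */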
	/* fill in the vce power states */
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk, mclk;
		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
		sclk |= clock_info->ci.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		rdev->pm.dpm.vce_states[i].mclk = mclk;
	}

	return 0;

err_free_ps:
	/* unfilled ps_priv entries are NULL (kcalloc), so kfree() is safe */
	for (i = 0; i < state_array->ucNumEntries; i++)
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	kfree(rdev->pm.dpm.ps);
	rdev->pm.dpm.ps = NULL; /* avoid a double free from ci_dpm_fini() */
	return ret;
}

static int ci_get_vbios_boot_values(struct radeon_device *rdev,
				    struct ci_vbios_boot_state *boot_state)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
	u8 frev, crev;
	u16 data_offset;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		firmware_info =
			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
						    data_offset);
		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);

		return 0;
	}
	return -EINVAL;
}

void ci_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	r600_free_extended_power_table(rdev);
}

int ci_dpm_init(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	SMU7_Discrete_DpmTable *dpm_table;
	struct radeon_gpio_rec gpio;
	u16 data_offset, size;
	u8 frev, crev;
	struct ci_power_info *pi;
	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
	struct pci_dev *root = rdev->pdev->bus->self;
	int ret;

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	if (!pci_is_root_bus(rdev->pdev->bus))
		speed_cap = pcie_get_speed_cap(root);
	if (speed_cap == PCI_SPEED_UNKNOWN) {
		pi->sys_pcie_mask = 0;
	} else {
		if (speed_cap == PCIE_SPEED_8_0GT)
			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
				RADEON_PCIE_SPEED_50 |
				RADEON_PCIE_SPEED_80;
		else if (speed_cap == PCIE_SPEED_5_0GT)
			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
				RADEON_PCIE_SPEED_50;
		else
			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
	}
	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;

	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
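	/*
	 * Lane limits are seeded inverted (max low, min high) like the gen
	 * limits above, so state parsing can only widen them toward the
	 * values actually present in the power-state table.
	 */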
	pi->pcie_lane_performance.max = 0;
	pi->pcie_lane_performance.min = 16;
	pi->pcie_lane_powersaving.max = 0;
	pi->pcie_lane_powersaving.min = 16;

	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_get_platform_caps(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_parse_extended_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = ci_parse_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	pi->dll_default_on = false;
	pi->sram_end = SMC_RAM_END;

	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

	pi->sclk_dpm_key_disabled = 0;
	pi->mclk_dpm_key_disabled = 0;
	pi->pcie_dpm_key_disabled = 0;
	pi->thermal_sclk_dpm_enabled = 0;

	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
	if ((rdev->pdev->device == 0x6658) &&
	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
		pi->mclk_dpm_key_disabled = 1;
	}

	pi->caps_sclk_ds = true;

	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_stutter_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	pi->mclk_edc_wr_enable_threshold = 40000;

	ci_initialize_powertune_defaults(rdev);

	pi->caps_fps = false;

	pi->caps_sclk_throttle_low_notification = false;

	pi->caps_uvd_dpm = true;
	pi->caps_vce_dpm = true;

	ci_get_leakage_voltages(rdev);
	ci_patch_dependency_tables_with_leakage(rdev);
	ci_set_private_data_variables_based_on_pptable(rdev);

	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kcalloc(4,
			sizeof(struct radeon_clock_voltage_dependency_entry),
			GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		ci_dpm_fini(rdev);
		return -ENOMEM;
	}
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
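	/*
	 * Thermal trip points are in millidegrees Celsius; Hawaii uses a
	 * low/high DPM trip 5 C below the other CI parts, with the same
	 * 104 C shutdown limit.
	 */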
	if (rdev->family == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

	dpm_table = &pi->smc_state_table;

	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->VRHotGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	} else {
		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	}

	gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->AcDcGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	} else {
		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	}

	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
	if (gpio.valid) {
		u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);

		switch (gpio.shift) {
		case 0:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(1);
			break;
		case 1:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(2);
			break;
		case 2:
			tmp |= GNB_SLOW;
			break;
		case 3:
			tmp |= FORCE_NB_PS1;
			break;
		case 4:
			tmp |= DPM_ENABLED;
			break;
		default:
			DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
			break;
		}
		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
	}

	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
	pi->pcie_performance_request = false;
#endif
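	/*
	 * Only advertise sclk/mclk spread spectrum support if the vbios
	 * actually carries an ASIC_InternalSS_Info table; dynamic_ss is
	 * left enabled either way.
	 */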
	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = false;

	/* make sure dc limits are valid */
	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}

void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *rps = &pi->current_rps;
	u32 sclk = ci_get_average_sclk_freq(rdev);
	u32 mclk = ci_get_average_mclk_freq(rdev);

	seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
	seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
	seq_printf(m, "power level avg sclk: %u mclk: %u\n",
		   sclk, mclk);
}

void ci_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
	}
	r600_dpm_print_ps_status(rdev, rps);
}

u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
{
	u32 sclk = ci_get_average_sclk_freq(rdev);

	return sclk;
}

u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
{
	u32 mclk = ci_get_average_mclk_freq(rdev);

	return mclk;
}

u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}