/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "smumgr.h"
#include "smu10_inc.h"
#include "soc15_common.h"
#include "smu10_smumgr.h"
#include "ppatomctrl.h"
#include "rv_ppsmc.h"
#include "smu10_driver_if.h"
#include "smu10.h"
#include "pp_debug.h"

#define BUFFER_SIZE			80000
#define MAX_STRING_SIZE			15
#define BUFFER_SIZETWO			131072

#define MP0_Public			0x03800000
#define MP0_SRAM			0x03900000
#define MP1_Public			0x03b00000
#define MP1_SRAM			0x03c00004

#define smnMP1_FIRMWARE_FLAGS		0x3010028

/* Wait until the SMU posts a non-zero response in MP1 C2PMSG_90 and return it. */
static uint32_t smu10_wait_for_response(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg;

	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);

	phm_wait_for_register_unequal(hwmgr, reg,
			0, MP1_C2PMSG_90__CONTENT_MASK);

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

/* Write the message ID to MP1 C2PMSG_66 without waiting for an acknowledgement. */
static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
		uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

	return 0;
}

/* The SMU returns message arguments/results in MP1 C2PMSG_82. */
static uint32_t smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;

	smu10_wait_for_response(hwmgr);

	/* Clear the response register before issuing the new message. */
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	smu10_send_msg_to_smc_without_waiting(hwmgr, msg);

	/* A zero response means the SMU did not acknowledge the message. */
	if (smu10_wait_for_response(hwmgr) == 0)
		dev_err(adev->dev, "Failed to send Message %x.\n", msg);

	return 0;
}

static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
		uint16_t msg, uint32_t parameter)
{
	struct amdgpu_device *adev = hwmgr->adev;

	smu10_wait_for_response(hwmgr);

	/* Clear the response register, stage the parameter, then send the message. */
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);

	smu10_send_msg_to_smc_without_waiting(hwmgr, msg);

	if (smu10_wait_for_response(hwmgr) == 0)
		dev_err(adev->dev, "Failed to send Message %x.\n", msg);

	return 0;
}

static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
		uint8_t *table, int16_t table_id)
{
	struct smu10_smumgr *priv =
			(struct smu10_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;

	PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
			"Invalid SMU Table ID!", return -EINVAL;);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL;);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL;);

	/* Program the DRAM address of the driver table, then have the SMU copy it to DRAM. */
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram,
			priv->smu_tables.entry[table_id].table_id,
			NULL);

	/* Invalidate the HDP cache so the CPU sees the data the SMU just wrote. */
	amdgpu_asic_invalidate_hdp(adev, NULL);

	memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);

	return 0;
}

static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
		uint8_t *table, int16_t table_id)
{
	struct smu10_smumgr *priv =
			(struct smu10_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;

	PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
			"Invalid SMU Table ID!", return -EINVAL;);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL;);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL;);

	memcpy(priv->smu_tables.entry[table_id].table, table,
			priv->smu_tables.entry[table_id].size);

	/* Flush the HDP cache so the SMU sees the data the CPU just wrote. */
	amdgpu_asic_flush_hdp(adev, NULL);

	/* Program the DRAM address of the driver table, then have the SMU pull it from DRAM. */
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu,
			priv->smu_tables.entry[table_id].table_id,
			NULL);

	return 0;
}

static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
	uint32_t smc_driver_if_version;
	int ret = 0;

	ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetDriverIfVersion,
			&smc_driver_if_version);
	if (ret)
		return ret;

	if ((smc_driver_if_version != SMU10_DRIVER_IF_VERSION) &&
	    (smc_driver_if_version != SMU10_DRIVER_IF_VERSION + 1)) {
		pr_err("Attempt to read SMC IF Version Number Failed!\n");
		return -EINVAL;
	}

	return 0;
}

static int smu10_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct smu10_smumgr *priv =
			(struct smu10_smumgr *)(hwmgr->smu_backend);

	if (priv) {
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
					&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
					&priv->smu_tables.entry[SMU10_WMTABLE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
					&priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
					&priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
		kfree(hwmgr->smu_backend);
		hwmgr->smu_backend = NULL;
	}

	return 0;
}

static int smu10_start_smu(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
	adev->pm.fw_version = hwmgr->smu_version >> 8;

	/* Raven (but not Raven2) needs SMU firmware 0x1e45 or newer for gfxoff. */
	if (!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
	    (adev->apu_flags & AMD_APU_IS_RAVEN) &&
	    adev->pm.fw_version < 0x1e45)
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

	if (smu10_verify_smc_interface(hwmgr))
		return -EINVAL;

	return 0;
}

static int smu10_smu_init(struct pp_hwmgr *hwmgr)
{
	struct smu10_smumgr *priv;
	int r;

	priv = kzalloc(sizeof(struct smu10_smumgr), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	hwmgr->smu_backend = priv;

	/* allocate space for watermarks table */
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(Watermarks_t), PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
			&priv->smu_tables.entry[SMU10_WMTABLE].handle,
			&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
			&priv->smu_tables.entry[SMU10_WMTABLE].table);

	if (r)
		goto err0;

	priv->smu_tables.entry[SMU10_WMTABLE].version = 0x01;
	priv->smu_tables.entry[SMU10_WMTABLE].size = sizeof(Watermarks_t);
	priv->smu_tables.entry[SMU10_WMTABLE].table_id = TABLE_WATERMARKS;

	/* allocate space for DPM clocks table */
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(DpmClocks_t), PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
			&priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
			&priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
			&priv->smu_tables.entry[SMU10_CLOCKTABLE].table);

	if (r)
		goto err1;

	priv->smu_tables.entry[SMU10_CLOCKTABLE].version = 0x01;
	priv->smu_tables.entry[SMU10_CLOCKTABLE].size = sizeof(DpmClocks_t);
	priv->smu_tables.entry[SMU10_CLOCKTABLE].table_id = TABLE_DPMCLOCKS;

	return 0;

err1:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
				&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
				&priv->smu_tables.entry[SMU10_WMTABLE].table);
err0:
	kfree(priv);
	return -EINVAL;
}

/* rw == true: read the table from the SMU; rw == false: write the table to the SMU. */
static int smu10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
{
	int ret;

	if (rw)
		ret = smu10_copy_table_from_smc(hwmgr, table, table_id);
	else
		ret = smu10_copy_table_to_smc(hwmgr, table, table_id);

	return ret;
}

const struct pp_smumgr_func smu10_smu_funcs = {
	.name = "smu10_smu",
	.smu_init = &smu10_smu_init,
	.smu_fini = &smu10_smu_fini,
	.start_smu = &smu10_start_smu,
	.request_smu_load_specific_fw = NULL,
	.send_msg_to_smc = &smu10_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = &smu10_send_msg_to_smc_with_parameter,
	.download_pptable_settings = NULL,
	.upload_pptable_settings = NULL,
	.get_argument = smu10_read_arg_from_smc,
	.smc_table_manager = smu10_smc_table_manager,
};