/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "smumgr.h"
#include "smu10_inc.h"
#include "soc15_common.h"
#include "smu10_smumgr.h"
#include "ppatomctrl.h"
#include "rv_ppsmc.h"
#include "smu10_driver_if.h"
#include "smu10.h"
#include "pp_debug.h"


#define BUFFER_SIZE			80000
#define MAX_STRING_SIZE			15
#define BUFFER_SIZETWO			131072

#define MP0_Public			0x03800000
#define MP0_SRAM			0x03900000
#define MP1_Public			0x03b00000
#define MP1_SRAM			0x03c00004

#define smnMP1_FIRMWARE_FLAGS		0x3010028


/* Poll MP1_SMN_C2PMSG_90 until the SMU posts a non-zero response, then return it. */
static uint32_t smu10_wait_for_response(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg;

	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);

	phm_wait_for_register_unequal(hwmgr, reg,
			0, MP1_C2PMSG_90__CONTENT_MASK);

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
		uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

	return 0;
}

static uint32_t smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;

	smu10_wait_for_response(hwmgr);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	smu10_send_msg_to_smc_without_waiting(hwmgr, msg);

	if (smu10_wait_for_response(hwmgr) == 0)
		dev_err(adev->dev, "Failed to send Message %x.\n", msg);

	return 0;
}


static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
		uint16_t msg, uint32_t parameter)
{
	struct amdgpu_device *adev = hwmgr->adev;

	smu10_wait_for_response(hwmgr);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);

	smu10_send_msg_to_smc_without_waiting(hwmgr, msg);

	if (smu10_wait_for_response(hwmgr) == 0)
		dev_err(adev->dev, "Failed to send Message %x.\n", msg);

	return 0;
}

static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
		uint8_t *table, int16_t table_id)
{
	struct smu10_smumgr *priv =
			(struct smu10_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;

	PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
			"Invalid SMU Table ID!", return -EINVAL;);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL;);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL;);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram,
			priv->smu_tables.entry[table_id].table_id,
			NULL);

	amdgpu_asic_invalidate_hdp(adev, NULL);

	memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);

	return 0;
}

static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
		uint8_t *table, int16_t table_id)
{
	struct smu10_smumgr *priv =
			(struct smu10_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;

	PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
			"Invalid SMU Table ID!", return -EINVAL;);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL;);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL;);

	memcpy(priv->smu_tables.entry[table_id].table, table,
			priv->smu_tables.entry[table_id].size);

	amdgpu_asic_flush_hdp(adev, NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu,
			priv->smu_tables.entry[table_id].table_id,
			NULL);

	return 0;
}

static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
	uint32_t smc_driver_if_version;

	smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetDriverIfVersion,
			&smc_driver_if_version);

	if ((smc_driver_if_version != SMU10_DRIVER_IF_VERSION) &&
	    (smc_driver_if_version != SMU10_DRIVER_IF_VERSION + 1)) {
		pr_err("Attempt to read SMC IF Version Number Failed!\n");
		return -EINVAL;
	}

	return 0;
}

static int smu10_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct smu10_smumgr *priv =
			(struct smu10_smumgr *)(hwmgr->smu_backend);

	if (priv) {
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
					&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
					&priv->smu_tables.entry[SMU10_WMTABLE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
					&priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
					&priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
		kfree(hwmgr->smu_backend);
		hwmgr->smu_backend = NULL;
	}

	return 0;
}

static int smu10_start_smu(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
	adev->pm.fw_version = hwmgr->smu_version >> 8;

	if (!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
	    (adev->apu_flags & AMD_APU_IS_RAVEN) &&
	    adev->pm.fw_version < 0x1e45)
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

	if (smu10_verify_smc_interface(hwmgr))
		return -EINVAL;

	return 0;
}

static int smu10_smu_init(struct pp_hwmgr *hwmgr)
{
	struct smu10_smumgr *priv;
	int r;

	priv = kzalloc(sizeof(struct smu10_smumgr), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	hwmgr->smu_backend = priv;

	/* allocate space for watermarks table */
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(Watermarks_t), PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
			&priv->smu_tables.entry[SMU10_WMTABLE].handle,
			&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
			&priv->smu_tables.entry[SMU10_WMTABLE].table);

	if (r)
		goto err0;

	priv->smu_tables.entry[SMU10_WMTABLE].version = 0x01;
	priv->smu_tables.entry[SMU10_WMTABLE].size = sizeof(Watermarks_t);
	priv->smu_tables.entry[SMU10_WMTABLE].table_id = TABLE_WATERMARKS;

	/* allocate space for DPM clocks table */
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(DpmClocks_t), PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
			&priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
			&priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
			&priv->smu_tables.entry[SMU10_CLOCKTABLE].table);

	if (r)
		goto err1;

	priv->smu_tables.entry[SMU10_CLOCKTABLE].version = 0x01;
	priv->smu_tables.entry[SMU10_CLOCKTABLE].size = sizeof(DpmClocks_t);
	priv->smu_tables.entry[SMU10_CLOCKTABLE].table_id = TABLE_DPMCLOCKS;

	return 0;

err1:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
				&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
				&priv->smu_tables.entry[SMU10_WMTABLE].table);
err0:
	kfree(priv);
	return -EINVAL;
}

/* rw == true: read the table from the SMU; rw == false: write it to the SMU */
static int smu10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
{
	int ret;

	if (rw)
		ret = smu10_copy_table_from_smc(hwmgr, table, table_id);
	else
		ret = smu10_copy_table_to_smc(hwmgr, table, table_id);

	return ret;
}


const struct pp_smumgr_func smu10_smu_funcs = {
	.name = "smu10_smu",
	.smu_init = &smu10_smu_init,
	.smu_fini = &smu10_smu_fini,
	.start_smu = &smu10_start_smu,
	.request_smu_load_specific_fw = NULL,
	.send_msg_to_smc = &smu10_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = &smu10_send_msg_to_smc_with_parameter,
	.download_pptable_settings = NULL,
	.upload_pptable_settings = NULL,
	.get_argument = smu10_read_arg_from_smc,
	.smc_table_manager = smu10_smc_table_manager,
};