/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "smumgr.h"
#include "vega20_inc.h"
#include "soc15_common.h"
#include "vega20_smumgr.h"
#include "vega20_ppsmc.h"
#include "smu11_driver_if.h"
#include "ppatomctrl.h"
#include "pp_debug.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_smumgr.h"
#include "vega20_hwmgr.h"

#include "smu_v11_0_i2c.h"

/* MP Apertures */
#define MP0_Public			0x03800000
#define MP0_SRAM			0x03900000
#define MP1_Public			0x03b00000
#define MP1_SRAM			0x03c00004

/* address block */
#define smnMP1_FIRMWARE_FLAGS		0x3010024
#define smnMP0_FW_INTF			0x30101c0
#define smnMP1_PUB_CTRL			0x3010b14

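/*
 * Check whether the SMC firmware is up and running by testing the
 * interrupts-enabled flag it raises in MP1_FIRMWARE_FLAGS once its
 * boot sequence has completed.
 */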
bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
			(smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
			MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return true;

	return false;
}

/*
 * Wait for the SMC to respond to the previous message, then return the
 * response.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return the contents of the response register (MP1_SMN_C2PMSG_90).
 */
static uint32_t vega20_wait_for_response(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg;

	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);

	phm_wait_for_register_unequal(hwmgr, reg,
			0, MP1_C2PMSG_90__CONTENT_MASK);

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

/*
 * Send a message to the SMC without waiting for its response.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param msg the message to send.
 * @return Always return 0.
 */
static int vega20_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
		uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

	return 0;
}

/*
 * Send a message to the SMC and wait for its response.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param msg the message to send.
 * @return 0 on success, -EIO if the SMC reports a failure.
 */
static int vega20_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	vega20_wait_for_response(hwmgr);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	vega20_send_msg_to_smc_without_waiting(hwmgr, msg);

	ret = vega20_wait_for_response(hwmgr);
	if (ret != PPSMC_Result_OK)
		dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x\n", msg, ret);

	return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}

/*
 * Send a message to the SMC with a parameter and wait for its response.
 * @param hwmgr: the address of the powerplay hardware manager.
 * @param msg: the message to send.
 * @param parameter: the parameter to send.
 * @return 0 on success, -EIO if the SMC reports a failure.
 */
static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
		uint16_t msg, uint32_t parameter)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	vega20_wait_for_response(hwmgr);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);

	vega20_send_msg_to_smc_without_waiting(hwmgr, msg);

	ret = vega20_wait_for_response(hwmgr);
	if (ret != PPSMC_Result_OK)
		dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x\n", msg, ret);

	return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}

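/*
 * Read back the argument/response register (MP1_SMN_C2PMSG_82) in which
 * the SMC returns data for the previous message.
 */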
static uint32_t vega20_get_argument(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

/*
 * Copy a table from the SMC into the driver FB.
 * @param hwmgr the address of the HW manager
 * @param table the destination buffer in driver memory
 * @param table_id the ID of the SMU table to copy
 */
static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
		uint8_t *table, int16_t table_id)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
			"Invalid SMU Table ID!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL);

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL)) == 0,
			"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL)) == 0,
			"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram, table_id, NULL)) == 0,
			"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
			return ret);

	amdgpu_asic_invalidate_hdp(adev, NULL);

	memcpy(table, priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);

	return 0;
}

/*
 * Copy a table from the driver FB into the SMC.
 * @param hwmgr the address of the HW manager
 * @param table the source buffer in driver memory
 * @param table_id the ID of the SMU table to copy
 */
static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
		uint8_t *table, int16_t table_id)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
			"Invalid SMU Table ID!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL);

	memcpy(priv->smu_tables.entry[table_id].table, table,
			priv->smu_tables.entry[table_id].size);

	amdgpu_asic_flush_hdp(adev, NULL);

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL)) == 0,
			"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL)) == 0,
			"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu, table_id, NULL)) == 0,
			"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
			return ret);

	return 0;
}

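/*
 * Upload activity monitor coefficients for a given workload type: copy
 * the caller's buffer into the shared DRAM table, flush the HDP cache,
 * then ask the SMU to pull the table in. The workload type is encoded
 * in the upper 16 bits of the transfer message argument.
 */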
int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
		uint8_t *table, uint16_t workload_type)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

	amdgpu_asic_flush_hdp(adev, NULL);

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
			NULL)) == 0,
			"[SetActivityMonitor] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
			NULL)) == 0,
			"[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu,
			TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16),
			NULL)) == 0,
			"[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!",
			return ret);

	return 0;
}

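/*
 * Download activity monitor coefficients for a given workload type:
 * ask the SMU to write the table out to the shared DRAM buffer,
 * invalidate the HDP cache, then copy the result into the caller's
 * buffer.
 */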
int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
		uint8_t *table, uint16_t workload_type)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
			NULL)) == 0,
			"[GetActivityMonitor] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
			NULL)) == 0,
			"[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram,
			TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16), NULL)) == 0,
			"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
			return ret);

	amdgpu_asic_invalidate_hdp(adev, NULL);

	memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

	return 0;
}

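/*
 * Enable or disable the SMC features named in the 64-bit feature_mask.
 * The message interface carries 32 bits per message, so the mask is
 * split into low and high words and sent as two messages.
 *
 * A minimal usage sketch (illustrative only; the real callers live in
 * vega20_hwmgr.c, and the feature bit is assumed to come from
 * smu11_driver_if.h):
 *
 *	ret = vega20_enable_smc_features(hwmgr, true,
 *			1ULL << FEATURE_DPM_UCLK_BIT);
 */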
int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
		bool enable, uint64_t feature_mask)
{
	uint32_t smu_features_low, smu_features_high;
	int ret = 0;

	smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT);
	smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);

	if (enable) {
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL)) == 0,
				"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
				"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
				return ret);
	} else {
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL)) == 0,
				"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
				"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
				return ret);
	}

	return 0;
}

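/*
 * Query the SMC for its currently enabled features and reassemble the
 * two 32-bit replies into a single 64-bit mask.
 */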
int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
		uint64_t *features_enabled)
{
	uint32_t smc_features_low, smc_features_high;
	int ret = 0;

	if (features_enabled == NULL)
		return -EINVAL;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetEnabledSmuFeaturesLow,
			&smc_features_low)) == 0,
			"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetEnabledSmuFeaturesHigh,
			&smc_features_high)) == 0,
			"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
			return ret);

	*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
			(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));

	return 0;
}

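/*
 * Hand the SMC the DRAM address of the PM status log (tools) buffer,
 * split into high and low 32-bit halves.
 */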
static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetToolsDramAddrHigh,
				upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
				NULL);
		if (!ret)
			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetToolsDramAddrLow,
					lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
					NULL);
	}

	return ret;
}

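/*
 * Hand the SMC the DRAM address of the driver-owned pptable, split
 * into high and low 32-bit halves.
 */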
int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
			NULL)) == 0,
			"[SetPPtableDriverAddress] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
			NULL)) == 0,
			"[SetPPtableDriverAddress] Attempt to Set Dram Addr Low Failed!",
			return ret);

	return ret;
}

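/*
 * Allocate the backend private data and the VRAM buffers shared with
 * the SMU (pptable, watermarks, PM status log, OverDrive, SmuMetrics
 * and activity monitor coefficient tables), then bring up the SMU I2C
 * controller. On any failure, everything allocated so far is unwound.
 */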
static int vega20_smu_init(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv;
	unsigned long tools_size = 0x19000;
	int ret = 0;
	struct amdgpu_device *adev = hwmgr->adev;

	struct cgs_firmware_info info = {0};

	ret = cgs_get_firmware_info(hwmgr->device,
			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
			&info);
	if (ret || !info.kptr)
		return -EINVAL;

	priv = kzalloc(sizeof(struct vega20_smumgr), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	hwmgr->smu_backend = priv;

	/* allocate space for pptable */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(PPTable_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_PPTABLE].handle,
			&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
			&priv->smu_tables.entry[TABLE_PPTABLE].table);
	if (ret)
		goto free_backend;

	priv->smu_tables.entry[TABLE_PPTABLE].version = 0x01;
	priv->smu_tables.entry[TABLE_PPTABLE].size = sizeof(PPTable_t);

	/* allocate space for watermarks table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(Watermarks_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
			&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
			&priv->smu_tables.entry[TABLE_WATERMARKS].table);
	if (ret)
		goto err0;

	priv->smu_tables.entry[TABLE_WATERMARKS].version = 0x01;
	priv->smu_tables.entry[TABLE_WATERMARKS].size = sizeof(Watermarks_t);

	/* allocate space for pmstatuslog table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			tools_size,
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
	if (ret)
		goto err1;

	priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01;
	priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size;

	/* allocate space for OverDrive table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(OverDriveTable_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
	if (ret)
		goto err2;

	priv->smu_tables.entry[TABLE_OVERDRIVE].version = 0x01;
	priv->smu_tables.entry[TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t);

	/* allocate space for SmuMetrics table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(SmuMetrics_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
	if (ret)
		goto err3;

	priv->smu_tables.entry[TABLE_SMU_METRICS].version = 0x01;
	priv->smu_tables.entry[TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t);

	/* allocate space for ActivityMonitor table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(DpmActivityMonitorCoeffInt_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
	if (ret)
		goto err4;

	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);

	ret = smu_v11_0_i2c_control_init(adev);
	if (ret)
		goto err5;

	return 0;

err5:
	/* the activity monitor table was the last allocation to succeed */
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
err4:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
err3:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
err2:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
err1:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
			&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
			&priv->smu_tables.entry[TABLE_WATERMARKS].table);
err0:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
			&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
			&priv->smu_tables.entry[TABLE_PPTABLE].table);
free_backend:
	kfree(hwmgr->smu_backend);
	hwmgr->smu_backend = NULL;

	return -EINVAL;
}

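/*
 * Tear down the SMU I2C controller and free every table buffer and the
 * backend private data allocated by vega20_smu_init().
 */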
static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;

	smu_v11_0_i2c_control_fini(adev);

	if (priv) {
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
				&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
				&priv->smu_tables.entry[TABLE_PPTABLE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
				&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
				&priv->smu_tables.entry[TABLE_WATERMARKS].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
				&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
				&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
				&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
				&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
				&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
				&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
				&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
				&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
		kfree(hwmgr->smu_backend);
		hwmgr->smu_backend = NULL;
	}

	return 0;
}

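/*
 * Verify that the SMC firmware is already up (vega20 relies on it being
 * loaded before the driver starts) and program the tools buffer address.
 */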
static int vega20_start_smu(struct pp_hwmgr *hwmgr)
{
	int ret;

	ret = vega20_is_smc_ram_running(hwmgr);
	PP_ASSERT_WITH_CODE(ret,
			"[Vega20StartSmu] SMC is not running!",
			return -EINVAL);

	ret = vega20_set_tools_address(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[Vega20StartSmu] Failed to set tools address!",
			return ret);

	return 0;
}

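/*
 * DPM is considered running if any of the DPM-related features in
 * SMC_DPM_FEATURES is reported enabled by the SMC.
 */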
static bool vega20_is_dpm_running(struct pp_hwmgr *hwmgr)
{
	uint64_t features_enabled = 0;

	vega20_get_enabled_smc_features(hwmgr, &features_enabled);

	if (features_enabled & SMC_DPM_FEATURES)
		return true;
	else
		return false;
}

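/*
 * Unified read/write entry point for the SMU tables: rw == true copies
 * the table from the SMC into the caller's buffer, rw == false pushes
 * the caller's buffer to the SMC.
 *
 * A usage sketch through the generic wrapper (illustrative only,
 * assuming the standard smum_smc_table_manager() call chain):
 *
 *	SmuMetrics_t metrics;
 *
 *	ret = smum_smc_table_manager(hwmgr, (uint8_t *)&metrics,
 *			TABLE_SMU_METRICS, true);
 */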
static int vega20_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
		uint16_t table_id, bool rw)
{
	int ret;

	if (rw)
		ret = vega20_copy_table_from_smc(hwmgr, table, table_id);
	else
		ret = vega20_copy_table_to_smc(hwmgr, table, table_id);

	return ret;
}

const struct pp_smumgr_func vega20_smu_funcs = {
	.name = "vega20_smu",
	.smu_init = &vega20_smu_init,
	.smu_fini = &vega20_smu_fini,
	.start_smu = &vega20_start_smu,
	.request_smu_load_specific_fw = NULL,
	.send_msg_to_smc = &vega20_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = &vega20_send_msg_to_smc_with_parameter,
	.download_pptable_settings = NULL,
	.upload_pptable_settings = NULL,
	.is_dpm_running = vega20_is_dpm_running,
	.get_argument = vega20_get_argument,
	.smc_table_manager = vega20_smc_table_manager,
};