/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>

#define SMU_13_0_PARTIAL_PPTABLE
#define SWSMU_CODE_LAYER_L3

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "amdgpu_ras.h"
#include "smu_cmn.h"

#include "asic_reg/thm/thm_13_0_2_offset.h"
#include "asic_reg/thm/thm_13_0_2_sh_mask.h"
#include "asic_reg/mp/mp_13_0_2_offset.h"
#include "asic_reg/mp/mp_13_0_2_sh_mask.h"
#include "asic_reg/smuio/smuio_13_0_2_offset.h"
#include "asic_reg/smuio/smuio_13_0_2_sh_mask.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");

#define mmMP1_SMN_C2PMSG_66		0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX	0

#define mmMP1_SMN_C2PMSG_82		0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX	0

#define mmMP1_SMN_C2PMSG_90		0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX	0

#define SMU13_VOLTAGE_SCALE 4

#define LINK_WIDTH_MAX	6
#define LINK_SPEED_MAX	3

#define smnPCIE_LC_LINK_WIDTH_CNTL			0x11140288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK	0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT	0x4
#define smnPCIE_LC_SPEED_CNTL				0x11140290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK	0xE0
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT	0x5

#define ENABLE_IMU_ARG_GFXOFF_ENABLE		1

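/* PMFW-encoded PCIe decode tables: a width index from the PMFW maps to a
 * lane count (x0..x16) and a speed index to a PCIe generation (Gen1..Gen5).
 */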
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};

const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};

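/* Fetch the SMC firmware named after the MP1 IP version ("amdgpu/<prefix>.bin"),
 * record its version and, for PSP-managed loading, add it to the firmware list.
 */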
int smu_v13_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	char ucode_prefix[15];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	/* doesn't need to load smu firmware in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
	err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err)
		amdgpu_ucode_release(&adev->pm.fw);
	return err;
}

void smu_v13_0_fini_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	amdgpu_ucode_release(&adev->pm.fw);
	adev->pm.fw_version = 0;
}

int smu_v13_0_load_microcode(struct smu_context *smu)
{
#if 0
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t smc_fw_size;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	smc_fw_size = hdr->header.ucode_size_bytes;

	for (i = 1; i < smc_fw_size/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;
#endif

	return 0;
}

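/* With SCPM enabled the soft pptable has to be delivered through the PSP:
 * look up the driver pptable embedded in the SMC firmware image and append
 * it to the PSP firmware list. Skipped for SMU 13.0.0/13.0.7/13.0.10 and
 * when the vbios carries the pptable (pptable_id == 0).
 */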
int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_firmware_info *ucode = NULL;
	uint32_t size = 0, pptable_id = 0;
	int ret = 0;
	void *table;

	/* doesn't need to load smu firmware in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (!adev->scpm_enabled)
		return 0;

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 7)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10)))
		return 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* "pptable_id == 0" means vbios carries the pptable. */
	if (!pptable_id)
		return 0;

	ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
	if (ret)
		return ret;

	smu->pptable_firmware.data = table;
	smu->pptable_firmware.size = size;

	ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
	ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
	ucode->fw = &smu->pptable_firmware;
	adev->firmware.fw_size +=
		ALIGN(smu->pptable_firmware.size, PAGE_SIZE);

	return 0;
}

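/* The PMFW sets INTERRUPTS_ENABLED in MP1_FIRMWARE_FLAGS once it has finished
 * booting; treat that flag as the "firmware alive" indication.
 */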
int smu_v13_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_V13_0_4_FIRMWARE_FLAGS & 0xffffffff));
		break;
	default:
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		break;
	}

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

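/* The 32-bit SMU version packs, from MSB to LSB: program id, major, minor
 * and debug revision, e.g. 0x04550036 decodes as program 4, version 85.0.54.
 */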
int smu_v13_0_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_program = (smu_version >> 24) & 0xff;
	smu_major = (smu_version >> 16) & 0xff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;
	if (smu->is_apu ||
	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6) ||
	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
		adev->pm.fw_version = smu_version;

	/* only for dGPU w/ SMU13 */
	if (adev->pm.fw)
		dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
			smu_program, smu_version, smu_major, smu_minor, smu_debug);

	/*
	 * 1. An if_version mismatch is not critical, as the firmware is
	 *    designed to be backward compatible.
	 * 2. New firmware usually brings some optimizations, but those are
	 *    only visible with the paired driver.
	 * Considering the above, just log a message for the user instead of
	 * halting driver load.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
			 smu->smc_driver_if_version, if_version,
			 smu_program, smu_version, smu_major, smu_minor, smu_debug);
		dev_info(adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

static int smu_v13_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

static int smu_v13_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	int ret, index;

	dev_info(adev->dev, "use vbios provided pptable\n");
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    powerplayinfo);

	ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
					     (uint8_t **)table);
	if (ret)
		return ret;

	if (size)
		*size = atom_table_size;

	return 0;
}

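/* Pull the soft pptable out of the SMC firmware image: a v2.0 header carries
 * a single pptable, a v2.1 header an indexed array selected by pptable_id.
 */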
int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
					void **table,
					uint32_t *size,
					uint32_t pptable_id)
{
	const struct smc_firmware_header_v1_0 *hdr;
	struct amdgpu_device *adev = smu->adev;
	uint16_t version_major, version_minor;
	int ret;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	if (!hdr)
		return -EINVAL;

	dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);

	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major != 2) {
		dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
			version_major, version_minor);
		return -EINVAL;
	}

	switch (version_minor) {
	case 0:
		ret = smu_v13_0_set_pptable_v2_0(smu, table, size);
		break;
	case 1:
		ret = smu_v13_0_set_pptable_v2_1(smu, table, size, pptable_id);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

int smu_v13_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t size = 0, pptable_id = 0;
	void *table;
	int ret = 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* force using vbios pptable in sriov mode */
	if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
		ret = smu_v13_0_get_pptable_from_vbios(smu, &table, &size);
	else
		ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);

	if (ret)
		return ret;

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

int smu_v13_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret = 0;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		ret = -ENOMEM;
		goto err0_out;
	}

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_13_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {
		ret = -ENOMEM;
		goto err1_out;
	}

	/* Aldebaran does not support OVERDRIVE */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {
			ret = -ENOMEM;
			goto err2_out;
		}

		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {
			ret = -ENOMEM;
			goto err3_out;
		}

		smu_table->user_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->user_overdrive_table) {
			ret = -ENOMEM;
			goto err4_out;
		}
	}

	smu_table->combo_pptable =
		kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->combo_pptable) {
		ret = -ENOMEM;
		goto err5_out;
	}

	return 0;

err5_out:
	kfree(smu_table->user_overdrive_table);
err4_out:
	kfree(smu_table->boot_overdrive_table);
err3_out:
	kfree(smu_table->overdrive_table);
err2_out:
	kfree(smu_table->max_sustainable_clocks);
err1_out:
	kfree(smu_table->driver_pptable);
err0_out:
	return ret;
}

int smu_v13_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	kfree(smu_table->gpu_metrics_table);
	kfree(smu_table->combo_pptable);
	kfree(smu_table->user_overdrive_table);
	kfree(smu_table->boot_overdrive_table);
	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);
	smu_table->gpu_metrics_table = NULL;
	smu_table->combo_pptable = NULL;
	smu_table->user_overdrive_table = NULL;
	smu_table->boot_overdrive_table = NULL;
	smu_table->overdrive_table = NULL;
	smu_table->max_sustainable_clocks = NULL;
	smu_table->driver_pptable = NULL;
	kfree(smu_table->hardcode_pptable);
	smu_table->hardcode_pptable = NULL;

	kfree(smu_table->ecc_table);
	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->ecc_table = NULL;
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	kfree(smu_dpm->dpm_policies);
	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_policies = NULL;
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

int smu_v13_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_13_0_power_context);

	return 0;
}

int smu_v13_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_4 *v_3_4;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;
	struct atom_smu_info_v3_6 *smu_info_v3_6;
	struct atom_smu_info_v4_0 *smu_info_v4_0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					     (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version for smu13!\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		break;
	case 4:
	default:
		v_3_4 = (struct atom_firmware_info_v3_4 *)header;
		smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
		break;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					    (uint8_t **)&header)) {

		if ((frev == 3) && (crev == 6)) {
			smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
		} else if ((frev == 3) && (crev == 1)) {
			return 0;
		} else if ((frev == 4) && (crev == 0)) {
			smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
			smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
		} else {
			dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
				 (uint32_t)frev, (uint32_t)crev);
		}
	}

	return 0;
}

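/* Report the DRAM log buffer to the PMFW as a high/low address pair plus its
 * size; a zero-sized or CPU-unmapped pool is silently skipped.
 */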
int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					      address_high, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					      address_low, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					      (uint32_t)memory_pool->size, NULL);
	if (ret)
		return ret;

	return ret;
}

int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
	if (ret)
		dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK failed!");

	return ret;
}

int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetDriverDramAddrHigh,
						      upper_32_bits(driver_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetDriverDramAddrLow,
							      lower_32_bits(driver_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v13_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetToolsDramAddrHigh,
						      upper_32_bits(tool_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetToolsDramAddrLow,
							      lower_32_bits(tool_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);

	return ret;
}

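/* The allowed-feature bitmap is 64 bits wide and is handed to the PMFW as
 * two 32-bit halves, high word first.
 */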
int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
	    feature->feature_num < 64)
		return -EINVAL;

	bitmap_to_arr32(feature_mask, feature->allowed, 64);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					      feature_mask[1], NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetAllowedFeaturesMaskLow,
					       feature_mask[0],
					       NULL);
}

int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
		else
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
		break;
	default:
		break;
	}

	return ret;
}

int smu_v13_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					  SMU_MSG_DisableAllSmuFeatures), NULL);
}

int smu_v13_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!amdgpu_device_has_dc_support(smu->adev))
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DALNotPresent, NULL);

	return ret;
}

static int
smu_v13_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max DC clock from SMC!");
		return ret;
	}

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	return 0;
}

int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_13_0_max_sustainable_clocks *max_sustainable_clocks =
		smu->smu_table.max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

int smu_v13_0_get_current_power_limit(struct smu_context *smu,
				      uint32_t *power_limit)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		return -EINVAL;

	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetPptLimit,
					      power_src << 16,
					      power_limit);
	if (ret)
		dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);

	return ret;
}

int smu_v13_0_set_power_limit(struct smu_context *smu,
			      enum smu_ppt_limit_type limit_type,
			      uint32_t limit)
{
	int ret = 0;

	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		return -EINVAL;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set power limit failed!\n", __func__);
		return ret;
	}

	smu->current_power_limit = limit;

	return 0;
}

static int smu_v13_0_allow_ih_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				    SMU_MSG_AllowIHHostInterrupt,
				    NULL);
}

static int smu_v13_0_process_pending_interrupt(struct smu_context *smu)
{
	int ret = 0;

	if (smu->dc_controlled_by_gpio &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
		ret = smu_v13_0_allow_ih_interrupt(smu);

	return ret;
}

int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->irq_source.num_types)
		return 0;

	ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
	if (ret)
		return ret;

	return smu_v13_0_process_pending_interrupt(smu);
}

int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
{
	if (!smu->irq_source.num_types)
		return 0;

	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}

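/* Decode an SVI2 telemetry VID into millivolts:
 * vddc(mV) = (6200 - vid * 25) / SMU13_VOLTAGE_SCALE, i.e. 1550 mV minus
 * 6.25 mV per VID step.
 */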
static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU13_VOLTAGE_SCALE);
}

int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, regSMUSVI0_TEL_PLANE0) &
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		  SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;
}

int
smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static int
smu_v13_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
			__func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v13_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, regCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, regCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

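/* Map the requested PWM value (0-255) onto the fan controller's duty range:
 * duty = speed * FMAX_DUTY100 / 255, then switch the FDO to static-duty mode.
 */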
int smu_v13_0_set_fan_speed_pwm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	speed = min_t(uint32_t, speed, 255);

	if (smu_v13_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 255);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, regCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}

int
smu_v13_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v13_0_set_fan_speed_pwm(smu, 255);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v13_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v13_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

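/* Convert the target RPM into a tachometer period. crystal_clock_freq is in
 * 10 kHz units (2500 == 25 MHz reference):
 * tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed)
 */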
int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t crystal_clock_freq = 2500;
	uint32_t tach_period;
	int ret;

	if (!speed)
		return -EINVAL;

	ret = smu_v13_0_auto_fan_control(smu, 0);
	if (ret)
		return ret;

	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, regCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
}

int smu_v13_0_set_xgmi_pstate(struct smu_context *smu,
			      uint32_t pstate)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetXgmiMode,
					      pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
					      NULL);
	return ret;
}

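/* Enable/disable the thermal (THM) and MP1 SW interrupt sources. On enable,
 * the DIG_THERM low/high trip points are programmed from the current thermal
 * range; on disable, both THM lines and the MP1 SW interrupt are masked.
 */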
static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   unsigned type,
				   enum amdgpu_interrupt_state state)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t low, high;
	uint32_t val = 0;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* For THM irqs */
		val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);

		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, 0);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* For THM irqs */
		low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
			  smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
		high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
			   smu->thermal_range.software_shutdown_temp);

		val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
		val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);

		val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, val);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);

		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	default:
		break;
	}

	return 0;
}

void smu_v13_0_interrupt_work(struct smu_context *smu)
{
	smu_cmn_send_smc_msg(smu,
			     SMU_MSG_ReenableAcDcInterrupt,
			     NULL);
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0	/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1	/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
#define SMUIO_11_0__SRCID__SMUIO_GPIO19		83

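/* Dispatch THM over/under-temperature, SMUIO GPIO19 HW CTF and MP1 SMCToHost
 * interrupts. For MP1, ctxid selects the event: AC/DC switch, thermal
 * throttling, and fan-abnormal soft-CTF adjust/recovery.
 */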
static int smu_v13_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;
	/*
	 * ctxid is used to distinguish different
	 * events for SMCToHost interrupt.
	 */
	uint32_t ctxid = entry->src_data[0];
	uint32_t data;
	uint32_t high;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			schedule_delayed_work(&smu->swctf_delayed_work,
					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
			break;
		default:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range, unknown src id (%d)\n",
				  src_id);
			break;
		}
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
		/*
		 * HW CTF just occurred. Shutdown to prevent further damage.
		 */
		dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
		orderly_poweroff(true);
	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
			/* ACK SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);

			switch (ctxid) {
			case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
				dev_dbg(adev->dev, "Switched to AC mode!\n");
				schedule_work(&smu->interrupt_work);
				adev->pm.ac_power = true;
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
				dev_dbg(adev->dev, "Switched to DC mode!\n");
				schedule_work(&smu->interrupt_work);
				adev->pm.ac_power = false;
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
				/*
				 * Increment the throttle interrupt counter
				 */
				atomic64_inc(&smu->throttle_int_counter);

				if (!atomic_read(&adev->throttling_logging_enabled))
					return 0;

				if (__ratelimit(&adev->throttling_logging_rs))
					schedule_work(&smu->throttling_logging_work);

				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:
				high = smu->thermal_range.software_shutdown_temp +
					smu->thermal_range.software_shutdown_temp_offset;
				high = min_t(typeof(high),
					     SMU_THERMAL_MAXIMUM_ALERT_TEMP,
					     high);
				dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n",
					  high,
					  smu->thermal_range.software_shutdown_temp_offset);

				data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
				data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
						     DIG_THERM_INTH,
						     (high & 0xff));
				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:
				high = min_t(typeof(high),
					     SMU_THERMAL_MAXIMUM_ALERT_TEMP,
					     smu->thermal_range.software_shutdown_temp);
				dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high);

				data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
				data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
						     DIG_THERM_INTH,
						     (high & 0xff));
				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
				break;
			default:
				dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
					ctxid, client_id);
				break;
			}
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v13_0_irq_funcs = {
	.set = smu_v13_0_set_irq_state,
	.process = smu_v13_0_irq_process,
};

int smu_v13_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v13_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	/* Register CTF(GPIO_19) interrupt */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
				SMUIO_11_0__SRCID__SMUIO_GPIO19,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				SMU_IH_INTERRUPT_ID_TO_DRIVER,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					       struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
		(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
		(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
		(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
		(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
		(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
		(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);

	return ret;
}

static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu,
					     uint64_t event_arg)
{
	int ret = 0;

	dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);

	return ret;
}

int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
			     uint64_t event_arg)
{
	int ret = -EINVAL;

	switch (event) {
	case SMU_EVENT_RESET_COMPLETE:
		ret = smu_v13_0_wait_for_reset_complete(smu, event_arg);
		break;
	default:
		break;
	}

	return ret;
}

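/* Query the absolute min/max of a clock domain from the PMFW. If DPM is
 * disabled for the clock, fall back to the boot frequency (stored in 10 kHz,
 * reported in MHz). The message argument carries the clock id in bits 31:16.
 */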
int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;
	uint32_t clock_limit;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit);
		if (ret)
			return ret;

		/* clock in MHz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	param = (clk_id & 0xffff) << 16;

	if (max) {
		if (smu->adev->pm.ac_power)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetMaxDpmFreq,
							      param,
							      max);
		else
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetDcModeMaxDpmFreq,
							      param,
							      max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

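/* Apply soft clock limits. The message argument packs the clock id in bits
 * 31:16 and the frequency in MHz in bits 15:0; in automatic mode the
 * sentinels 0xffff (max) and 0 (min) are sent instead of fixed frequencies.
 */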
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max,
					  bool automatic)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		if (automatic)
			param = (uint32_t)((clk_id << 16) | 0xffff);
		else
			param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

	if (min > 0) {
		if (automatic)
			param = (uint32_t)((clk_id << 16) | 0);
		else
			param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

out:
	return ret;
}

int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

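/* Translate a DPM forced level into per-domain soft frequency ranges:
 * HIGH/LOW pin every domain to its dpm table max/min, AUTO spans the full
 * range, and the PROFILE_* levels use the precomputed pstate table entries.
 */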
int smu_v13_0_set_performance_level(struct smu_context *smu,
				    enum amd_dpm_forced_level level)
{
	struct smu_13_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_13_0_dpm_table *vclk_table =
		&dpm_context->dpm_tables.vclk_table;
	struct smu_13_0_dpm_table *dclk_table =
		&dpm_context->dpm_tables.dclk_table;
	struct smu_13_0_dpm_table *fclk_table =
		&dpm_context->dpm_tables.fclk_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t mclk_min = 0, mclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;
	uint32_t vclk_min = 0, vclk_max = 0;
	uint32_t dclk_min = 0, dclk_max = 0;
	uint32_t fclk_min = 0, fclk_max = 0;
	int ret = 0, i;
	bool auto_level = false;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		sclk_min = sclk_max = gfx_table->max;
		mclk_min = mclk_max = mem_table->max;
		socclk_min = socclk_max = soc_table->max;
		vclk_min = vclk_max = vclk_table->max;
		dclk_min = dclk_max = dclk_table->max;
		fclk_min = fclk_max = fclk_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		sclk_min = sclk_max = gfx_table->min;
		mclk_min = mclk_max = mem_table->min;
		socclk_min = socclk_max = soc_table->min;
		vclk_min = vclk_max = vclk_table->min;
		dclk_min = dclk_max = dclk_table->min;
		fclk_min = fclk_max = fclk_table->min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		sclk_min = gfx_table->min;
		sclk_max = gfx_table->max;
		mclk_min = mem_table->min;
		mclk_max = mem_table->max;
		socclk_min = soc_table->min;
		socclk_max = soc_table->max;
		vclk_min = vclk_table->min;
		vclk_max = vclk_table->max;
		dclk_min = dclk_table->min;
		dclk_max = dclk_table->max;
		fclk_min = fclk_table->min;
		fclk_max = fclk_table->max;
		auto_level = true;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
		mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
		socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
		vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
		dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
		fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		mclk_min = mclk_max = pstate_table->uclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
		mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
		socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
		vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
		dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
		fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	/*
	 * Unset those settings for SMU 13.0.2, as soft limit settings
	 * for those clock domains are not supported there.
	 */
	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) {
		mclk_min = mclk_max = 0;
		socclk_min = socclk_max = 0;
		vclk_min = vclk_max = 0;
		dclk_min = dclk_max = 0;
		fclk_min = fclk_max = 0;
		auto_level = false;
	}

	if (sclk_min && sclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_GFXCLK,
							    sclk_min,
							    sclk_max,
							    auto_level);
		if (ret)
			return ret;

		pstate_table->gfxclk_pstate.curr.min = sclk_min;
		pstate_table->gfxclk_pstate.curr.max = sclk_max;
	}

	if (mclk_min && mclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_MCLK,
							    mclk_min,
							    mclk_max,
							    auto_level);
		if (ret)
			return ret;

		pstate_table->uclk_pstate.curr.min = mclk_min;
		pstate_table->uclk_pstate.curr.max = mclk_max;
	}

	if (socclk_min && socclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_SOCCLK,
							    socclk_min,
							    socclk_max,
							    auto_level);
		if (ret)
			return ret;

		pstate_table->socclk_pstate.curr.min = socclk_min;
		pstate_table->socclk_pstate.curr.max = socclk_max;
	}

	if (vclk_min && vclk_max) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v13_0_set_soft_freq_limited_range(smu,
								    i ? SMU_VCLK1 : SMU_VCLK,
								    vclk_min,
								    vclk_max,
								    auto_level);
			if (ret)
				return ret;
		}
		pstate_table->vclk_pstate.curr.min = vclk_min;
		pstate_table->vclk_pstate.curr.max = vclk_max;
	}

	if (dclk_min && dclk_max) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v13_0_set_soft_freq_limited_range(smu,
								    i ? SMU_DCLK1 : SMU_DCLK,
								    dclk_min,
								    dclk_max,
								    auto_level);
			if (ret)
				return ret;
		}
		pstate_table->dclk_pstate.curr.min = dclk_min;
		pstate_table->dclk_pstate.curr.max = dclk_max;
	}

	if (fclk_min && fclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_FCLK,
							    fclk_min,
							    fclk_max,
							    auto_level);
		if (ret)
			return ret;

		pstate_table->fclk_pstate.curr.min = fclk_min;
		pstate_table->fclk_pstate.curr.max = fclk_max;
	}

	return ret;
}

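/*
 * Notify PMFW of an AC/DC power source change. The generic power source
 * type is translated to the ASIC-specific index before the message is
 * sent.
 */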
int smu_v13_0_set_power_source(struct smu_context *smu,
			       enum smu_power_src_type power_src)
{
	int pwr_source;

	pwr_source = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_PWR,
						    (uint32_t)power_src);
	if (pwr_source < 0)
		return -EINVAL;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NotifyPowerSource,
					       pwr_source,
					       NULL);
}

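/*
 * Fall back to the frequencies the VBIOS reported at boot. These serve
 * as substitutes when DPM is disabled for a clock domain, as in
 * smu_v13_0_get_dpm_freq_by_index() below.
 */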
int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
				     enum smu_clk_type clk_type,
				     uint32_t *value)
{
	int ret = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		*value = smu->smu_table.boot_values.uclk;
		break;
	case SMU_FCLK:
		*value = smu->smu_table.boot_values.fclk;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		*value = smu->smu_table.boot_values.gfxclk;
		break;
	case SMU_SOCCLK:
		*value = smu->smu_table.boot_values.socclk;
		break;
	case SMU_VCLK:
		*value = smu->smu_table.boot_values.vclk;
		break;
	case SMU_DCLK:
		*value = smu->smu_table.boot_values.dclk;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

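/*
 * GetDpmFreqByIndex argument layout: clock id in the upper 16 bits,
 * level index in the lower 16 bits; level 0xff requests the highest
 * level. BIT31 of the response flags fine grained DPM and is masked off
 * before the frequency is returned. Illustrative query for the top UCLK
 * frequency (a sketch, not taken from this file):
 *
 *	uint32_t uclk_max;
 *	ret = smu_v13_0_get_dpm_freq_by_index(smu, SMU_UCLK, 0xff, &uclk_max);
 */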
int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
				    enum smu_clk_type clk_type, uint16_t level,
				    uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return smu_v13_0_get_boot_freq_by_index(smu, clk_type, value);

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      value);
	if (ret)
		return ret;

	*value = *value & 0x7fffffff;

	return ret;
}

static int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
					 enum smu_clk_type clk_type,
					 uint32_t *value)
{
	int ret;

	ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
	/* SMU v13.0.2 FW returns a 0-based max level; increment by one for it */
	if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) && (!ret && value))
		++(*value);

	return ret;
}

static int smu_v13_0_get_fine_grained_status(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     bool *is_fine_grained_dpm)
{
	int ret = 0, clk_id = 0;
	uint32_t param;
	uint32_t value;

	if (!is_fine_grained_dpm)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      &value);
	if (ret)
		return ret;

	/*
	 * BIT31: 1 - fine grained DPM, 0 - discrete DPM
	 * Fine grained DPM is not supported for now.
	 */
	*is_fine_grained_dpm = value & 0x80000000;

	return 0;
}

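/*
 * Build one DPM table by querying the level count and then each level's
 * frequency. The table min/max are taken from the first and last levels,
 * which assumes PMFW reports levels in ascending frequency order.
 */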
int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   struct smu_13_0_dpm_table *single_dpm_table)
{
	int ret = 0;
	uint32_t clk;
	int i;

	ret = smu_v13_0_get_dpm_level_count(smu,
					    clk_type,
					    &single_dpm_table->count);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
		return ret;
	}

	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 2)) {
		ret = smu_v13_0_get_fine_grained_status(smu,
							clk_type,
							&single_dpm_table->is_fine_grained);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
			return ret;
		}
	}

	for (i = 0; i < single_dpm_table->count; i++) {
		ret = smu_v13_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      i,
						      &clk);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
			return ret;
		}

		single_dpm_table->dpm_levels[i].value = clk;
		single_dpm_table->dpm_levels[i].enabled = true;

		if (i == 0)
			single_dpm_table->min = clk;
		else if (i == single_dpm_table->count - 1)
			single_dpm_table->max = clk;
	}

	return 0;
}

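/*
 * The LC control registers report the negotiated link width/speed as
 * encoded field values, not as lane counts or GT/s figures; the
 * translation into actual lane counts goes through the link_width[]
 * table (and its link speed counterpart) below.
 */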
int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
}

int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu)
{
	uint32_t width_level;

	width_level = smu_v13_0_get_current_pcie_link_width_level(smu);
	if (width_level > LINK_WIDTH_MAX)
		width_level = 0;

	return link_width[width_level];
}

int smu_v13_0_get_current_pcie_link_speed_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
		>> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
}

int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu)
{
	uint32_t speed_level;

	speed_level = smu_v13_0_get_current_pcie_link_speed_level(smu);
	if (speed_level > LINK_SPEED_MAX)
		speed_level = 0;

	return link_speed[speed_level];
}

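/*
 * Power all present VCN instances up or down; harvested instances are
 * skipped. The instance index is carried in the upper 16 bits of the
 * message argument. Note that the inst parameter is not consumed here:
 * every present instance is toggled on each call.
 */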
int smu_v13_0_set_vcn_enable(struct smu_context *smu,
			     bool enable,
			     int inst)
{
	struct amdgpu_device *adev = smu->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
						      SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
						      i << 16U, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_v13_0_set_jpeg_enable(struct smu_context *smu,
			      bool enable)
{
	return smu_cmn_send_smc_msg_with_param(smu, enable ?
					       SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
					       0, NULL);
}

int smu_v13_0_run_btc(struct smu_context *smu)
{
	int res;

	res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
	if (res)
		dev_err(smu->adev->dev, "RunDcBtc failed!\n");

	return res;
}

int smu_v13_0_gpo_control(struct smu_context *smu,
			  bool enablement)
{
	int res;

	res = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_AllowGpo,
					      enablement ? 1 : 0,
					      NULL);
	if (res)
		dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);

	return res;
}

int smu_v13_0_deep_sleep_control(struct smu_context *smu,
				 bool enablement)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	return ret;
}

int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
			      bool enablement)
{
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);

	return ret;
}

static int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
					     enum smu_baco_seq baco_seq)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_ArmD3,
					      baco_seq,
					      NULL);
	if (ret)
		return ret;

	if (baco_seq == BACO_SEQ_BAMACO ||
	    baco_seq == BACO_SEQ_BACO)
		smu_baco->state = SMU_BACO_STATE_ENTER;
	else
		smu_baco->state = SMU_BACO_STATE_EXIT;

	return 0;
}

static enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	return smu_baco->state;
}

static int smu_v13_0_baco_set_state(struct smu_context *smu,
				    enum smu_baco_state state)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (smu_v13_0_baco_get_state(smu) == state)
		return 0;

	if (state == SMU_BACO_STATE_ENTER) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnterBaco,
						      (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
						      BACO_SEQ_BAMACO : BACO_SEQ_BACO,
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_ExitBaco,
					   NULL);
		if (ret)
			return ret;

		/* clear VBIOS scratch registers 6 and 7 for the coming ASIC reinit */
		WREG32(adev->bios_scratch_reg_offset + 6, 0);
		WREG32(adev->bios_scratch_reg_offset + 7, 0);
	}

	if (!ret)
		smu_baco->state = state;

	return ret;
}

int smu_v13_0_get_bamaco_support(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	int bamaco_support = 0;

	if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
		return 0;

	if (smu_baco->maco_support)
		bamaco_support |= MACO_SUPPORT;

	/* return true if ASIC is in BACO state already */
	if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
		return bamaco_support |= BACO_SUPPORT;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	    !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return 0;

	return (bamaco_support |= BACO_SUPPORT);
}

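/*
 * On runtime PM suspend with the audio function still enabled, only the
 * D3 sequence is armed and PMFW drives the actual BACO/BAMACO entry;
 * otherwise BACO is entered directly, followed by a ~10 ms settling
 * delay for PMFW.
 */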
int smu_v13_0_baco_enter(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
		return smu_v13_0_baco_set_armd3_sequence(smu,
							 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
							 BACO_SEQ_BAMACO : BACO_SEQ_BACO);
	} else {
		ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
		if (!ret)
			usleep_range(10000, 11000);

		return ret;
	}
}

int smu_v13_0_baco_exit(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
		/* Wait for PMFW handling for the Dstate change */
		usleep_range(10000, 11000);
		ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
	} else {
		ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
	}

	if (!ret)
		adev->gfx.is_poweron = false;

	return ret;
}

int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	uint16_t index;
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
						       ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
	}

	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_EnableGfxImu);
	return smu_cmn_send_msg_without_waiting(smu, index,
						ENABLE_IMU_ARG_GFXOFF_ENABLE);
}

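/*
 * Overdrive edits for the fine grained GFXCLK range, typically driven
 * through the pp_od_clk_voltage sysfs interface: for example, "s 0 500"
 * arrives as input[] = {0, 500} (set the minimum sclk to 500 MHz) and
 * "c" maps to PP_OD_COMMIT_DPM_TABLE. Edits are only honored in manual
 * performance level mode.
 */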
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
				enum PP_OD_DPM_TABLE_COMMAND type,
				long input[], uint32_t size)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	int ret = 0;

	/* Only allowed in manual mode */
	if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			if (input[1] < smu->gfx_default_hard_min_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_hard_min_freq);
				return -EINVAL;
			}
			smu->gfx_actual_hard_min_freq = input[1];
		} else if (input[0] == 1) {
			if (input[1] > smu->gfx_default_soft_max_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_soft_max_freq);
				return -EINVAL;
			}
			smu->gfx_actual_soft_max_freq = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}
		if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
			dev_err(smu->adev->dev,
				"The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
				smu->gfx_actual_hard_min_freq,
				smu->gfx_actual_soft_max_freq);
			return -EINVAL;
		}

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
						      smu->gfx_actual_hard_min_freq,
						      NULL);
		if (ret) {
			dev_err(smu->adev->dev, "Set hard min sclk failed!");
			return ret;
		}

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
						      smu->gfx_actual_soft_max_freq,
						      NULL);
		if (ret) {
			dev_err(smu->adev->dev, "Set soft max sclk failed!");
			return ret;
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}

int smu_v13_0_set_default_dpm_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
				    smu_table->clocks_table, false);
}

void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

int smu_v13_0_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
	if (!ret)
		msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);

	return ret;
}

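/*
 * OverridePcieParameters argument layout, one message per link level:
 * the level index goes in bits 16+, the PCIe gen in bits 8-15 and the
 * lane-width code in bits 0-7. With PCIe DPM masked off, every level is
 * forced to a single capped gen/width combination; otherwise each level
 * is only clamped to the caps.
 */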
int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
				     uint8_t pcie_gen_cap,
				     uint8_t pcie_width_cap)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_13_0_pcie_table *pcie_table =
		&dpm_context->dpm_tables.pcie_table;
	int num_of_levels = pcie_table->num_of_link_levels;
	uint32_t smu_pcie_arg;
	int ret, i;

	if (!num_of_levels)
		return 0;

	if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
		if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
			pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];

		if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
			pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];

		/* Force all levels to use the same settings */
		for (i = 0; i < num_of_levels; i++) {
			pcie_table->pcie_gen[i] = pcie_gen_cap;
			pcie_table->pcie_lane[i] = pcie_width_cap;
		}
	} else {
		for (i = 0; i < num_of_levels; i++) {
			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
				pcie_table->pcie_gen[i] = pcie_gen_cap;
			if (pcie_table->pcie_lane[i] > pcie_width_cap)
				pcie_table->pcie_lane[i] = pcie_width_cap;
		}
	}

	for (i = 0; i < num_of_levels; i++) {
		smu_pcie_arg = i << 16;
		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
		smu_pcie_arg |= pcie_table->pcie_lane[i];

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_OverridePcieParameters,
						      smu_pcie_arg,
						      NULL);
		if (ret)
			return ret;
	}

	return 0;
}

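/*
 * Clear the MP1 firmware flags register and read it back: a zero
 * readback confirms the PMFW state was disarmed, anything else is
 * reported as -EINVAL.
 */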
int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
{
	int ret;
	struct amdgpu_device *adev = smu->adev;

	WREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff), 0);

	ret = RREG32_PCIE(MP1_Public |
			  (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	return ret == 0 ? 0 : -EINVAL;
}

int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableUCLKShadow, enable, NULL);
}

int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
					struct freq_band_range *exclusion_ranges)
{
	WifiBandEntryTable_t wifi_bands;
	int valid_entries = 0;
	int ret, i;

	memset(&wifi_bands, 0, sizeof(wifi_bands));
	for (i = 0; i < ARRAY_SIZE(wifi_bands.WifiBandEntry); i++) {
		if (!exclusion_ranges[i].start && !exclusion_ranges[i].end)
			break;

		/* PMFW expects the inputs in MHz units */
		wifi_bands.WifiBandEntry[valid_entries].LowFreq =
			DIV_ROUND_DOWN_ULL(exclusion_ranges[i].start, HZ_PER_MHZ);
		wifi_bands.WifiBandEntry[valid_entries++].HighFreq =
			DIV_ROUND_UP_ULL(exclusion_ranges[i].end, HZ_PER_MHZ);
	}
	wifi_bands.WifiBandEntryNum = valid_entries;

	/*
	 * Per confirmation with the PMFW team, WifiBandEntryNum = 0
	 * is a valid setting.
	 *
	 * Consider the scenarios below:
	 * - At first the wifi device adds an exclusion range e.g. (2400,2500) to
	 *   BIOS and our driver gets notified. We will set WifiBandEntryNum = 1
	 *   and pass the WifiBandEntry (2400, 2500) to PMFW.
	 *
	 * - Later the wifi device removes the wifiband list added above and
	 *   our driver gets notified again. At this time, the driver will set
	 *   WifiBandEntryNum = 0 and pass an empty WifiBandEntry list to PMFW.
	 *
	 * - PMFW may still need to do some uclk shadow update (e.g. switching
	 *   from the shadow clock back to the primary clock) on receiving this.
	 */
	ret = smu_cmn_update_table(smu, SMU_TABLE_WIFIBAND, 0, &wifi_bands, true);
	if (ret)
		dev_warn(smu->adev->dev, "Failed to set wifiband!");

	return ret;
}
