/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>

#define SMU_13_0_PARTIAL_PPTABLE
#define SWSMU_CODE_LAYER_L3

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "amdgpu_ras.h"
#include "smu_cmn.h"

#include "asic_reg/thm/thm_13_0_2_offset.h"
#include "asic_reg/thm/thm_13_0_2_sh_mask.h"
#include "asic_reg/mp/mp_13_0_2_offset.h"
#include "asic_reg/mp/mp_13_0_2_sh_mask.h"
#include "asic_reg/smuio/smuio_13_0_2_offset.h"
#include "asic_reg/smuio/smuio_13_0_2_sh_mask.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");

#define mmMP1_SMN_C2PMSG_66			0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX		0

#define mmMP1_SMN_C2PMSG_82			0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX		0

#define mmMP1_SMN_C2PMSG_90			0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX		0

#define SMU13_VOLTAGE_SCALE 4

#define LINK_WIDTH_MAX				6
#define LINK_SPEED_MAX				3

#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define smnPCIE_LC_SPEED_CNTL			0x11140290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5

#define ENABLE_IMU_ARG_GFXOFF_ENABLE		1

static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};

const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};

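/*
 * Request the SMU firmware image named after the MP1 IP version (with a
 * "_kicker" suffix on kicker SKUs), record the reported firmware version,
 * and register the image with the PSP loader when front-door (PSP) loading
 * is in use.  Skipped entirely under SR-IOV.
 */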
int smu_v13_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	char ucode_prefix[30];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	/* doesn't need to load smu firmware in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));

	if (amdgpu_is_kicker_fw(adev))
		err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_kicker.bin", ucode_prefix);
	else
		err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s.bin", ucode_prefix);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err)
		amdgpu_ucode_release(&adev->pm.fw);
	return err;
}

void smu_v13_0_fini_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	amdgpu_ucode_release(&adev->pm.fw);
	adev->pm.fw_version = 0;
}

int smu_v13_0_load_microcode(struct smu_context *smu)
{
#if 0
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t smc_fw_size;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	smc_fw_size = hdr->header.ucode_size_bytes;

	for (i = 1; i < smc_fw_size/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;
#endif

	return 0;
}

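/*
 * Register the soft pptable embedded in the SMU firmware as a separate PSP
 * ucode entry.  Only needed when SCPM is enabled; skipped for SR-IOV, for
 * non-PSP loading, for SMU 13.0.0/13.0.7/13.0.10, and when pptable_id is 0
 * (which means the VBIOS carries the pptable).
 */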
int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_firmware_info *ucode = NULL;
	uint32_t size = 0, pptable_id = 0;
	int ret = 0;
	void *table;

	/* doesn't need to load smu firmware in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (!adev->scpm_enabled)
		return 0;

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 7)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10)))
		return 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* "pptable_id == 0" means vbios carries the pptable. */
	if (!pptable_id)
		return 0;

	ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
	if (ret)
		return ret;

	smu->pptable_firmware.data = table;
	smu->pptable_firmware.size = size;

	ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
	ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
	ucode->fw = &smu->pptable_firmware;
	adev->firmware.fw_size +=
		ALIGN(smu->pptable_firmware.size, PAGE_SIZE);

	return 0;
}

int smu_v13_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_V13_0_4_FIRMWARE_FLAGS & 0xffffffff));
		break;
	default:
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		break;
	}

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

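/*
 * Read back the interface and firmware versions reported by the SMU and
 * cache the firmware version.  An interface-version mismatch is only
 * logged, not treated as fatal, since the firmware is expected to stay
 * backward compatible.
 */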
int smu_v13_0_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_program = (smu_version >> 24) & 0xff;
	smu_major = (smu_version >> 16) & 0xff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;
	adev->pm.fw_version = smu_version;

	/* only for dGPU w/ SMU13 */
	if (adev->pm.fw)
		dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
			smu_program, smu_version, smu_major, smu_minor, smu_debug);

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 * only on the paired driver.
	 * Considering the above, we just leave the user a verbal message
	 * instead of halting driver loading.
	 */
	if (smu->smc_driver_if_version != SMU_IGNORE_IF_VERSION &&
	    if_version != smu->smc_driver_if_version) {
		dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
			 smu->smc_driver_if_version, if_version,
			 smu_program, smu_version, smu_major, smu_minor, smu_debug);
		dev_info(adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

static int smu_v13_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

static int smu_v13_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	int ret, index;

	dev_info(adev->dev, "use vbios provided pptable\n");
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    powerplayinfo);

	ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
					     (uint8_t **)table);
	if (ret)
		return ret;

	if (size)
		*size = atom_table_size;

	return 0;
}

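/*
 * Pick a soft pptable out of the SMC firmware image: v2.0 headers carry a
 * single embedded table, v2.1 headers carry a list of entries looked up by
 * pptable_id.
 */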
int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
					void **table,
					uint32_t *size,
					uint32_t pptable_id)
{
	const struct smc_firmware_header_v1_0 *hdr;
	struct amdgpu_device *adev = smu->adev;
	uint16_t version_major, version_minor;
	int ret;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	if (!hdr)
		return -EINVAL;

	dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);

	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major != 2) {
		dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
			version_major, version_minor);
		return -EINVAL;
	}

	switch (version_minor) {
	case 0:
		ret = smu_v13_0_set_pptable_v2_0(smu, table, size);
		break;
	case 1:
		ret = smu_v13_0_set_pptable_v2_1(smu, table, size, pptable_id);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

int smu_v13_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t size = 0, pptable_id = 0;
	void *table;
	int ret = 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* force using vbios pptable in sriov mode */
	if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
		ret = smu_v13_0_get_pptable_from_vbios(smu, &table, &size);
	else
		ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);

	if (ret)
		return ret;

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

int smu_v13_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret = 0;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		ret = -ENOMEM;
		goto err0_out;
	}

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_13_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {
		ret = -ENOMEM;
		goto err1_out;
	}

	/* Aldebaran does not support OVERDRIVE */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {
			ret = -ENOMEM;
			goto err2_out;
		}

		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {
			ret = -ENOMEM;
			goto err3_out;
		}

		smu_table->user_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->user_overdrive_table) {
			ret = -ENOMEM;
			goto err4_out;
		}
	}

	smu_table->combo_pptable =
		kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->combo_pptable) {
		ret = -ENOMEM;
		goto err5_out;
	}

	return 0;

err5_out:
	kfree(smu_table->user_overdrive_table);
err4_out:
	kfree(smu_table->boot_overdrive_table);
err3_out:
	kfree(smu_table->overdrive_table);
err2_out:
	kfree(smu_table->max_sustainable_clocks);
err1_out:
	kfree(smu_table->driver_pptable);
err0_out:
	return ret;
}

int smu_v13_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	kfree(smu_table->gpu_metrics_table);
	kfree(smu_table->combo_pptable);
	kfree(smu_table->user_overdrive_table);
	kfree(smu_table->boot_overdrive_table);
	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);
	smu_table->gpu_metrics_table = NULL;
	smu_table->combo_pptable = NULL;
	smu_table->user_overdrive_table = NULL;
	smu_table->boot_overdrive_table = NULL;
	smu_table->overdrive_table = NULL;
	smu_table->max_sustainable_clocks = NULL;
	smu_table->driver_pptable = NULL;
	kfree(smu_table->hardcode_pptable);
	smu_table->hardcode_pptable = NULL;

	kfree(smu_table->ecc_table);
	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->ecc_table = NULL;
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	kfree(smu_dpm->dpm_policies);
	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_policies = NULL;
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

int smu_v13_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_13_0_power_context);

	return 0;
}

int smu_v13_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_4 *v_3_4;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;
	struct atom_smu_info_v3_6 *smu_info_v3_6;
	struct atom_smu_info_v4_0 *smu_info_v4_0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					     (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu13\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		break;
	case 4:
	default:
		v_3_4 = (struct atom_firmware_info_v3_4 *)header;
		smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
		break;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					    (uint8_t **)&header)) {

		if ((frev == 3) && (crev == 6)) {
			smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
		} else if ((frev == 3) && (crev == 1)) {
			return 0;
		} else if ((frev == 4) && (crev == 0)) {
			smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
			smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
		} else {
			dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
				 (uint32_t)frev, (uint32_t)crev);
		}
	}

	return 0;
}

int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					      address_high, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					      address_low, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					      (uint32_t)memory_pool->size, NULL);
	if (ret)
		return ret;

	return ret;
}

int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetDriverDramAddrHigh,
						      upper_32_bits(driver_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetDriverDramAddrLow,
							      lower_32_bits(driver_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v13_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetToolsDramAddrHigh,
						      upper_32_bits(tool_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetToolsDramAddrLow,
							      lower_32_bits(tool_table->mc_address),
							      NULL);
	}

	return ret;
}

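/*
 * Push the 64-bit allowed-feature bitmap down to the SMU as two 32-bit
 * words (high word first).
 */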
int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
	    feature->feature_num < 64)
		return -EINVAL;

	bitmap_to_arr32(feature_mask, feature->allowed, 64);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					      feature_mask[1], NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetAllowedFeaturesMaskLow,
					       feature_mask[0],
					       NULL);
}

int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
		else
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
		break;
	default:
		break;
	}

	return ret;
}

int smu_v13_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					  SMU_MSG_DisableAllSmuFeatures), NULL);
}

int smu_v13_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!amdgpu_device_has_dc_support(smu->adev))
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DALNotPresent, NULL);

	return ret;
}

static int
smu_v13_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	return 0;
}

int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_13_0_max_sustainable_clocks *max_sustainable_clocks =
		smu->smu_table.max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

int smu_v13_0_get_current_power_limit(struct smu_context *smu,
				      uint32_t *power_limit)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		return -EINVAL;

	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetPptLimit,
					      power_src << 16,
					      power_limit);
	if (ret)
		dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);

	return ret;
}

int smu_v13_0_set_power_limit(struct smu_context *smu,
			      enum smu_ppt_limit_type limit_type,
			      uint32_t limit)
{
	int ret = 0;

	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		return -EINVAL;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
		return ret;
	}

	smu->current_power_limit = limit;

	return 0;
}

static int smu_v13_0_allow_ih_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				    SMU_MSG_AllowIHHostInterrupt,
				    NULL);
}

static int smu_v13_0_process_pending_interrupt(struct smu_context *smu)
{
	int ret = 0;

	if (smu->dc_controlled_by_gpio &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
		ret = smu_v13_0_allow_ih_interrupt(smu);

	return ret;
}

int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->irq_source.num_types)
		return 0;

	ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
	if (ret)
		return ret;

	return smu_v13_0_process_pending_interrupt(smu);
}

int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
{
	if (!smu->irq_source.num_types)
		return 0;

	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}

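/*
 * Decode an SVI telemetry VID code into a voltage value:
 * (6200 - vid * 25) / SMU13_VOLTAGE_SCALE, i.e. 1550 at VID 0, decreasing
 * by 6.25 per VID step.  The result is what smu_v13_0_get_gfx_vdd() reports
 * as the current GFX voltage.
 */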
static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU13_VOLTAGE_SCALE);
}

int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, regSMUSVI0_TEL_PLANE0) &
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		  SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;
}

uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static int
smu_v13_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
			__func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v13_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, regCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, regCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

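/*
 * Force a static fan duty cycle: automatic fan control is disabled, the
 * 0-255 PWM request is rescaled against the controller's FMAX_DUTY100 range
 * and written to CG_FDO_CTRL0, then the fan is switched to static PWM mode.
 */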
int smu_v13_0_set_fan_speed_pwm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	speed = min_t(uint32_t, speed, 255);

	if (smu_v13_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 255);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, regCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}

int
smu_v13_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v13_0_set_fan_speed_pwm(smu, 255);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v13_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v13_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

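/*
 * Force a static fan speed in RPM: automatic fan control is disabled, the
 * requested RPM is converted into a tachometer target period against the
 * fixed reference clock (crystal_clock_freq), and the controller is
 * switched to static RPM mode.
 */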
int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t crystal_clock_freq = 2500;
	uint32_t tach_period;
	int ret;

	if (!speed || speed > UINT_MAX/8)
		return -EINVAL;

	ret = smu_v13_0_auto_fan_control(smu, 0);
	if (ret)
		return ret;

	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, regCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
}

int smu_v13_0_set_xgmi_pstate(struct smu_context *smu,
			      uint32_t pstate)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetXgmiMode,
					      pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
					      NULL);
	return ret;
}

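/*
 * Mask/unmask the thermal (THM) and MP1 software interrupt sources.  When
 * enabling, the DIG_THERM low/high alert thresholds are derived from the
 * thermal range reported for the ASIC and clamped to the supported alert
 * window before being programmed.
 */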
static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   unsigned type,
				   enum amdgpu_interrupt_state state)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t low, high;
	uint32_t val = 0;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* For THM irqs */
		val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);

		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, 0);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* For THM irqs */
		low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
			  smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
		high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
			   smu->thermal_range.software_shutdown_temp);

		val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
		val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);

		val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, val);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);

		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	default:
		break;
	}

	return 0;
}

void smu_v13_0_interrupt_work(struct smu_context *smu)
{
	smu_cmn_send_smc_msg(smu,
			     SMU_MSG_ReenableAcDcInterrupt,
			     NULL);
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0	/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1	/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
#define SMUIO_11_0__SRCID__SMUIO_GPIO19		83

static int smu_v13_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;
	/*
	 * ctxid is used to distinguish different
	 * events for SMCToHost interrupt.
	 */
	uint32_t ctxid = entry->src_data[0];
	uint32_t data;
	uint32_t high;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			schedule_delayed_work(&smu->swctf_delayed_work,
					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
			break;
		default:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
				  src_id);
			break;
		}
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
		/*
		 * HW CTF just occurred. Shutdown to prevent further damage.
		 */
		dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
		orderly_poweroff(true);
	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
			/* ACK SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);

			switch (ctxid) {
			case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
				dev_dbg(adev->dev, "Switched to AC mode!\n");
				schedule_work(&smu->interrupt_work);
				adev->pm.ac_power = true;
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
				dev_dbg(adev->dev, "Switched to DC mode!\n");
				schedule_work(&smu->interrupt_work);
				adev->pm.ac_power = false;
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
				/*
				 * Increment the throttle interrupt counter
				 */
				atomic64_inc(&smu->throttle_int_counter);

				if (!atomic_read(&adev->throttling_logging_enabled))
					return 0;

				if (__ratelimit(&adev->throttling_logging_rs))
					schedule_work(&smu->throttling_logging_work);

				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:
				high = smu->thermal_range.software_shutdown_temp +
					smu->thermal_range.software_shutdown_temp_offset;
				high = min_t(typeof(high),
					     SMU_THERMAL_MAXIMUM_ALERT_TEMP,
					     high);
				dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n",
					  high,
					  smu->thermal_range.software_shutdown_temp_offset);

				data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
				data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
						     DIG_THERM_INTH,
						     (high & 0xff));
				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:
				high = min_t(typeof(high),
					     SMU_THERMAL_MAXIMUM_ALERT_TEMP,
					     smu->thermal_range.software_shutdown_temp);
				dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high);

				data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
				data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
						     DIG_THERM_INTH,
						     (high & 0xff));
				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
				break;
			default:
				dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
					ctxid, client_id);
				break;
			}
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v13_0_irq_funcs = {
	.set = smu_v13_0_set_irq_state,
	.process = smu_v13_0_irq_process,
};

int smu_v13_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v13_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	/* Register CTF(GPIO_19) interrupt */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
				SMUIO_11_0__SRCID__SMUIO_GPIO19,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				SMU_IH_INTERRUPT_ID_TO_DRIVER,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					       struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
		(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
		(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
		(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
		(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
		(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
		(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);

	return ret;
}

static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu,
					     uint64_t event_arg)
{
	int ret = 0;

	dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);

	return ret;
}

int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
			     uint64_t event_arg)
{
	int ret = -EINVAL;

	switch (event) {
	case SMU_EVENT_RESET_COMPLETE:
		ret = smu_v13_0_wait_for_reset_complete(smu, event_arg);
		break;
	default:
		break;
	}

	return ret;
}

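/*
 * Query the hard min/max DPM frequencies for a clock domain.  If DPM is not
 * enabled for that domain, the VBIOS bootup clock (converted to MHz) is
 * returned for both bounds.  The clock id is passed to the SMU in the upper
 * 16 bits of the message parameter; on DC power the DC-mode maximum is used.
 */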
int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;
	uint32_t clock_limit;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit);
		if (ret)
			return ret;

		/* clock in Mhz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	param = (clk_id & 0xffff) << 16;

	if (max) {
		if (smu->adev->pm.ac_power)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetMaxDpmFreq,
							      param,
							      max);
		else
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetDcModeMaxDpmFreq,
							      param,
							      max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

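/*
 * Program soft min/max limits for a clock domain.  The message parameter
 * packs the clock id in the upper 16 bits and the frequency in the lower
 * 16 bits; in automatic mode the limits are opened up to 0 (min) and
 * 0xffff (max).
 */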
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max,
					  bool automatic)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		if (automatic)
			param = (uint32_t)((clk_id << 16) | 0xffff);
		else
			param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

	if (min > 0) {
		if (automatic)
			param = (uint32_t)((clk_id << 16) | 0);
		else
			param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

out:
	return ret;
}

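/*
 * Map a forced performance level onto per-domain soft min/max clock ranges
 * and apply them via smu_v13_0_set_soft_freq_limited_range(), caching the
 * applied range in the pstate table.  On SMU 13.0.2 only the GFXCLK range
 * is programmed, as soft limits for the other domains are not supported
 * there.
 */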
smu_v13_0_set_performance_level(struct smu_context * smu,enum amd_dpm_forced_level level)1583 int smu_v13_0_set_performance_level(struct smu_context *smu,
1584 enum amd_dpm_forced_level level)
1585 {
1586 struct smu_13_0_dpm_context *dpm_context =
1587 smu->smu_dpm.dpm_context;
1588 struct smu_13_0_dpm_table *gfx_table =
1589 &dpm_context->dpm_tables.gfx_table;
1590 struct smu_13_0_dpm_table *mem_table =
1591 &dpm_context->dpm_tables.uclk_table;
1592 struct smu_13_0_dpm_table *soc_table =
1593 &dpm_context->dpm_tables.soc_table;
1594 struct smu_13_0_dpm_table *vclk_table =
1595 &dpm_context->dpm_tables.vclk_table;
1596 struct smu_13_0_dpm_table *dclk_table =
1597 &dpm_context->dpm_tables.dclk_table;
1598 struct smu_13_0_dpm_table *fclk_table =
1599 &dpm_context->dpm_tables.fclk_table;
1600 struct smu_umd_pstate_table *pstate_table =
1601 &smu->pstate_table;
1602 struct amdgpu_device *adev = smu->adev;
1603 uint32_t sclk_min = 0, sclk_max = 0;
1604 uint32_t mclk_min = 0, mclk_max = 0;
1605 uint32_t socclk_min = 0, socclk_max = 0;
1606 uint32_t vclk_min = 0, vclk_max = 0;
1607 uint32_t dclk_min = 0, dclk_max = 0;
1608 uint32_t fclk_min = 0, fclk_max = 0;
1609 int ret = 0, i;
1610 bool auto_level = false;
1611
1612 switch (level) {
1613 case AMD_DPM_FORCED_LEVEL_HIGH:
1614 sclk_min = sclk_max = gfx_table->max;
1615 mclk_min = mclk_max = mem_table->max;
1616 socclk_min = socclk_max = soc_table->max;
1617 vclk_min = vclk_max = vclk_table->max;
1618 dclk_min = dclk_max = dclk_table->max;
1619 fclk_min = fclk_max = fclk_table->max;
1620 break;
1621 case AMD_DPM_FORCED_LEVEL_LOW:
1622 sclk_min = sclk_max = gfx_table->min;
1623 mclk_min = mclk_max = mem_table->min;
1624 socclk_min = socclk_max = soc_table->min;
1625 vclk_min = vclk_max = vclk_table->min;
1626 dclk_min = dclk_max = dclk_table->min;
1627 fclk_min = fclk_max = fclk_table->min;
1628 break;
1629 case AMD_DPM_FORCED_LEVEL_AUTO:
1630 sclk_min = gfx_table->min;
1631 sclk_max = gfx_table->max;
1632 mclk_min = mem_table->min;
1633 mclk_max = mem_table->max;
1634 socclk_min = soc_table->min;
1635 socclk_max = soc_table->max;
1636 vclk_min = vclk_table->min;
1637 vclk_max = vclk_table->max;
1638 dclk_min = dclk_table->min;
1639 dclk_max = dclk_table->max;
1640 fclk_min = fclk_table->min;
1641 fclk_max = fclk_table->max;
1642 auto_level = true;
1643 break;
1644 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1645 sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
1646 mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
1647 socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
1648 vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
1649 dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
1650 fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
1651 break;
1652 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1653 sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
1654 break;
1655 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1656 mclk_min = mclk_max = pstate_table->uclk_pstate.min;
1657 break;
1658 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1659 sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
1660 mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
1661 socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
1662 vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
1663 dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
1664 fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
1665 break;
1666 case AMD_DPM_FORCED_LEVEL_MANUAL:
1667 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1668 return 0;
1669 default:
1670 dev_err(adev->dev, "Invalid performance level %d\n", level);
1671 return -EINVAL;
1672 }
1673
1674 /*
1675 * Unset those settings for SMU 13.0.2. As soft limits settings
1676 * for those clock domains are not supported.
1677 */
1678 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) {
1679 mclk_min = mclk_max = 0;
1680 socclk_min = socclk_max = 0;
1681 vclk_min = vclk_max = 0;
1682 dclk_min = dclk_max = 0;
1683 fclk_min = fclk_max = 0;
1684 auto_level = false;
1685 }
1686
1687 if (sclk_min && sclk_max) {
1688 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1689 SMU_GFXCLK,
1690 sclk_min,
1691 sclk_max,
1692 auto_level);
1693 if (ret)
1694 return ret;
1695
1696 pstate_table->gfxclk_pstate.curr.min = sclk_min;
1697 pstate_table->gfxclk_pstate.curr.max = sclk_max;
1698 }
1699
1700 if (mclk_min && mclk_max) {
1701 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1702 SMU_MCLK,
1703 mclk_min,
1704 mclk_max,
1705 auto_level);
1706 if (ret)
1707 return ret;
1708
1709 pstate_table->uclk_pstate.curr.min = mclk_min;
1710 pstate_table->uclk_pstate.curr.max = mclk_max;
1711 }
1712
1713 if (socclk_min && socclk_max) {
1714 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1715 SMU_SOCCLK,
1716 socclk_min,
1717 socclk_max,
1718 auto_level);
1719 if (ret)
1720 return ret;
1721
1722 pstate_table->socclk_pstate.curr.min = socclk_min;
1723 pstate_table->socclk_pstate.curr.max = socclk_max;
1724 }
1725
1726 if (vclk_min && vclk_max) {
1727 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1728 if (adev->vcn.harvest_config & (1 << i))
1729 continue;
1730 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1731 i ? SMU_VCLK1 : SMU_VCLK,
1732 vclk_min,
1733 vclk_max,
1734 auto_level);
1735 if (ret)
1736 return ret;
1737 }
1738 pstate_table->vclk_pstate.curr.min = vclk_min;
1739 pstate_table->vclk_pstate.curr.max = vclk_max;
1740 }
1741
1742 if (dclk_min && dclk_max) {
1743 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1744 if (adev->vcn.harvest_config & (1 << i))
1745 continue;
1746 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1747 i ? SMU_DCLK1 : SMU_DCLK,
1748 dclk_min,
1749 dclk_max,
1750 auto_level);
1751 if (ret)
1752 return ret;
1753 }
1754 pstate_table->dclk_pstate.curr.min = dclk_min;
1755 pstate_table->dclk_pstate.curr.max = dclk_max;
1756 }
1757
1758 if (fclk_min && fclk_max) {
1759 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1760 SMU_FCLK,
1761 fclk_min,
1762 fclk_max,
1763 auto_level);
1764 if (ret)
1765 return ret;
1766
1767 pstate_table->fclk_pstate.curr.min = fclk_min;
1768 pstate_table->fclk_pstate.curr.max = fclk_max;
1769 }
1770
1771 return ret;
1772 }
1773
1774 int smu_v13_0_set_power_source(struct smu_context *smu,
1775 enum smu_power_src_type power_src)
1776 {
1777 int pwr_source;
1778
1779 pwr_source = smu_cmn_to_asic_specific_index(smu,
1780 CMN2ASIC_MAPPING_PWR,
1781 (uint32_t)power_src);
1782 if (pwr_source < 0)
1783 return -EINVAL;
1784
1785 return smu_cmn_send_smc_msg_with_param(smu,
1786 SMU_MSG_NotifyPowerSource,
1787 pwr_source,
1788 NULL);
1789 }
1790
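/*
 * Fallback used when DPM is not enabled for a clock: report the boot-time
 * frequency captured in smu_table.boot_values during initialization.
 */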
1791 int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
1792 enum smu_clk_type clk_type,
1793 uint32_t *value)
1794 {
1795 int ret = 0;
1796
1797 switch (clk_type) {
1798 case SMU_MCLK:
1799 case SMU_UCLK:
1800 *value = smu->smu_table.boot_values.uclk;
1801 break;
1802 case SMU_FCLK:
1803 *value = smu->smu_table.boot_values.fclk;
1804 break;
1805 case SMU_GFXCLK:
1806 case SMU_SCLK:
1807 *value = smu->smu_table.boot_values.gfxclk;
1808 break;
1809 case SMU_SOCCLK:
1810 *value = smu->smu_table.boot_values.socclk;
1811 break;
1812 case SMU_VCLK:
1813 *value = smu->smu_table.boot_values.vclk;
1814 break;
1815 case SMU_DCLK:
1816 *value = smu->smu_table.boot_values.dclk;
1817 break;
1818 default:
1819 ret = -EINVAL;
1820 break;
1821 }
1822 return ret;
1823 }
1824
1825 int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
1826 enum smu_clk_type clk_type, uint16_t level,
1827 uint32_t *value)
1828 {
1829 int ret = 0, clk_id = 0;
1830 uint32_t param;
1831
1832 if (!value)
1833 return -EINVAL;
1834
1835 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1836 return smu_v13_0_get_boot_freq_by_index(smu, clk_type, value);
1837
1838 clk_id = smu_cmn_to_asic_specific_index(smu,
1839 CMN2ASIC_MAPPING_CLK,
1840 clk_type);
1841 if (clk_id < 0)
1842 return clk_id;
1843
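	/*
	 * Argument layout: clk_id in bits [31:16], DPM level index in bits [15:0].
	 * The response carries the frequency in the lower 31 bits.
	 */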
1844 param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
1845
1846 ret = smu_cmn_send_smc_msg_with_param(smu,
1847 SMU_MSG_GetDpmFreqByIndex,
1848 param,
1849 value);
1850 if (ret)
1851 return ret;
1852
1853 *value = *value & 0x7fffffff;
1854
1855 return ret;
1856 }
1857
1858 static int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
1859 enum smu_clk_type clk_type,
1860 uint32_t *value)
1861 {
1862 int ret;
1863
1864 ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
1865 /* SMU v13.0.2 FW returns a 0-based max level; increment it by one */
1866 if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) && (!ret && value))
1867 ++(*value);
1868
1869 return ret;
1870 }
1871
1872 static int smu_v13_0_get_fine_grained_status(struct smu_context *smu,
1873 enum smu_clk_type clk_type,
1874 bool *is_fine_grained_dpm)
1875 {
1876 int ret = 0, clk_id = 0;
1877 uint32_t param;
1878 uint32_t value;
1879
1880 if (!is_fine_grained_dpm)
1881 return -EINVAL;
1882
1883 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1884 return 0;
1885
1886 clk_id = smu_cmn_to_asic_specific_index(smu,
1887 CMN2ASIC_MAPPING_CLK,
1888 clk_type);
1889 if (clk_id < 0)
1890 return clk_id;
1891
1892 param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);
1893
1894 ret = smu_cmn_send_smc_msg_with_param(smu,
1895 SMU_MSG_GetDpmFreqByIndex,
1896 param,
1897 &value);
1898 if (ret)
1899 return ret;
1900
1901 /*
1902 * BIT31: 1 - fine grained DPM, 0 - discrete DPM.
1903 * Fine grained DPM is not supported for now.
1904 */
1905 *is_fine_grained_dpm = value & 0x80000000;
1906
1907 return 0;
1908 }
1909
1910 int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
1911 enum smu_clk_type clk_type,
1912 struct smu_13_0_dpm_table *single_dpm_table)
1913 {
1914 int ret = 0;
1915 uint32_t clk;
1916 int i;
1917
1918 ret = smu_v13_0_get_dpm_level_count(smu,
1919 clk_type,
1920 &single_dpm_table->count);
1921 if (ret) {
1922 dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
1923 return ret;
1924 }
1925
1926 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 2)) {
1927 ret = smu_v13_0_get_fine_grained_status(smu,
1928 clk_type,
1929 &single_dpm_table->is_fine_grained);
1930 if (ret) {
1931 dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
1932 return ret;
1933 }
1934 }
1935
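	/* Query each DPM level's frequency and record the table's min/max endpoints. */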
1936 for (i = 0; i < single_dpm_table->count; i++) {
1937 ret = smu_v13_0_get_dpm_freq_by_index(smu,
1938 clk_type,
1939 i,
1940 &clk);
1941 if (ret) {
1942 dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
1943 return ret;
1944 }
1945
1946 single_dpm_table->dpm_levels[i].value = clk;
1947 single_dpm_table->dpm_levels[i].enabled = true;
1948
1949 if (i == 0)
1950 single_dpm_table->min = clk;
1951 else if (i == single_dpm_table->count - 1)
1952 single_dpm_table->max = clk;
1953 }
1954
1955 return 0;
1956 }
1957
1958 int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu)
1959 {
1960 struct amdgpu_device *adev = smu->adev;
1961
1962 return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
1963 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
1964 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
1965 }
1966
1967 int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu)
1968 {
1969 uint32_t width_level;
1970
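	/* The register field holds an encoded width level; translate it to lanes via link_width[]. */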
1971 width_level = smu_v13_0_get_current_pcie_link_width_level(smu);
1972 if (width_level > LINK_WIDTH_MAX)
1973 width_level = 0;
1974
1975 return link_width[width_level];
1976 }
1977
1978 int smu_v13_0_get_current_pcie_link_speed_level(struct smu_context *smu)
1979 {
1980 struct amdgpu_device *adev = smu->adev;
1981
1982 return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
1983 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
1984 >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
1985 }
1986
1987 int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu)
1988 {
1989 uint32_t speed_level;
1990
1991 speed_level = smu_v13_0_get_current_pcie_link_speed_level(smu);
1992 if (speed_level > LINK_SPEED_MAX)
1993 speed_level = 0;
1994
1995 return link_speed[speed_level];
1996 }
1997
1998 int smu_v13_0_set_vcn_enable(struct smu_context *smu,
1999 bool enable,
2000 int inst)
2001 {
2002 struct amdgpu_device *adev = smu->adev;
2003 int ret = 0;
2004
2005 if (adev->vcn.harvest_config & (1 << inst))
2006 return ret;
2007
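	/* The VCN instance number is carried in the upper 16 bits of the argument. */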
2008 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
2009 SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
2010 inst << 16U, NULL);
2011
2012 return ret;
2013 }
2014
2015 int smu_v13_0_set_jpeg_enable(struct smu_context *smu,
2016 bool enable)
2017 {
2018 return smu_cmn_send_smc_msg_with_param(smu, enable ?
2019 SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
2020 0, NULL);
2021 }
2022
2023 int smu_v13_0_run_btc(struct smu_context *smu)
2024 {
2025 int res;
2026
2027 res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
2028 if (res)
2029 dev_err(smu->adev->dev, "RunDcBtc failed!\n");
2030
2031 return res;
2032 }
2033
2034 int smu_v13_0_gpo_control(struct smu_context *smu,
2035 bool enablement)
2036 {
2037 int res;
2038
2039 res = smu_cmn_send_smc_msg_with_param(smu,
2040 SMU_MSG_AllowGpo,
2041 enablement ? 1 : 0,
2042 NULL);
2043 if (res)
2044 dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
2045
2046 return res;
2047 }
2048
2049 int smu_v13_0_deep_sleep_control(struct smu_context *smu,
2050 bool enablement)
2051 {
2052 struct amdgpu_device *adev = smu->adev;
2053 int ret = 0;
2054
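	/* Toggle the deep-sleep feature bit for every clock domain reported as supported. */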
2055 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
2056 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
2057 if (ret) {
2058 dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
2059 return ret;
2060 }
2061 }
2062
2063 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
2064 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
2065 if (ret) {
2066 dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
2067 return ret;
2068 }
2069 }
2070
2071 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
2072 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
2073 if (ret) {
2074 dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
2075 return ret;
2076 }
2077 }
2078
2079 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
2080 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
2081 if (ret) {
2082 dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
2083 return ret;
2084 }
2085 }
2086
2087 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
2088 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
2089 if (ret) {
2090 dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
2091 return ret;
2092 }
2093 }
2094
2095 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
2096 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
2097 if (ret) {
2098 dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable");
2099 return ret;
2100 }
2101 }
2102
2103 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
2104 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
2105 if (ret) {
2106 dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable");
2107 return ret;
2108 }
2109 }
2110
2111 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
2112 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
2113 if (ret) {
2114 dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? "enable" : "disable");
2115 return ret;
2116 }
2117 }
2118
2119 return ret;
2120 }
2121
2122 int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
2123 bool enablement)
2124 {
2125 int ret = 0;
2126
2127 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
2128 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
2129
2130 return ret;
2131 }
2132
2133 static int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
2134 enum smu_baco_seq baco_seq)
2135 {
2136 struct smu_baco_context *smu_baco = &smu->smu_baco;
2137 int ret;
2138
2139 ret = smu_cmn_send_smc_msg_with_param(smu,
2140 SMU_MSG_ArmD3,
2141 baco_seq,
2142 NULL);
2143 if (ret)
2144 return ret;
2145
2146 if (baco_seq == BACO_SEQ_BAMACO ||
2147 baco_seq == BACO_SEQ_BACO)
2148 smu_baco->state = SMU_BACO_STATE_ENTER;
2149 else
2150 smu_baco->state = SMU_BACO_STATE_EXIT;
2151
2152 return 0;
2153 }
2154
2155 static enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu)
2156 {
2157 struct smu_baco_context *smu_baco = &smu->smu_baco;
2158
2159 return smu_baco->state;
2160 }
2161
2162 static int smu_v13_0_baco_set_state(struct smu_context *smu,
2163 enum smu_baco_state state)
2164 {
2165 struct smu_baco_context *smu_baco = &smu->smu_baco;
2166 struct amdgpu_device *adev = smu->adev;
2167 int ret = 0;
2168
2169 if (smu_v13_0_baco_get_state(smu) == state)
2170 return 0;
2171
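	/* On entry, pick BAMACO or BACO based on the configured runtime PM mode. */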
2172 if (state == SMU_BACO_STATE_ENTER) {
2173 ret = smu_cmn_send_smc_msg_with_param(smu,
2174 SMU_MSG_EnterBaco,
2175 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
2176 BACO_SEQ_BAMACO : BACO_SEQ_BACO,
2177 NULL);
2178 } else {
2179 ret = smu_cmn_send_smc_msg(smu,
2180 SMU_MSG_ExitBaco,
2181 NULL);
2182 if (ret)
2183 return ret;
2184
2185 /* clear vbios scratch registers 6 and 7 for the upcoming ASIC reinit */
2186 WREG32(adev->bios_scratch_reg_offset + 6, 0);
2187 WREG32(adev->bios_scratch_reg_offset + 7, 0);
2188 }
2189
2190 if (!ret)
2191 smu_baco->state = state;
2192
2193 return ret;
2194 }
2195
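/*
 * Returns a bitmask of BACO_SUPPORT and MACO_SUPPORT flags; 0 means neither
 * BACO nor BAMACO is usable (e.g. under SR-IOV or without platform support).
 */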
2196 int smu_v13_0_get_bamaco_support(struct smu_context *smu)
2197 {
2198 struct smu_baco_context *smu_baco = &smu->smu_baco;
2199 int bamaco_support = 0;
2200
2201 if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
2202 return 0;
2203
2204 if (smu_baco->maco_support)
2205 bamaco_support |= MACO_SUPPORT;
2206
2207 /* report BACO support directly if the ASIC is already in BACO state */
2208 if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
2209 return bamaco_support |= BACO_SUPPORT;
2210
2211 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
2212 !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
2213 return 0;
2214
2215 return (bamaco_support |= BACO_SUPPORT);
2216 }
2217
2218 int smu_v13_0_baco_enter(struct smu_context *smu)
2219 {
2220 struct amdgpu_device *adev = smu->adev;
2221 int ret;
2222
2223 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
2224 return smu_v13_0_baco_set_armd3_sequence(smu,
2225 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
2226 BACO_SEQ_BAMACO : BACO_SEQ_BACO);
2227 } else {
2228 ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
2229 if (!ret)
2230 usleep_range(10000, 11000);
2231
2232 return ret;
2233 }
2234 }
2235
2236 int smu_v13_0_baco_exit(struct smu_context *smu)
2237 {
2238 struct amdgpu_device *adev = smu->adev;
2239 int ret;
2240
2241 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
2242 /* Wait for PMFW to finish handling the D-state change */
2243 usleep_range(10000, 11000);
2244 ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
2245 } else {
2246 ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
2247 }
2248
2249 if (!ret)
2250 adev->gfx.is_poweron = false;
2251
2252 return ret;
2253 }
2254
2255 int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
2256 {
2257 uint16_t index;
2258 struct amdgpu_device *adev = smu->adev;
2259
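	/* With PSP-backed firmware loading, wait for the PMFW response; otherwise fire the message without waiting. */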
2260 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2261 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
2262 ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
2263 }
2264
2265 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
2266 SMU_MSG_EnableGfxImu);
2267 return smu_cmn_send_msg_without_waiting(smu, index,
2268 ENABLE_IMU_ARG_GFXOFF_ENABLE);
2269 }
2270
2271 int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
2272 enum PP_OD_DPM_TABLE_COMMAND type,
2273 long input[], uint32_t size)
2274 {
2275 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
2276 int ret = 0;
2277
2278 /* Only allowed in manual mode */
2279 if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
2280 return -EINVAL;
2281
2282 switch (type) {
2283 case PP_OD_EDIT_SCLK_VDDC_TABLE:
2284 if (size != 2) {
2285 dev_err(smu->adev->dev, "Input parameter number not correct\n");
2286 return -EINVAL;
2287 }
2288
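		/* input[0]: 0 - set the hard minimum, 1 - set the soft maximum; input[1]: frequency in MHz. */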
2289 if (input[0] == 0) {
2290 if (input[1] < smu->gfx_default_hard_min_freq) {
2291 dev_warn(smu->adev->dev,
2292 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
2293 input[1], smu->gfx_default_hard_min_freq);
2294 return -EINVAL;
2295 }
2296 smu->gfx_actual_hard_min_freq = input[1];
2297 } else if (input[0] == 1) {
2298 if (input[1] > smu->gfx_default_soft_max_freq) {
2299 dev_warn(smu->adev->dev,
2300 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
2301 input[1], smu->gfx_default_soft_max_freq);
2302 return -EINVAL;
2303 }
2304 smu->gfx_actual_soft_max_freq = input[1];
2305 } else {
2306 return -EINVAL;
2307 }
2308 break;
2309 case PP_OD_RESTORE_DEFAULT_TABLE:
2310 if (size != 0) {
2311 dev_err(smu->adev->dev, "Input parameter number not correct\n");
2312 return -EINVAL;
2313 }
2314 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
2315 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
2316 break;
2317 case PP_OD_COMMIT_DPM_TABLE:
2318 if (size != 0) {
2319 dev_err(smu->adev->dev, "Input parameter number not correct\n");
2320 return -EINVAL;
2321 }
2322 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
2323 dev_err(smu->adev->dev,
2324 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
2325 smu->gfx_actual_hard_min_freq,
2326 smu->gfx_actual_soft_max_freq);
2327 return -EINVAL;
2328 }
2329
2330 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
2331 smu->gfx_actual_hard_min_freq,
2332 NULL);
2333 if (ret) {
2334 dev_err(smu->adev->dev, "Set hard min sclk failed!");
2335 return ret;
2336 }
2337
2338 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
2339 smu->gfx_actual_soft_max_freq,
2340 NULL);
2341 if (ret) {
2342 dev_err(smu->adev->dev, "Set soft max sclk failed!");
2343 return ret;
2344 }
2345 break;
2346 default:
2347 return -ENOSYS;
2348 }
2349
2350 return ret;
2351 }
2352
2353 int smu_v13_0_set_default_dpm_tables(struct smu_context *smu)
2354 {
2355 struct smu_table_context *smu_table = &smu->smu_table;
2356
2357 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
2358 smu_table->clocks_table, false);
2359 }
2360
2361 void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu)
2362 {
2363 struct amdgpu_device *adev = smu->adev;
2364
2365 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
2366 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
2367 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
2368 }
2369
2370 int smu_v13_0_mode1_reset(struct smu_context *smu)
2371 {
2372 int ret = 0;
2373
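	/* Request a mode-1 reset, then give PMFW time to complete it before the ASIC is touched again. */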
2374 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
2375 if (!ret)
2376 msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
2377
2378 return ret;
2379 }
2380
2381 int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
2382 uint8_t pcie_gen_cap,
2383 uint8_t pcie_width_cap)
2384 {
2385 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
2386 struct smu_13_0_pcie_table *pcie_table =
2387 &dpm_context->dpm_tables.pcie_table;
2388 int num_of_levels = pcie_table->num_of_link_levels;
2389 uint32_t smu_pcie_arg;
2390 int ret = 0;
2391 int i;
2392
2393 if (!num_of_levels)
2394 return 0;
2395
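	/* Argument layout: DPM level in bits [31:16], PCIe gen in bits [15:8], link width in bits [7:0]. */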
2396 if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
2397 if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
2398 pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
2399
2400 if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
2401 pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
2402
2403 /* Force all levels to use the same settings */
2404 for (i = 0; i < num_of_levels; i++) {
2405 pcie_table->pcie_gen[i] = pcie_gen_cap;
2406 pcie_table->pcie_lane[i] = pcie_width_cap;
2407 smu_pcie_arg = i << 16;
2408 smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
2409 smu_pcie_arg |= pcie_table->pcie_lane[i];
2410
2411 ret = smu_cmn_send_smc_msg_with_param(smu,
2412 SMU_MSG_OverridePcieParameters,
2413 smu_pcie_arg,
2414 NULL);
2415 if (ret)
2416 break;
2417 }
2418 } else {
2419 for (i = 0; i < num_of_levels; i++) {
2420 if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
2421 pcie_table->pcie_lane[i] > pcie_width_cap) {
2422 pcie_table->pcie_gen[i] = pcie_gen_cap;
2423 pcie_table->pcie_lane[i] = pcie_width_cap;
2424 smu_pcie_arg = i << 16;
2425 smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
2426 smu_pcie_arg |= pcie_table->pcie_lane[i];
2427
2428 ret = smu_cmn_send_smc_msg_with_param(smu,
2429 SMU_MSG_OverridePcieParameters,
2430 smu_pcie_arg,
2431 NULL);
2432 if (ret)
2433 break;
2434 }
2435 }
2436 }
2437
2438 return ret;
2439 }
2440
2441 int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
2442 {
2443 int ret;
2444 struct amdgpu_device *adev = smu->adev;
2445
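	/* Clear the MP1 firmware flags register and verify that it reads back as zero. */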
2446 WREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff), 0);
2447
2448 ret = RREG32_PCIE(MP1_Public |
2449 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
2450
2451 return ret == 0 ? 0 : -EINVAL;
2452 }
2453
2454 int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable)
2455 {
2456 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableUCLKShadow, enable, NULL);
2457 }
2458
2459 int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
2460 struct freq_band_range *exclusion_ranges)
2461 {
2462 WifiBandEntryTable_t wifi_bands;
2463 int valid_entries = 0;
2464 int ret, i;
2465
2466 memset(&wifi_bands, 0, sizeof(wifi_bands));
2467 for (i = 0; i < ARRAY_SIZE(wifi_bands.WifiBandEntry); i++) {
2468 if (!exclusion_ranges[i].start && !exclusion_ranges[i].end)
2469 break;
2470
2471 /* PMFW expects the inputs to be in MHz */
2472 wifi_bands.WifiBandEntry[valid_entries].LowFreq =
2473 DIV_ROUND_DOWN_ULL(exclusion_ranges[i].start, HZ_PER_MHZ);
2474 wifi_bands.WifiBandEntry[valid_entries++].HighFreq =
2475 DIV_ROUND_UP_ULL(exclusion_ranges[i].end, HZ_PER_MHZ);
2476 }
2477 wifi_bands.WifiBandEntryNum = valid_entries;
2478
2479 /*
2480 * Per confirmation from the PMFW team, WifiBandEntryNum = 0
2481 * is a valid setting.
2482 *
2483 * Considering the scenarios below:
2484 * - At first the wifi device adds an exclusion range e.g. (2400,2500) to
2485 * BIOS and our driver gets notified. We will set WifiBandEntryNum = 1
2486 * and pass the WifiBandEntry (2400, 2500) to PMFW.
2487 *
2488 * - Later the wifi device removes the wifiband list added above and
2489 * our driver gets notified again. At this time, driver will set
2490 * WifiBandEntryNum = 0 and pass an empty WifiBandEntry list to PMFW.
2491 *
2492 * - PMFW may still need to do some uclk shadow update (e.g. switching
2493 * from shadow clock back to primary clock) on receiving this.
2494 */
2495 ret = smu_cmn_update_table(smu, SMU_TABLE_WIFIBAND, 0, &wifi_bands, true);
2496 if (ret)
2497 dev_warn(smu->adev->dev, "Failed to set wifiband!");
2498
2499 return ret;
2500 }
2501
2502 void smu_v13_0_reset_custom_level(struct smu_context *smu)
2503 {
2504 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
2505
2506 pstate_table->uclk_pstate.custom.min = 0;
2507 pstate_table->uclk_pstate.custom.max = 0;
2508 pstate_table->gfxclk_pstate.custom.min = 0;
2509 pstate_table->gfxclk_pstate.custom.max = 0;
2510 }
2511