1 /*
2 * Copyright 2020 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #define SWSMU_CODE_LAYER_L4
24
25 #include "amdgpu.h"
26 #include "amdgpu_smu.h"
27 #include "smu_cmn.h"
28 #include "soc15_common.h"
29
30 /*
31 * DO NOT use these for err/warn/info/debug messages.
32 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
33 * They are more MGPU friendly.
34 */
35 #undef pr_err
36 #undef pr_warn
37 #undef pr_info
38 #undef pr_debug
39
/* Full-content mask for the SMU response register (MP1_C2PMSG_90). */
#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL

/* PCIe link speeds; values look like 0.1 GT/s units (2.5/5/8/16/32/64 GT/s).
 * NOTE(review): units inferred from the values — confirm against callers.
 */
const int link_speed[] = {25, 50, 80, 160, 320, 640};
43
/* Expand SMU_MESSAGE_TYPES into an array of message-name strings,
 * indexed by enum smu_message_type.
 */
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};
49
/* Call a ppt_funcs callback when both the table and the callback exist;
 * yields -EINVAL when there is no ppt_funcs table and -ENOTSUPP when the
 * specific callback is not implemented.
 */
#define smu_cmn_call_asic_func(intf, smu, args...) \
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? \
	(smu)->ppt_funcs->intf(smu, ##args) : \
	-ENOTSUPP) : \
	-EINVAL)

/* Rate limit for __smu_msg_v1_print_err_limited(): at most
 * SMU_MSG_V1_DEFAULT_RATELIMIT_BURST prints per interval.
 */
#define SMU_MSG_V1_DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
#define SMU_MSG_V1_DEFAULT_RATELIMIT_BURST 10
58
smu_get_message_name(struct smu_context * smu,enum smu_message_type type)59 static const char *smu_get_message_name(struct smu_context *smu,
60 enum smu_message_type type)
61 {
62 if (type >= SMU_MSG_MAX_COUNT)
63 return "unknown smu message";
64
65 return __smu_message_names[type];
66 }
67
68 /* Redefine the SMU error codes here.
69 *
70 * Note that these definitions are redundant and should be removed
71 * when the SMU has exported a unified header file containing these
72 * macros, which header file we can just include and use the SMU's
73 * macros. At the moment, these error codes are defined by the SMU
74 * per-ASIC unfortunately, yet we're a one driver for all ASICs.
75 */
#define SMU_RESP_NONE 0			/* no response yet: SMU still busy */
#define SMU_RESP_OK 1			/* command completed successfully */
#define SMU_RESP_CMD_FAIL 0xFF		/* command completed, status failure */
#define SMU_RESP_CMD_UNKNOWN 0xFE	/* command unknown to the SMU */
#define SMU_RESP_CMD_BAD_PREREQ 0xFD	/* valid command, bad prerequisites */
#define SMU_RESP_BUSY_OTHER 0xFC	/* SMU busy with another command */
#define SMU_RESP_DEBUG_END 0xFB		/* debug response */

/* Sentinel for any response value not defined above. */
#define SMU_RESP_UNEXP (~0U)
85
/* Fire a message through the V1 debug mailbox, if the control block
 * advertises one. Writes are: argument, message id, then a zeroed
 * response register; presumably the order is significant — keep it.
 * 'adev' is used implicitly by the WREG32() macro.
 */
static int smu_msg_v1_send_debug_msg(struct smu_msg_ctl *ctl, u32 msg, u32 param)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	struct smu_msg_config *cfg = &ctl->config;

	if (!(ctl->flags & SMU_MSG_CTL_DEBUG_MAILBOX))
		return -EOPNOTSUPP;

	mutex_lock(&ctl->lock);

	WREG32(cfg->debug_param_reg, param);
	WREG32(cfg->debug_msg_reg, msg);
	WREG32(cfg->debug_resp_reg, 0);

	mutex_unlock(&ctl->lock);

	return 0;
}
104
/* Dispatch a debug message via the protocol ops, if implemented. */
static int __smu_cmn_send_debug_msg(struct smu_msg_ctl *ctl,
				    u32 msg,
				    u32 param)
{
	const struct smu_msg_ops *ops = ctl->ops;

	if (ops && ops->send_debug_msg)
		return ops->send_debug_msg(ctl, msg, param);

	return -EOPNOTSUPP;
}
114
115 /**
116 * smu_cmn_wait_for_response -- wait for response from the SMU
117 * @smu: pointer to an SMU context
118 *
119 * Wait for status from the SMU.
120 *
121 * Return 0 on success, -errno on error, indicating the execution
122 * status and result of the message being waited for. See
123 * smu_msg_v1_decode_response() for details of the -errno.
124 */
smu_cmn_wait_for_response(struct smu_context * smu)125 int smu_cmn_wait_for_response(struct smu_context *smu)
126 {
127 return smu_msg_wait_response(&smu->msg_ctl, 0);
128 }
129
130 /**
131 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
132 * @smu: pointer to an SMU context
133 * @msg: message to send
134 * @param: parameter to send to the SMU
135 * @read_arg: pointer to u32 to return a value from the SMU back
136 * to the caller
137 *
138 * Send the message @msg with parameter @param to the SMU, wait for
139 * completion of the command, and return back a value from the SMU in
140 * @read_arg pointer.
141 *
142 * Return 0 on success, -errno when a problem is encountered sending
143 * message or receiving reply. If there is a PCI bus recovery or
144 * the destination is a virtual GPU which does not allow this message
145 * type, the message is simply dropped and success is also returned.
146 * See smu_msg_v1_decode_response() for details of the -errno.
147 *
148 * If we weren't able to send the message to the SMU, we also print
149 * the error to the standard log.
150 *
151 * Command completion status is printed only if the -errno is
152 * -EREMOTEIO, indicating that the SMU returned back an
153 * undefined/unknown/unspecified result. All other cases are
154 * well-defined, not printed, but instead given back to the client to
155 * decide what further to do.
156 *
157 * The return value, @read_arg is read back regardless, to give back
158 * more information to the client, which on error would most likely be
159 * @param, but we can't assume that. This also eliminates more
160 * conditionals.
161 */
smu_cmn_send_smc_msg_with_param(struct smu_context * smu,enum smu_message_type msg,uint32_t param,uint32_t * read_arg)162 int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
163 enum smu_message_type msg,
164 uint32_t param,
165 uint32_t *read_arg)
166 {
167 struct smu_msg_ctl *ctl = &smu->msg_ctl;
168 struct smu_msg_args args = {
169 .msg = msg,
170 .args[0] = param,
171 .num_args = 1,
172 .num_out_args = read_arg ? 1 : 0,
173 .flags = 0,
174 .timeout = 0,
175 };
176 int ret;
177
178 ret = ctl->ops->send_msg(ctl, &args);
179
180 if (read_arg)
181 *read_arg = args.out_args[0];
182
183 return ret;
184 }
185
/* Send a message that carries no parameter (parameter is 0). */
int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu, msg, 0, read_arg);
}
195
/* Send a debug message that carries no parameter (parameter is 0). */
int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
			       uint32_t msg)
{
	struct smu_msg_ctl *ctl = &smu->msg_ctl;

	return __smu_cmn_send_debug_msg(ctl, msg, 0);
}
201
/* Send a debug message with one parameter. */
int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
					  uint32_t msg, uint32_t param)
{
	struct smu_msg_ctl *ctl = &smu->msg_ctl;

	return __smu_cmn_send_debug_msg(ctl, msg, param);
}
207
/* Map a raw SMU response register value to a negative errno (0 on OK). */
static int smu_msg_v1_decode_response(u32 resp)
{
	switch (resp) {
	case SMU_RESP_OK:
		return 0;
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command. */
		return -ETIME;
	case SMU_RESP_CMD_FAIL:
		/* Command completed successfully, but the command
		 * status was failure.
		 */
		return -EIO;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU. */
		return -EOPNOTSUPP;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites. */
		return -EINVAL;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry in 10 us.
		 */
		return -EBUSY;
	default:
		/* Unknown or debug response from the SMU. */
		return -EREMOTEIO;
	}
}
252
/* Poll the response register until its content field is non-zero or the
 * timeout (in microseconds) expires. Returns the last register value
 * read; SMU_RESP_NONE (0) means the SMU never responded.
 * 'adev' is used implicitly by the RREG32() macro.
 */
static u32 __smu_msg_v1_poll_stat(struct smu_msg_ctl *ctl, u32 timeout_us)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	struct smu_msg_config *cfg = &ctl->config;
	u32 timeout = timeout_us ? timeout_us : ctl->default_timeout;
	/* Fix: initialize so a zero effective timeout (both timeout_us
	 * and default_timeout being 0) cannot return an uninitialized
	 * value; 0 decodes as SMU_RESP_NONE / -ETIME.
	 */
	u32 reg = 0;

	for (; timeout > 0; timeout--) {
		reg = RREG32(cfg->resp_reg);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;
		udelay(1);
	}

	return reg;
}
269
/* Program a message into the V1 mailbox: clear the response register,
 * write the input arguments, then write the ASIC message index last.
 * Caller holds ctl->lock and has validated num_args against
 * cfg->num_arg_regs (see smu_msg_v1_send_msg()).
 * 'adev' is used implicitly by the WREG32() macro.
 */
static void __smu_msg_v1_send(struct smu_msg_ctl *ctl, u16 index,
			      struct smu_msg_args *args)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	struct smu_msg_config *cfg = &ctl->config;
	int i;

	WREG32(cfg->resp_reg, 0);
	for (i = 0; i < args->num_args; i++)
		WREG32(cfg->arg_regs[i], args->args[i]);
	WREG32(cfg->msg_reg, index);
}
282
__smu_msg_v1_read_out_args(struct smu_msg_ctl * ctl,struct smu_msg_args * args)283 static void __smu_msg_v1_read_out_args(struct smu_msg_ctl *ctl,
284 struct smu_msg_args *args)
285 {
286 struct amdgpu_device *adev = ctl->smu->adev;
287 int i;
288
289 for (i = 0; i < args->num_out_args; i++)
290 args->out_args[i] = RREG32(ctl->config.arg_regs[i]);
291 }
292
/* Rate-limited error dump: message/response registers plus the input
 * arguments re-read from hardware. The ratelimit state is static, so
 * the limit is shared across all callers of this function.
 */
static void __smu_msg_v1_print_err_limited(struct smu_msg_ctl *ctl,
					   struct smu_msg_args *args,
					   char *err_msg)
{
	static DEFINE_RATELIMIT_STATE(_rs,
				      SMU_MSG_V1_DEFAULT_RATELIMIT_INTERVAL,
				      SMU_MSG_V1_DEFAULT_RATELIMIT_BURST);
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;

	if (__ratelimit(&_rs)) {
		u32 in[SMU_MSG_MAX_ARGS];
		int i;

		dev_err(adev->dev, "%s msg_reg: %x resp_reg: %x", err_msg,
			RREG32(ctl->config.msg_reg),
			RREG32(ctl->config.resp_reg));
		if (args->num_args > 0) {
			/* Dump what the hardware actually latched, not
			 * the values cached in args->args.
			 */
			for (i = 0; i < args->num_args; i++)
				in[i] = RREG32(ctl->config.arg_regs[i]);
			print_hex_dump(KERN_ERR, "in params:", DUMP_PREFIX_NONE,
				       16, 4, in, args->num_args * sizeof(u32),
				       false);
		}
	}
}
319
/* Log a human-readable diagnostic for a failed/odd SMU response.
 * OK and CMD_FAIL are silent here; BUSY is silenced for
 * SMU_MSG_GetBadPageCount (expected to be busy-polled); SMU_RESP_UNEXP
 * additionally checks the PCI bus before claiming a bus error.
 */
static void __smu_msg_v1_print_error(struct smu_msg_ctl *ctl,
				     u32 resp,
				     struct smu_msg_args *args)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	int index = ctl->message_map[args->msg].map_to;

	switch (resp) {
	case SMU_RESP_NONE:
		__smu_msg_v1_print_err_limited(ctl, args, "SMU: No response");
		break;
	case SMU_RESP_OK:
		break;
	case SMU_RESP_CMD_FAIL:
		/* Well-defined failure status; left to the caller. */
		break;
	case SMU_RESP_CMD_UNKNOWN:
		__smu_msg_v1_print_err_limited(ctl, args,
					       "SMU: unknown command");
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		__smu_msg_v1_print_err_limited(
			ctl, args, "SMU: valid command, bad prerequisites");
		break;
	case SMU_RESP_BUSY_OTHER:
		if (args->msg != SMU_MSG_GetBadPageCount)
			__smu_msg_v1_print_err_limited(ctl, args,
						       "SMU: I'm very busy");
		break;
	case SMU_RESP_DEBUG_END:
		__smu_msg_v1_print_err_limited(ctl, args, "SMU: Debug Err");
		break;
	case SMU_RESP_UNEXP:
		if (amdgpu_device_bus_status_check(adev)) {
			dev_err(adev->dev,
				"SMU: bus error for message: %s(%d) response:0x%08X ",
				smu_get_message_name(smu, args->msg), index,
				resp);
			if (args->num_args > 0)
				print_hex_dump(KERN_ERR,
					       "in params:", DUMP_PREFIX_NONE,
					       16, 4, args->args,
					       args->num_args * sizeof(u32),
					       false);
		}
		break;
	default:
		__smu_msg_v1_print_err_limited(ctl, args,
					       "SMU: unknown response");
		break;
	}
}
372
/* RAS-priority message filter, effective only when the firmware
 * advertises SMU_FW_CAP_RAS_PRI. During a RAS fatal-error condition,
 * non-priority messages are rejected with -EACCES; priority messages
 * (and any message sent while the error is active) may skip the
 * pre-send poll if the response register is already clear.
 *
 * Return: 0 to proceed with the send, -EACCES to drop the message.
 */
static int __smu_msg_v1_ras_filter(struct smu_msg_ctl *ctl,
				   enum smu_message_type msg, u32 msg_flags,
				   bool *skip_pre_poll)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	bool fed_status;
	u32 reg;

	if (!(smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI))
		return 0;

	fed_status = amdgpu_ras_get_fed_status(adev);

	/* Block non-RAS-priority messages during RAS error */
	if (fed_status && !(msg_flags & SMU_MSG_RAS_PRI)) {
		dev_dbg(adev->dev, "RAS error detected, skip sending %s",
			smu_get_message_name(smu, msg));
		return -EACCES;
	}

	/* Skip pre-poll for priority messages or during RAS error */
	if ((msg_flags & SMU_MSG_NO_PRECHECK) || fed_status) {
		reg = RREG32(ctl->config.resp_reg);
		dev_dbg(adev->dev,
			"Sending priority message %s response status: %x",
			smu_get_message_name(smu, msg), reg);
		/* Only skip when the mailbox is actually idle. */
		if (reg == 0)
			*skip_pre_poll = true;
	}

	return 0;
}
406
/**
 * smu_msg_v1_send_msg - Complete V1 protocol with all filtering
 * @ctl: Message control block
 * @args: Message arguments (inputs, expected output count, flags, timeout)
 *
 * Full send path: validate and translate the message, apply VF and RAS
 * filters, wait out any in-flight message, program the mailbox, then
 * (unless SMU_MSG_FLAG_ASYNC) wait for and decode the response and read
 * back the output arguments. Takes ctl->lock unless the caller already
 * holds it (SMU_MSG_FLAG_LOCK_HELD).
 *
 * Return: 0 on success, negative errno on failure
 */
static int smu_msg_v1_send_msg(struct smu_msg_ctl *ctl,
			       struct smu_msg_args *args)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	const struct cmn2asic_msg_mapping *mapping;
	u32 reg, msg_flags;
	int ret, index;
	bool skip_pre_poll = false;
	bool lock_held = args->flags & SMU_MSG_FLAG_LOCK_HELD;

	/* Early exit if no HW access */
	if (adev->no_hw_access)
		return 0;

	/* Message index translation */
	if (args->msg >= SMU_MSG_MAX_COUNT || !ctl->message_map)
		return -EINVAL;

	/* Bound both directions against the available argument registers. */
	if (args->num_args > ctl->config.num_arg_regs ||
	    args->num_out_args > ctl->config.num_arg_regs)
		return -EINVAL;

	mapping = &ctl->message_map[args->msg];
	if (!mapping->valid_mapping)
		return -EINVAL;

	msg_flags = mapping->flags;
	index = mapping->map_to;

	/* VF filter - skip messages not valid for VF (dropped as success) */
	if (amdgpu_sriov_vf(adev) && !(msg_flags & SMU_MSG_VF_FLAG))
		return 0;

	if (!lock_held)
		mutex_lock(&ctl->lock);

	/* RAS priority filter */
	ret = __smu_msg_v1_ras_filter(ctl, args->msg, msg_flags,
				      &skip_pre_poll);
	if (ret)
		goto out;

	/* FW state checks */
	if (smu->smc_fw_state == SMU_FW_HANG) {
		dev_err(adev->dev,
			"SMU is in hanged state, failed to send smu message!\n");
		ret = -EREMOTEIO;
		goto out;
	} else if (smu->smc_fw_state == SMU_FW_INIT) {
		/* First message after init: nothing in flight to wait on. */
		skip_pre_poll = true;
		smu->smc_fw_state = SMU_FW_RUNTIME;
	}

	/* Pre-poll: ensure previous message completed */
	if (!skip_pre_poll) {
		reg = __smu_msg_v1_poll_stat(ctl, args->timeout);
		ret = smu_msg_v1_decode_response(reg);
		if (reg == SMU_RESP_NONE || ret == -EREMOTEIO) {
			__smu_msg_v1_print_error(ctl, reg, args);
			goto out;
		}
	}

	/* Send message */
	__smu_msg_v1_send(ctl, (u16)index, args);

	/* Post-poll (skip if ASYNC) */
	if (args->flags & SMU_MSG_FLAG_ASYNC) {
		ret = 0;
		goto out;
	}

	reg = __smu_msg_v1_poll_stat(ctl, args->timeout);
	ret = smu_msg_v1_decode_response(reg);

	/* FW state update on fatal error */
	if (ret == -EREMOTEIO) {
		smu->smc_fw_state = SMU_FW_HANG;
		__smu_msg_v1_print_error(ctl, reg, args);
	} else if (ret != 0) {
		__smu_msg_v1_print_error(ctl, reg, args);
	}

	/* Read output args (on success only) and trace the exchange */
	if (ret == 0 && args->num_out_args > 0) {
		__smu_msg_v1_read_out_args(ctl, args);
		dev_dbg(adev->dev, "smu send message: %s(%d) resp : 0x%08x",
			smu_get_message_name(smu, args->msg), index, reg);
		if (args->num_args > 0)
			print_hex_dump_debug("in params:", DUMP_PREFIX_NONE, 16,
					     4, args->args,
					     args->num_args * sizeof(u32),
					     false);
		print_hex_dump_debug("out params:", DUMP_PREFIX_NONE, 16, 4,
				     args->out_args,
				     args->num_out_args * sizeof(u32), false);
	} else {
		dev_dbg(adev->dev, "smu send message: %s(%d), resp: 0x%08x\n",
			smu_get_message_name(smu, args->msg), index, reg);
		if (args->num_args > 0)
			print_hex_dump_debug("in params:", DUMP_PREFIX_NONE, 16,
					     4, args->args,
					     args->num_args * sizeof(u32),
					     false);
	}

out:
	/* Debug halt on error */
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    ret) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	if (!lock_held)
		mutex_unlock(&ctl->lock);
	return ret;
}
533
/* Poll and decode the SMU response; mark the firmware hung on an
 * undefined response and honor the halt-on-error debug mask (a plain
 * timeout does not trigger the halt).
 */
static int smu_msg_v1_wait_response(struct smu_msg_ctl *ctl, u32 timeout_us)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	u32 status = __smu_msg_v1_poll_stat(ctl, timeout_us);
	int err = smu_msg_v1_decode_response(status);

	if (err == -EREMOTEIO)
		smu->smc_fw_state = SMU_FW_HANG;

	if (err && err != -ETIME &&
	    unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR)) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	return err;
}
555
/* V1 mailbox protocol implementation, installed as ctl->ops. */
const struct smu_msg_ops smu_msg_v1_ops = {
	.send_msg = smu_msg_v1_send_msg,
	.wait_response = smu_msg_v1_wait_response,
	.decode_response = smu_msg_v1_decode_response,
	.send_debug_msg = smu_msg_v1_send_debug_msg,
};
562
/* Protocol-agnostic wait: delegate to the installed ops. */
int smu_msg_wait_response(struct smu_msg_ctl *ctl, u32 timeout_us)
{
	const struct smu_msg_ops *ops = ctl->ops;

	return ops->wait_response(ctl, timeout_us);
}
567
568 /**
569 * smu_msg_send_async_locked - Send message asynchronously, caller holds lock
570 * @ctl: Message control block
571 * @msg: Message type
572 * @param: Message parameter
573 *
574 * Send an SMU message without waiting for response. Caller must hold ctl->lock
575 * and call smu_msg_wait_response() later to get the result.
576 *
577 * Return: 0 on success, negative errno on failure
578 */
smu_msg_send_async_locked(struct smu_msg_ctl * ctl,enum smu_message_type msg,u32 param)579 int smu_msg_send_async_locked(struct smu_msg_ctl *ctl,
580 enum smu_message_type msg, u32 param)
581 {
582 struct smu_msg_args args = {
583 .msg = msg,
584 .args[0] = param,
585 .num_args = 1,
586 .num_out_args = 0,
587 .flags = SMU_MSG_FLAG_ASYNC | SMU_MSG_FLAG_LOCK_HELD,
588 .timeout = 0,
589 };
590
591 return ctl->ops->send_msg(ctl, &args);
592 }
593
/* Translate a common (ASIC-independent) index into the ASIC-specific
 * one via the per-ASIC mapping tables. Messages carry extra VF
 * filtering; workload lookups report an invalid mapping as -ENOTSUPP,
 * all other tables as -EINVAL.
 */
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	const struct cmn2asic_mapping *map;
	uint32_t limit;
	int invalid_err = -EINVAL;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG: {
		struct cmn2asic_msg_mapping msg_mapping;

		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->msg_ctl.message_map)
			return -EINVAL;

		msg_mapping = smu->msg_ctl.message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		/* VFs may only use messages explicitly flagged for them. */
		if (amdgpu_sriov_vf(smu->adev) &&
		    !(msg_mapping.flags & SMU_MSG_VF_FLAG))
			return -EACCES;

		return msg_mapping.map_to;
	}
	case CMN2ASIC_MAPPING_CLK:
		map = smu->clock_map;
		limit = SMU_CLK_COUNT;
		break;
	case CMN2ASIC_MAPPING_FEATURE:
		map = smu->feature_map;
		limit = SMU_FEATURE_COUNT;
		break;
	case CMN2ASIC_MAPPING_TABLE:
		map = smu->table_map;
		limit = SMU_TABLE_COUNT;
		break;
	case CMN2ASIC_MAPPING_PWR:
		map = smu->pwr_src_map;
		limit = SMU_POWER_SOURCE_COUNT;
		break;
	case CMN2ASIC_MAPPING_WORKLOAD:
		map = smu->workload_map;
		limit = PP_SMC_POWER_PROFILE_COUNT;
		invalid_err = -ENOTSUPP;
		break;
	default:
		return -EINVAL;
	}

	/* Common tail for the simple table lookups. */
	if (index >= limit || !map)
		return -EINVAL;

	if (!map[index].valid_mapping)
		return invalid_err;

	return map[index].map_to;
}
676
smu_cmn_feature_is_supported(struct smu_context * smu,enum smu_feature_mask mask)677 int smu_cmn_feature_is_supported(struct smu_context *smu,
678 enum smu_feature_mask mask)
679 {
680 int feature_id;
681
682 feature_id = smu_cmn_to_asic_specific_index(smu,
683 CMN2ASIC_MAPPING_FEATURE,
684 mask);
685 if (feature_id < 0)
686 return 0;
687
688 return smu_feature_list_is_set(smu, SMU_FEATURE_LIST_SUPPORTED,
689 feature_id);
690 }
691
/* Query the currently-enabled feature bits through the ASIC's
 * get_enabled_mask callback (-EINVAL/-ENOTSUPP when unavailable).
 */
static int __smu_get_enabled_features(struct smu_context *smu,
				      struct smu_feature_bits *enabled_features)
{
	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}
697
/* Check whether a common feature is currently enabled on the SMU.
 * Returns 1 when enabled, 0 when disabled, unmapped, or when the
 * enabled mask cannot be retrieved.
 */
int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_feature_bits enabled_features;
	int feature_id;

	if (__smu_get_enabled_features(smu, &enabled_features)) {
		dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
		return 0;
	}

	/*
	 * For Renoir and Cyan Skillfish, they are assumed to have all features
	 * enabled. Also considering they have no feature_map available, the
	 * check here can avoid unwanted feature_map check below.
	 */
	if (smu_feature_bits_full(&enabled_features,
				  smu->smu_feature.feature_num))
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return smu_feature_bits_is_set(&enabled_features, feature_id);
}
727
smu_cmn_clk_dpm_is_enabled(struct smu_context * smu,enum smu_clk_type clk_type)728 bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
729 enum smu_clk_type clk_type)
730 {
731 enum smu_feature_mask feature_id = 0;
732
733 switch (clk_type) {
734 case SMU_MCLK:
735 case SMU_UCLK:
736 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
737 break;
738 case SMU_GFXCLK:
739 case SMU_SCLK:
740 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
741 break;
742 case SMU_SOCCLK:
743 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
744 break;
745 case SMU_VCLK:
746 case SMU_VCLK1:
747 feature_id = SMU_FEATURE_DPM_VCLK_BIT;
748 break;
749 case SMU_DCLK:
750 case SMU_DCLK1:
751 feature_id = SMU_FEATURE_DPM_DCLK_BIT;
752 break;
753 case SMU_FCLK:
754 feature_id = SMU_FEATURE_DPM_FCLK_BIT;
755 break;
756 default:
757 return true;
758 }
759
760 if (!smu_cmn_feature_is_enabled(smu, feature_id))
761 return false;
762
763 return true;
764 }
765
/* Read the 64-bit enabled-feature mask from the SMU, using the single
 * GetEnabledSmuFeatures message when mapped, otherwise falling back to
 * the split High/Low message pair.
 * NOTE(review): the mapped-message test is `index > 0`, not `>= 0` —
 * presumably a map_to of 0 never occurs for this message; confirm.
 */
int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     struct smu_feature_bits *feature_mask)
{
	uint32_t features[2];
	int ret = 0, index = 0;

	if (!feature_mask)
		return -EINVAL;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GetEnabledSmuFeatures);
	if (index > 0) {
		/* Parameter selects which 32-bit half to fetch. */
		ret = smu_cmn_send_smc_msg_with_param(
			smu, SMU_MSG_GetEnabledSmuFeatures, 0, &features[0]);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(
			smu, SMU_MSG_GetEnabledSmuFeatures, 1, &features[1]);
	} else {
		ret = smu_cmn_send_smc_msg(
			smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &features[1]);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(
			smu, SMU_MSG_GetEnabledSmuFeaturesLow, &features[0]);
	}

	if (!ret)
		smu_feature_bits_from_arr32(feature_mask, features,
					    SMU_FEATURE_NUM_DEFAULT);

	return ret;
}
802
/* Translate an ASIC-dependent throttler status word into the
 * ASIC-independent 64-bit representation via the per-ASIC bit map.
 * Only the low 32 bits of @dep_status are scanned.
 */
uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}
815
/* Enable or disable the features named in @feature_mask, sending the
 * low 32 bits first and then the high 32 bits (both halves are always
 * sent, matching the original behavior).
 */
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	enum smu_message_type msg_low, msg_high;
	int ret;

	if (enabled) {
		msg_low = SMU_MSG_EnableSmuFeaturesLow;
		msg_high = SMU_MSG_EnableSmuFeaturesHigh;
	} else {
		msg_low = SMU_MSG_DisableSmuFeaturesLow;
		msg_high = SMU_MSG_DisableSmuFeaturesHigh;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, msg_low,
					      lower_32_bits(feature_mask),
					      NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu, msg_high,
					       upper_32_bits(feature_mask),
					       NULL);
}
848
/* Enable or disable one common feature by translating it to its ASIC
 * bit and updating that single bit.
 */
int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	int fid = smu_cmn_to_asic_specific_index(smu,
						 CMN2ASIC_MAPPING_FEATURE,
						 mask);

	if (fid < 0)
		return -EINVAL;

	return smu_cmn_feature_update_enable_state(smu, 1ULL << fid, enable);
}
865
/* Expand SMU_FEATURE_MASKS into an array of feature-name strings,
 * indexed by enum smu_feature_mask.
 */
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};
871
smu_get_feature_name(struct smu_context * smu,enum smu_feature_mask feature)872 static const char *smu_get_feature_name(struct smu_context *smu,
873 enum smu_feature_mask feature)
874 {
875 if (feature >= SMU_FEATURE_COUNT)
876 return "unknown smu feature";
877 return __smu_feature_names[feature];
878 }
879
/* Format the pp_features sysfs listing into @buf: the raw 64-bit mask
 * followed by one row per mapped feature, ordered by ASIC bit index.
 * Returns the number of bytes written (0 when the mask is unavailable).
 * NOTE(review): sort_feature is int8_t, so common feature indices above
 * 127 would not round-trip — presumably SMU_FEATURE_COUNT stays below
 * that; confirm.
 */
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
	struct smu_feature_bits feature_mask;
	uint32_t features[2];
	int i, feature_index;
	uint32_t count = 0;
	size_t size = 0;

	if (__smu_get_enabled_features(smu, &feature_mask))
		return 0;

	/* TBD: Need to handle for > 64 bits */
	smu_feature_bits_to_arr32(&feature_mask, features, 64);
	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     features[1], features[0]);

	/* -1 marks ASIC bits with no common-feature mapping. */
	memset(sort_feature, -1, sizeof(sort_feature));

	/* Invert the mapping: ASIC bit index -> common feature index. */
	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (feature_index = 0; feature_index < smu->smu_feature.feature_num;
	     feature_index++) {
		if (sort_feature[feature_index] < 0)
			continue;

		size += sysfs_emit_at(
			buf, size, "%02d. %-20s (%2d) : %s\n", count++,
			smu_get_feature_name(smu, sort_feature[feature_index]),
			feature_index,
			smu_feature_bits_is_set(&feature_mask, feature_index) ?
				"enabled" :
				"disabled");
	}

	return size;
}
929
/* Apply a new 64-bit feature mask: enable the bits newly set in
 * @new_mask and disable the bits newly cleared, relative to the mask
 * currently reported by the SMU.
 */
int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	struct smu_feature_bits feature_mask;
	uint64_t feature_mask_u64;
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;

	ret = __smu_get_enabled_features(smu, &feature_mask);
	if (ret)
		return ret;

	/* Reinterpret the first 64 bits of the feature bitmap.
	 * NOTE(review): type-punning cast — relies on the kernel's
	 * -fno-strict-aliasing and on bits[] holding >= 64 bits.
	 */
	feature_mask_u64 = *(uint64_t *)feature_mask.bits;
	feature_2_enabled = ~feature_mask_u64 & new_mask;
	feature_2_disabled = feature_mask_u64 & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}
964
965 /**
966 * smu_cmn_disable_all_features_with_exception - disable all dpm features
967 * except this specified by
968 * @mask
969 *
970 * @smu: smu_context pointer
971 * @mask: the dpm feature which should not be disabled
972 * SMU_FEATURE_COUNT: no exception, all dpm features
973 * to disable
974 *
975 * Returns:
976 * 0 on success or a negative error code on failure.
977 */
smu_cmn_disable_all_features_with_exception(struct smu_context * smu,enum smu_feature_mask mask)978 int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
979 enum smu_feature_mask mask)
980 {
981 uint64_t features_to_disable = U64_MAX;
982 int skipped_feature_id;
983
984 if (mask != SMU_FEATURE_COUNT) {
985 skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
986 CMN2ASIC_MAPPING_FEATURE,
987 mask);
988 if (skipped_feature_id < 0)
989 return -EINVAL;
990
991 features_to_disable &= ~(1ULL << skipped_feature_id);
992 }
993
994 return smu_cmn_feature_update_enable_state(smu,
995 features_to_disable,
996 0);
997 }
998
/* Return the driver-interface and/or firmware version, serving from the
 * cached copies when both have already been queried and refreshing the
 * cache otherwise. Either output pointer may be NULL, but not both.
 */
int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret;

	if (!if_version && !smu_version)
		return -EINVAL;

	/* Fast path: both versions already cached. */
	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;
		if (smu_version)
			*smu_version = smu->smc_fw_version;
		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion,
					   if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion,
					   smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return 0;
}
1037
/* Transfer a table between driver memory and the SMU via the shared
 * driver table: copy into the shared buffer and flush HDP for
 * driver->SMU, or invalidate HDP and copy out for SMU->driver.
 * @argument is packed into the upper 16 bits of the message parameter.
 */
int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;
	if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush hdp cache: to guard the content seen by
		 * GPU is consitent with CPU.
		 */
		amdgpu_hdp_flush(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		/* Invalidate HDP so the CPU sees what the SMU wrote. */
		amdgpu_hdp_invalidate(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}
1081
smu_cmn_write_watermarks_table(struct smu_context * smu)1082 int smu_cmn_write_watermarks_table(struct smu_context *smu)
1083 {
1084 void *watermarks_table = smu->smu_table.watermarks_table;
1085
1086 if (!watermarks_table)
1087 return -EINVAL;
1088
1089 return smu_cmn_update_table(smu,
1090 SMU_TABLE_WATERMARKS,
1091 0,
1092 watermarks_table,
1093 true);
1094 }
1095
smu_cmn_write_pptable(struct smu_context * smu)1096 int smu_cmn_write_pptable(struct smu_context *smu)
1097 {
1098 void *pptable = smu->smu_table.driver_pptable;
1099
1100 return smu_cmn_update_table(smu,
1101 SMU_TABLE_PPTABLE,
1102 0,
1103 pptable,
1104 true);
1105 }
1106
/**
 * smu_cmn_get_metrics_table - Copy the (possibly cached) SMU metrics table.
 * @smu: SMU context
 * @metrics_table: optional destination buffer for the metrics snapshot
 * @bypass_cache: force a fresh read from the firmware
 *
 * The in-driver copy is considered fresh for 1 ms; within that window the
 * firmware is not queried again unless @bypass_cache is set.
 *
 * Return: 0 on success, negative error code on failure.
 */
int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	bool cache_stale;
	int ret;

	cache_stale = bypass_cache ||
		      !smu_table->metrics_time ||
		      time_after(jiffies,
				 smu_table->metrics_time + msecs_to_jiffies(1));

	if (cache_stale) {
		ret = smu_cmn_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
					   smu_table->metrics_table, false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}
1136
smu_cmn_get_combo_pptable(struct smu_context * smu)1137 int smu_cmn_get_combo_pptable(struct smu_context *smu)
1138 {
1139 void *pptable = smu->smu_table.combo_pptable;
1140
1141 return smu_cmn_update_table(smu,
1142 SMU_TABLE_COMBO_PPTABLE,
1143 0,
1144 pptable,
1145 false);
1146 }
1147
smu_cmn_set_mp1_state(struct smu_context * smu,enum pp_mp1_state mp1_state)1148 int smu_cmn_set_mp1_state(struct smu_context *smu,
1149 enum pp_mp1_state mp1_state)
1150 {
1151 enum smu_message_type msg;
1152 int ret;
1153
1154 switch (mp1_state) {
1155 case PP_MP1_STATE_SHUTDOWN:
1156 msg = SMU_MSG_PrepareMp1ForShutdown;
1157 break;
1158 case PP_MP1_STATE_UNLOAD:
1159 msg = SMU_MSG_PrepareMp1ForUnload;
1160 break;
1161 case PP_MP1_STATE_RESET:
1162 msg = SMU_MSG_PrepareMp1ForReset;
1163 break;
1164 case PP_MP1_STATE_NONE:
1165 default:
1166 return 0;
1167 }
1168
1169 ret = smu_cmn_send_smc_msg(smu, msg, NULL);
1170 if (ret)
1171 dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
1172
1173 return ret;
1174 }
1175
smu_cmn_is_audio_func_enabled(struct amdgpu_device * adev)1176 bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
1177 {
1178 struct pci_dev *p = NULL;
1179 bool snd_driver_loaded;
1180
1181 /*
1182 * If the ASIC comes with no audio function, we always assume
1183 * it is "enabled".
1184 */
1185 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
1186 adev->pdev->bus->number, 1);
1187 if (!p)
1188 return true;
1189
1190 snd_driver_loaded = pci_is_enabled(p) ? true : false;
1191
1192 pci_dev_put(p);
1193
1194 return snd_driver_loaded;
1195 }
1196
smu_soc_policy_get_desc(struct smu_dpm_policy * policy,int level)1197 static char *smu_soc_policy_get_desc(struct smu_dpm_policy *policy, int level)
1198 {
1199 if (level < 0 || !(policy->level_mask & BIT(level)))
1200 return "Invalid";
1201
1202 switch (level) {
1203 case SOC_PSTATE_DEFAULT:
1204 return "soc_pstate_default";
1205 case SOC_PSTATE_0:
1206 return "soc_pstate_0";
1207 case SOC_PSTATE_1:
1208 return "soc_pstate_1";
1209 case SOC_PSTATE_2:
1210 return "soc_pstate_2";
1211 }
1212
1213 return "Invalid";
1214 }
1215
/* Shared descriptor for the generic SoC pstate policy; installed into a
 * policy by smu_cmn_generic_soc_policy_desc().
 */
static struct smu_dpm_policy_desc pstate_policy_desc = {
	.name = STR_SOC_PSTATE_POLICY,
	.get_desc = smu_soc_policy_get_desc,
};
1220
smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy * policy)1221 void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy)
1222 {
1223 policy->desc = &pstate_policy_desc;
1224 }
1225
smu_xgmi_plpd_policy_get_desc(struct smu_dpm_policy * policy,int level)1226 static char *smu_xgmi_plpd_policy_get_desc(struct smu_dpm_policy *policy,
1227 int level)
1228 {
1229 if (level < 0 || !(policy->level_mask & BIT(level)))
1230 return "Invalid";
1231
1232 switch (level) {
1233 case XGMI_PLPD_DISALLOW:
1234 return "plpd_disallow";
1235 case XGMI_PLPD_DEFAULT:
1236 return "plpd_default";
1237 case XGMI_PLPD_OPTIMIZED:
1238 return "plpd_optimized";
1239 }
1240
1241 return "Invalid";
1242 }
1243
/* Shared descriptor for the XGMI per-link power-down policy; installed
 * into a policy by smu_cmn_generic_plpd_policy_desc().
 */
static struct smu_dpm_policy_desc xgmi_plpd_policy_desc = {
	.name = STR_XGMI_PLPD_POLICY,
	.get_desc = smu_xgmi_plpd_policy_get_desc,
};
1248
smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy * policy)1249 void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
1250 {
1251 policy->desc = &xgmi_plpd_policy_desc;
1252 }
1253
/**
 * smu_cmn_get_backend_workload_mask - Translate a generic workload mask
 * into the ASIC-specific backend mask.
 * @smu: SMU context
 * @workload_mask: bitmask of PP_SMC_POWER_PROFILE_* bits
 * @backend_workload_mask: out bitmask of WORKLOAD_PPLIB_*_BIT bits
 *
 * Profiles with no ASIC-specific mapping are silently dropped.
 */
void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
				       u32 workload_mask,
				       u32 *backend_workload_mask)
{
	u32 mode;
	int asic_bit;

	*backend_workload_mask = 0;

	for (mode = 0; mode < PP_SMC_POWER_PROFILE_COUNT; mode++) {
		if (!(workload_mask & BIT(mode)))
			continue;

		/* Convert PP_SMC_POWER_PROFILE_* to WORKLOAD_PPLIB_*_BIT. */
		asic_bit = smu_cmn_to_asic_specific_index(smu,
							  CMN2ASIC_MAPPING_WORKLOAD,
							  mode);
		if (asic_bit < 0)
			continue;

		*backend_workload_mask |= BIT(asic_bit);
	}
}
1278
/* Treat two clocks as equal when they differ by at most 25 MHz. */
static inline bool smu_cmn_freqs_match(uint32_t freq1, uint32_t freq2)
{
	int delta = (int)freq1 - (int)freq2;

	return delta >= -25 && delta <= 25;
}
1284
/**
 * smu_cmn_print_dpm_clk_levels - Emit a DPM clock table in sysfs format.
 * @smu: SMU context
 * @dpm_table: DPM table to print
 * @cur_clk: current clock in MHz; the matching level is starred
 * @buf: sysfs output buffer
 * @offset: in/out byte offset into @buf
 *
 * When @cur_clk is below half of the lowest level, a deep-sleep line
 * ("S: ...") is emitted first and the level indices shift by one.
 * Fine-grained tables print min/max (plus the current clock as a middle
 * level when it matches neither).
 *
 * Return: 0 on success, -EINVAL on NULL @dpm_table or @buf.
 */
int smu_cmn_print_dpm_clk_levels(struct smu_context *smu,
				 struct smu_dpm_table *dpm_table,
				 uint32_t cur_clk, char *buf, int *offset)
{
	uint32_t clk_min, clk_max, base_idx, nr_levels;
	uint32_t levels[3];
	int size, cur_lvl = 0, i;
	bool fine_grained, deep_sleep;

	if (!dpm_table || !buf)
		return -EINVAL;

	base_idx = 0;
	size = *offset;
	fine_grained = dpm_table->flags & SMU_DPM_TABLE_FINE_GRAINED;
	clk_min = SMU_DPM_TABLE_MIN(dpm_table);
	clk_max = SMU_DPM_TABLE_MAX(dpm_table);

	/* Deep sleep: current clock below half the lowest DPM level.
	 * TBD: cur_clk = 0 as GFXOFF.
	 */
	deep_sleep = cur_clk < clk_min / 2;
	if (deep_sleep) {
		size += sysfs_emit_at(buf, size, "S: %uMhz *\n", cur_clk);
		base_idx = 1;
	}

	if (!fine_grained) {
		for (i = 0; i < dpm_table->count; i++) {
			bool starred = !deep_sleep &&
				       smu_cmn_freqs_match(cur_clk,
							   dpm_table->dpm_levels[i].value);

			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
					      base_idx + i,
					      dpm_table->dpm_levels[i].value,
					      starred ? "*" : "");
		}
	} else {
		nr_levels = 2;
		levels[0] = clk_min;
		levels[1] = clk_max;

		if (!deep_sleep) {
			if (smu_cmn_freqs_match(cur_clk, clk_min)) {
				cur_lvl = 0;
			} else if (smu_cmn_freqs_match(cur_clk, clk_max)) {
				cur_lvl = 1;
			} else {
				/* NOTE: use index '1' to show current clock value */
				cur_lvl = 1;
				nr_levels = 3;
				levels[1] = cur_clk;
				levels[2] = clk_max;
			}
		}

		for (i = 0; i < nr_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
					      base_idx + i, levels[i],
					      (!deep_sleep && i == cur_lvl) ? "*" : "");
	}

	*offset = size;

	return 0;
}
1355
/**
 * smu_cmn_print_pcie_levels - Emit the PCIe DPM table in sysfs format.
 * @smu: SMU context
 * @pcie_table: PCIe link speed/width table to print
 * @cur_gen: current PCIe gen index; starred together with @cur_lane
 * @cur_lane: current PCIe lane index
 * @buf: sysfs output buffer
 * @offset: in/out byte offset into @buf
 *
 * Gen/lane indices outside the known ranges print as empty strings.
 *
 * Return: 0 on success, -EINVAL on NULL @pcie_table or @buf.
 */
int smu_cmn_print_pcie_levels(struct smu_context *smu,
			      struct smu_pcie_table *pcie_table,
			      uint32_t cur_gen, uint32_t cur_lane, char *buf,
			      int *offset)
{
	/* Index 0..5 <=> PCIe gen 1..6 link speeds. */
	static const char * const gen_str[] = {
		"2.5GT/s,", "5.0GT/s,", "8.0GT/s,",
		"16.0GT/s,", "32.0GT/s,", "64.0GT/s,",
	};
	/* Index 1..7 <=> lane-width codes; index 0 is unused. */
	static const char * const lane_str[] = {
		"", "x1", "x2", "x4", "x8", "x12", "x16", "x32",
	};
	int size, i;

	if (!pcie_table || !buf)
		return -EINVAL;

	size = *offset;

	for (i = 0; i < pcie_table->lclk_levels; i++) {
		uint32_t gen = pcie_table->pcie_gen[i];
		uint32_t lane = pcie_table->pcie_lane[i];
		bool starred = (cur_gen == pcie_table->pcie_gen[i]) &&
			       (cur_lane == pcie_table->pcie_lane[i]);

		size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
				      gen < 6 ? gen_str[gen] : "",
				      (lane >= 1 && lane <= 7) ? lane_str[lane] : "",
				      pcie_table->lclk_freq[i],
				      starred ? "*" : "");
	}

	*offset = size;

	return 0;
}
1397
/**
 * smu_cmn_dpm_pcie_gen_idx - Map a PCIe generation number to a table index.
 * @gen: PCIe generation (1-5)
 *
 * Return: zero-based index for generations 1-5, -1 for anything else.
 */
int smu_cmn_dpm_pcie_gen_idx(int gen)
{
	if (gen >= 1 && gen <= 5)
		return gen - 1;

	return -1;
}
1413
/**
 * smu_cmn_dpm_pcie_width_idx - Map a PCIe lane width to its encoded index.
 * @width: number of PCIe lanes (1, 2, 4, 8, 12, 16 or 32)
 *
 * Return: 1-based width code for supported widths, -1 for anything else.
 */
int smu_cmn_dpm_pcie_width_idx(int width)
{
	static const int widths[] = { 1, 2, 4, 8, 12, 16, 32 };
	int i;

	for (i = 0; i < (int)(sizeof(widths) / sizeof(widths[0])); i++) {
		if (widths[i] == width)
			return i + 1;
	}

	return -1;
}
1447